2 * Copyright 2015 Advanced Micro Devices, Inc.
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
26 /* The caprices of the preprocessor require that this be declared right here */
27 #define CREATE_TRACE_POINTS
29 #include "dm_services_types.h"
31 #include "dc/inc/core_types.h"
32 #include "dal_asic_id.h"
33 #include "dmub/dmub_srv.h"
34 #include "dc/inc/hw/dmcu.h"
35 #include "dc/inc/hw/abm.h"
36 #include "dc/dc_dmub_srv.h"
37 #include "dc/dc_edid_parser.h"
38 #include "amdgpu_dm_trace.h"
42 #include "amdgpu_display.h"
43 #include "amdgpu_ucode.h"
45 #include "amdgpu_dm.h"
46 #ifdef CONFIG_DRM_AMD_DC_HDCP
47 #include "amdgpu_dm_hdcp.h"
48 #include <drm/drm_hdcp.h>
50 #include "amdgpu_pm.h"
52 #include "amd_shared.h"
53 #include "amdgpu_dm_irq.h"
54 #include "dm_helpers.h"
55 #include "amdgpu_dm_mst_types.h"
56 #if defined(CONFIG_DEBUG_FS)
57 #include "amdgpu_dm_debugfs.h"
60 #include "ivsrcid/ivsrcid_vislands30.h"
62 #include <linux/module.h>
63 #include <linux/moduleparam.h>
64 #include <linux/types.h>
65 #include <linux/pm_runtime.h>
66 #include <linux/pci.h>
67 #include <linux/firmware.h>
68 #include <linux/component.h>
70 #include <drm/drm_atomic.h>
71 #include <drm/drm_atomic_uapi.h>
72 #include <drm/drm_atomic_helper.h>
73 #include <drm/drm_dp_mst_helper.h>
74 #include <drm/drm_fb_helper.h>
75 #include <drm/drm_fourcc.h>
76 #include <drm/drm_edid.h>
77 #include <drm/drm_vblank.h>
78 #include <drm/drm_audio_component.h>
80 #if defined(CONFIG_DRM_AMD_DC_DCN)
81 #include "ivsrcid/dcn/irqsrcs_dcn_1_0.h"
83 #include "dcn/dcn_1_0_offset.h"
84 #include "dcn/dcn_1_0_sh_mask.h"
85 #include "soc15_hw_ip.h"
86 #include "vega10_ip_offset.h"
88 #include "soc15_common.h"
91 #include "modules/inc/mod_freesync.h"
92 #include "modules/power/power_helpers.h"
93 #include "modules/inc/mod_info_packet.h"
95 #define FIRMWARE_RENOIR_DMUB "amdgpu/renoir_dmcub.bin"
96 MODULE_FIRMWARE(FIRMWARE_RENOIR_DMUB);
97 #define FIRMWARE_SIENNA_CICHLID_DMUB "amdgpu/sienna_cichlid_dmcub.bin"
98 MODULE_FIRMWARE(FIRMWARE_SIENNA_CICHLID_DMUB);
99 #define FIRMWARE_NAVY_FLOUNDER_DMUB "amdgpu/navy_flounder_dmcub.bin"
100 MODULE_FIRMWARE(FIRMWARE_NAVY_FLOUNDER_DMUB);
101 #define FIRMWARE_GREEN_SARDINE_DMUB "amdgpu/green_sardine_dmcub.bin"
102 MODULE_FIRMWARE(FIRMWARE_GREEN_SARDINE_DMUB);
103 #define FIRMWARE_VANGOGH_DMUB "amdgpu/vangogh_dmcub.bin"
104 MODULE_FIRMWARE(FIRMWARE_VANGOGH_DMUB);
105 #define FIRMWARE_DIMGREY_CAVEFISH_DMUB "amdgpu/dimgrey_cavefish_dmcub.bin"
106 MODULE_FIRMWARE(FIRMWARE_DIMGREY_CAVEFISH_DMUB);
108 #define FIRMWARE_RAVEN_DMCU "amdgpu/raven_dmcu.bin"
109 MODULE_FIRMWARE(FIRMWARE_RAVEN_DMCU);
111 #define FIRMWARE_NAVI12_DMCU "amdgpu/navi12_dmcu.bin"
112 MODULE_FIRMWARE(FIRMWARE_NAVI12_DMCU);
114 /* Number of bytes in PSP header for firmware. */
115 #define PSP_HEADER_BYTES 0x100
117 /* Number of bytes in PSP footer for firmware. */
118 #define PSP_FOOTER_BYTES 0x100
123 * The AMDgpu display manager, **amdgpu_dm** (or even simpler,
124 * **dm**) sits between DRM and DC. It acts as a liaison, converting DRM
125 * requests into DC requests, and DC responses into DRM responses.
127 * The root control structure is &struct amdgpu_display_manager.
130 /* basic init/fini API */
131 static int amdgpu_dm_init(struct amdgpu_device *adev);
132 static void amdgpu_dm_fini(struct amdgpu_device *adev);
133 static bool is_freesync_video_mode(const struct drm_display_mode *mode, struct amdgpu_dm_connector *aconnector);
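/* Map the dongle type from the link's DPCD caps to the DRM subconnector
 * type that is exposed to userspace through the DP subconnector property. */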
135 static enum drm_mode_subconnector get_subconnector_type(struct dc_link *link)
137 switch (link->dpcd_caps.dongle_type) {
138 case DISPLAY_DONGLE_NONE:
139 return DRM_MODE_SUBCONNECTOR_Native;
140 case DISPLAY_DONGLE_DP_VGA_CONVERTER:
141 return DRM_MODE_SUBCONNECTOR_VGA;
142 case DISPLAY_DONGLE_DP_DVI_CONVERTER:
143 case DISPLAY_DONGLE_DP_DVI_DONGLE:
144 return DRM_MODE_SUBCONNECTOR_DVID;
145 case DISPLAY_DONGLE_DP_HDMI_CONVERTER:
146 case DISPLAY_DONGLE_DP_HDMI_DONGLE:
147 return DRM_MODE_SUBCONNECTOR_HDMIA;
148 case DISPLAY_DONGLE_DP_HDMI_MISMATCHED_DONGLE:
150 return DRM_MODE_SUBCONNECTOR_Unknown;
154 static void update_subconnector_property(struct amdgpu_dm_connector *aconnector)
156 struct dc_link *link = aconnector->dc_link;
157 struct drm_connector *connector = &aconnector->base;
158 enum drm_mode_subconnector subconnector = DRM_MODE_SUBCONNECTOR_Unknown;
160 if (connector->connector_type != DRM_MODE_CONNECTOR_DisplayPort)
163 if (aconnector->dc_sink)
164 subconnector = get_subconnector_type(link);
166 drm_object_property_set_value(&connector->base,
167 connector->dev->mode_config.dp_subconnector_property,
172 * initializes drm_device display related structures, based on the information
173 * provided by DAL. The DRM structures are: drm_crtc, drm_connector,
174 * drm_encoder, drm_mode_config
176 * Returns 0 on success
178 static int amdgpu_dm_initialize_drm_device(struct amdgpu_device *adev);
179 /* removes and deallocates the DRM structures created by the above function */
180 static void amdgpu_dm_destroy_drm_device(struct amdgpu_display_manager *dm);
182 static int amdgpu_dm_plane_init(struct amdgpu_display_manager *dm,
183 struct drm_plane *plane,
184 unsigned long possible_crtcs,
185 const struct dc_plane_cap *plane_cap);
186 static int amdgpu_dm_crtc_init(struct amdgpu_display_manager *dm,
187 struct drm_plane *plane,
188 uint32_t link_index);
189 static int amdgpu_dm_connector_init(struct amdgpu_display_manager *dm,
190 struct amdgpu_dm_connector *amdgpu_dm_connector,
192 struct amdgpu_encoder *amdgpu_encoder);
193 static int amdgpu_dm_encoder_init(struct drm_device *dev,
194 struct amdgpu_encoder *aencoder,
195 uint32_t link_index);
197 static int amdgpu_dm_connector_get_modes(struct drm_connector *connector);
199 static void amdgpu_dm_atomic_commit_tail(struct drm_atomic_state *state);
201 static int amdgpu_dm_atomic_check(struct drm_device *dev,
202 struct drm_atomic_state *state);
204 static void handle_cursor_update(struct drm_plane *plane,
205 struct drm_plane_state *old_plane_state);
207 static void amdgpu_dm_set_psr_caps(struct dc_link *link);
208 static bool amdgpu_dm_psr_enable(struct dc_stream_state *stream);
209 static bool amdgpu_dm_link_setup_psr(struct dc_stream_state *stream);
210 static bool amdgpu_dm_psr_disable(struct dc_stream_state *stream);
211 static bool amdgpu_dm_psr_disable_all(struct amdgpu_display_manager *dm);
213 static const struct drm_format_info *
214 amd_get_format_info(const struct drm_mode_fb_cmd2 *cmd);
217 is_timing_unchanged_for_freesync(struct drm_crtc_state *old_crtc_state,
218 struct drm_crtc_state *new_crtc_state);
220 * dm_vblank_get_counter
223 * Get counter for number of vertical blanks
226 * struct amdgpu_device *adev - [in] desired amdgpu device
227 * int crtc - [in] which CRTC to get the counter from
230 * Counter for vertical blanks
232 static u32 dm_vblank_get_counter(struct amdgpu_device *adev, int crtc)
234 if (crtc >= adev->mode_info.num_crtc)
237 struct amdgpu_crtc *acrtc = adev->mode_info.crtcs[crtc];
239 if (acrtc->dm_irq_params.stream == NULL) {
240 DRM_ERROR("dc_stream_state is NULL for crtc '%d'!\n",
245 return dc_stream_get_vblank_counter(acrtc->dm_irq_params.stream);
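/* Report the current scanout position for a CRTC: *vbl receives the vblank
 * start/end lines and *position the current vertical/horizontal position,
 * packed in the register-style format expected by the base driver. */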
249 static int dm_crtc_get_scanoutpos(struct amdgpu_device *adev, int crtc,
250 u32 *vbl, u32 *position)
252 uint32_t v_blank_start, v_blank_end, h_position, v_position;
254 if ((crtc < 0) || (crtc >= adev->mode_info.num_crtc))
257 struct amdgpu_crtc *acrtc = adev->mode_info.crtcs[crtc];
259 if (acrtc->dm_irq_params.stream == NULL) {
260 DRM_ERROR("dc_stream_state is NULL for crtc '%d'!\n",
266 * TODO rework base driver to use values directly.
267 * for now parse it back into reg-format
269 dc_stream_get_scanoutpos(acrtc->dm_irq_params.stream,
275 *position = v_position | (h_position << 16);
276 *vbl = v_blank_start | (v_blank_end << 16);
282 static bool dm_is_idle(void *handle)
288 static int dm_wait_for_idle(void *handle)
294 static bool dm_check_soft_reset(void *handle)
299 static int dm_soft_reset(void *handle)
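/* Find the amdgpu_crtc whose OTG instance matches otg_inst; if no instance
 * is specified (-1), fall back to the first CRTC. */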
305 static struct amdgpu_crtc *
306 get_crtc_by_otg_inst(struct amdgpu_device *adev,
309 struct drm_device *dev = adev_to_drm(adev);
310 struct drm_crtc *crtc;
311 struct amdgpu_crtc *amdgpu_crtc;
313 if (otg_inst == -1) {
315 return adev->mode_info.crtcs[0];
318 list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
319 amdgpu_crtc = to_amdgpu_crtc(crtc);
321 if (amdgpu_crtc->otg_inst == otg_inst)
328 static inline bool amdgpu_dm_vrr_active_irq(struct amdgpu_crtc *acrtc)
330 return acrtc->dm_irq_params.freesync_config.state ==
331 VRR_STATE_ACTIVE_VARIABLE ||
332 acrtc->dm_irq_params.freesync_config.state ==
333 VRR_STATE_ACTIVE_FIXED;
336 static inline bool amdgpu_dm_vrr_active(struct dm_crtc_state *dm_state)
338 return dm_state->freesync_config.state == VRR_STATE_ACTIVE_VARIABLE ||
339 dm_state->freesync_config.state == VRR_STATE_ACTIVE_FIXED;
342 static inline bool is_dc_timing_adjust_needed(struct dm_crtc_state *old_state,
343 struct dm_crtc_state *new_state)
345 if (new_state->freesync_config.state == VRR_STATE_ACTIVE_FIXED)
347 else if (amdgpu_dm_vrr_active(old_state) != amdgpu_dm_vrr_active(new_state))
354 * dm_pflip_high_irq() - Handle pageflip interrupt
355 * @interrupt_params: used for determining the CRTC instance
357 * Handles the pageflip interrupt by notifying all interested parties
358 * that the pageflip has been completed.
360 static void dm_pflip_high_irq(void *interrupt_params)
362 struct amdgpu_crtc *amdgpu_crtc;
363 struct common_irq_params *irq_params = interrupt_params;
364 struct amdgpu_device *adev = irq_params->adev;
366 struct drm_pending_vblank_event *e;
367 uint32_t vpos, hpos, v_blank_start, v_blank_end;
370 amdgpu_crtc = get_crtc_by_otg_inst(adev, irq_params->irq_src - IRQ_TYPE_PFLIP);
372 /* IRQ could occur when in initial stage */
373 /* TODO work and BO cleanup */
374 if (amdgpu_crtc == NULL) {
375 DC_LOG_PFLIP("CRTC is null, returning.\n");
379 spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);
381 if (amdgpu_crtc->pflip_status != AMDGPU_FLIP_SUBMITTED) {
382 DC_LOG_PFLIP("amdgpu_crtc->pflip_status = %d != AMDGPU_FLIP_SUBMITTED(%d) on crtc:%d[%p]\n",
383 amdgpu_crtc->pflip_status,
384 AMDGPU_FLIP_SUBMITTED,
385 amdgpu_crtc->crtc_id,
387 spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
391 /* page flip completed. */
392 e = amdgpu_crtc->event;
393 amdgpu_crtc->event = NULL;
398 vrr_active = amdgpu_dm_vrr_active_irq(amdgpu_crtc);
400 /* Fixed refresh rate, or VRR scanout position outside front-porch? */
402 !dc_stream_get_scanoutpos(amdgpu_crtc->dm_irq_params.stream, &v_blank_start,
403 &v_blank_end, &hpos, &vpos) ||
404 (vpos < v_blank_start)) {
405 /* Update to correct count and vblank timestamp if racing with
406 * vblank irq. This also updates to the correct vblank timestamp
407 * even in VRR mode, as scanout is past the front-porch atm.
409 drm_crtc_accurate_vblank_count(&amdgpu_crtc->base);
411 /* Wake up userspace by sending the pageflip event with proper
412 * count and timestamp of vblank of flip completion.
415 drm_crtc_send_vblank_event(&amdgpu_crtc->base, e);
417 /* Event sent, so done with vblank for this flip */
418 drm_crtc_vblank_put(&amdgpu_crtc->base);
421 /* VRR active and inside front-porch: vblank count and
422 * timestamp for pageflip event will only be up to date after
423 * drm_crtc_handle_vblank() has been executed from late vblank
424 * irq handler after start of back-porch (vline 0). We queue the
425 * pageflip event for send-out by drm_crtc_handle_vblank() with
426 * updated timestamp and count, once it runs after us.
428 * We need to open-code this instead of using the helper
429 * drm_crtc_arm_vblank_event(), as that helper would
430 * call drm_crtc_accurate_vblank_count(), which we must
431 * not call in VRR mode while we are in front-porch!
434 /* sequence will be replaced by real count during send-out. */
435 e->sequence = drm_crtc_vblank_count(&amdgpu_crtc->base);
436 e->pipe = amdgpu_crtc->crtc_id;
438 list_add_tail(&e->base.link, &adev_to_drm(adev)->vblank_event_list);
442 /* Keep track of vblank of this flip for flip throttling. We use the
443 * cooked hw counter, as that one is incremented at start of this vblank
444 * of pageflip completion, so last_flip_vblank is the forbidden count
445 * for queueing new pageflips if vsync + VRR is enabled.
447 amdgpu_crtc->dm_irq_params.last_flip_vblank =
448 amdgpu_get_vblank_counter_kms(&amdgpu_crtc->base);
450 amdgpu_crtc->pflip_status = AMDGPU_FLIP_NONE;
451 spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
453 DC_LOG_PFLIP("crtc:%d[%p], pflip_stat:AMDGPU_FLIP_NONE, vrr[%d]-fp %d\n",
454 amdgpu_crtc->crtc_id, amdgpu_crtc,
455 vrr_active, (int) !e);
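/**
 * dm_vupdate_high_irq() - Handles VUPDATE interrupt
 * @interrupt_params: used for determining the CRTC instance
 *
 * Emits a refresh-rate tracepoint and, in VRR mode, performs the core vblank
 * handling after the end of the front-porch (delivering any pageflip events
 * queued there); on pre-DCE12 ASICs it also runs the freesync BTR processing.
 */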
458 static void dm_vupdate_high_irq(void *interrupt_params)
460 struct common_irq_params *irq_params = interrupt_params;
461 struct amdgpu_device *adev = irq_params->adev;
462 struct amdgpu_crtc *acrtc;
463 struct drm_device *drm_dev;
464 struct drm_vblank_crtc *vblank;
465 ktime_t frame_duration_ns, previous_timestamp;
469 acrtc = get_crtc_by_otg_inst(adev, irq_params->irq_src - IRQ_TYPE_VUPDATE);
472 vrr_active = amdgpu_dm_vrr_active_irq(acrtc);
473 drm_dev = acrtc->base.dev;
474 vblank = &drm_dev->vblank[acrtc->base.index];
475 previous_timestamp = atomic64_read(&irq_params->previous_timestamp);
476 frame_duration_ns = vblank->time - previous_timestamp;
478 if (frame_duration_ns > 0) {
479 trace_amdgpu_refresh_rate_track(acrtc->base.index,
481 ktime_divns(NSEC_PER_SEC, frame_duration_ns));
482 atomic64_set(&irq_params->previous_timestamp, vblank->time);
485 DC_LOG_VBLANK("crtc:%d, vupdate-vrr:%d\n",
489 /* Core vblank handling is done here after end of front-porch in
490 * vrr mode, as vblank timestamping will only give valid results
491 * after the front-porch has ended. This will also deliver
492 * page-flip completion events that have been queued to us
493 * if a pageflip happened inside front-porch.
496 drm_crtc_handle_vblank(&acrtc->base);
498 /* BTR processing for pre-DCE12 ASICs */
499 if (acrtc->dm_irq_params.stream &&
500 adev->family < AMDGPU_FAMILY_AI) {
501 spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);
502 mod_freesync_handle_v_update(
503 adev->dm.freesync_module,
504 acrtc->dm_irq_params.stream,
505 &acrtc->dm_irq_params.vrr_params);
507 dc_stream_adjust_vmin_vmax(
509 acrtc->dm_irq_params.stream,
510 &acrtc->dm_irq_params.vrr_params.adjust);
511 spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
518 * dm_crtc_high_irq() - Handles CRTC interrupt
519 * @interrupt_params: used for determining the CRTC instance
521 * Handles the CRTC/VSYNC interrupt by notifying DRM's VBLANK
524 static void dm_crtc_high_irq(void *interrupt_params)
526 struct common_irq_params *irq_params = interrupt_params;
527 struct amdgpu_device *adev = irq_params->adev;
528 struct amdgpu_crtc *acrtc;
532 acrtc = get_crtc_by_otg_inst(adev, irq_params->irq_src - IRQ_TYPE_VBLANK);
536 vrr_active = amdgpu_dm_vrr_active_irq(acrtc);
538 DC_LOG_VBLANK("crtc:%d, vupdate-vrr:%d, planes:%d\n", acrtc->crtc_id,
539 vrr_active, acrtc->dm_irq_params.active_planes);
542 * Core vblank handling at start of front-porch is only possible
543 * in non-vrr mode, as that is the only case in which vblank
544 * timestamping gives valid results while inside the front-porch.
545 * Otherwise defer it to dm_vupdate_high_irq after end of front-porch.
548 drm_crtc_handle_vblank(&acrtc->base);
551 * The following must happen at start of vblank, for crc
552 * computation and below-the-range btr support in vrr mode.
554 amdgpu_dm_crtc_handle_crc_irq(&acrtc->base);
556 /* BTR updates need to happen before VUPDATE on Vega and above. */
557 if (adev->family < AMDGPU_FAMILY_AI)
560 spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);
562 if (acrtc->dm_irq_params.stream &&
563 acrtc->dm_irq_params.vrr_params.supported &&
564 acrtc->dm_irq_params.freesync_config.state ==
565 VRR_STATE_ACTIVE_VARIABLE) {
566 mod_freesync_handle_v_update(adev->dm.freesync_module,
567 acrtc->dm_irq_params.stream,
568 &acrtc->dm_irq_params.vrr_params);
570 dc_stream_adjust_vmin_vmax(adev->dm.dc, acrtc->dm_irq_params.stream,
571 &acrtc->dm_irq_params.vrr_params.adjust);
575 * If there aren't any active_planes then DCH HUBP may be clock-gated.
576 * In that case, pageflip completion interrupts won't fire and pageflip
577 * completion events won't get delivered. Prevent this by sending
578 * pending pageflip events from here if a flip is still pending.
580 * If any planes are enabled, use dm_pflip_high_irq() instead, to
581 * avoid race conditions between flip programming and completion,
582 * which could cause too early flip completion events.
584 if (adev->family >= AMDGPU_FAMILY_RV &&
585 acrtc->pflip_status == AMDGPU_FLIP_SUBMITTED &&
586 acrtc->dm_irq_params.active_planes == 0) {
588 drm_crtc_send_vblank_event(&acrtc->base, acrtc->event);
590 drm_crtc_vblank_put(&acrtc->base);
592 acrtc->pflip_status = AMDGPU_FLIP_NONE;
595 spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
598 #if defined(CONFIG_DRM_AMD_DC_DCN)
600 * dm_dcn_vertical_interrupt0_high_irq() - Handles OTG Vertical interrupt0 for
601 * DCN generation ASICs
602 * @interrupt_params: interrupt parameters
604 * Used to set crc window/read out crc value at vertical line 0 position
606 #if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
607 static void dm_dcn_vertical_interrupt0_high_irq(void *interrupt_params)
609 struct common_irq_params *irq_params = interrupt_params;
610 struct amdgpu_device *adev = irq_params->adev;
611 struct amdgpu_crtc *acrtc;
613 acrtc = get_crtc_by_otg_inst(adev, irq_params->irq_src - IRQ_TYPE_VLINE0);
618 amdgpu_dm_crtc_handle_crc_window_irq(&acrtc->base);
623 static int dm_set_clockgating_state(void *handle,
624 enum amd_clockgating_state state)
629 static int dm_set_powergating_state(void *handle,
630 enum amd_powergating_state state)
635 /* Prototypes of private functions */
636 static int dm_early_init(void *handle);
638 /* Allocate memory for FBC compressed data */
639 static void amdgpu_dm_fbc_init(struct drm_connector *connector)
641 struct drm_device *dev = connector->dev;
642 struct amdgpu_device *adev = drm_to_adev(dev);
643 struct dm_compressor_info *compressor = &adev->dm.compressor;
644 struct amdgpu_dm_connector *aconn = to_amdgpu_dm_connector(connector);
645 struct drm_display_mode *mode;
646 unsigned long max_size = 0;
648 if (adev->dm.dc->fbc_compressor == NULL)
651 if (aconn->dc_link->connector_signal != SIGNAL_TYPE_EDP)
654 if (compressor->bo_ptr)
658 list_for_each_entry(mode, &connector->modes, head) {
659 if (max_size < mode->htotal * mode->vtotal)
660 max_size = mode->htotal * mode->vtotal;
664 int r = amdgpu_bo_create_kernel(adev, max_size * 4, PAGE_SIZE,
665 AMDGPU_GEM_DOMAIN_GTT, &compressor->bo_ptr,
666 &compressor->gpu_addr, &compressor->cpu_addr);
669 DRM_ERROR("DM: Failed to initialize FBC\n");
671 adev->dm.dc->ctx->fbc_gpu_addr = compressor->gpu_addr;
672 DRM_INFO("DM: FBC alloc %lu\n", max_size*4);
679 static int amdgpu_dm_audio_component_get_eld(struct device *kdev, int port,
680 int pipe, bool *enabled,
681 unsigned char *buf, int max_bytes)
683 struct drm_device *dev = dev_get_drvdata(kdev);
684 struct amdgpu_device *adev = drm_to_adev(dev);
685 struct drm_connector *connector;
686 struct drm_connector_list_iter conn_iter;
687 struct amdgpu_dm_connector *aconnector;
692 mutex_lock(&adev->dm.audio_lock);
694 drm_connector_list_iter_begin(dev, &conn_iter);
695 drm_for_each_connector_iter(connector, &conn_iter) {
696 aconnector = to_amdgpu_dm_connector(connector);
697 if (aconnector->audio_inst != port)
701 ret = drm_eld_size(connector->eld);
702 memcpy(buf, connector->eld, min(max_bytes, ret));
706 drm_connector_list_iter_end(&conn_iter);
708 mutex_unlock(&adev->dm.audio_lock);
710 DRM_DEBUG_KMS("Get ELD : idx=%d ret=%d en=%d\n", port, ret, *enabled);
715 static const struct drm_audio_component_ops amdgpu_dm_audio_component_ops = {
716 .get_eld = amdgpu_dm_audio_component_get_eld,
719 static int amdgpu_dm_audio_component_bind(struct device *kdev,
720 struct device *hda_kdev, void *data)
722 struct drm_device *dev = dev_get_drvdata(kdev);
723 struct amdgpu_device *adev = drm_to_adev(dev);
724 struct drm_audio_component *acomp = data;
726 acomp->ops = &amdgpu_dm_audio_component_ops;
728 adev->dm.audio_component = acomp;
733 static void amdgpu_dm_audio_component_unbind(struct device *kdev,
734 struct device *hda_kdev, void *data)
736 struct drm_device *dev = dev_get_drvdata(kdev);
737 struct amdgpu_device *adev = drm_to_adev(dev);
738 struct drm_audio_component *acomp = data;
742 adev->dm.audio_component = NULL;
745 static const struct component_ops amdgpu_dm_audio_component_bind_ops = {
746 .bind = amdgpu_dm_audio_component_bind,
747 .unbind = amdgpu_dm_audio_component_unbind,
750 static int amdgpu_dm_audio_init(struct amdgpu_device *adev)
757 adev->mode_info.audio.enabled = true;
759 adev->mode_info.audio.num_pins = adev->dm.dc->res_pool->audio_count;
761 for (i = 0; i < adev->mode_info.audio.num_pins; i++) {
762 adev->mode_info.audio.pin[i].channels = -1;
763 adev->mode_info.audio.pin[i].rate = -1;
764 adev->mode_info.audio.pin[i].bits_per_sample = -1;
765 adev->mode_info.audio.pin[i].status_bits = 0;
766 adev->mode_info.audio.pin[i].category_code = 0;
767 adev->mode_info.audio.pin[i].connected = false;
768 adev->mode_info.audio.pin[i].id =
769 adev->dm.dc->res_pool->audios[i]->inst;
770 adev->mode_info.audio.pin[i].offset = 0;
773 ret = component_add(adev->dev, &amdgpu_dm_audio_component_bind_ops);
777 adev->dm.audio_registered = true;
782 static void amdgpu_dm_audio_fini(struct amdgpu_device *adev)
787 if (!adev->mode_info.audio.enabled)
790 if (adev->dm.audio_registered) {
791 component_del(adev->dev, &amdgpu_dm_audio_component_bind_ops);
792 adev->dm.audio_registered = false;
795 /* TODO: Disable audio? */
797 adev->mode_info.audio.enabled = false;
800 static void amdgpu_dm_audio_eld_notify(struct amdgpu_device *adev, int pin)
802 struct drm_audio_component *acomp = adev->dm.audio_component;
804 if (acomp && acomp->audio_ops && acomp->audio_ops->pin_eld_notify) {
805 DRM_DEBUG_KMS("Notify ELD: %d\n", pin);
807 acomp->audio_ops->pin_eld_notify(acomp->audio_ops->audio_ptr,
812 static int dm_dmub_hw_init(struct amdgpu_device *adev)
814 const struct dmcub_firmware_header_v1_0 *hdr;
815 struct dmub_srv *dmub_srv = adev->dm.dmub_srv;
816 struct dmub_srv_fb_info *fb_info = adev->dm.dmub_fb_info;
817 const struct firmware *dmub_fw = adev->dm.dmub_fw;
818 struct dmcu *dmcu = adev->dm.dc->res_pool->dmcu;
819 struct abm *abm = adev->dm.dc->res_pool->abm;
820 struct dmub_srv_hw_params hw_params;
821 enum dmub_status status;
822 const unsigned char *fw_inst_const, *fw_bss_data;
823 uint32_t i, fw_inst_const_size, fw_bss_data_size;
827 /* DMUB isn't supported on the ASIC. */
831 DRM_ERROR("No framebuffer info for DMUB service.\n");
836 /* Firmware required for DMUB support. */
837 DRM_ERROR("No firmware provided for DMUB.\n");
841 status = dmub_srv_has_hw_support(dmub_srv, &has_hw_support);
842 if (status != DMUB_STATUS_OK) {
843 DRM_ERROR("Error checking HW support for DMUB: %d\n", status);
847 if (!has_hw_support) {
848 DRM_INFO("DMUB unsupported on ASIC\n");
852 hdr = (const struct dmcub_firmware_header_v1_0 *)dmub_fw->data;
854 fw_inst_const = dmub_fw->data +
855 le32_to_cpu(hdr->header.ucode_array_offset_bytes) +
858 fw_bss_data = dmub_fw->data +
859 le32_to_cpu(hdr->header.ucode_array_offset_bytes) +
860 le32_to_cpu(hdr->inst_const_bytes);
862 /* Copy firmware and bios info into FB memory. */
863 fw_inst_const_size = le32_to_cpu(hdr->inst_const_bytes) -
864 PSP_HEADER_BYTES - PSP_FOOTER_BYTES;
866 fw_bss_data_size = le32_to_cpu(hdr->bss_data_bytes);
868 /* If adev->firmware.load_type == AMDGPU_FW_LOAD_PSP,
869 * amdgpu_ucode_init_single_fw will load the dmub firmware's
870 * fw_inst_const part to cw0; otherwise, the firmware back door load
871 * will be done by dm_dmub_hw_init
873 if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP) {
874 memcpy(fb_info->fb[DMUB_WINDOW_0_INST_CONST].cpu_addr, fw_inst_const,
878 if (fw_bss_data_size)
879 memcpy(fb_info->fb[DMUB_WINDOW_2_BSS_DATA].cpu_addr,
880 fw_bss_data, fw_bss_data_size);
882 /* Copy firmware bios info into FB memory. */
883 memcpy(fb_info->fb[DMUB_WINDOW_3_VBIOS].cpu_addr, adev->bios,
886 /* Reset regions that need to be reset. */
887 memset(fb_info->fb[DMUB_WINDOW_4_MAILBOX].cpu_addr, 0,
888 fb_info->fb[DMUB_WINDOW_4_MAILBOX].size);
890 memset(fb_info->fb[DMUB_WINDOW_5_TRACEBUFF].cpu_addr, 0,
891 fb_info->fb[DMUB_WINDOW_5_TRACEBUFF].size);
893 memset(fb_info->fb[DMUB_WINDOW_6_FW_STATE].cpu_addr, 0,
894 fb_info->fb[DMUB_WINDOW_6_FW_STATE].size);
896 /* Initialize hardware. */
897 memset(&hw_params, 0, sizeof(hw_params));
898 hw_params.fb_base = adev->gmc.fb_start;
899 hw_params.fb_offset = adev->gmc.aper_base;
901 /* backdoor load firmware and trigger dmub running */
902 if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP)
903 hw_params.load_inst_const = true;
906 hw_params.psp_version = dmcu->psp_version;
908 for (i = 0; i < fb_info->num_fb; ++i)
909 hw_params.fb[i] = &fb_info->fb[i];
911 status = dmub_srv_hw_init(dmub_srv, &hw_params);
912 if (status != DMUB_STATUS_OK) {
913 DRM_ERROR("Error initializing DMUB HW: %d\n", status);
917 /* Wait for firmware load to finish. */
918 status = dmub_srv_wait_for_auto_load(dmub_srv, 100000);
919 if (status != DMUB_STATUS_OK)
920 DRM_WARN("Wait for DMUB auto-load failed: %d\n", status);
922 /* Init DMCU and ABM if available. */
924 dmcu->funcs->dmcu_init(dmcu);
925 abm->dmcu_is_running = dmcu->funcs->is_dmcu_initialized(dmcu);
928 adev->dm.dc->ctx->dmub_srv = dc_dmub_srv_create(adev->dm.dc, dmub_srv);
929 if (!adev->dm.dc->ctx->dmub_srv) {
930 DRM_ERROR("Couldn't allocate DC DMUB server!\n");
934 DRM_INFO("DMUB hardware initialized: version=0x%08X\n",
935 adev->dm.dmcub_fw_version);
940 #if defined(CONFIG_DRM_AMD_DC_DCN)
941 #define DMUB_TRACE_MAX_READ 64
942 static void dm_dmub_trace_high_irq(void *interrupt_params)
944 struct common_irq_params *irq_params = interrupt_params;
945 struct amdgpu_device *adev = irq_params->adev;
946 struct amdgpu_display_manager *dm = &adev->dm;
947 struct dmcub_trace_buf_entry entry = { 0 };
951 if (dc_dmub_srv_get_dmub_outbox0_msg(dm->dc, &entry)) {
952 trace_amdgpu_dmub_trace_high_irq(entry.trace_code, entry.tick_count,
953 entry.param0, entry.param1);
955 DRM_DEBUG_DRIVER("trace_code:%u, tick_count:%u, param0:%u, param1:%u\n",
956 entry.trace_code, entry.tick_count, entry.param0, entry.param1);
962 } while (count <= DMUB_TRACE_MAX_READ);
964 ASSERT(count <= DMUB_TRACE_MAX_READ);
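/* Translate the GMC view of the framebuffer, AGP and GART apertures into the
 * physical address space configuration that DC programs for the display hub. */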
967 static void mmhub_read_system_context(struct amdgpu_device *adev, struct dc_phy_addr_space_config *pa_config)
970 uint32_t logical_addr_low;
971 uint32_t logical_addr_high;
972 uint32_t agp_base, agp_bot, agp_top;
973 PHYSICAL_ADDRESS_LOC page_table_start, page_table_end, page_table_base;
975 logical_addr_low = min(adev->gmc.fb_start, adev->gmc.agp_start) >> 18;
976 pt_base = amdgpu_gmc_pd_addr(adev->gart.bo);
978 if (adev->apu_flags & AMD_APU_IS_RAVEN2)
980 * Raven2 has a HW issue that makes it unable to use vram which
981 * lies above MC_VM_SYSTEM_APERTURE_HIGH_ADDR. As a workaround,
982 * increase the system aperture high address (add 1)
983 * to get rid of the VM fault and hardware hang.
985 logical_addr_high = max((adev->gmc.fb_end >> 18) + 0x1, adev->gmc.agp_end >> 18);
987 logical_addr_high = max(adev->gmc.fb_end, adev->gmc.agp_end) >> 18;
990 agp_bot = adev->gmc.agp_start >> 24;
991 agp_top = adev->gmc.agp_end >> 24;
994 page_table_start.high_part = (u32)(adev->gmc.gart_start >> 44) & 0xF;
995 page_table_start.low_part = (u32)(adev->gmc.gart_start >> 12);
996 page_table_end.high_part = (u32)(adev->gmc.gart_end >> 44) & 0xF;
997 page_table_end.low_part = (u32)(adev->gmc.gart_end >> 12);
998 page_table_base.high_part = upper_32_bits(pt_base) & 0xF;
999 page_table_base.low_part = lower_32_bits(pt_base);
1001 pa_config->system_aperture.start_addr = (uint64_t)logical_addr_low << 18;
1002 pa_config->system_aperture.end_addr = (uint64_t)logical_addr_high << 18;
1004 pa_config->system_aperture.agp_base = (uint64_t)agp_base << 24;
1005 pa_config->system_aperture.agp_bot = (uint64_t)agp_bot << 24;
1006 pa_config->system_aperture.agp_top = (uint64_t)agp_top << 24;
1008 pa_config->system_aperture.fb_base = adev->gmc.fb_start;
1009 pa_config->system_aperture.fb_offset = adev->gmc.aper_base;
1010 pa_config->system_aperture.fb_top = adev->gmc.fb_end;
1012 pa_config->gart_config.page_table_start_addr = page_table_start.quad_part << 12;
1013 pa_config->gart_config.page_table_end_addr = page_table_end.quad_part << 12;
1014 pa_config->gart_config.page_table_base_addr = page_table_base.quad_part;
1016 pa_config->is_hvm_enabled = 0;
1020 #if defined(CONFIG_DRM_AMD_DC_DCN)
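/* Deferred vblank work: track how many CRTCs have vblank IRQs enabled and
 * only allow DC idle (MALL) optimizations when that count drops to zero. */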
1021 static void event_mall_stutter(struct work_struct *work)
1024 struct vblank_workqueue *vblank_work = container_of(work, struct vblank_workqueue, mall_work);
1025 struct amdgpu_display_manager *dm = vblank_work->dm;
1027 mutex_lock(&dm->dc_lock);
1029 if (vblank_work->enable)
1030 dm->active_vblank_irq_count++;
1031 else if (dm->active_vblank_irq_count)
1032 dm->active_vblank_irq_count--;
1034 dc_allow_idle_optimizations(dm->dc, dm->active_vblank_irq_count == 0);
1036 DRM_DEBUG_KMS("Allow idle optimizations (MALL): %d\n", dm->active_vblank_irq_count == 0);
1038 mutex_unlock(&dm->dc_lock);
1041 static struct vblank_workqueue *vblank_create_workqueue(struct amdgpu_device *adev, struct dc *dc)
1044 int max_caps = dc->caps.max_links;
1045 struct vblank_workqueue *vblank_work;
1048 vblank_work = kcalloc(max_caps, sizeof(*vblank_work), GFP_KERNEL);
1049 if (ZERO_OR_NULL_PTR(vblank_work)) {
1054 for (i = 0; i < max_caps; i++)
1055 INIT_WORK(&vblank_work[i].mall_work, event_mall_stutter);
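/* Create the display manager: bring up DC and DMUB, create the freesync and
 * HDCP helper modules, then build the DRM modesetting objects on top of the
 * DC links. */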
1060 static int amdgpu_dm_init(struct amdgpu_device *adev)
1062 struct dc_init_data init_data;
1063 #ifdef CONFIG_DRM_AMD_DC_HDCP
1064 struct dc_callback_init init_params;
1068 adev->dm.ddev = adev_to_drm(adev);
1069 adev->dm.adev = adev;
1071 /* Zero all the fields */
1072 memset(&init_data, 0, sizeof(init_data));
1073 #ifdef CONFIG_DRM_AMD_DC_HDCP
1074 memset(&init_params, 0, sizeof(init_params));
1077 mutex_init(&adev->dm.dc_lock);
1078 mutex_init(&adev->dm.audio_lock);
1079 #if defined(CONFIG_DRM_AMD_DC_DCN)
1080 spin_lock_init(&adev->dm.vblank_lock);
1083 if (amdgpu_dm_irq_init(adev)) {
1084 DRM_ERROR("amdgpu: failed to initialize DM IRQ support.\n");
1088 init_data.asic_id.chip_family = adev->family;
1090 init_data.asic_id.pci_revision_id = adev->pdev->revision;
1091 init_data.asic_id.hw_internal_rev = adev->external_rev_id;
1093 init_data.asic_id.vram_width = adev->gmc.vram_width;
1094 /* TODO: initialize init_data.asic_id.vram_type here!!!! */
1095 init_data.asic_id.atombios_base_address =
1096 adev->mode_info.atom_context->bios;
1098 init_data.driver = adev;
1100 adev->dm.cgs_device = amdgpu_cgs_create_device(adev);
1102 if (!adev->dm.cgs_device) {
1103 DRM_ERROR("amdgpu: failed to create cgs device.\n");
1107 init_data.cgs_device = adev->dm.cgs_device;
1109 init_data.dce_environment = DCE_ENV_PRODUCTION_DRV;
1111 switch (adev->asic_type) {
1116 init_data.flags.gpu_vm_support = true;
1117 if (ASICREV_IS_GREEN_SARDINE(adev->external_rev_id))
1118 init_data.flags.disable_dmcu = true;
1120 #if defined(CONFIG_DRM_AMD_DC_DCN)
1122 init_data.flags.gpu_vm_support = true;
1129 if (amdgpu_dc_feature_mask & DC_FBC_MASK)
1130 init_data.flags.fbc_support = true;
1132 if (amdgpu_dc_feature_mask & DC_MULTI_MON_PP_MCLK_SWITCH_MASK)
1133 init_data.flags.multi_mon_pp_mclk_switch = true;
1135 if (amdgpu_dc_feature_mask & DC_DISABLE_FRACTIONAL_PWM_MASK)
1136 init_data.flags.disable_fractional_pwm = true;
1138 init_data.flags.power_down_display_on_boot = true;
1140 INIT_LIST_HEAD(&adev->dm.da_list);
1141 /* Display Core create. */
1142 adev->dm.dc = dc_create(&init_data);
1145 DRM_INFO("Display Core initialized with v%s!\n", DC_VER);
1147 DRM_INFO("Display Core failed to initialize with v%s!\n", DC_VER);
1151 if (amdgpu_dc_debug_mask & DC_DISABLE_PIPE_SPLIT) {
1152 adev->dm.dc->debug.force_single_disp_pipe_split = false;
1153 adev->dm.dc->debug.pipe_split_policy = MPC_SPLIT_AVOID;
1156 if (adev->asic_type != CHIP_CARRIZO && adev->asic_type != CHIP_STONEY)
1157 adev->dm.dc->debug.disable_stutter = amdgpu_pp_feature_mask & PP_STUTTER_MODE ? false : true;
1159 if (amdgpu_dc_debug_mask & DC_DISABLE_STUTTER)
1160 adev->dm.dc->debug.disable_stutter = true;
1162 if (amdgpu_dc_debug_mask & DC_DISABLE_DSC)
1163 adev->dm.dc->debug.disable_dsc = true;
1165 if (amdgpu_dc_debug_mask & DC_DISABLE_CLOCK_GATING)
1166 adev->dm.dc->debug.disable_clock_gate = true;
1168 r = dm_dmub_hw_init(adev);
1170 DRM_ERROR("DMUB interface failed to initialize: status=%d\n", r);
1174 dc_hardware_init(adev->dm.dc);
1176 #if defined(CONFIG_DRM_AMD_DC_DCN)
1177 if (adev->apu_flags) {
1178 struct dc_phy_addr_space_config pa_config;
1180 mmhub_read_system_context(adev, &pa_config);
1182 // Call the DC init_memory func
1183 dc_setup_system_context(adev->dm.dc, &pa_config);
1187 adev->dm.freesync_module = mod_freesync_create(adev->dm.dc);
1188 if (!adev->dm.freesync_module) {
1190 "amdgpu: failed to initialize freesync_module.\n");
1192 DRM_DEBUG_DRIVER("amdgpu: freesync_module init done %p.\n",
1193 adev->dm.freesync_module);
1195 amdgpu_dm_init_color_mod();
1197 #if defined(CONFIG_DRM_AMD_DC_DCN)
1198 if (adev->dm.dc->caps.max_links > 0) {
1199 adev->dm.vblank_workqueue = vblank_create_workqueue(adev, adev->dm.dc);
1201 if (!adev->dm.vblank_workqueue)
1202 DRM_ERROR("amdgpu: failed to initialize vblank_workqueue.\n");
1204 DRM_DEBUG_DRIVER("amdgpu: vblank_workqueue init done %p.\n", adev->dm.vblank_workqueue);
1208 #ifdef CONFIG_DRM_AMD_DC_HDCP
1209 if (adev->dm.dc->caps.max_links > 0 && adev->asic_type >= CHIP_RAVEN) {
1210 adev->dm.hdcp_workqueue = hdcp_create_workqueue(adev, &init_params.cp_psp, adev->dm.dc);
1212 if (!adev->dm.hdcp_workqueue)
1213 DRM_ERROR("amdgpu: failed to initialize hdcp_workqueue.\n");
1215 DRM_DEBUG_DRIVER("amdgpu: hdcp_workqueue init done %p.\n", adev->dm.hdcp_workqueue);
1217 dc_init_callbacks(adev->dm.dc, &init_params);
1220 #if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
1221 adev->dm.crc_rd_wrk = amdgpu_dm_crtc_secure_display_create_work();
1223 if (amdgpu_dm_initialize_drm_device(adev)) {
1225 "amdgpu: failed to initialize sw for display support.\n");
1229 /* create fake encoders for MST */
1230 dm_dp_create_fake_mst_encoders(adev);
1232 /* TODO: Add_display_info? */
1234 /* TODO use dynamic cursor width */
1235 adev_to_drm(adev)->mode_config.cursor_width = adev->dm.dc->caps.max_cursor_size;
1236 adev_to_drm(adev)->mode_config.cursor_height = adev->dm.dc->caps.max_cursor_size;
1238 if (drm_vblank_init(adev_to_drm(adev), adev->dm.display_indexes_num)) {
1240 "amdgpu: failed to initialize sw for display support.\n");
1245 DRM_DEBUG_DRIVER("KMS initialized.\n");
1249 amdgpu_dm_fini(adev);
1254 static void amdgpu_dm_fini(struct amdgpu_device *adev)
1258 for (i = 0; i < adev->dm.display_indexes_num; i++) {
1259 drm_encoder_cleanup(&adev->dm.mst_encoders[i].base);
1262 amdgpu_dm_audio_fini(adev);
1264 amdgpu_dm_destroy_drm_device(&adev->dm);
1266 #if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
1267 if (adev->dm.crc_rd_wrk) {
1268 flush_work(&adev->dm.crc_rd_wrk->notify_ta_work);
1269 kfree(adev->dm.crc_rd_wrk);
1270 adev->dm.crc_rd_wrk = NULL;
1273 #ifdef CONFIG_DRM_AMD_DC_HDCP
1274 if (adev->dm.hdcp_workqueue) {
1275 hdcp_destroy(&adev->dev->kobj, adev->dm.hdcp_workqueue);
1276 adev->dm.hdcp_workqueue = NULL;
1280 dc_deinit_callbacks(adev->dm.dc);
1283 #if defined(CONFIG_DRM_AMD_DC_DCN)
1284 if (adev->dm.vblank_workqueue) {
1285 adev->dm.vblank_workqueue->dm = NULL;
1286 kfree(adev->dm.vblank_workqueue);
1287 adev->dm.vblank_workqueue = NULL;
1291 if (adev->dm.dc->ctx->dmub_srv) {
1292 dc_dmub_srv_destroy(&adev->dm.dc->ctx->dmub_srv);
1293 adev->dm.dc->ctx->dmub_srv = NULL;
1296 if (adev->dm.dmub_bo)
1297 amdgpu_bo_free_kernel(&adev->dm.dmub_bo,
1298 &adev->dm.dmub_bo_gpu_addr,
1299 &adev->dm.dmub_bo_cpu_addr);
1301 /* DC Destroy TODO: Replace destroy DAL */
1303 dc_destroy(&adev->dm.dc);
1305 * TODO: pageflip, vblank interrupt
1307 * amdgpu_dm_irq_fini(adev);
1310 if (adev->dm.cgs_device) {
1311 amdgpu_cgs_destroy_device(adev->dm.cgs_device);
1312 adev->dm.cgs_device = NULL;
1314 if (adev->dm.freesync_module) {
1315 mod_freesync_destroy(adev->dm.freesync_module);
1316 adev->dm.freesync_module = NULL;
1319 mutex_destroy(&adev->dm.audio_lock);
1320 mutex_destroy(&adev->dm.dc_lock);
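/* Request and validate the DMCU firmware for ASICs that need it; when PSP
 * front-door loading is in use, also register its ERAM and INTV sections
 * with the ucode loader. */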
1325 static int load_dmcu_fw(struct amdgpu_device *adev)
1327 const char *fw_name_dmcu = NULL;
1329 const struct dmcu_firmware_header_v1_0 *hdr;
1331 switch (adev->asic_type) {
1332 #if defined(CONFIG_DRM_AMD_DC_SI)
1347 case CHIP_POLARIS11:
1348 case CHIP_POLARIS10:
1349 case CHIP_POLARIS12:
1357 case CHIP_SIENNA_CICHLID:
1358 case CHIP_NAVY_FLOUNDER:
1359 case CHIP_DIMGREY_CAVEFISH:
1363 fw_name_dmcu = FIRMWARE_NAVI12_DMCU;
1366 if (ASICREV_IS_PICASSO(adev->external_rev_id))
1367 fw_name_dmcu = FIRMWARE_RAVEN_DMCU;
1368 else if (ASICREV_IS_RAVEN2(adev->external_rev_id))
1369 fw_name_dmcu = FIRMWARE_RAVEN_DMCU;
1374 DRM_ERROR("Unsupported ASIC type: 0x%X\n", adev->asic_type);
1378 if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP) {
1379 DRM_DEBUG_KMS("dm: DMCU firmware not supported on direct or SMU loading\n");
1383 r = request_firmware_direct(&adev->dm.fw_dmcu, fw_name_dmcu, adev->dev);
1385 /* DMCU firmware is not necessary, so don't raise a fuss if it's missing */
1386 DRM_DEBUG_KMS("dm: DMCU firmware not found\n");
1387 adev->dm.fw_dmcu = NULL;
1391 dev_err(adev->dev, "amdgpu_dm: Can't load firmware \"%s\"\n",
1396 r = amdgpu_ucode_validate(adev->dm.fw_dmcu);
1398 dev_err(adev->dev, "amdgpu_dm: Can't validate firmware \"%s\"\n",
1400 release_firmware(adev->dm.fw_dmcu);
1401 adev->dm.fw_dmcu = NULL;
1405 hdr = (const struct dmcu_firmware_header_v1_0 *)adev->dm.fw_dmcu->data;
1406 adev->firmware.ucode[AMDGPU_UCODE_ID_DMCU_ERAM].ucode_id = AMDGPU_UCODE_ID_DMCU_ERAM;
1407 adev->firmware.ucode[AMDGPU_UCODE_ID_DMCU_ERAM].fw = adev->dm.fw_dmcu;
1408 adev->firmware.fw_size +=
1409 ALIGN(le32_to_cpu(hdr->header.ucode_size_bytes) - le32_to_cpu(hdr->intv_size_bytes), PAGE_SIZE);
1411 adev->firmware.ucode[AMDGPU_UCODE_ID_DMCU_INTV].ucode_id = AMDGPU_UCODE_ID_DMCU_INTV;
1412 adev->firmware.ucode[AMDGPU_UCODE_ID_DMCU_INTV].fw = adev->dm.fw_dmcu;
1413 adev->firmware.fw_size +=
1414 ALIGN(le32_to_cpu(hdr->intv_size_bytes), PAGE_SIZE);
1416 adev->dm.dmcu_fw_version = le32_to_cpu(hdr->header.ucode_version);
1418 DRM_DEBUG_KMS("PSP loading DMCU firmware\n");
1423 static uint32_t amdgpu_dm_dmub_reg_read(void *ctx, uint32_t address)
1425 struct amdgpu_device *adev = ctx;
1427 return dm_read_reg(adev->dm.dc->ctx, address);
1430 static void amdgpu_dm_dmub_reg_write(void *ctx, uint32_t address,
1433 struct amdgpu_device *adev = ctx;
1435 return dm_write_reg(adev->dm.dc->ctx, address, value);
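/* Software-side DMUB setup: pick the per-ASIC DMUB firmware, create the DMUB
 * service, compute its region layout and back the regions with a VRAM buffer. */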
1438 static int dm_dmub_sw_init(struct amdgpu_device *adev)
1440 struct dmub_srv_create_params create_params;
1441 struct dmub_srv_region_params region_params;
1442 struct dmub_srv_region_info region_info;
1443 struct dmub_srv_fb_params fb_params;
1444 struct dmub_srv_fb_info *fb_info;
1445 struct dmub_srv *dmub_srv;
1446 const struct dmcub_firmware_header_v1_0 *hdr;
1447 const char *fw_name_dmub;
1448 enum dmub_asic dmub_asic;
1449 enum dmub_status status;
1452 switch (adev->asic_type) {
1454 dmub_asic = DMUB_ASIC_DCN21;
1455 fw_name_dmub = FIRMWARE_RENOIR_DMUB;
1456 if (ASICREV_IS_GREEN_SARDINE(adev->external_rev_id))
1457 fw_name_dmub = FIRMWARE_GREEN_SARDINE_DMUB;
1459 case CHIP_SIENNA_CICHLID:
1460 dmub_asic = DMUB_ASIC_DCN30;
1461 fw_name_dmub = FIRMWARE_SIENNA_CICHLID_DMUB;
1463 case CHIP_NAVY_FLOUNDER:
1464 dmub_asic = DMUB_ASIC_DCN30;
1465 fw_name_dmub = FIRMWARE_NAVY_FLOUNDER_DMUB;
1468 dmub_asic = DMUB_ASIC_DCN301;
1469 fw_name_dmub = FIRMWARE_VANGOGH_DMUB;
1471 case CHIP_DIMGREY_CAVEFISH:
1472 dmub_asic = DMUB_ASIC_DCN302;
1473 fw_name_dmub = FIRMWARE_DIMGREY_CAVEFISH_DMUB;
1477 /* ASIC doesn't support DMUB. */
1481 r = request_firmware_direct(&adev->dm.dmub_fw, fw_name_dmub, adev->dev);
1483 DRM_ERROR("DMUB firmware loading failed: %d\n", r);
1487 r = amdgpu_ucode_validate(adev->dm.dmub_fw);
1489 DRM_ERROR("Couldn't validate DMUB firmware: %d\n", r);
1493 hdr = (const struct dmcub_firmware_header_v1_0 *)adev->dm.dmub_fw->data;
1495 if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) {
1496 adev->firmware.ucode[AMDGPU_UCODE_ID_DMCUB].ucode_id =
1497 AMDGPU_UCODE_ID_DMCUB;
1498 adev->firmware.ucode[AMDGPU_UCODE_ID_DMCUB].fw =
1500 adev->firmware.fw_size +=
1501 ALIGN(le32_to_cpu(hdr->inst_const_bytes), PAGE_SIZE);
1503 DRM_INFO("Loading DMUB firmware via PSP: version=0x%08X\n",
1504 adev->dm.dmcub_fw_version);
1507 adev->dm.dmcub_fw_version = le32_to_cpu(hdr->header.ucode_version);
1509 adev->dm.dmub_srv = kzalloc(sizeof(*adev->dm.dmub_srv), GFP_KERNEL);
1510 dmub_srv = adev->dm.dmub_srv;
1513 DRM_ERROR("Failed to allocate DMUB service!\n");
1517 memset(&create_params, 0, sizeof(create_params));
1518 create_params.user_ctx = adev;
1519 create_params.funcs.reg_read = amdgpu_dm_dmub_reg_read;
1520 create_params.funcs.reg_write = amdgpu_dm_dmub_reg_write;
1521 create_params.asic = dmub_asic;
1523 /* Create the DMUB service. */
1524 status = dmub_srv_create(dmub_srv, &create_params);
1525 if (status != DMUB_STATUS_OK) {
1526 DRM_ERROR("Error creating DMUB service: %d\n", status);
1530 /* Calculate the size of all the regions for the DMUB service. */
1531 memset(&region_params, 0, sizeof(region_params));
1533 region_params.inst_const_size = le32_to_cpu(hdr->inst_const_bytes) -
1534 PSP_HEADER_BYTES - PSP_FOOTER_BYTES;
1535 region_params.bss_data_size = le32_to_cpu(hdr->bss_data_bytes);
1536 region_params.vbios_size = adev->bios_size;
1537 region_params.fw_bss_data = region_params.bss_data_size ?
1538 adev->dm.dmub_fw->data +
1539 le32_to_cpu(hdr->header.ucode_array_offset_bytes) +
1540 le32_to_cpu(hdr->inst_const_bytes) : NULL;
1541 region_params.fw_inst_const =
1542 adev->dm.dmub_fw->data +
1543 le32_to_cpu(hdr->header.ucode_array_offset_bytes) +
1546 status = dmub_srv_calc_region_info(dmub_srv, &region_params,
1549 if (status != DMUB_STATUS_OK) {
1550 DRM_ERROR("Error calculating DMUB region info: %d\n", status);
1555 * Allocate a framebuffer based on the total size of all the regions.
1556 * TODO: Move this into GART.
1558 r = amdgpu_bo_create_kernel(adev, region_info.fb_size, PAGE_SIZE,
1559 AMDGPU_GEM_DOMAIN_VRAM, &adev->dm.dmub_bo,
1560 &adev->dm.dmub_bo_gpu_addr,
1561 &adev->dm.dmub_bo_cpu_addr);
1565 /* Rebase the regions on the framebuffer address. */
1566 memset(&fb_params, 0, sizeof(fb_params));
1567 fb_params.cpu_addr = adev->dm.dmub_bo_cpu_addr;
1568 fb_params.gpu_addr = adev->dm.dmub_bo_gpu_addr;
1569 fb_params.region_info = &region_info;
1571 adev->dm.dmub_fb_info =
1572 kzalloc(sizeof(*adev->dm.dmub_fb_info), GFP_KERNEL);
1573 fb_info = adev->dm.dmub_fb_info;
1577 "Failed to allocate framebuffer info for DMUB service!\n");
1581 status = dmub_srv_calc_fb_info(dmub_srv, &fb_params, fb_info);
1582 if (status != DMUB_STATUS_OK) {
1583 DRM_ERROR("Error calculating DMUB FB info: %d\n", status);
1590 static int dm_sw_init(void *handle)
1592 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1595 r = dm_dmub_sw_init(adev);
1599 return load_dmcu_fw(adev);
1602 static int dm_sw_fini(void *handle)
1604 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1606 kfree(adev->dm.dmub_fb_info);
1607 adev->dm.dmub_fb_info = NULL;
1609 if (adev->dm.dmub_srv) {
1610 dmub_srv_destroy(adev->dm.dmub_srv);
1611 adev->dm.dmub_srv = NULL;
1614 release_firmware(adev->dm.dmub_fw);
1615 adev->dm.dmub_fw = NULL;
1617 release_firmware(adev->dm.fw_dmcu);
1618 adev->dm.fw_dmcu = NULL;
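/* Start MST topology management on every connector whose DC link detected an
 * MST branch device; downgrade the link to a single-stream connection if
 * starting the topology manager fails. */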
1623 static int detect_mst_link_for_all_connectors(struct drm_device *dev)
1625 struct amdgpu_dm_connector *aconnector;
1626 struct drm_connector *connector;
1627 struct drm_connector_list_iter iter;
1630 drm_connector_list_iter_begin(dev, &iter);
1631 drm_for_each_connector_iter(connector, &iter) {
1632 aconnector = to_amdgpu_dm_connector(connector);
1633 if (aconnector->dc_link->type == dc_connection_mst_branch &&
1634 aconnector->mst_mgr.aux) {
1635 DRM_DEBUG_DRIVER("DM_MST: starting TM on aconnector: %p [id: %d]\n",
1637 aconnector->base.base.id);
1639 ret = drm_dp_mst_topology_mgr_set_mst(&aconnector->mst_mgr, true);
1641 DRM_ERROR("DM_MST: Failed to start MST\n");
1642 aconnector->dc_link->type =
1643 dc_connection_single;
1648 drm_connector_list_iter_end(&iter);
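/* Late init: program the ABM backlight ramping parameters and a linear
 * backlight LUT into DMCU IRAM (or the DMCUB ABM config), then kick off MST
 * link detection on all connectors. */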
1653 static int dm_late_init(void *handle)
1655 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1657 struct dmcu_iram_parameters params;
1658 unsigned int linear_lut[16];
1660 struct dmcu *dmcu = NULL;
1663 dmcu = adev->dm.dc->res_pool->dmcu;
1665 for (i = 0; i < 16; i++)
1666 linear_lut[i] = 0xFFFF * i / 15;
1669 params.backlight_ramping_start = 0xCCCC;
1670 params.backlight_ramping_reduction = 0xCCCCCCCC;
1671 params.backlight_lut_array_size = 16;
1672 params.backlight_lut_array = linear_lut;
1674 /* Min backlight level after ABM reduction; don't allow below 1%:
1675 * 0xFFFF * 0.01 = 0x28F
1677 params.min_abm_backlight = 0x28F;
1679 /* In the case where ABM is implemented on dmcub,
1680 * the dmcu object will be NULL.
1681 * ABM 2.4 and up are implemented on dmcub.
1684 ret = dmcu_load_iram(dmcu, params);
1685 else if (adev->dm.dc->ctx->dmub_srv)
1686 ret = dmub_init_abm_config(adev->dm.dc->res_pool, params);
1691 return detect_mst_link_for_all_connectors(adev_to_drm(adev));
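/* Suspend or resume the MST topology managers of all root MST connectors; on
 * resume, tear down MST and request a hotplug event for any topology that
 * fails to come back. */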
1694 static void s3_handle_mst(struct drm_device *dev, bool suspend)
1696 struct amdgpu_dm_connector *aconnector;
1697 struct drm_connector *connector;
1698 struct drm_connector_list_iter iter;
1699 struct drm_dp_mst_topology_mgr *mgr;
1701 bool need_hotplug = false;
1703 drm_connector_list_iter_begin(dev, &iter);
1704 drm_for_each_connector_iter(connector, &iter) {
1705 aconnector = to_amdgpu_dm_connector(connector);
1706 if (aconnector->dc_link->type != dc_connection_mst_branch ||
1707 aconnector->mst_port)
1710 mgr = &aconnector->mst_mgr;
1713 drm_dp_mst_topology_mgr_suspend(mgr);
1715 ret = drm_dp_mst_topology_mgr_resume(mgr, true);
1717 drm_dp_mst_topology_mgr_set_mst(mgr, false);
1718 need_hotplug = true;
1722 drm_connector_list_iter_end(&iter);
1725 drm_kms_helper_hotplug_event(dev);
1728 static int amdgpu_dm_smu_write_watermarks_table(struct amdgpu_device *adev)
1730 struct smu_context *smu = &adev->smu;
1733 if (!is_support_sw_smu(adev))
1736 /* This interface is for dGPU Navi1x. The Linux dc-pplib interface depends
1737 * on the Windows driver dc implementation.
1738 * For Navi1x, clock settings of dcn watermarks are fixed. the settings
1739 * should be passed to smu during boot up and resume from s3.
1740 * boot up: dc calculate dcn watermark clock settings within dc_create,
1741 * dcn20_resource_construct
1742 * then call pplib functions below to pass the settings to smu:
1743 * smu_set_watermarks_for_clock_ranges
1744 * smu_set_watermarks_table
1745 * navi10_set_watermarks_table
1746 * smu_write_watermarks_table
1748 * For Renoir, clock settings of dcn watermarks are also fixed values.
1749 * dc has implemented a different flow for the Windows driver:
1750 * dc_hardware_init / dc_set_power_state
1755 * smu_set_watermarks_for_clock_ranges
1756 * renoir_set_watermarks_table
1757 * smu_write_watermarks_table
1760 * dc_hardware_init -> amdgpu_dm_init
1761 * dc_set_power_state --> dm_resume
1763 * therefore, this function applies to navi10/12/14 but not Renoir
1766 switch (adev->asic_type) {
1775 ret = smu_write_watermarks_table(smu);
1777 DRM_ERROR("Failed to update WMTABLE!\n");
1785 * dm_hw_init() - Initialize DC device
1786 * @handle: The base driver device containing the amdgpu_dm device.
1788 * Initialize the &struct amdgpu_display_manager device. This involves calling
1789 * the initializers of each DM component, then populating the struct with them.
1791 * Although the function implies hardware initialization, both hardware and
1792 * software are initialized here. Splitting them out to their relevant init
1793 * hooks is a future TODO item.
1795 * Some notable things that are initialized here:
1797 * - Display Core, both software and hardware
1798 * - DC modules that we need (freesync and color management)
1799 * - DRM software states
1800 * - Interrupt sources and handlers
1802 * - Debug FS entries, if enabled
1804 static int dm_hw_init(void *handle)
1806 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1807 /* Create DAL display manager */
1808 amdgpu_dm_init(adev);
1809 amdgpu_dm_hpd_init(adev);
1815 * dm_hw_fini() - Teardown DC device
1816 * @handle: The base driver device containing the amdgpu_dm device.
1818 * Teardown components within &struct amdgpu_display_manager that require
1819 * cleanup. This involves cleaning up the DRM device, DC, and any modules that
1820 * were loaded. Also flush IRQ workqueues and disable them.
1822 static int dm_hw_fini(void *handle)
1824 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1826 amdgpu_dm_hpd_fini(adev);
1828 amdgpu_dm_irq_fini(adev);
1829 amdgpu_dm_fini(adev);
1834 static int dm_enable_vblank(struct drm_crtc *crtc);
1835 static void dm_disable_vblank(struct drm_crtc *crtc);
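/* Enable or disable pageflip and vblank interrupts for every stream in the
 * given DC state that still has planes; used to quiesce and restore IRQs
 * across GPU reset. */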
1837 static void dm_gpureset_toggle_interrupts(struct amdgpu_device *adev,
1838 struct dc_state *state, bool enable)
1840 enum dc_irq_source irq_source;
1841 struct amdgpu_crtc *acrtc;
1845 for (i = 0; i < state->stream_count; i++) {
1846 acrtc = get_crtc_by_otg_inst(
1847 adev, state->stream_status[i].primary_otg_inst);
1849 if (acrtc && state->stream_status[i].plane_count != 0) {
1850 irq_source = IRQ_TYPE_PFLIP + acrtc->otg_inst;
1851 rc = dc_interrupt_set(adev->dm.dc, irq_source, enable) ? 0 : -EBUSY;
1852 DRM_DEBUG_VBL("crtc %d - vupdate irq %sabling: r=%d\n",
1853 acrtc->crtc_id, enable ? "en" : "dis", rc);
1855 DRM_WARN("Failed to %s pflip interrupts\n",
1856 enable ? "enable" : "disable");
1859 rc = dm_enable_vblank(&acrtc->base);
1861 DRM_WARN("Failed to enable vblank interrupts\n");
1863 dm_disable_vblank(&acrtc->base);
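/* Commit a copy of the current DC state with every stream (and its planes)
 * removed, leaving the hardware driving zero streams; used when suspending
 * during GPU reset. */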
1871 static enum dc_status amdgpu_dm_commit_zero_streams(struct dc *dc)
1873 struct dc_state *context = NULL;
1874 enum dc_status res = DC_ERROR_UNEXPECTED;
1876 struct dc_stream_state *del_streams[MAX_PIPES];
1877 int del_streams_count = 0;
1879 memset(del_streams, 0, sizeof(del_streams));
1881 context = dc_create_state(dc);
1882 if (context == NULL)
1883 goto context_alloc_fail;
1885 dc_resource_state_copy_construct_current(dc, context);
1887 /* First remove from context all streams */
1888 for (i = 0; i < context->stream_count; i++) {
1889 struct dc_stream_state *stream = context->streams[i];
1891 del_streams[del_streams_count++] = stream;
1894 /* Remove all planes for removed streams and then remove the streams */
1895 for (i = 0; i < del_streams_count; i++) {
1896 if (!dc_rem_all_planes_for_stream(dc, del_streams[i], context)) {
1897 res = DC_FAIL_DETACH_SURFACES;
1901 res = dc_remove_stream_from_ctx(dc, context, del_streams[i]);
1907 res = dc_validate_global_state(dc, context, false);
1910 DRM_ERROR("%s:resource validation failed, dc_status:%d\n", __func__, res);
1914 res = dc_commit_state(dc, context);
1917 dc_release_state(context);
1923 static int dm_suspend(void *handle)
1925 struct amdgpu_device *adev = handle;
1926 struct amdgpu_display_manager *dm = &adev->dm;
1929 if (amdgpu_in_reset(adev)) {
1930 mutex_lock(&dm->dc_lock);
1932 #if defined(CONFIG_DRM_AMD_DC_DCN)
1933 dc_allow_idle_optimizations(adev->dm.dc, false);
1936 dm->cached_dc_state = dc_copy_state(dm->dc->current_state);
1938 dm_gpureset_toggle_interrupts(adev, dm->cached_dc_state, false);
1940 amdgpu_dm_commit_zero_streams(dm->dc);
1942 amdgpu_dm_irq_suspend(adev);
1947 #ifdef CONFIG_DRM_AMD_SECURE_DISPLAY
1948 amdgpu_dm_crtc_secure_display_suspend(adev);
1950 WARN_ON(adev->dm.cached_state);
1951 adev->dm.cached_state = drm_atomic_helper_suspend(adev_to_drm(adev));
1953 s3_handle_mst(adev_to_drm(adev), true);
1955 amdgpu_dm_irq_suspend(adev);
1958 dc_set_power_state(dm->dc, DC_ACPI_CM_POWER_STATE_D3);
1963 static struct amdgpu_dm_connector *
1964 amdgpu_dm_find_first_crtc_matching_connector(struct drm_atomic_state *state,
1965 struct drm_crtc *crtc)
1968 struct drm_connector_state *new_con_state;
1969 struct drm_connector *connector;
1970 struct drm_crtc *crtc_from_state;
1972 for_each_new_connector_in_state(state, connector, new_con_state, i) {
1973 crtc_from_state = new_con_state->crtc;
1975 if (crtc_from_state == crtc)
1976 return to_amdgpu_dm_connector(connector);
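/* Emulate link detection for a connector that is forced on but reports no
 * physical connection: create a sink of the matching signal type and try to
 * read a locally provided EDID for it. */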
1982 static void emulated_link_detect(struct dc_link *link)
1984 struct dc_sink_init_data sink_init_data = { 0 };
1985 struct display_sink_capability sink_caps = { 0 };
1986 enum dc_edid_status edid_status;
1987 struct dc_context *dc_ctx = link->ctx;
1988 struct dc_sink *sink = NULL;
1989 struct dc_sink *prev_sink = NULL;
1991 link->type = dc_connection_none;
1992 prev_sink = link->local_sink;
1995 dc_sink_release(prev_sink);
1997 switch (link->connector_signal) {
1998 case SIGNAL_TYPE_HDMI_TYPE_A: {
1999 sink_caps.transaction_type = DDC_TRANSACTION_TYPE_I2C;
2000 sink_caps.signal = SIGNAL_TYPE_HDMI_TYPE_A;
2004 case SIGNAL_TYPE_DVI_SINGLE_LINK: {
2005 sink_caps.transaction_type = DDC_TRANSACTION_TYPE_I2C;
2006 sink_caps.signal = SIGNAL_TYPE_DVI_SINGLE_LINK;
2010 case SIGNAL_TYPE_DVI_DUAL_LINK: {
2011 sink_caps.transaction_type = DDC_TRANSACTION_TYPE_I2C;
2012 sink_caps.signal = SIGNAL_TYPE_DVI_DUAL_LINK;
2016 case SIGNAL_TYPE_LVDS: {
2017 sink_caps.transaction_type = DDC_TRANSACTION_TYPE_I2C;
2018 sink_caps.signal = SIGNAL_TYPE_LVDS;
2022 case SIGNAL_TYPE_EDP: {
2023 sink_caps.transaction_type =
2024 DDC_TRANSACTION_TYPE_I2C_OVER_AUX;
2025 sink_caps.signal = SIGNAL_TYPE_EDP;
2029 case SIGNAL_TYPE_DISPLAY_PORT: {
2030 sink_caps.transaction_type =
2031 DDC_TRANSACTION_TYPE_I2C_OVER_AUX;
2032 sink_caps.signal = SIGNAL_TYPE_VIRTUAL;
2037 DC_ERROR("Invalid connector type! signal:%d\n",
2038 link->connector_signal);
2042 sink_init_data.link = link;
2043 sink_init_data.sink_signal = sink_caps.signal;
2045 sink = dc_sink_create(&sink_init_data);
2047 DC_ERROR("Failed to create sink!\n");
2051 /* dc_sink_create returns a new reference */
2052 link->local_sink = sink;
2054 edid_status = dm_helpers_read_local_edid(
2059 if (edid_status != EDID_OK)
2060 DC_ERROR("Failed to read EDID");
2064 static void dm_gpureset_commit_state(struct dc_state *dc_state,
2065 struct amdgpu_display_manager *dm)
2068 struct dc_surface_update surface_updates[MAX_SURFACES];
2069 struct dc_plane_info plane_infos[MAX_SURFACES];
2070 struct dc_scaling_info scaling_infos[MAX_SURFACES];
2071 struct dc_flip_addrs flip_addrs[MAX_SURFACES];
2072 struct dc_stream_update stream_update;
2076 bundle = kzalloc(sizeof(*bundle), GFP_KERNEL);
2079 dm_error("Failed to allocate update bundle\n");
2083 for (k = 0; k < dc_state->stream_count; k++) {
2084 bundle->stream_update.stream = dc_state->streams[k];
2086 for (m = 0; m < dc_state->stream_status->plane_count; m++) {
2087 bundle->surface_updates[m].surface =
2088 dc_state->stream_status->plane_states[m];
2089 bundle->surface_updates[m].surface->force_full_update =
2092 dc_commit_updates_for_stream(
2093 dm->dc, bundle->surface_updates,
2094 dc_state->stream_status->plane_count,
2095 dc_state->streams[k], &bundle->stream_update, dc_state);
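/* Turn off DPMS for the stream currently driven by the given link by
 * committing a stream update with dpms_off set, leaving the rest of the DC
 * state untouched. */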
2104 static void dm_set_dpms_off(struct dc_link *link)
2106 struct dc_stream_state *stream_state;
2107 struct amdgpu_dm_connector *aconnector = link->priv;
2108 struct amdgpu_device *adev = drm_to_adev(aconnector->base.dev);
2109 struct dc_stream_update stream_update;
2110 bool dpms_off = true;
2112 memset(&stream_update, 0, sizeof(stream_update));
2113 stream_update.dpms_off = &dpms_off;
2115 mutex_lock(&adev->dm.dc_lock);
2116 stream_state = dc_stream_find_from_link(link);
2118 if (stream_state == NULL) {
2119 DRM_DEBUG_DRIVER("Error finding stream state associated with link!\n");
2120 mutex_unlock(&adev->dm.dc_lock);
2124 stream_update.stream = stream_state;
2125 dc_commit_updates_for_stream(stream_state->ctx->dc, NULL, 0,
2126 stream_state, &stream_update,
2127 stream_state->ctx->dc->current_state);
2128 mutex_unlock(&adev->dm.dc_lock);
2131 static int dm_resume(void *handle)
2133 struct amdgpu_device *adev = handle;
2134 struct drm_device *ddev = adev_to_drm(adev);
2135 struct amdgpu_display_manager *dm = &adev->dm;
2136 struct amdgpu_dm_connector *aconnector;
2137 struct drm_connector *connector;
2138 struct drm_connector_list_iter iter;
2139 struct drm_crtc *crtc;
2140 struct drm_crtc_state *new_crtc_state;
2141 struct dm_crtc_state *dm_new_crtc_state;
2142 struct drm_plane *plane;
2143 struct drm_plane_state *new_plane_state;
2144 struct dm_plane_state *dm_new_plane_state;
2145 struct dm_atomic_state *dm_state = to_dm_atomic_state(dm->atomic_obj.state);
2146 enum dc_connection_type new_connection_type = dc_connection_none;
2147 struct dc_state *dc_state;
2150 if (amdgpu_in_reset(adev)) {
2151 dc_state = dm->cached_dc_state;
2153 r = dm_dmub_hw_init(adev);
2155 DRM_ERROR("DMUB interface failed to initialize: status=%d\n", r);
2157 dc_set_power_state(dm->dc, DC_ACPI_CM_POWER_STATE_D0);
2160 amdgpu_dm_irq_resume_early(adev);
2162 for (i = 0; i < dc_state->stream_count; i++) {
2163 dc_state->streams[i]->mode_changed = true;
2164 for (j = 0; j < dc_state->stream_status->plane_count; j++) {
2165 dc_state->stream_status->plane_states[j]->update_flags.raw
2170 WARN_ON(!dc_commit_state(dm->dc, dc_state));
2172 dm_gpureset_commit_state(dm->cached_dc_state, dm);
2174 dm_gpureset_toggle_interrupts(adev, dm->cached_dc_state, true);
2176 dc_release_state(dm->cached_dc_state);
2177 dm->cached_dc_state = NULL;
2179 amdgpu_dm_irq_resume_late(adev);
2181 mutex_unlock(&dm->dc_lock);
2185 /* Recreate dc_state - DC invalidates it when setting power state to S3. */
2186 dc_release_state(dm_state->context);
2187 dm_state->context = dc_create_state(dm->dc);
2188 /* TODO: Remove dc_state->dccg, use dc->dccg directly. */
2189 dc_resource_state_construct(dm->dc, dm_state->context);
2191 /* Before powering on DC we need to re-initialize DMUB. */
2192 r = dm_dmub_hw_init(adev);
2194 DRM_ERROR("DMUB interface failed to initialize: status=%d\n", r);
2196 /* power on hardware */
2197 dc_set_power_state(dm->dc, DC_ACPI_CM_POWER_STATE_D0);
2199 /* program HPD filter */
2203 * Enable HPD Rx IRQ early; this should be done before setting the mode, as
2204 * short-pulse interrupts are used for MST.
2206 amdgpu_dm_irq_resume_early(adev);
2208 /* On resume we need to rewrite the MSTM control bits to enable MST */
2209 s3_handle_mst(ddev, false);
2212 drm_connector_list_iter_begin(ddev, &iter);
2213 drm_for_each_connector_iter(connector, &iter) {
2214 aconnector = to_amdgpu_dm_connector(connector);
2217 * This is the case when traversing through already-created
2218 * MST connectors; they should be skipped.
2220 if (aconnector->mst_port)
2223 mutex_lock(&aconnector->hpd_lock);
2224 if (!dc_link_detect_sink(aconnector->dc_link, &new_connection_type))
2225 DRM_ERROR("KMS: Failed to detect connector\n");
2227 if (aconnector->base.force && new_connection_type == dc_connection_none)
2228 emulated_link_detect(aconnector->dc_link);
2230 dc_link_detect(aconnector->dc_link, DETECT_REASON_HPD);
2232 if (aconnector->fake_enable && aconnector->dc_link->local_sink)
2233 aconnector->fake_enable = false;
2235 if (aconnector->dc_sink)
2236 dc_sink_release(aconnector->dc_sink);
2237 aconnector->dc_sink = NULL;
2238 amdgpu_dm_update_connector_after_detect(aconnector);
2239 mutex_unlock(&aconnector->hpd_lock);
2241 drm_connector_list_iter_end(&iter);
2243 /* Force mode set in atomic commit */
2244 for_each_new_crtc_in_state(dm->cached_state, crtc, new_crtc_state, i)
2245 new_crtc_state->active_changed = true;
2248 * atomic_check is expected to create the dc states. We need to release
2249 * them here, since they were duplicated as part of the suspend procedure.
2252 for_each_new_crtc_in_state(dm->cached_state, crtc, new_crtc_state, i) {
2253 dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
2254 if (dm_new_crtc_state->stream) {
2255 WARN_ON(kref_read(&dm_new_crtc_state->stream->refcount) > 1);
2256 dc_stream_release(dm_new_crtc_state->stream);
2257 dm_new_crtc_state->stream = NULL;
2261 for_each_new_plane_in_state(dm->cached_state, plane, new_plane_state, i) {
2262 dm_new_plane_state = to_dm_plane_state(new_plane_state);
2263 if (dm_new_plane_state->dc_state) {
2264 WARN_ON(kref_read(&dm_new_plane_state->dc_state->refcount) > 1);
2265 dc_plane_state_release(dm_new_plane_state->dc_state);
2266 dm_new_plane_state->dc_state = NULL;
2270 drm_atomic_helper_resume(ddev, dm->cached_state);
2272 dm->cached_state = NULL;
2274 #ifdef CONFIG_DRM_AMD_SECURE_DISPLAY
2275 amdgpu_dm_crtc_secure_display_resume(adev);
2278 amdgpu_dm_irq_resume_late(adev);
2280 amdgpu_dm_smu_write_watermarks_table(adev);
2288 * DM (and consequently DC) is registered in the amdgpu base driver as an IP
2289 * block. When CONFIG_DRM_AMD_DC is enabled, the DM device IP block is added to
2290 * the base driver's device list to be initialized and torn down accordingly.
2292 * The functions to do so are provided as hooks in &struct amd_ip_funcs.
2295 static const struct amd_ip_funcs amdgpu_dm_funcs = {
2297 .early_init = dm_early_init,
2298 .late_init = dm_late_init,
2299 .sw_init = dm_sw_init,
2300 .sw_fini = dm_sw_fini,
2301 .hw_init = dm_hw_init,
2302 .hw_fini = dm_hw_fini,
2303 .suspend = dm_suspend,
2304 .resume = dm_resume,
2305 .is_idle = dm_is_idle,
2306 .wait_for_idle = dm_wait_for_idle,
2307 .check_soft_reset = dm_check_soft_reset,
2308 .soft_reset = dm_soft_reset,
2309 .set_clockgating_state = dm_set_clockgating_state,
2310 .set_powergating_state = dm_set_powergating_state,
2313 const struct amdgpu_ip_block_version dm_ip_block =
2315 .type = AMD_IP_BLOCK_TYPE_DCE,
2319 .funcs = &amdgpu_dm_funcs,
2329 static const struct drm_mode_config_funcs amdgpu_dm_mode_funcs = {
2330 .fb_create = amdgpu_display_user_framebuffer_create,
2331 .get_format_info = amd_get_format_info,
2332 .output_poll_changed = drm_fb_helper_output_poll_changed,
2333 .atomic_check = amdgpu_dm_atomic_check,
2334 .atomic_commit = drm_atomic_helper_commit,
2337 static struct drm_mode_config_helper_funcs amdgpu_dm_mode_config_helperfuncs = {
2338 .atomic_commit_tail = amdgpu_dm_atomic_commit_tail
2341 static void update_connector_ext_caps(struct amdgpu_dm_connector *aconnector)
2343 u32 max_cll, min_cll, max, min, q, r;
2344 struct amdgpu_dm_backlight_caps *caps;
2345 struct amdgpu_display_manager *dm;
2346 struct drm_connector *conn_base;
2347 struct amdgpu_device *adev;
2348 struct dc_link *link = NULL;
2349 static const u8 pre_computed_values[] = {
2350 50, 51, 52, 53, 55, 56, 57, 58, 59, 61, 62, 63, 65, 66, 68, 69,
2351 71, 72, 74, 75, 77, 79, 81, 82, 84, 86, 88, 90, 92, 94, 96, 98};
2353 if (!aconnector || !aconnector->dc_link)
2356 link = aconnector->dc_link;
2357 if (link->connector_signal != SIGNAL_TYPE_EDP)
2360 conn_base = &aconnector->base;
2361 adev = drm_to_adev(conn_base->dev);
2363 caps = &dm->backlight_caps;
2364 caps->ext_caps = &aconnector->dc_link->dpcd_sink_ext_caps;
2365 caps->aux_support = false;
2366 max_cll = conn_base->hdr_sink_metadata.hdmi_type1.max_cll;
2367 min_cll = conn_base->hdr_sink_metadata.hdmi_type1.min_cll;
2369 if (caps->ext_caps->bits.oled == 1 ||
2370 caps->ext_caps->bits.sdr_aux_backlight_control == 1 ||
2371 caps->ext_caps->bits.hdr_aux_backlight_control == 1)
2372 caps->aux_support = true;
2374 if (amdgpu_backlight == 0)
2375 caps->aux_support = false;
2376 else if (amdgpu_backlight == 1)
2377 caps->aux_support = true;
2379 /* From the specification (CTA-861-G), to calculate the maximum
2380 * luminance we need to use:
2381 * Luminance = 50*2**(CV/32)
2382 * where CV is a one-byte value.
2383 * Evaluating this expression would require floating-point precision;
2384 * to avoid that complexity, we take advantage of the fact that CV is
2385 * divided by a constant. From Euclid's division algorithm, we know that
2386 * CV can be written as CV = 32*q + r. Substituting this into the
2387 * Luminance expression gives 50*(2**q)*(2**(r/32)), so we only need
2388 * to pre-compute 50*(2**(r/32)) for each possible r. The values were
2389 * generated with the following Ruby one-liner:
2390 * (0...32).each {|cv| puts (50*2**(cv/32.0)).round}
2391 * The results can be verified against pre_computed_values above.
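// Illustrative example (hypothetical value): max_cll = 100 gives q = 3 and
// r = 4, so max = (1 << 3) * pre_computed_values[4] = 8 * 55 = 440 nits,
// which agrees with the exact 50 * 2**(100/32) ~= 436 nits up to rounding.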
2396 max = (1 << q) * pre_computed_values[r];
2398 // min luminance: maxLum * (CV/255)^2 / 100
2399 q = DIV_ROUND_CLOSEST(min_cll, 255);
2400 min = max * DIV_ROUND_CLOSEST((q * q), 100);
2402 caps->aux_max_input_signal = max;
2403 caps->aux_min_input_signal = min;
2406 void amdgpu_dm_update_connector_after_detect(
2407 struct amdgpu_dm_connector *aconnector)
2409 struct drm_connector *connector = &aconnector->base;
2410 struct drm_device *dev = connector->dev;
2411 struct dc_sink *sink;
2413 /* MST handled by drm_mst framework */
2414 if (aconnector->mst_mgr.mst_state == true)
2417 sink = aconnector->dc_link->local_sink;
2419 dc_sink_retain(sink);
2422 * An EDID-managed connector gets its first update only in the mode_valid hook;
2423 * afterwards the connector sink is set to either a fake or a physical sink,
2424 * depending on link status. Skip if this was already done during boot.
2426 if (aconnector->base.force != DRM_FORCE_UNSPECIFIED
2427 && aconnector->dc_em_sink) {
2430 * For headless S3 resume, use dc_em_sink to fake the stream
2431 * because on resume connector->sink is set to NULL.
2433 mutex_lock(&dev->mode_config.mutex);
2436 if (aconnector->dc_sink) {
2437 amdgpu_dm_update_freesync_caps(connector, NULL);
2439 * The retain and release below bump up the refcount for the sink
2440 * because the link no longer points to it after disconnect; otherwise,
2441 * the next crtc-to-connector reshuffle by the UMD would trigger an
2442 * unwanted dc_sink release.
2444 dc_sink_release(aconnector->dc_sink);
2446 aconnector->dc_sink = sink;
2447 dc_sink_retain(aconnector->dc_sink);
2448 amdgpu_dm_update_freesync_caps(connector,
2451 amdgpu_dm_update_freesync_caps(connector, NULL);
2452 if (!aconnector->dc_sink) {
2453 aconnector->dc_sink = aconnector->dc_em_sink;
2454 dc_sink_retain(aconnector->dc_sink);
2458 mutex_unlock(&dev->mode_config.mutex);
2461 dc_sink_release(sink);
2466 * TODO: temporary guard while we look for a proper fix.
2467 * If this sink is an MST sink, we should not do anything.
2469 if (sink && sink->sink_signal == SIGNAL_TYPE_DISPLAY_PORT_MST) {
2470 dc_sink_release(sink);
2474 if (aconnector->dc_sink == sink) {
2476 * We got a DP short pulse (Link Loss, DP CTS, etc...).
2479 DRM_DEBUG_DRIVER("DCHPD: connector_id=%d: dc_sink didn't change.\n",
2480 aconnector->connector_id);
2482 dc_sink_release(sink);
2486 DRM_DEBUG_DRIVER("DCHPD: connector_id=%d: Old sink=%p New sink=%p\n",
2487 aconnector->connector_id, aconnector->dc_sink, sink);
2489 mutex_lock(&dev->mode_config.mutex);
2492 * 1. Update status of the drm connector
2493 * 2. Send an event and let userspace tell us what to do
2497 * TODO: check if we still need the S3 mode update workaround.
2498 * If yes, put it here.
2500 if (aconnector->dc_sink) {
2501 amdgpu_dm_update_freesync_caps(connector, NULL);
2502 dc_sink_release(aconnector->dc_sink);
2505 aconnector->dc_sink = sink;
2506 dc_sink_retain(aconnector->dc_sink);
2507 if (sink->dc_edid.length == 0) {
2508 aconnector->edid = NULL;
2509 if (aconnector->dc_link->aux_mode) {
2510 drm_dp_cec_unset_edid(
2511 &aconnector->dm_dp_aux.aux);
2515 (struct edid *)sink->dc_edid.raw_edid;
2517 drm_connector_update_edid_property(connector,
2519 if (aconnector->dc_link->aux_mode)
2520 drm_dp_cec_set_edid(&aconnector->dm_dp_aux.aux,
2524 amdgpu_dm_update_freesync_caps(connector, aconnector->edid);
2525 update_connector_ext_caps(aconnector);
2527 drm_dp_cec_unset_edid(&aconnector->dm_dp_aux.aux);
2528 amdgpu_dm_update_freesync_caps(connector, NULL);
2529 drm_connector_update_edid_property(connector, NULL);
2530 aconnector->num_modes = 0;
2531 dc_sink_release(aconnector->dc_sink);
2532 aconnector->dc_sink = NULL;
2533 aconnector->edid = NULL;
2534 #ifdef CONFIG_DRM_AMD_DC_HDCP
2535 /* Set CP to DESIRED if it was ENABLED, so we can re-enable it again on hotplug */
2536 if (connector->state->content_protection == DRM_MODE_CONTENT_PROTECTION_ENABLED)
2537 connector->state->content_protection = DRM_MODE_CONTENT_PROTECTION_DESIRED;
2541 mutex_unlock(&dev->mode_config.mutex);
2543 update_subconnector_property(aconnector);
2546 dc_sink_release(sink);
2549 static void handle_hpd_irq(void *param)
2551 struct amdgpu_dm_connector *aconnector = (struct amdgpu_dm_connector *)param;
2552 struct drm_connector *connector = &aconnector->base;
2553 struct drm_device *dev = connector->dev;
2554 enum dc_connection_type new_connection_type = dc_connection_none;
2555 #ifdef CONFIG_DRM_AMD_DC_HDCP
2556 struct amdgpu_device *adev = drm_to_adev(dev);
2557 struct dm_connector_state *dm_con_state = to_dm_connector_state(connector->state);
2561 * In case of failure or MST there is no need to update the connector status
2562 * or notify the OS, since (in the MST case) MST handles this in its own context.
2564 mutex_lock(&aconnector->hpd_lock);
2566 #ifdef CONFIG_DRM_AMD_DC_HDCP
2567 if (adev->dm.hdcp_workqueue) {
2568 hdcp_reset_display(adev->dm.hdcp_workqueue, aconnector->dc_link->link_index);
2569 dm_con_state->update_hdcp = true;
2572 if (aconnector->fake_enable)
2573 aconnector->fake_enable = false;
2575 if (!dc_link_detect_sink(aconnector->dc_link, &new_connection_type))
2576 DRM_ERROR("KMS: Failed to detect connector\n");
2578 if (aconnector->base.force && new_connection_type == dc_connection_none) {
2579 emulated_link_detect(aconnector->dc_link);
2582 drm_modeset_lock_all(dev);
2583 dm_restore_drm_connector_state(dev, connector);
2584 drm_modeset_unlock_all(dev);
2586 if (aconnector->base.force == DRM_FORCE_UNSPECIFIED)
2587 drm_kms_helper_hotplug_event(dev);
2589 } else if (dc_link_detect(aconnector->dc_link, DETECT_REASON_HPD)) {
2590 if (new_connection_type == dc_connection_none &&
2591 aconnector->dc_link->type == dc_connection_none)
2592 dm_set_dpms_off(aconnector->dc_link);
2594 amdgpu_dm_update_connector_after_detect(aconnector);
2596 drm_modeset_lock_all(dev);
2597 dm_restore_drm_connector_state(dev, connector);
2598 drm_modeset_unlock_all(dev);
2600 if (aconnector->base.force == DRM_FORCE_UNSPECIFIED)
2601 drm_kms_helper_hotplug_event(dev);
2603 mutex_unlock(&aconnector->hpd_lock);
2607 static void dm_handle_hpd_rx_irq(struct amdgpu_dm_connector *aconnector)
2609 uint8_t esi[DP_PSR_ERROR_STATUS - DP_SINK_COUNT_ESI] = { 0 };
2611 bool new_irq_handled = false;
2613 int dpcd_bytes_to_read;
2615 const int max_process_count = 30;
2616 int process_count = 0;
2618 const struct dc_link_status *link_status = dc_link_get_status(aconnector->dc_link);
2620 if (link_status->dpcd_caps->dpcd_rev.raw < 0x12) {
2621 dpcd_bytes_to_read = DP_LANE0_1_STATUS - DP_SINK_COUNT;
2622 /* DPCD 0x200 - 0x201 for downstream IRQ */
2623 dpcd_addr = DP_SINK_COUNT;
2625 dpcd_bytes_to_read = DP_PSR_ERROR_STATUS - DP_SINK_COUNT_ESI;
2626 /* DPCD 0x2002 - 0x2005 for downstream IRQ */
2627 dpcd_addr = DP_SINK_COUNT_ESI;
2630 dret = drm_dp_dpcd_read(
2631 &aconnector->dm_dp_aux.aux,
2634 dpcd_bytes_to_read);
2636 while (dret == dpcd_bytes_to_read &&
2637 process_count < max_process_count) {
2643 DRM_DEBUG_DRIVER("ESI %02x %02x %02x\n", esi[0], esi[1], esi[2]);
2644 /* handle HPD short pulse irq */
2645 if (aconnector->mst_mgr.mst_state)
2647 &aconnector->mst_mgr,
2651 if (new_irq_handled) {
2652 /* ACK at DPCD to notify the downstream device */
2653 const int ack_dpcd_bytes_to_write =
2654 dpcd_bytes_to_read - 1;
2656 for (retry = 0; retry < 3; retry++) {
2659 wret = drm_dp_dpcd_write(
2660 &aconnector->dm_dp_aux.aux,
2663 ack_dpcd_bytes_to_write);
2664 if (wret == ack_dpcd_bytes_to_write)
2668 /* check if there is new irq to be handled */
2669 dret = drm_dp_dpcd_read(
2670 &aconnector->dm_dp_aux.aux,
2673 dpcd_bytes_to_read);
2675 new_irq_handled = false;
2681 if (process_count == max_process_count)
2682 DRM_DEBUG_DRIVER("Loop exceeded max iterations\n");
2685 static void handle_hpd_rx_irq(void *param)
2687 struct amdgpu_dm_connector *aconnector = (struct amdgpu_dm_connector *)param;
2688 struct drm_connector *connector = &aconnector->base;
2689 struct drm_device *dev = connector->dev;
2690 struct dc_link *dc_link = aconnector->dc_link;
2691 bool is_mst_root_connector = aconnector->mst_mgr.mst_state;
2692 bool result = false;
2693 enum dc_connection_type new_connection_type = dc_connection_none;
2694 struct amdgpu_device *adev = drm_to_adev(dev);
2695 union hpd_irq_data hpd_irq_data;
2697 memset(&hpd_irq_data, 0, sizeof(hpd_irq_data));
2700 * TODO: Temporarily add a mutex so that the HPD interrupt does not have a
2701 * GPIO conflict; once the i2c helper is implemented, this mutex should be retired.
2704 if (dc_link->type != dc_connection_mst_branch)
2705 mutex_lock(&aconnector->hpd_lock);
2707 read_hpd_rx_irq_data(dc_link, &hpd_irq_data);
2709 if ((dc_link->cur_link_settings.lane_count != LANE_COUNT_UNKNOWN) ||
2710 (dc_link->type == dc_connection_mst_branch)) {
2711 if (hpd_irq_data.bytes.device_service_irq.bits.UP_REQ_MSG_RDY) {
2713 dm_handle_hpd_rx_irq(aconnector);
2715 } else if (hpd_irq_data.bytes.device_service_irq.bits.DOWN_REP_MSG_RDY) {
2717 dm_handle_hpd_rx_irq(aconnector);
2722 mutex_lock(&adev->dm.dc_lock);
2723 #ifdef CONFIG_DRM_AMD_DC_HDCP
2724 result = dc_link_handle_hpd_rx_irq(dc_link, &hpd_irq_data, NULL);
2726 result = dc_link_handle_hpd_rx_irq(dc_link, NULL, NULL);
2728 mutex_unlock(&adev->dm.dc_lock);
2731 if (result && !is_mst_root_connector) {
2732 /* Downstream Port status changed. */
2733 if (!dc_link_detect_sink(dc_link, &new_connection_type))
2734 DRM_ERROR("KMS: Failed to detect connector\n");
2736 if (aconnector->base.force && new_connection_type == dc_connection_none) {
2737 emulated_link_detect(dc_link);
2739 if (aconnector->fake_enable)
2740 aconnector->fake_enable = false;
2742 amdgpu_dm_update_connector_after_detect(aconnector);
2745 drm_modeset_lock_all(dev);
2746 dm_restore_drm_connector_state(dev, connector);
2747 drm_modeset_unlock_all(dev);
2749 drm_kms_helper_hotplug_event(dev);
2750 } else if (dc_link_detect(dc_link, DETECT_REASON_HPDRX)) {
2752 if (aconnector->fake_enable)
2753 aconnector->fake_enable = false;
2755 amdgpu_dm_update_connector_after_detect(aconnector);
2758 drm_modeset_lock_all(dev);
2759 dm_restore_drm_connector_state(dev, connector);
2760 drm_modeset_unlock_all(dev);
2762 drm_kms_helper_hotplug_event(dev);
2765 #ifdef CONFIG_DRM_AMD_DC_HDCP
2766 if (hpd_irq_data.bytes.device_service_irq.bits.CP_IRQ) {
2767 if (adev->dm.hdcp_workqueue)
2768 hdcp_handle_cpirq(adev->dm.hdcp_workqueue, aconnector->base.index);
2772 if (dc_link->type != dc_connection_mst_branch) {
2773 drm_dp_cec_irq(&aconnector->dm_dp_aux.aux);
2774 mutex_unlock(&aconnector->hpd_lock);
2778 static void register_hpd_handlers(struct amdgpu_device *adev)
2780 struct drm_device *dev = adev_to_drm(adev);
2781 struct drm_connector *connector;
2782 struct amdgpu_dm_connector *aconnector;
2783 const struct dc_link *dc_link;
2784 struct dc_interrupt_params int_params = {0};
2786 int_params.requested_polarity = INTERRUPT_POLARITY_DEFAULT;
2787 int_params.current_polarity = INTERRUPT_POLARITY_DEFAULT;
2789 list_for_each_entry(connector,
2790 &dev->mode_config.connector_list, head) {
2792 aconnector = to_amdgpu_dm_connector(connector);
2793 dc_link = aconnector->dc_link;
2795 if (DC_IRQ_SOURCE_INVALID != dc_link->irq_source_hpd) {
2796 int_params.int_context = INTERRUPT_LOW_IRQ_CONTEXT;
2797 int_params.irq_source = dc_link->irq_source_hpd;
2799 amdgpu_dm_irq_register_interrupt(adev, &int_params,
2801 (void *) aconnector);
2804 if (DC_IRQ_SOURCE_INVALID != dc_link->irq_source_hpd_rx) {
2806 /* Also register for DP short pulse (hpd_rx). */
2807 int_params.int_context = INTERRUPT_LOW_IRQ_CONTEXT;
2808 int_params.irq_source = dc_link->irq_source_hpd_rx;
2810 amdgpu_dm_irq_register_interrupt(adev, &int_params,
2812 (void *) aconnector);
2817 #if defined(CONFIG_DRM_AMD_DC_SI)
2818 /* Register IRQ sources and initialize IRQ callbacks */
2819 static int dce60_register_irq_handlers(struct amdgpu_device *adev)
2821 struct dc *dc = adev->dm.dc;
2822 struct common_irq_params *c_irq_params;
2823 struct dc_interrupt_params int_params = {0};
2826 unsigned client_id = AMDGPU_IRQ_CLIENTID_LEGACY;
2828 int_params.requested_polarity = INTERRUPT_POLARITY_DEFAULT;
2829 int_params.current_polarity = INTERRUPT_POLARITY_DEFAULT;
2832 * Actions of amdgpu_irq_add_id():
2833 * 1. Register a set() function with base driver.
2834 * Base driver will call set() function to enable/disable an
2835 * interrupt in DC hardware.
2836 * 2. Register amdgpu_dm_irq_handler().
2837 * Base driver will call amdgpu_dm_irq_handler() for ALL interrupts
2838 * coming from DC hardware.
2839 * amdgpu_dm_irq_handler() will re-direct the interrupt to DC
2840 * for acknowledging and handling. */
2842 /* Use VBLANK interrupt */
2843 for (i = 0; i < adev->mode_info.num_crtc; i++) {
2844 r = amdgpu_irq_add_id(adev, client_id, i + 1, &adev->crtc_irq);
2846 DRM_ERROR("Failed to add crtc irq id!\n");
2850 int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
2851 int_params.irq_source =
2852 dc_interrupt_to_irq_source(dc, i + 1, 0);
2854 c_irq_params = &adev->dm.vblank_params[int_params.irq_source - DC_IRQ_SOURCE_VBLANK1];
2856 c_irq_params->adev = adev;
2857 c_irq_params->irq_src = int_params.irq_source;
2859 amdgpu_dm_irq_register_interrupt(adev, &int_params,
2860 dm_crtc_high_irq, c_irq_params);
2863 /* Use GRPH_PFLIP interrupt */
2864 for (i = VISLANDS30_IV_SRCID_D1_GRPH_PFLIP;
2865 i <= VISLANDS30_IV_SRCID_D6_GRPH_PFLIP; i += 2) {
2866 r = amdgpu_irq_add_id(adev, client_id, i, &adev->pageflip_irq);
2868 DRM_ERROR("Failed to add page flip irq id!\n");
2872 int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
2873 int_params.irq_source =
2874 dc_interrupt_to_irq_source(dc, i, 0);
2876 c_irq_params = &adev->dm.pflip_params[int_params.irq_source - DC_IRQ_SOURCE_PFLIP_FIRST];
2878 c_irq_params->adev = adev;
2879 c_irq_params->irq_src = int_params.irq_source;
2881 amdgpu_dm_irq_register_interrupt(adev, &int_params,
2882 dm_pflip_high_irq, c_irq_params);
2887 r = amdgpu_irq_add_id(adev, client_id,
2888 VISLANDS30_IV_SRCID_HOTPLUG_DETECT_A, &adev->hpd_irq);
2890 DRM_ERROR("Failed to add hpd irq id!\n");
2894 register_hpd_handlers(adev);
2900 /* Register IRQ sources and initialize IRQ callbacks */
2901 static int dce110_register_irq_handlers(struct amdgpu_device *adev)
2903 struct dc *dc = adev->dm.dc;
2904 struct common_irq_params *c_irq_params;
2905 struct dc_interrupt_params int_params = {0};
2908 unsigned client_id = AMDGPU_IRQ_CLIENTID_LEGACY;
2910 if (adev->asic_type >= CHIP_VEGA10)
2911 client_id = SOC15_IH_CLIENTID_DCE;
2913 int_params.requested_polarity = INTERRUPT_POLARITY_DEFAULT;
2914 int_params.current_polarity = INTERRUPT_POLARITY_DEFAULT;
2917 * Actions of amdgpu_irq_add_id():
2918 * 1. Register a set() function with base driver.
2919 * Base driver will call set() function to enable/disable an
2920 * interrupt in DC hardware.
2921 * 2. Register amdgpu_dm_irq_handler().
2922 * Base driver will call amdgpu_dm_irq_handler() for ALL interrupts
2923 * coming from DC hardware.
2924 * amdgpu_dm_irq_handler() will re-direct the interrupt to DC
2925 * for acknowledging and handling. */
2927 /* Use VBLANK interrupt */
2928 for (i = VISLANDS30_IV_SRCID_D1_VERTICAL_INTERRUPT0; i <= VISLANDS30_IV_SRCID_D6_VERTICAL_INTERRUPT0; i++) {
2929 r = amdgpu_irq_add_id(adev, client_id, i, &adev->crtc_irq);
2931 DRM_ERROR("Failed to add crtc irq id!\n");
2935 int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
2936 int_params.irq_source =
2937 dc_interrupt_to_irq_source(dc, i, 0);
2939 c_irq_params = &adev->dm.vblank_params[int_params.irq_source - DC_IRQ_SOURCE_VBLANK1];
2941 c_irq_params->adev = adev;
2942 c_irq_params->irq_src = int_params.irq_source;
2944 amdgpu_dm_irq_register_interrupt(adev, &int_params,
2945 dm_crtc_high_irq, c_irq_params);
2948 /* Use VUPDATE interrupt */
2949 for (i = VISLANDS30_IV_SRCID_D1_V_UPDATE_INT; i <= VISLANDS30_IV_SRCID_D6_V_UPDATE_INT; i += 2) {
2950 r = amdgpu_irq_add_id(adev, client_id, i, &adev->vupdate_irq);
2952 DRM_ERROR("Failed to add vupdate irq id!\n");
2956 int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
2957 int_params.irq_source =
2958 dc_interrupt_to_irq_source(dc, i, 0);
2960 c_irq_params = &adev->dm.vupdate_params[int_params.irq_source - DC_IRQ_SOURCE_VUPDATE1];
2962 c_irq_params->adev = adev;
2963 c_irq_params->irq_src = int_params.irq_source;
2965 amdgpu_dm_irq_register_interrupt(adev, &int_params,
2966 dm_vupdate_high_irq, c_irq_params);
2969 /* Use GRPH_PFLIP interrupt */
2970 for (i = VISLANDS30_IV_SRCID_D1_GRPH_PFLIP;
2971 i <= VISLANDS30_IV_SRCID_D6_GRPH_PFLIP; i += 2) {
2972 r = amdgpu_irq_add_id(adev, client_id, i, &adev->pageflip_irq);
2974 DRM_ERROR("Failed to add page flip irq id!\n");
2978 int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
2979 int_params.irq_source =
2980 dc_interrupt_to_irq_source(dc, i, 0);
2982 c_irq_params = &adev->dm.pflip_params[int_params.irq_source - DC_IRQ_SOURCE_PFLIP_FIRST];
2984 c_irq_params->adev = adev;
2985 c_irq_params->irq_src = int_params.irq_source;
2987 amdgpu_dm_irq_register_interrupt(adev, &int_params,
2988 dm_pflip_high_irq, c_irq_params);
2993 r = amdgpu_irq_add_id(adev, client_id,
2994 VISLANDS30_IV_SRCID_HOTPLUG_DETECT_A, &adev->hpd_irq);
2996 DRM_ERROR("Failed to add hpd irq id!\n");
3000 register_hpd_handlers(adev);
3005 #if defined(CONFIG_DRM_AMD_DC_DCN)
3006 /* Register IRQ sources and initialize IRQ callbacks */
3007 static int dcn10_register_irq_handlers(struct amdgpu_device *adev)
3009 struct dc *dc = adev->dm.dc;
3010 struct common_irq_params *c_irq_params;
3011 struct dc_interrupt_params int_params = {0};
3014 #if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
3015 static const unsigned int vrtl_int_srcid[] = {
3016 DCN_1_0__SRCID__OTG1_VERTICAL_INTERRUPT0_CONTROL,
3017 DCN_1_0__SRCID__OTG2_VERTICAL_INTERRUPT0_CONTROL,
3018 DCN_1_0__SRCID__OTG3_VERTICAL_INTERRUPT0_CONTROL,
3019 DCN_1_0__SRCID__OTG4_VERTICAL_INTERRUPT0_CONTROL,
3020 DCN_1_0__SRCID__OTG5_VERTICAL_INTERRUPT0_CONTROL,
3021 DCN_1_0__SRCID__OTG6_VERTICAL_INTERRUPT0_CONTROL
3025 int_params.requested_polarity = INTERRUPT_POLARITY_DEFAULT;
3026 int_params.current_polarity = INTERRUPT_POLARITY_DEFAULT;
3029 * Actions of amdgpu_irq_add_id():
3030 * 1. Register a set() function with base driver.
3031 * Base driver will call set() function to enable/disable an
3032 * interrupt in DC hardware.
3033 * 2. Register amdgpu_dm_irq_handler().
3034 * Base driver will call amdgpu_dm_irq_handler() for ALL interrupts
3035 * coming from DC hardware.
3036 * amdgpu_dm_irq_handler() will re-direct the interrupt to DC
3037 * for acknowledging and handling.
3040 /* Use VSTARTUP interrupt */
3041 for (i = DCN_1_0__SRCID__DC_D1_OTG_VSTARTUP;
3042 i <= DCN_1_0__SRCID__DC_D1_OTG_VSTARTUP + adev->mode_info.num_crtc - 1;
3044 r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_DCE, i, &adev->crtc_irq);
3047 DRM_ERROR("Failed to add crtc irq id!\n");
3051 int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
3052 int_params.irq_source =
3053 dc_interrupt_to_irq_source(dc, i, 0);
3055 c_irq_params = &adev->dm.vblank_params[int_params.irq_source - DC_IRQ_SOURCE_VBLANK1];
3057 c_irq_params->adev = adev;
3058 c_irq_params->irq_src = int_params.irq_source;
3060 amdgpu_dm_irq_register_interrupt(
3061 adev, &int_params, dm_crtc_high_irq, c_irq_params);
3064 /* Use otg vertical line interrupt */
3065 #if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
3066 for (i = 0; i <= adev->mode_info.num_crtc - 1; i++) {
3067 r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_DCE,
3068 vrtl_int_srcid[i], &adev->vline0_irq);
3071 DRM_ERROR("Failed to add vline0 irq id!\n");
3075 int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
3076 int_params.irq_source =
3077 dc_interrupt_to_irq_source(dc, vrtl_int_srcid[i], 0);
3079 if (int_params.irq_source == DC_IRQ_SOURCE_INVALID) {
3080 DRM_ERROR("Failed to register vline0 irq %d!\n", vrtl_int_srcid[i]);
3084 c_irq_params = &adev->dm.vline0_params[int_params.irq_source
3085 - DC_IRQ_SOURCE_DC1_VLINE0];
3087 c_irq_params->adev = adev;
3088 c_irq_params->irq_src = int_params.irq_source;
3090 amdgpu_dm_irq_register_interrupt(adev, &int_params,
3091 dm_dcn_vertical_interrupt0_high_irq, c_irq_params);
3095 /* Use VUPDATE_NO_LOCK interrupt on DCN, which seems to correspond to
3096 * the regular VUPDATE interrupt on DCE. We want DC_IRQ_SOURCE_VUPDATEx
3097 * to trigger at the end of each vblank, regardless of the state of the lock,
3098 * matching DCE behaviour.
3100 for (i = DCN_1_0__SRCID__OTG0_IHC_V_UPDATE_NO_LOCK_INTERRUPT;
3101 i <= DCN_1_0__SRCID__OTG0_IHC_V_UPDATE_NO_LOCK_INTERRUPT + adev->mode_info.num_crtc - 1;
3103 r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_DCE, i, &adev->vupdate_irq);
3106 DRM_ERROR("Failed to add vupdate irq id!\n");
3110 int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
3111 int_params.irq_source =
3112 dc_interrupt_to_irq_source(dc, i, 0);
3114 c_irq_params = &adev->dm.vupdate_params[int_params.irq_source - DC_IRQ_SOURCE_VUPDATE1];
3116 c_irq_params->adev = adev;
3117 c_irq_params->irq_src = int_params.irq_source;
3119 amdgpu_dm_irq_register_interrupt(adev, &int_params,
3120 dm_vupdate_high_irq, c_irq_params);
3123 /* Use GRPH_PFLIP interrupt */
3124 for (i = DCN_1_0__SRCID__HUBP0_FLIP_INTERRUPT;
3125 i <= DCN_1_0__SRCID__HUBP0_FLIP_INTERRUPT + adev->mode_info.num_crtc - 1;
3127 r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_DCE, i, &adev->pageflip_irq);
3129 DRM_ERROR("Failed to add page flip irq id!\n");
3133 int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
3134 int_params.irq_source =
3135 dc_interrupt_to_irq_source(dc, i, 0);
3137 c_irq_params = &adev->dm.pflip_params[int_params.irq_source - DC_IRQ_SOURCE_PFLIP_FIRST];
3139 c_irq_params->adev = adev;
3140 c_irq_params->irq_src = int_params.irq_source;
3142 amdgpu_dm_irq_register_interrupt(adev, &int_params,
3143 dm_pflip_high_irq, c_irq_params);
3147 if (dc->ctx->dmub_srv) {
3148 i = DCN_1_0__SRCID__DMCUB_OUTBOX_HIGH_PRIORITY_READY_INT;
3149 r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_DCE, i, &adev->dmub_trace_irq);
3152 DRM_ERROR("Failed to add dmub trace irq id!\n");
3156 int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
3157 int_params.irq_source =
3158 dc_interrupt_to_irq_source(dc, i, 0);
3160 c_irq_params = &adev->dm.dmub_trace_params[0];
3162 c_irq_params->adev = adev;
3163 c_irq_params->irq_src = int_params.irq_source;
3165 amdgpu_dm_irq_register_interrupt(adev, &int_params,
3166 dm_dmub_trace_high_irq, c_irq_params);
3170 r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_DCE, DCN_1_0__SRCID__DC_HPD1_INT,
3173 DRM_ERROR("Failed to add hpd irq id!\n");
3177 register_hpd_handlers(adev);
3184 * Acquires the lock for the atomic state object and returns
3185 * the new atomic state.
3187 * This should only be called during atomic check.
3189 static int dm_atomic_get_state(struct drm_atomic_state *state,
3190 struct dm_atomic_state **dm_state)
3192 struct drm_device *dev = state->dev;
3193 struct amdgpu_device *adev = drm_to_adev(dev);
3194 struct amdgpu_display_manager *dm = &adev->dm;
3195 struct drm_private_state *priv_state;
3200 priv_state = drm_atomic_get_private_obj_state(state, &dm->atomic_obj);
3201 if (IS_ERR(priv_state))
3202 return PTR_ERR(priv_state);
3204 *dm_state = to_dm_atomic_state(priv_state);
3209 static struct dm_atomic_state *
3210 dm_atomic_get_new_state(struct drm_atomic_state *state)
3212 struct drm_device *dev = state->dev;
3213 struct amdgpu_device *adev = drm_to_adev(dev);
3214 struct amdgpu_display_manager *dm = &adev->dm;
3215 struct drm_private_obj *obj;
3216 struct drm_private_state *new_obj_state;
3219 for_each_new_private_obj_in_state(state, obj, new_obj_state, i) {
3220 if (obj->funcs == dm->atomic_obj.funcs)
3221 return to_dm_atomic_state(new_obj_state);
3227 static struct drm_private_state *
3228 dm_atomic_duplicate_state(struct drm_private_obj *obj)
3230 struct dm_atomic_state *old_state, *new_state;
3232 new_state = kzalloc(sizeof(*new_state), GFP_KERNEL);
3236 __drm_atomic_helper_private_obj_duplicate_state(obj, &new_state->base);
3238 old_state = to_dm_atomic_state(obj->state);
3240 if (old_state && old_state->context)
3241 new_state->context = dc_copy_state(old_state->context);
3243 if (!new_state->context) {
3248 return &new_state->base;
3251 static void dm_atomic_destroy_state(struct drm_private_obj *obj,
3252 struct drm_private_state *state)
3254 struct dm_atomic_state *dm_state = to_dm_atomic_state(state);
3256 if (dm_state && dm_state->context)
3257 dc_release_state(dm_state->context);
3262 static struct drm_private_state_funcs dm_atomic_state_funcs = {
3263 .atomic_duplicate_state = dm_atomic_duplicate_state,
3264 .atomic_destroy_state = dm_atomic_destroy_state,
3267 static int amdgpu_dm_mode_config_init(struct amdgpu_device *adev)
3269 struct dm_atomic_state *state;
3272 adev->mode_info.mode_config_initialized = true;
3274 adev_to_drm(adev)->mode_config.funcs = (void *)&amdgpu_dm_mode_funcs;
3275 adev_to_drm(adev)->mode_config.helper_private = &amdgpu_dm_mode_config_helperfuncs;
3277 adev_to_drm(adev)->mode_config.max_width = 16384;
3278 adev_to_drm(adev)->mode_config.max_height = 16384;
3280 adev_to_drm(adev)->mode_config.preferred_depth = 24;
3281 adev_to_drm(adev)->mode_config.prefer_shadow = 1;
3282 /* indicates support for immediate flip */
3283 adev_to_drm(adev)->mode_config.async_page_flip = true;
3285 adev_to_drm(adev)->mode_config.fb_base = adev->gmc.aper_base;
3287 state = kzalloc(sizeof(*state), GFP_KERNEL);
3291 state->context = dc_create_state(adev->dm.dc);
3292 if (!state->context) {
3297 dc_resource_state_copy_construct_current(adev->dm.dc, state->context);
3299 drm_atomic_private_obj_init(adev_to_drm(adev),
3300 &adev->dm.atomic_obj,
3302 &dm_atomic_state_funcs);
3304 r = amdgpu_display_modeset_create_props(adev);
3306 dc_release_state(state->context);
3311 r = amdgpu_dm_audio_init(adev);
3313 dc_release_state(state->context);
3321 #define AMDGPU_DM_DEFAULT_MIN_BACKLIGHT 12
3322 #define AMDGPU_DM_DEFAULT_MAX_BACKLIGHT 255
3323 #define AUX_BL_DEFAULT_TRANSITION_TIME_MS 50
3325 #if defined(CONFIG_BACKLIGHT_CLASS_DEVICE) ||\
3326 defined(CONFIG_BACKLIGHT_CLASS_DEVICE_MODULE)
3328 static void amdgpu_dm_update_backlight_caps(struct amdgpu_display_manager *dm)
3330 #if defined(CONFIG_ACPI)
3331 struct amdgpu_dm_backlight_caps caps;
3333 memset(&caps, 0, sizeof(caps));
3335 if (dm->backlight_caps.caps_valid)
3338 amdgpu_acpi_get_backlight_caps(dm->adev, &caps);
3339 if (caps.caps_valid) {
3340 dm->backlight_caps.caps_valid = true;
3341 if (caps.aux_support)
3343 dm->backlight_caps.min_input_signal = caps.min_input_signal;
3344 dm->backlight_caps.max_input_signal = caps.max_input_signal;
3346 dm->backlight_caps.min_input_signal =
3347 AMDGPU_DM_DEFAULT_MIN_BACKLIGHT;
3348 dm->backlight_caps.max_input_signal =
3349 AMDGPU_DM_DEFAULT_MAX_BACKLIGHT;
3352 if (dm->backlight_caps.aux_support)
3355 dm->backlight_caps.min_input_signal = AMDGPU_DM_DEFAULT_MIN_BACKLIGHT;
3356 dm->backlight_caps.max_input_signal = AMDGPU_DM_DEFAULT_MAX_BACKLIGHT;
3360 static int get_brightness_range(const struct amdgpu_dm_backlight_caps *caps,
3361 unsigned *min, unsigned *max)
3366 if (caps->aux_support) {
3367 // Firmware limits are in nits, DC API wants millinits.
3368 *max = 1000 * caps->aux_max_input_signal;
3369 *min = 1000 * caps->aux_min_input_signal;
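// e.g. an AUX firmware limit of 500 nits becomes 500000 millinits here.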
3371 // Firmware limits are 8-bit, PWM control is 16-bit.
3372 *max = 0x101 * caps->max_input_signal;
3373 *min = 0x101 * caps->min_input_signal;
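// 0x101 * 0xFF = 0xFFFF, so an 8-bit limit of 255 expands to the full 16-bit range.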
3378 static u32 convert_brightness_from_user(const struct amdgpu_dm_backlight_caps *caps,
3379 uint32_t brightness)
3383 if (!get_brightness_range(caps, &min, &max))
3386 // Rescale 0..255 to min..max
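// e.g. with the default PWM limits above (min = 0x101 * 12 = 3084,
// max = 0x101 * 255 = 65535), brightness 0 maps to 3084 and brightness 255 to 65535.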
3387 return min + DIV_ROUND_CLOSEST((max - min) * brightness,
3388 AMDGPU_MAX_BL_LEVEL);
3391 static u32 convert_brightness_to_user(const struct amdgpu_dm_backlight_caps *caps,
3392 uint32_t brightness)
3396 if (!get_brightness_range(caps, &min, &max))
3399 if (brightness < min)
3401 // Rescale min..max to 0..255
3402 return DIV_ROUND_CLOSEST(AMDGPU_MAX_BL_LEVEL * (brightness - min),
3406 static int amdgpu_dm_backlight_update_status(struct backlight_device *bd)
3408 struct amdgpu_display_manager *dm = bl_get_data(bd);
3409 struct amdgpu_dm_backlight_caps caps;
3410 struct dc_link *link = NULL;
3414 amdgpu_dm_update_backlight_caps(dm);
3415 caps = dm->backlight_caps;
3417 link = (struct dc_link *)dm->backlight_link;
3419 brightness = convert_brightness_from_user(&caps, bd->props.brightness);
3420 // Change brightness based on AUX property
3421 if (caps.aux_support)
3422 rc = dc_link_set_backlight_level_nits(link, true, brightness,
3423 AUX_BL_DEFAULT_TRANSITION_TIME_MS);
3425 rc = dc_link_set_backlight_level(dm->backlight_link, brightness, 0);
3430 static int amdgpu_dm_backlight_get_brightness(struct backlight_device *bd)
3432 struct amdgpu_display_manager *dm = bl_get_data(bd);
3433 struct amdgpu_dm_backlight_caps caps;
3435 amdgpu_dm_update_backlight_caps(dm);
3436 caps = dm->backlight_caps;
3438 if (caps.aux_support) {
3439 struct dc_link *link = (struct dc_link *)dm->backlight_link;
3443 rc = dc_link_get_backlight_level_nits(link, &avg, &peak);
3445 return bd->props.brightness;
3446 return convert_brightness_to_user(&caps, avg);
3448 int ret = dc_link_get_backlight_level(dm->backlight_link);
3450 if (ret == DC_ERROR_UNEXPECTED)
3451 return bd->props.brightness;
3452 return convert_brightness_to_user(&caps, ret);
3456 static const struct backlight_ops amdgpu_dm_backlight_ops = {
3457 .options = BL_CORE_SUSPENDRESUME,
3458 .get_brightness = amdgpu_dm_backlight_get_brightness,
3459 .update_status = amdgpu_dm_backlight_update_status,
3463 amdgpu_dm_register_backlight_device(struct amdgpu_display_manager *dm)
3466 struct backlight_properties props = { 0 };
3468 amdgpu_dm_update_backlight_caps(dm);
3470 props.max_brightness = AMDGPU_MAX_BL_LEVEL;
3471 props.brightness = AMDGPU_MAX_BL_LEVEL;
3472 props.type = BACKLIGHT_RAW;
3474 snprintf(bl_name, sizeof(bl_name), "amdgpu_bl%d",
3475 adev_to_drm(dm->adev)->primary->index);
3477 dm->backlight_dev = backlight_device_register(bl_name,
3478 adev_to_drm(dm->adev)->dev,
3480 &amdgpu_dm_backlight_ops,
3483 if (IS_ERR(dm->backlight_dev))
3484 DRM_ERROR("DM: Backlight registration failed!\n");
3486 DRM_DEBUG_DRIVER("DM: Registered Backlight device: %s\n", bl_name);
3491 static int initialize_plane(struct amdgpu_display_manager *dm,
3492 struct amdgpu_mode_info *mode_info, int plane_id,
3493 enum drm_plane_type plane_type,
3494 const struct dc_plane_cap *plane_cap)
3496 struct drm_plane *plane;
3497 unsigned long possible_crtcs;
3500 plane = kzalloc(sizeof(struct drm_plane), GFP_KERNEL);
3502 DRM_ERROR("KMS: Failed to allocate plane\n");
3505 plane->type = plane_type;
3508 * HACK: IGT tests expect that the primary plane for a CRTC
3509 * can only have one possible CRTC. Only expose support for
3510 * any CRTC on planes that are not going to be used as the primary
3511 * plane for a CRTC - i.e. overlay or underlay planes.
3513 possible_crtcs = 1 << plane_id;
3514 if (plane_id >= dm->dc->caps.max_streams)
3515 possible_crtcs = 0xff;
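/*
 * e.g. plane_id 0 yields possible_crtcs = 0x1 (bound to CRTC 0 only), while
 * overlay planes (plane_id >= max_streams) get 0xff and can go on any CRTC.
 */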
3517 ret = amdgpu_dm_plane_init(dm, plane, possible_crtcs, plane_cap);
3520 DRM_ERROR("KMS: Failed to initialize plane\n");
3526 mode_info->planes[plane_id] = plane;
3532 static void register_backlight_device(struct amdgpu_display_manager *dm,
3533 struct dc_link *link)
3535 #if defined(CONFIG_BACKLIGHT_CLASS_DEVICE) ||\
3536 defined(CONFIG_BACKLIGHT_CLASS_DEVICE_MODULE)
3538 if ((link->connector_signal & (SIGNAL_TYPE_EDP | SIGNAL_TYPE_LVDS)) &&
3539 link->type != dc_connection_none) {
3541 * Even if registration fails, we should continue with
3542 * DM initialization because not having backlight control
3543 * is better than a black screen.
3545 amdgpu_dm_register_backlight_device(dm);
3547 if (dm->backlight_dev)
3548 dm->backlight_link = link;
3555 * In this architecture, the association
3556 * connector -> encoder -> crtc
3557 * is not really required. The crtc and connector will hold the
3558 * display_index as an abstraction to use with the DAL component.
3560 * Returns 0 on success.
3562 static int amdgpu_dm_initialize_drm_device(struct amdgpu_device *adev)
3564 struct amdgpu_display_manager *dm = &adev->dm;
3566 struct amdgpu_dm_connector *aconnector = NULL;
3567 struct amdgpu_encoder *aencoder = NULL;
3568 struct amdgpu_mode_info *mode_info = &adev->mode_info;
3570 int32_t primary_planes;
3571 enum dc_connection_type new_connection_type = dc_connection_none;
3572 const struct dc_plane_cap *plane;
3574 dm->display_indexes_num = dm->dc->caps.max_streams;
3575 /* Update the actual number of CRTCs in use */
3576 adev->mode_info.num_crtc = adev->dm.display_indexes_num;
3578 link_cnt = dm->dc->caps.max_links;
3579 if (amdgpu_dm_mode_config_init(dm->adev)) {
3580 DRM_ERROR("DM: Failed to initialize mode config\n");
3584 /* There is one primary plane per CRTC */
3585 primary_planes = dm->dc->caps.max_streams;
3586 ASSERT(primary_planes <= AMDGPU_MAX_PLANES);
3589 * Initialize primary planes, implicit planes for legacy IOCTLs.
3590 * Order is reversed to match iteration order in atomic check.
3592 for (i = (primary_planes - 1); i >= 0; i--) {
3593 plane = &dm->dc->caps.planes[i];
3595 if (initialize_plane(dm, mode_info, i,
3596 DRM_PLANE_TYPE_PRIMARY, plane)) {
3597 DRM_ERROR("KMS: Failed to initialize primary plane\n");
3603 * Initialize overlay planes, index starting after primary planes.
3604 * These planes have a higher DRM index than the primary planes since
3605 * they should be considered as having a higher z-order.
3606 * Order is reversed to match iteration order in atomic check.
3608 * Only support DCN for now, and only expose one so we don't encourage
3609 * userspace to use up all the pipes.
3611 for (i = 0; i < dm->dc->caps.max_planes; ++i) {
3612 struct dc_plane_cap *plane = &dm->dc->caps.planes[i];
3614 if (plane->type != DC_PLANE_TYPE_DCN_UNIVERSAL)
3617 if (!plane->blends_with_above || !plane->blends_with_below)
3620 if (!plane->pixel_format_support.argb8888)
3623 if (initialize_plane(dm, NULL, primary_planes + i,
3624 DRM_PLANE_TYPE_OVERLAY, plane)) {
3625 DRM_ERROR("KMS: Failed to initialize overlay plane\n");
3629 /* Only create one overlay plane. */
3633 for (i = 0; i < dm->dc->caps.max_streams; i++)
3634 if (amdgpu_dm_crtc_init(dm, mode_info->planes[i], i)) {
3635 DRM_ERROR("KMS: Failed to initialize crtc\n");
3639 /* Loop over all connectors on the board */
3640 for (i = 0; i < link_cnt; i++) {
3641 struct dc_link *link = NULL;
3643 if (i > AMDGPU_DM_MAX_DISPLAY_INDEX) {
3645 "KMS: Cannot support more than %d display indexes\n",
3646 AMDGPU_DM_MAX_DISPLAY_INDEX);
3650 aconnector = kzalloc(sizeof(*aconnector), GFP_KERNEL);
3654 aencoder = kzalloc(sizeof(*aencoder), GFP_KERNEL);
3658 if (amdgpu_dm_encoder_init(dm->ddev, aencoder, i)) {
3659 DRM_ERROR("KMS: Failed to initialize encoder\n");
3663 if (amdgpu_dm_connector_init(dm, aconnector, i, aencoder)) {
3664 DRM_ERROR("KMS: Failed to initialize connector\n");
3668 link = dc_get_link_at_index(dm->dc, i);
3670 if (!dc_link_detect_sink(link, &new_connection_type))
3671 DRM_ERROR("KMS: Failed to detect connector\n");
3673 if (aconnector->base.force && new_connection_type == dc_connection_none) {
3674 emulated_link_detect(link);
3675 amdgpu_dm_update_connector_after_detect(aconnector);
3677 } else if (dc_link_detect(link, DETECT_REASON_BOOT)) {
3678 amdgpu_dm_update_connector_after_detect(aconnector);
3679 register_backlight_device(dm, link);
3680 if (amdgpu_dc_feature_mask & DC_PSR_MASK)
3681 amdgpu_dm_set_psr_caps(link);
3687 /* Software is initialized. Now we can register interrupt handlers. */
3688 switch (adev->asic_type) {
3689 #if defined(CONFIG_DRM_AMD_DC_SI)
3694 if (dce60_register_irq_handlers(dm->adev)) {
3695 DRM_ERROR("DM: Failed to initialize IRQ\n");
3709 case CHIP_POLARIS11:
3710 case CHIP_POLARIS10:
3711 case CHIP_POLARIS12:
3716 if (dce110_register_irq_handlers(dm->adev)) {
3717 DRM_ERROR("DM: Failed to initialize IRQ\n");
3721 #if defined(CONFIG_DRM_AMD_DC_DCN)
3727 case CHIP_SIENNA_CICHLID:
3728 case CHIP_NAVY_FLOUNDER:
3729 case CHIP_DIMGREY_CAVEFISH:
3731 if (dcn10_register_irq_handlers(dm->adev)) {
3732 DRM_ERROR("DM: Failed to initialize IRQ\n");
3738 DRM_ERROR("Unsupported ASIC type: 0x%X\n", adev->asic_type);
3750 static void amdgpu_dm_destroy_drm_device(struct amdgpu_display_manager *dm)
3752 drm_mode_config_cleanup(dm->ddev);
3753 drm_atomic_private_obj_fini(&dm->atomic_obj);
3757 /******************************************************************************
3758 * amdgpu_display_funcs functions
3759 *****************************************************************************/
3762 * dm_bandwidth_update - program display watermarks
3764 * @adev: amdgpu_device pointer
3766 * Calculate and program the display watermarks and line buffer allocation.
3768 static void dm_bandwidth_update(struct amdgpu_device *adev)
3770 /* TODO: implement later */
3773 static const struct amdgpu_display_funcs dm_display_funcs = {
3774 .bandwidth_update = dm_bandwidth_update, /* called unconditionally */
3775 .vblank_get_counter = dm_vblank_get_counter,/* called unconditionally */
3776 .backlight_set_level = NULL, /* never called for DC */
3777 .backlight_get_level = NULL, /* never called for DC */
3778 .hpd_sense = NULL,/* called unconditionally */
3779 .hpd_set_polarity = NULL, /* called unconditionally */
3780 .hpd_get_gpio_reg = NULL, /* VBIOS parsing. DAL does it. */
3781 .page_flip_get_scanoutpos =
3782 dm_crtc_get_scanoutpos,/* called unconditionally */
3783 .add_encoder = NULL, /* VBIOS parsing. DAL does it. */
3784 .add_connector = NULL, /* VBIOS parsing. DAL does it. */
3787 #if defined(CONFIG_DEBUG_KERNEL_DC)
3789 static ssize_t s3_debug_store(struct device *device,
3790 struct device_attribute *attr,
3796 struct drm_device *drm_dev = dev_get_drvdata(device);
3797 struct amdgpu_device *adev = drm_to_adev(drm_dev);
3799 ret = kstrtoint(buf, 0, &s3_state);
3804 drm_kms_helper_hotplug_event(adev_to_drm(adev));
3809 return ret == 0 ? count : 0;
3812 DEVICE_ATTR_WO(s3_debug);
3816 static int dm_early_init(void *handle)
3818 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
3820 switch (adev->asic_type) {
3821 #if defined(CONFIG_DRM_AMD_DC_SI)
3825 adev->mode_info.num_crtc = 6;
3826 adev->mode_info.num_hpd = 6;
3827 adev->mode_info.num_dig = 6;
3830 adev->mode_info.num_crtc = 2;
3831 adev->mode_info.num_hpd = 2;
3832 adev->mode_info.num_dig = 2;
3837 adev->mode_info.num_crtc = 6;
3838 adev->mode_info.num_hpd = 6;
3839 adev->mode_info.num_dig = 6;
3842 adev->mode_info.num_crtc = 4;
3843 adev->mode_info.num_hpd = 6;
3844 adev->mode_info.num_dig = 7;
3848 adev->mode_info.num_crtc = 2;
3849 adev->mode_info.num_hpd = 6;
3850 adev->mode_info.num_dig = 6;
3854 adev->mode_info.num_crtc = 6;
3855 adev->mode_info.num_hpd = 6;
3856 adev->mode_info.num_dig = 7;
3859 adev->mode_info.num_crtc = 3;
3860 adev->mode_info.num_hpd = 6;
3861 adev->mode_info.num_dig = 9;
3864 adev->mode_info.num_crtc = 2;
3865 adev->mode_info.num_hpd = 6;
3866 adev->mode_info.num_dig = 9;
3868 case CHIP_POLARIS11:
3869 case CHIP_POLARIS12:
3870 adev->mode_info.num_crtc = 5;
3871 adev->mode_info.num_hpd = 5;
3872 adev->mode_info.num_dig = 5;
3874 case CHIP_POLARIS10:
3876 adev->mode_info.num_crtc = 6;
3877 adev->mode_info.num_hpd = 6;
3878 adev->mode_info.num_dig = 6;
3883 adev->mode_info.num_crtc = 6;
3884 adev->mode_info.num_hpd = 6;
3885 adev->mode_info.num_dig = 6;
3887 #if defined(CONFIG_DRM_AMD_DC_DCN)
3891 adev->mode_info.num_crtc = 4;
3892 adev->mode_info.num_hpd = 4;
3893 adev->mode_info.num_dig = 4;
3897 case CHIP_SIENNA_CICHLID:
3898 case CHIP_NAVY_FLOUNDER:
3899 adev->mode_info.num_crtc = 6;
3900 adev->mode_info.num_hpd = 6;
3901 adev->mode_info.num_dig = 6;
3904 case CHIP_DIMGREY_CAVEFISH:
3905 adev->mode_info.num_crtc = 5;
3906 adev->mode_info.num_hpd = 5;
3907 adev->mode_info.num_dig = 5;
3911 DRM_ERROR("Unsupported ASIC type: 0x%X\n", adev->asic_type);
3915 amdgpu_dm_set_irq_funcs(adev);
3917 if (adev->mode_info.funcs == NULL)
3918 adev->mode_info.funcs = &dm_display_funcs;
3921 * Note: Do NOT change adev->audio_endpt_rreg and
3922 * adev->audio_endpt_wreg because they are initialised in
3923 * amdgpu_device_init()
3925 #if defined(CONFIG_DEBUG_KERNEL_DC)
3927 adev_to_drm(adev)->dev,
3928 &dev_attr_s3_debug);
3934 static bool modeset_required(struct drm_crtc_state *crtc_state,
3935 struct dc_stream_state *new_stream,
3936 struct dc_stream_state *old_stream)
3938 return crtc_state->active && drm_atomic_crtc_needs_modeset(crtc_state);
3941 static bool modereset_required(struct drm_crtc_state *crtc_state)
3943 return !crtc_state->active && drm_atomic_crtc_needs_modeset(crtc_state);
3946 static void amdgpu_dm_encoder_destroy(struct drm_encoder *encoder)
3948 drm_encoder_cleanup(encoder);
3952 static const struct drm_encoder_funcs amdgpu_dm_encoder_funcs = {
3953 .destroy = amdgpu_dm_encoder_destroy,
3957 static void get_min_max_dc_plane_scaling(struct drm_device *dev,
3958 struct drm_framebuffer *fb,
3959 int *min_downscale, int *max_upscale)
3961 struct amdgpu_device *adev = drm_to_adev(dev);
3962 struct dc *dc = adev->dm.dc;
3963 /* Caps for all supported planes are the same on DCE and DCN 1 - 3 */
3964 struct dc_plane_cap *plane_cap = &dc->caps.planes[0];
3966 switch (fb->format->format) {
3967 case DRM_FORMAT_P010:
3968 case DRM_FORMAT_NV12:
3969 case DRM_FORMAT_NV21:
3970 *max_upscale = plane_cap->max_upscale_factor.nv12;
3971 *min_downscale = plane_cap->max_downscale_factor.nv12;
3974 case DRM_FORMAT_XRGB16161616F:
3975 case DRM_FORMAT_ARGB16161616F:
3976 case DRM_FORMAT_XBGR16161616F:
3977 case DRM_FORMAT_ABGR16161616F:
3978 *max_upscale = plane_cap->max_upscale_factor.fp16;
3979 *min_downscale = plane_cap->max_downscale_factor.fp16;
3983 *max_upscale = plane_cap->max_upscale_factor.argb8888;
3984 *min_downscale = plane_cap->max_downscale_factor.argb8888;
3989 * A factor of 1 in the plane_cap means scaling is not allowed, i.e. use a
3990 * scaling factor of 1.0 == 1000 units.
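// For example, a max_upscale of 16000 allows up to 16x upscaling and a
// min_downscale of 250 allows shrinking to 1/4 of the source size; callers
// compare dst_size * 1000 / src_size against these limits.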
3992 if (*max_upscale == 1)
3993 *max_upscale = 1000;
3995 if (*min_downscale == 1)
3996 *min_downscale = 1000;
4000 static int fill_dc_scaling_info(const struct drm_plane_state *state,
4001 struct dc_scaling_info *scaling_info)
4003 int scale_w, scale_h, min_downscale, max_upscale;
4005 memset(scaling_info, 0, sizeof(*scaling_info));
4007 /* Source is in 16.16 fixed point, but we ignore the fractional part for now... */
4008 scaling_info->src_rect.x = state->src_x >> 16;
4009 scaling_info->src_rect.y = state->src_y >> 16;
4012 * For reasons we don't (yet) fully understand, a non-zero
4013 * src_y coordinate into an NV12 buffer can cause a
4014 * system hang. To avoid hangs (and maybe be overly cautious)
4015 * let's reject both non-zero src_x and src_y.
4017 * We currently know of only one use-case to reproduce a
4018 * scenario with non-zero src_x and src_y for NV12, which
4019 * is to gesture the YouTube Android app into full screen
4023 state->fb->format->format == DRM_FORMAT_NV12 &&
4024 (scaling_info->src_rect.x != 0 ||
4025 scaling_info->src_rect.y != 0))
4028 scaling_info->src_rect.width = state->src_w >> 16;
4029 if (scaling_info->src_rect.width == 0)
4032 scaling_info->src_rect.height = state->src_h >> 16;
4033 if (scaling_info->src_rect.height == 0)
4036 scaling_info->dst_rect.x = state->crtc_x;
4037 scaling_info->dst_rect.y = state->crtc_y;
4039 if (state->crtc_w == 0)
4042 scaling_info->dst_rect.width = state->crtc_w;
4044 if (state->crtc_h == 0)
4047 scaling_info->dst_rect.height = state->crtc_h;
4049 /* DRM doesn't specify clipping on destination output. */
4050 scaling_info->clip_rect = scaling_info->dst_rect;
4052 /* Validate scaling per-format with DC plane caps */
4053 if (state->plane && state->plane->dev && state->fb) {
4054 get_min_max_dc_plane_scaling(state->plane->dev, state->fb,
4055 &min_downscale, &max_upscale);
4057 min_downscale = 250;
4058 max_upscale = 16000;
4061 scale_w = scaling_info->dst_rect.width * 1000 /
4062 scaling_info->src_rect.width;
4064 if (scale_w < min_downscale || scale_w > max_upscale)
4067 scale_h = scaling_info->dst_rect.height * 1000 /
4068 scaling_info->src_rect.height;
4070 if (scale_h < min_downscale || scale_h > max_upscale)
4074 * The "scaling_quality" can be ignored for now, quality = 0 has DC
4075 * assume reasonable defaults based on the format.
4082 fill_gfx8_tiling_info_from_flags(union dc_tiling_info *tiling_info,
4083 uint64_t tiling_flags)
4085 /* Fill GFX8 params */
4086 if (AMDGPU_TILING_GET(tiling_flags, ARRAY_MODE) == DC_ARRAY_2D_TILED_THIN1) {
4087 unsigned int bankw, bankh, mtaspect, tile_split, num_banks;
4089 bankw = AMDGPU_TILING_GET(tiling_flags, BANK_WIDTH);
4090 bankh = AMDGPU_TILING_GET(tiling_flags, BANK_HEIGHT);
4091 mtaspect = AMDGPU_TILING_GET(tiling_flags, MACRO_TILE_ASPECT);
4092 tile_split = AMDGPU_TILING_GET(tiling_flags, TILE_SPLIT);
4093 num_banks = AMDGPU_TILING_GET(tiling_flags, NUM_BANKS);
4095 /* XXX fix me for VI */
4096 tiling_info->gfx8.num_banks = num_banks;
4097 tiling_info->gfx8.array_mode =
4098 DC_ARRAY_2D_TILED_THIN1;
4099 tiling_info->gfx8.tile_split = tile_split;
4100 tiling_info->gfx8.bank_width = bankw;
4101 tiling_info->gfx8.bank_height = bankh;
4102 tiling_info->gfx8.tile_aspect = mtaspect;
4103 tiling_info->gfx8.tile_mode =
4104 DC_ADDR_SURF_MICRO_TILING_DISPLAY;
4105 } else if (AMDGPU_TILING_GET(tiling_flags, ARRAY_MODE)
4106 == DC_ARRAY_1D_TILED_THIN1) {
4107 tiling_info->gfx8.array_mode = DC_ARRAY_1D_TILED_THIN1;
4110 tiling_info->gfx8.pipe_config =
4111 AMDGPU_TILING_GET(tiling_flags, PIPE_CONFIG);
4115 fill_gfx9_tiling_info_from_device(const struct amdgpu_device *adev,
4116 union dc_tiling_info *tiling_info)
4118 tiling_info->gfx9.num_pipes =
4119 adev->gfx.config.gb_addr_config_fields.num_pipes;
4120 tiling_info->gfx9.num_banks =
4121 adev->gfx.config.gb_addr_config_fields.num_banks;
4122 tiling_info->gfx9.pipe_interleave =
4123 adev->gfx.config.gb_addr_config_fields.pipe_interleave_size;
4124 tiling_info->gfx9.num_shader_engines =
4125 adev->gfx.config.gb_addr_config_fields.num_se;
4126 tiling_info->gfx9.max_compressed_frags =
4127 adev->gfx.config.gb_addr_config_fields.max_compress_frags;
4128 tiling_info->gfx9.num_rb_per_se =
4129 adev->gfx.config.gb_addr_config_fields.num_rb_per_se;
4130 tiling_info->gfx9.shaderEnable = 1;
4131 if (adev->asic_type == CHIP_SIENNA_CICHLID ||
4132 adev->asic_type == CHIP_NAVY_FLOUNDER ||
4133 adev->asic_type == CHIP_DIMGREY_CAVEFISH ||
4134 adev->asic_type == CHIP_VANGOGH)
4135 tiling_info->gfx9.num_pkrs = adev->gfx.config.gb_addr_config_fields.num_pkrs;
4139 validate_dcc(struct amdgpu_device *adev,
4140 const enum surface_pixel_format format,
4141 const enum dc_rotation_angle rotation,
4142 const union dc_tiling_info *tiling_info,
4143 const struct dc_plane_dcc_param *dcc,
4144 const struct dc_plane_address *address,
4145 const struct plane_size *plane_size)
4147 struct dc *dc = adev->dm.dc;
4148 struct dc_dcc_surface_param input;
4149 struct dc_surface_dcc_cap output;
4151 memset(&input, 0, sizeof(input));
4152 memset(&output, 0, sizeof(output));
4157 if (format >= SURFACE_PIXEL_FORMAT_VIDEO_BEGIN ||
4158 !dc->cap_funcs.get_dcc_compression_cap)
4161 input.format = format;
4162 input.surface_size.width = plane_size->surface_size.width;
4163 input.surface_size.height = plane_size->surface_size.height;
4164 input.swizzle_mode = tiling_info->gfx9.swizzle;
4166 if (rotation == ROTATION_ANGLE_0 || rotation == ROTATION_ANGLE_180)
4167 input.scan = SCAN_DIRECTION_HORIZONTAL;
4168 else if (rotation == ROTATION_ANGLE_90 || rotation == ROTATION_ANGLE_270)
4169 input.scan = SCAN_DIRECTION_VERTICAL;
4171 if (!dc->cap_funcs.get_dcc_compression_cap(dc, &input, &output))
4174 if (!output.capable)
4177 if (dcc->independent_64b_blks == 0 &&
4178 output.grph.rgb.independent_64b_blks != 0)
4185 modifier_has_dcc(uint64_t modifier)
4187 return IS_AMD_FMT_MOD(modifier) && AMD_FMT_MOD_GET(DCC, modifier);
4191 modifier_gfx9_swizzle_mode(uint64_t modifier)
4193 if (modifier == DRM_FORMAT_MOD_LINEAR)
4196 return AMD_FMT_MOD_GET(TILE, modifier);
4199 static const struct drm_format_info *
4200 amd_get_format_info(const struct drm_mode_fb_cmd2 *cmd)
4202 return amdgpu_lookup_format_info(cmd->pixel_format, cmd->modifier[0]);
4206 fill_gfx9_tiling_info_from_modifier(const struct amdgpu_device *adev,
4207 union dc_tiling_info *tiling_info,
4210 unsigned int mod_bank_xor_bits = AMD_FMT_MOD_GET(BANK_XOR_BITS, modifier);
4211 unsigned int mod_pipe_xor_bits = AMD_FMT_MOD_GET(PIPE_XOR_BITS, modifier);
4212 unsigned int pkrs_log2 = AMD_FMT_MOD_GET(PACKERS, modifier);
4213 unsigned int pipes_log2 = min(4u, mod_pipe_xor_bits);
4215 fill_gfx9_tiling_info_from_device(adev, tiling_info);
4217 if (!IS_AMD_FMT_MOD(modifier))
4220 tiling_info->gfx9.num_pipes = 1u << pipes_log2;
4221 tiling_info->gfx9.num_shader_engines = 1u << (mod_pipe_xor_bits - pipes_log2);
4223 if (adev->family >= AMDGPU_FAMILY_NV) {
4224 tiling_info->gfx9.num_pkrs = 1u << pkrs_log2;
4226 tiling_info->gfx9.num_banks = 1u << mod_bank_xor_bits;
4228 /* for DCC we know it isn't rb aligned, so rb_per_se doesn't matter. */
4232 enum dm_micro_swizzle {
4233 MICRO_SWIZZLE_Z = 0,
4234 MICRO_SWIZZLE_S = 1,
4235 MICRO_SWIZZLE_D = 2,
4239 static bool dm_plane_format_mod_supported(struct drm_plane *plane,
4243 struct amdgpu_device *adev = drm_to_adev(plane->dev);
4244 const struct drm_format_info *info = drm_format_info(format);
4246 enum dm_micro_swizzle microtile = modifier_gfx9_swizzle_mode(modifier) & 3;
4252 * We always have to allow this modifier, because core DRM still
4253 * checks LINEAR support if userspace does not provide modifiers.
4255 if (modifier == DRM_FORMAT_MOD_LINEAR)
4259 * The arbitrary tiling support for multiplane formats has not been hooked
4262 if (info->num_planes > 1)
4266 * For D swizzle the canonical modifier depends on the bpp, so check
4269 if (AMD_FMT_MOD_GET(TILE_VERSION, modifier) == AMD_FMT_MOD_TILE_VER_GFX9 &&
4270 adev->family >= AMDGPU_FAMILY_NV) {
4271 if (microtile == MICRO_SWIZZLE_D && info->cpp[0] == 4)
4275 if (adev->family >= AMDGPU_FAMILY_RV && microtile == MICRO_SWIZZLE_D &&
4279 if (modifier_has_dcc(modifier)) {
4280 /* Per radeonsi comments 16/64 bpp are more complicated. */
4281 if (info->cpp[0] != 4)
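/*
 * Append one modifier to a dynamically sized array, doubling the allocation
 * whenever the current capacity is exhausted.
 */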
4289 add_modifier(uint64_t **mods, uint64_t *size, uint64_t *cap, uint64_t mod)
4294 if (*cap - *size < 1) {
4295 uint64_t new_cap = *cap * 2;
4296 uint64_t *new_mods = kmalloc(new_cap * sizeof(uint64_t), GFP_KERNEL);
4304 memcpy(new_mods, *mods, sizeof(uint64_t) * *size);
4310 (*mods)[*size] = mod;
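/*
 * GFX9 (Vega/Raven) modifier list: DCC-capable 64K_S_X swizzles first (with
 * and without constant encoding and retiling), then the plain X-tiled and
 * non-X-tiled variants. The ordering is roughly most- to least-preferred.
 */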
4315 add_gfx9_modifiers(const struct amdgpu_device *adev,
4316 uint64_t **mods, uint64_t *size, uint64_t *capacity)
4318 int pipes = ilog2(adev->gfx.config.gb_addr_config_fields.num_pipes);
4319 int pipe_xor_bits = min(8, pipes +
4320 ilog2(adev->gfx.config.gb_addr_config_fields.num_se));
4321 int bank_xor_bits = min(8 - pipe_xor_bits,
4322 ilog2(adev->gfx.config.gb_addr_config_fields.num_banks));
4323 int rb = ilog2(adev->gfx.config.gb_addr_config_fields.num_se) +
4324 ilog2(adev->gfx.config.gb_addr_config_fields.num_rb_per_se);
4327 if (adev->family == AMDGPU_FAMILY_RV) {
4328 /* Raven2 and later */
4329 bool has_constant_encode = adev->asic_type > CHIP_RAVEN || adev->external_rev_id >= 0x81;
4332 * No _D DCC swizzles yet because we only allow 32bpp, which
4333 * doesn't support _D on DCN
4336 if (has_constant_encode) {
4337 add_modifier(mods, size, capacity, AMD_FMT_MOD |
4338 AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S_X) |
4339 AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9) |
4340 AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
4341 AMD_FMT_MOD_SET(BANK_XOR_BITS, bank_xor_bits) |
4342 AMD_FMT_MOD_SET(DCC, 1) |
4343 AMD_FMT_MOD_SET(DCC_INDEPENDENT_64B, 1) |
4344 AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_64B) |
4345 AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 1));
4348 add_modifier(mods, size, capacity, AMD_FMT_MOD |
4349 AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S_X) |
4350 AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9) |
4351 AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
4352 AMD_FMT_MOD_SET(BANK_XOR_BITS, bank_xor_bits) |
4353 AMD_FMT_MOD_SET(DCC, 1) |
4354 AMD_FMT_MOD_SET(DCC_INDEPENDENT_64B, 1) |
4355 AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_64B) |
4356 AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 0));
4358 if (has_constant_encode) {
4359 add_modifier(mods, size, capacity, AMD_FMT_MOD |
4360 AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S_X) |
4361 AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9) |
4362 AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
4363 AMD_FMT_MOD_SET(BANK_XOR_BITS, bank_xor_bits) |
4364 AMD_FMT_MOD_SET(DCC, 1) |
4365 AMD_FMT_MOD_SET(DCC_RETILE, 1) |
4366 AMD_FMT_MOD_SET(DCC_INDEPENDENT_64B, 1) |
4367 AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_64B) |
4369 AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 1) |
4370 AMD_FMT_MOD_SET(RB, rb) |
4371 AMD_FMT_MOD_SET(PIPE, pipes));
4374 add_modifier(mods, size, capacity, AMD_FMT_MOD |
4375 AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S_X) |
4376 AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9) |
4377 AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
4378 AMD_FMT_MOD_SET(BANK_XOR_BITS, bank_xor_bits) |
4379 AMD_FMT_MOD_SET(DCC, 1) |
4380 AMD_FMT_MOD_SET(DCC_RETILE, 1) |
4381 AMD_FMT_MOD_SET(DCC_INDEPENDENT_64B, 1) |
4382 AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_64B) |
4383 AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 0) |
4384 AMD_FMT_MOD_SET(RB, rb) |
4385 AMD_FMT_MOD_SET(PIPE, pipes));
4389 * Only supported for 64bpp on Raven, will be filtered on format in
4390 * dm_plane_format_mod_supported.
4392 add_modifier(mods, size, capacity, AMD_FMT_MOD |
4393 AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_D_X) |
4394 AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9) |
4395 AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
4396 AMD_FMT_MOD_SET(BANK_XOR_BITS, bank_xor_bits));
4398 if (adev->family == AMDGPU_FAMILY_RV) {
4399 add_modifier(mods, size, capacity, AMD_FMT_MOD |
4400 AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S_X) |
4401 AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9) |
4402 AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
4403 AMD_FMT_MOD_SET(BANK_XOR_BITS, bank_xor_bits));
4407 * Only supported for 64bpp on Raven, will be filtered on format in
4408 * dm_plane_format_mod_supported.
4410 add_modifier(mods, size, capacity, AMD_FMT_MOD |
4411 AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_D) |
4412 AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9));
4414 if (adev->family == AMDGPU_FAMILY_RV) {
4415 add_modifier(mods, size, capacity, AMD_FMT_MOD |
4416 AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S) |
4417 AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9));
4422 add_gfx10_1_modifiers(const struct amdgpu_device *adev,
4423 uint64_t **mods, uint64_t *size, uint64_t *capacity)
4425 int pipe_xor_bits = ilog2(adev->gfx.config.gb_addr_config_fields.num_pipes);
4427 add_modifier(mods, size, capacity, AMD_FMT_MOD |
4428 AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_R_X) |
4429 AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10) |
4430 AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
4431 AMD_FMT_MOD_SET(DCC, 1) |
4432 AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 1) |
4433 AMD_FMT_MOD_SET(DCC_INDEPENDENT_64B, 1) |
4434 AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_64B));
4436 add_modifier(mods, size, capacity, AMD_FMT_MOD |
4437 AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_R_X) |
4438 AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10) |
4439 AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
4440 AMD_FMT_MOD_SET(DCC, 1) |
4441 AMD_FMT_MOD_SET(DCC_RETILE, 1) |
4442 AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 1) |
4443 AMD_FMT_MOD_SET(DCC_INDEPENDENT_64B, 1) |
4444 AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_64B));
4446 add_modifier(mods, size, capacity, AMD_FMT_MOD |
4447 AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_R_X) |
4448 AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10) |
4449 AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits));
4451 add_modifier(mods, size, capacity, AMD_FMT_MOD |
4452 AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S_X) |
4453 AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10) |
4454 AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits));
4457 /* Only supported for 64bpp, will be filtered in dm_plane_format_mod_supported */
4458 add_modifier(mods, size, capacity, AMD_FMT_MOD |
4459 AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_D) |
4460 AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9));
4462 add_modifier(mods, size, capacity, AMD_FMT_MOD |
4463 AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S) |
4464 AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9));
4468 add_gfx10_3_modifiers(const struct amdgpu_device *adev,
4469 uint64_t **mods, uint64_t *size, uint64_t *capacity)
4471 int pipe_xor_bits = ilog2(adev->gfx.config.gb_addr_config_fields.num_pipes);
4472 int pkrs = ilog2(adev->gfx.config.gb_addr_config_fields.num_pkrs);
4474 add_modifier(mods, size, capacity, AMD_FMT_MOD |
4475 AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_R_X) |
4476 AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10_RBPLUS) |
4477 AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
4478 AMD_FMT_MOD_SET(PACKERS, pkrs) |
4479 AMD_FMT_MOD_SET(DCC, 1) |
4480 AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 1) |
4481 AMD_FMT_MOD_SET(DCC_INDEPENDENT_64B, 1) |
4482 AMD_FMT_MOD_SET(DCC_INDEPENDENT_128B, 1) |
4483 AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_128B));
4485 add_modifier(mods, size, capacity, AMD_FMT_MOD |
4486 AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_R_X) |
4487 AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10_RBPLUS) |
4488 AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
4489 AMD_FMT_MOD_SET(PACKERS, pkrs) |
4490 AMD_FMT_MOD_SET(DCC, 1) |
4491 AMD_FMT_MOD_SET(DCC_RETILE, 1) |
4492 AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 1) |
4493 AMD_FMT_MOD_SET(DCC_INDEPENDENT_64B, 1) |
4494 AMD_FMT_MOD_SET(DCC_INDEPENDENT_128B, 1) |
4495 AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_128B));
4497 add_modifier(mods, size, capacity, AMD_FMT_MOD |
4498 AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_R_X) |
4499 AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10_RBPLUS) |
4500 AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
4501 AMD_FMT_MOD_SET(PACKERS, pkrs));
4503 add_modifier(mods, size, capacity, AMD_FMT_MOD |
4504 AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S_X) |
4505 AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10_RBPLUS) |
4506 AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
4507 AMD_FMT_MOD_SET(PACKERS, pkrs));
4509 /* Only supported for 64bpp, will be filtered in dm_plane_format_mod_supported */
4510 add_modifier(mods, size, capacity, AMD_FMT_MOD |
4511 AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_D) |
4512 AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9));
4514 add_modifier(mods, size, capacity, AMD_FMT_MOD |
4515 AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S) |
4516 AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9));
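/*
 * Build the modifier list advertised for a plane. Cursor planes only get
 * LINEAR; otherwise the per-family helpers above fill in the list, which
 * always ends with LINEAR plus the DRM_FORMAT_MOD_INVALID terminator.
 */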
4520 get_plane_modifiers(const struct amdgpu_device *adev, unsigned int plane_type, uint64_t **mods)
4522 uint64_t size = 0, capacity = 128;
4525 /* We have not hooked up any pre-GFX9 modifiers. */
4526 if (adev->family < AMDGPU_FAMILY_AI)
4529 *mods = kmalloc(capacity * sizeof(uint64_t), GFP_KERNEL);
4531 if (plane_type == DRM_PLANE_TYPE_CURSOR) {
4532 add_modifier(mods, &size, &capacity, DRM_FORMAT_MOD_LINEAR);
4533 add_modifier(mods, &size, &capacity, DRM_FORMAT_MOD_INVALID);
4534 return *mods ? 0 : -ENOMEM;
4537 switch (adev->family) {
4538 case AMDGPU_FAMILY_AI:
4539 case AMDGPU_FAMILY_RV:
4540 add_gfx9_modifiers(adev, mods, &size, &capacity);
4542 case AMDGPU_FAMILY_NV:
4543 case AMDGPU_FAMILY_VGH:
4544 if (adev->asic_type >= CHIP_SIENNA_CICHLID)
4545 add_gfx10_3_modifiers(adev, mods, &size, &capacity);
4547 add_gfx10_1_modifiers(adev, mods, &size, &capacity);
4551 add_modifier(mods, &size, &capacity, DRM_FORMAT_MOD_LINEAR);
4553 /* INVALID marks the end of the list. */
4554 add_modifier(mods, &size, &capacity, DRM_FORMAT_MOD_INVALID);
4563 fill_gfx9_plane_attributes_from_modifiers(struct amdgpu_device *adev,
4564 const struct amdgpu_framebuffer *afb,
4565 const enum surface_pixel_format format,
4566 const enum dc_rotation_angle rotation,
4567 const struct plane_size *plane_size,
4568 union dc_tiling_info *tiling_info,
4569 struct dc_plane_dcc_param *dcc,
4570 struct dc_plane_address *address,
4571 const bool force_disable_dcc)
4573 const uint64_t modifier = afb->base.modifier;
4576 fill_gfx9_tiling_info_from_modifier(adev, tiling_info, modifier);
4577 tiling_info->gfx9.swizzle = modifier_gfx9_swizzle_mode(modifier);
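	/*
	 * With a DCC modifier the metadata surface lives in plane 1 of the
	 * framebuffer, so its address and pitch come from offsets[1]/pitches[1].
	 */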
4579 if (modifier_has_dcc(modifier) && !force_disable_dcc) {
4580 uint64_t dcc_address = afb->address + afb->base.offsets[1];
4583 dcc->meta_pitch = afb->base.pitches[1];
4584 dcc->independent_64b_blks = AMD_FMT_MOD_GET(DCC_INDEPENDENT_64B, modifier);
4586 address->grph.meta_addr.low_part = lower_32_bits(dcc_address);
4587 address->grph.meta_addr.high_part = upper_32_bits(dcc_address);
4590 ret = validate_dcc(adev, format, rotation, tiling_info, dcc, address, plane_size);
4598 fill_plane_buffer_attributes(struct amdgpu_device *adev,
4599 const struct amdgpu_framebuffer *afb,
4600 const enum surface_pixel_format format,
4601 const enum dc_rotation_angle rotation,
4602 const uint64_t tiling_flags,
4603 union dc_tiling_info *tiling_info,
4604 struct plane_size *plane_size,
4605 struct dc_plane_dcc_param *dcc,
4606 struct dc_plane_address *address,
4608 bool force_disable_dcc)
4610 const struct drm_framebuffer *fb = &afb->base;
4613 memset(tiling_info, 0, sizeof(*tiling_info));
4614 memset(plane_size, 0, sizeof(*plane_size));
4615 memset(dcc, 0, sizeof(*dcc));
4616 memset(address, 0, sizeof(*address));
4618 address->tmz_surface = tmz_surface;
4620 if (format < SURFACE_PIXEL_FORMAT_VIDEO_BEGIN) {
4621 uint64_t addr = afb->address + fb->offsets[0];
4623 plane_size->surface_size.x = 0;
4624 plane_size->surface_size.y = 0;
4625 plane_size->surface_size.width = fb->width;
4626 plane_size->surface_size.height = fb->height;
4627 plane_size->surface_pitch =
4628 fb->pitches[0] / fb->format->cpp[0];
4630 address->type = PLN_ADDR_TYPE_GRAPHICS;
4631 address->grph.addr.low_part = lower_32_bits(addr);
4632 address->grph.addr.high_part = upper_32_bits(addr);
4633 } else if (format < SURFACE_PIXEL_FORMAT_INVALID) {
4634 uint64_t luma_addr = afb->address + fb->offsets[0];
4635 uint64_t chroma_addr = afb->address + fb->offsets[1];
4637 plane_size->surface_size.x = 0;
4638 plane_size->surface_size.y = 0;
4639 plane_size->surface_size.width = fb->width;
4640 plane_size->surface_size.height = fb->height;
4641 plane_size->surface_pitch =
4642 fb->pitches[0] / fb->format->cpp[0];
4644 plane_size->chroma_size.x = 0;
4645 plane_size->chroma_size.y = 0;
4646 /* TODO: set these based on surface format */
4647 plane_size->chroma_size.width = fb->width / 2;
4648 plane_size->chroma_size.height = fb->height / 2;
4650 plane_size->chroma_pitch =
4651 fb->pitches[1] / fb->format->cpp[1];
4653 address->type = PLN_ADDR_TYPE_VIDEO_PROGRESSIVE;
4654 address->video_progressive.luma_addr.low_part =
4655 lower_32_bits(luma_addr);
4656 address->video_progressive.luma_addr.high_part =
4657 upper_32_bits(luma_addr);
4658 address->video_progressive.chroma_addr.low_part =
4659 lower_32_bits(chroma_addr);
4660 address->video_progressive.chroma_addr.high_part =
4661 upper_32_bits(chroma_addr);
4664 if (adev->family >= AMDGPU_FAMILY_AI) {
4665 ret = fill_gfx9_plane_attributes_from_modifiers(adev, afb, format,
4666 rotation, plane_size,
4673 fill_gfx8_tiling_info_from_flags(tiling_info, tiling_flags);
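/*
 * Derive DC blending settings from the DRM plane state: per-pixel alpha for
 * premultiplied alpha-capable overlay formats, plus a global alpha value
 * scaled down from DRM's 16-bit plane alpha property.
 */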
4680 fill_blending_from_plane_state(const struct drm_plane_state *plane_state,
4681 bool *per_pixel_alpha, bool *global_alpha,
4682 int *global_alpha_value)
4684 *per_pixel_alpha = false;
4685 *global_alpha = false;
4686 *global_alpha_value = 0xff;
4688 if (plane_state->plane->type != DRM_PLANE_TYPE_OVERLAY)
4691 if (plane_state->pixel_blend_mode == DRM_MODE_BLEND_PREMULTI) {
4692 static const uint32_t alpha_formats[] = {
4693 DRM_FORMAT_ARGB8888,
4694 DRM_FORMAT_RGBA8888,
4695 DRM_FORMAT_ABGR8888,
4697 uint32_t format = plane_state->fb->format->format;
4700 for (i = 0; i < ARRAY_SIZE(alpha_formats); ++i) {
4701 if (format == alpha_formats[i]) {
4702 *per_pixel_alpha = true;
4708 if (plane_state->alpha < 0xffff) {
4709 *global_alpha = true;
4710 *global_alpha_value = plane_state->alpha >> 8;
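/*
 * Map the DRM COLOR_ENCODING/COLOR_RANGE plane properties onto DC color
 * spaces; RGB surfaces always use sRGB, while YCbCr surfaces select a
 * 601/709/2020 variant based on encoding and range.
 */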
4715 fill_plane_color_attributes(const struct drm_plane_state *plane_state,
4716 const enum surface_pixel_format format,
4717 enum dc_color_space *color_space)
4721 *color_space = COLOR_SPACE_SRGB;
4723 /* DRM color properties only affect non-RGB formats. */
4724 if (format < SURFACE_PIXEL_FORMAT_VIDEO_BEGIN)
4727 full_range = (plane_state->color_range == DRM_COLOR_YCBCR_FULL_RANGE);
4729 switch (plane_state->color_encoding) {
4730 case DRM_COLOR_YCBCR_BT601:
4732 *color_space = COLOR_SPACE_YCBCR601;
4734 *color_space = COLOR_SPACE_YCBCR601_LIMITED;
4737 case DRM_COLOR_YCBCR_BT709:
4739 *color_space = COLOR_SPACE_YCBCR709;
4741 *color_space = COLOR_SPACE_YCBCR709_LIMITED;
4744 case DRM_COLOR_YCBCR_BT2020:
4746 *color_space = COLOR_SPACE_2020_YCBCR;
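/*
 * Translate a DRM plane state into DC's plane_info: pixel format, rotation,
 * color space, tiling/DCC and blending, plus the surface address used for
 * the flip.
 */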
4759 fill_dc_plane_info_and_addr(struct amdgpu_device *adev,
4760 const struct drm_plane_state *plane_state,
4761 const uint64_t tiling_flags,
4762 struct dc_plane_info *plane_info,
4763 struct dc_plane_address *address,
4765 bool force_disable_dcc)
4767 const struct drm_framebuffer *fb = plane_state->fb;
4768 const struct amdgpu_framebuffer *afb =
4769 to_amdgpu_framebuffer(plane_state->fb);
4772 memset(plane_info, 0, sizeof(*plane_info));
4774 switch (fb->format->format) {
4776 plane_info->format =
4777 SURFACE_PIXEL_FORMAT_GRPH_PALETA_256_COLORS;
4779 case DRM_FORMAT_RGB565:
4780 plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_RGB565;
4782 case DRM_FORMAT_XRGB8888:
4783 case DRM_FORMAT_ARGB8888:
4784 plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ARGB8888;
4786 case DRM_FORMAT_XRGB2101010:
4787 case DRM_FORMAT_ARGB2101010:
4788 plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ARGB2101010;
4790 case DRM_FORMAT_XBGR2101010:
4791 case DRM_FORMAT_ABGR2101010:
4792 plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ABGR2101010;
4794 case DRM_FORMAT_XBGR8888:
4795 case DRM_FORMAT_ABGR8888:
4796 plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ABGR8888;
4798 case DRM_FORMAT_NV21:
4799 plane_info->format = SURFACE_PIXEL_FORMAT_VIDEO_420_YCbCr;
4801 case DRM_FORMAT_NV12:
4802 plane_info->format = SURFACE_PIXEL_FORMAT_VIDEO_420_YCrCb;
4804 case DRM_FORMAT_P010:
4805 plane_info->format = SURFACE_PIXEL_FORMAT_VIDEO_420_10bpc_YCrCb;
4807 case DRM_FORMAT_XRGB16161616F:
4808 case DRM_FORMAT_ARGB16161616F:
4809 plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ARGB16161616F;
4811 case DRM_FORMAT_XBGR16161616F:
4812 case DRM_FORMAT_ABGR16161616F:
4813 plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ABGR16161616F;
4817 "Unsupported screen format %p4cc\n",
4818 &fb->format->format);
4822 switch (plane_state->rotation & DRM_MODE_ROTATE_MASK) {
4823 case DRM_MODE_ROTATE_0:
4824 plane_info->rotation = ROTATION_ANGLE_0;
4826 case DRM_MODE_ROTATE_90:
4827 plane_info->rotation = ROTATION_ANGLE_90;
4829 case DRM_MODE_ROTATE_180:
4830 plane_info->rotation = ROTATION_ANGLE_180;
4832 case DRM_MODE_ROTATE_270:
4833 plane_info->rotation = ROTATION_ANGLE_270;
4836 plane_info->rotation = ROTATION_ANGLE_0;
4840 plane_info->visible = true;
4841 plane_info->stereo_format = PLANE_STEREO_FORMAT_NONE;
4843 plane_info->layer_index = 0;
4845 ret = fill_plane_color_attributes(plane_state, plane_info->format,
4846 &plane_info->color_space);
4850 ret = fill_plane_buffer_attributes(adev, afb, plane_info->format,
4851 plane_info->rotation, tiling_flags,
4852 &plane_info->tiling_info,
4853 &plane_info->plane_size,
4854 &plane_info->dcc, address, tmz_surface,
4859 fill_blending_from_plane_state(
4860 plane_state, &plane_info->per_pixel_alpha,
4861 &plane_info->global_alpha, &plane_info->global_alpha_value);
4866 static int fill_dc_plane_attributes(struct amdgpu_device *adev,
4867 struct dc_plane_state *dc_plane_state,
4868 struct drm_plane_state *plane_state,
4869 struct drm_crtc_state *crtc_state)
4871 struct dm_crtc_state *dm_crtc_state = to_dm_crtc_state(crtc_state);
4872 struct amdgpu_framebuffer *afb = (struct amdgpu_framebuffer *)plane_state->fb;
4873 struct dc_scaling_info scaling_info;
4874 struct dc_plane_info plane_info;
4876 bool force_disable_dcc = false;
4878 ret = fill_dc_scaling_info(plane_state, &scaling_info);
4882 dc_plane_state->src_rect = scaling_info.src_rect;
4883 dc_plane_state->dst_rect = scaling_info.dst_rect;
4884 dc_plane_state->clip_rect = scaling_info.clip_rect;
4885 dc_plane_state->scaling_quality = scaling_info.scaling_quality;
4887 force_disable_dcc = adev->asic_type == CHIP_RAVEN && adev->in_suspend;
4888 ret = fill_dc_plane_info_and_addr(adev, plane_state,
4891 &dc_plane_state->address,
4897 dc_plane_state->format = plane_info.format;
4898 dc_plane_state->color_space = plane_info.color_space;
4900 dc_plane_state->plane_size = plane_info.plane_size;
4901 dc_plane_state->rotation = plane_info.rotation;
4902 dc_plane_state->horizontal_mirror = plane_info.horizontal_mirror;
4903 dc_plane_state->stereo_format = plane_info.stereo_format;
4904 dc_plane_state->tiling_info = plane_info.tiling_info;
4905 dc_plane_state->visible = plane_info.visible;
4906 dc_plane_state->per_pixel_alpha = plane_info.per_pixel_alpha;
4907 dc_plane_state->global_alpha = plane_info.global_alpha;
4908 dc_plane_state->global_alpha_value = plane_info.global_alpha_value;
4909 dc_plane_state->dcc = plane_info.dcc;
4910 dc_plane_state->layer_index = plane_info.layer_index; /* always 0 for now */
4911 dc_plane_state->flip_int_enabled = true;
4914 * Always set input transfer function, since plane state is refreshed
4917 ret = amdgpu_dm_update_plane_color_mgmt(dm_crtc_state, dc_plane_state);
4924 static void update_stream_scaling_settings(const struct drm_display_mode *mode,
4925 const struct dm_connector_state *dm_state,
4926 struct dc_stream_state *stream)
4928 enum amdgpu_rmx_type rmx_type;
4930 struct rect src = { 0 }; /* viewport in composition space */
4931 struct rect dst = { 0 }; /* stream addressable area */
4933 /* no mode. nothing to be done */
4937 /* Full screen scaling by default */
4938 src.width = mode->hdisplay;
4939 src.height = mode->vdisplay;
4940 dst.width = stream->timing.h_addressable;
4941 dst.height = stream->timing.v_addressable;
4944 rmx_type = dm_state->scaling;
4945 if (rmx_type == RMX_ASPECT || rmx_type == RMX_OFF) {
4946 if (src.width * dst.height <
4947 src.height * dst.width) {
4948 /* height needs less upscaling/more downscaling */
4949 dst.width = src.width *
4950 dst.height / src.height;
4952 /* width needs less upscaling/more downscaling */
4953 dst.height = src.height *
4954 dst.width / src.width;
4956 } else if (rmx_type == RMX_CENTER) {
4960 dst.x = (stream->timing.h_addressable - dst.width) / 2;
4961 dst.y = (stream->timing.v_addressable - dst.height) / 2;
4963 if (dm_state->underscan_enable) {
4964 dst.x += dm_state->underscan_hborder / 2;
4965 dst.y += dm_state->underscan_vborder / 2;
4966 dst.width -= dm_state->underscan_hborder;
4967 dst.height -= dm_state->underscan_vborder;
4974 DRM_DEBUG_KMS("Destination Rectangle x:%d y:%d width:%d height:%d\n",
4975 dst.x, dst.y, dst.width, dst.height);
4979 static enum dc_color_depth
4980 convert_color_depth_from_display_info(const struct drm_connector *connector,
4981 bool is_y420, int requested_bpc)
4988 /* Cap display bpc based on HDMI 2.0 HF-VSDB */
4989 if (connector->display_info.hdmi.y420_dc_modes & DRM_EDID_YCBCR420_DC_48)
4991 else if (connector->display_info.hdmi.y420_dc_modes & DRM_EDID_YCBCR420_DC_36)
4993 else if (connector->display_info.hdmi.y420_dc_modes & DRM_EDID_YCBCR420_DC_30)
4996 bpc = (uint8_t)connector->display_info.bpc;
4997 /* Assume 8 bpc by default if no bpc is specified. */
4998 bpc = bpc ? bpc : 8;
5001 if (requested_bpc > 0) {
5003 * Cap display bpc based on the user requested value.
5005 * The value for state->max_bpc may not be correctly updated
5006 * depending on when the connector gets added to the state
5007 * or if this was called outside of atomic check, so it
5008 * can't be used directly.
5010 bpc = min_t(u8, bpc, requested_bpc);
5012 /* Round down to the nearest even number. */
5013 bpc = bpc - (bpc & 1);
5019 * Temporary workaround: DRM doesn't parse color depth for
5020 * EDID revisions before 1.4.
5021 * TODO: Fix edid parsing
5023 return COLOR_DEPTH_888;
5025 return COLOR_DEPTH_666;
5027 return COLOR_DEPTH_888;
5029 return COLOR_DEPTH_101010;
5031 return COLOR_DEPTH_121212;
5033 return COLOR_DEPTH_141414;
5035 return COLOR_DEPTH_161616;
5037 return COLOR_DEPTH_UNDEFINED;
5041 static enum dc_aspect_ratio
5042 get_aspect_ratio(const struct drm_display_mode *mode_in)
5044 /* 1-1 mapping, since both enums follow the HDMI spec. */
5045 return (enum dc_aspect_ratio) mode_in->picture_aspect_ratio;
5048 static enum dc_color_space
5049 get_output_color_space(const struct dc_crtc_timing *dc_crtc_timing)
5051 enum dc_color_space color_space = COLOR_SPACE_SRGB;
5053 switch (dc_crtc_timing->pixel_encoding) {
5054 case PIXEL_ENCODING_YCBCR422:
5055 case PIXEL_ENCODING_YCBCR444:
5056 case PIXEL_ENCODING_YCBCR420:
5059 * 27030 kHz is the separation point between HDTV and SDTV
5060 * according to the HDMI spec, so we use YCbCr709 and YCbCr601
5063 if (dc_crtc_timing->pix_clk_100hz > 270300) {
5064 if (dc_crtc_timing->flags.Y_ONLY)
5066 COLOR_SPACE_YCBCR709_LIMITED;
5068 color_space = COLOR_SPACE_YCBCR709;
5070 if (dc_crtc_timing->flags.Y_ONLY)
5072 COLOR_SPACE_YCBCR601_LIMITED;
5074 color_space = COLOR_SPACE_YCBCR601;
5079 case PIXEL_ENCODING_RGB:
5080 color_space = COLOR_SPACE_SRGB;
5091 static bool adjust_colour_depth_from_display_info(
5092 struct dc_crtc_timing *timing_out,
5093 const struct drm_display_info *info)
5095 enum dc_color_depth depth = timing_out->display_color_depth;
5098 normalized_clk = timing_out->pix_clk_100hz / 10;
5099 /* YCbCr 4:2:0 requires additional adjustment of 1/2 */
5100 if (timing_out->pixel_encoding == PIXEL_ENCODING_YCBCR420)
5101 normalized_clk /= 2;
5102 /* Adjust the pixel clock according to the HDMI spec for the given colour depth */
5104 case COLOR_DEPTH_888:
5106 case COLOR_DEPTH_101010:
5107 normalized_clk = (normalized_clk * 30) / 24;
5109 case COLOR_DEPTH_121212:
5110 normalized_clk = (normalized_clk * 36) / 24;
5112 case COLOR_DEPTH_161616:
5113 normalized_clk = (normalized_clk * 48) / 24;
5116 /* The above depths are the only ones valid for HDMI. */
5119 if (normalized_clk <= info->max_tmds_clock) {
5120 timing_out->display_color_depth = depth;
5123 } while (--depth > COLOR_DEPTH_666);
5127 static void fill_stream_properties_from_drm_display_mode(
5128 struct dc_stream_state *stream,
5129 const struct drm_display_mode *mode_in,
5130 const struct drm_connector *connector,
5131 const struct drm_connector_state *connector_state,
5132 const struct dc_stream_state *old_stream,
5135 struct dc_crtc_timing *timing_out = &stream->timing;
5136 const struct drm_display_info *info = &connector->display_info;
5137 struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
5138 struct hdmi_vendor_infoframe hv_frame;
5139 struct hdmi_avi_infoframe avi_frame;
5141 memset(&hv_frame, 0, sizeof(hv_frame));
5142 memset(&avi_frame, 0, sizeof(avi_frame));
5144 timing_out->h_border_left = 0;
5145 timing_out->h_border_right = 0;
5146 timing_out->v_border_top = 0;
5147 timing_out->v_border_bottom = 0;
5148 /* TODO: un-hardcode */
5149 if (drm_mode_is_420_only(info, mode_in)
5150 && stream->signal == SIGNAL_TYPE_HDMI_TYPE_A)
5151 timing_out->pixel_encoding = PIXEL_ENCODING_YCBCR420;
5152 else if (drm_mode_is_420_also(info, mode_in)
5153 && aconnector->force_yuv420_output)
5154 timing_out->pixel_encoding = PIXEL_ENCODING_YCBCR420;
5155 else if ((connector->display_info.color_formats & DRM_COLOR_FORMAT_YCRCB444)
5156 && stream->signal == SIGNAL_TYPE_HDMI_TYPE_A)
5157 timing_out->pixel_encoding = PIXEL_ENCODING_YCBCR444;
5159 timing_out->pixel_encoding = PIXEL_ENCODING_RGB;
5161 timing_out->timing_3d_format = TIMING_3D_FORMAT_NONE;
5162 timing_out->display_color_depth = convert_color_depth_from_display_info(
5164 (timing_out->pixel_encoding == PIXEL_ENCODING_YCBCR420),
5166 timing_out->scan_type = SCANNING_TYPE_NODATA;
5167 timing_out->hdmi_vic = 0;
5170 timing_out->vic = old_stream->timing.vic;
5171 timing_out->flags.HSYNC_POSITIVE_POLARITY = old_stream->timing.flags.HSYNC_POSITIVE_POLARITY;
5172 timing_out->flags.VSYNC_POSITIVE_POLARITY = old_stream->timing.flags.VSYNC_POSITIVE_POLARITY;
5174 timing_out->vic = drm_match_cea_mode(mode_in);
5175 if (mode_in->flags & DRM_MODE_FLAG_PHSYNC)
5176 timing_out->flags.HSYNC_POSITIVE_POLARITY = 1;
5177 if (mode_in->flags & DRM_MODE_FLAG_PVSYNC)
5178 timing_out->flags.VSYNC_POSITIVE_POLARITY = 1;
5181 if (stream->signal == SIGNAL_TYPE_HDMI_TYPE_A) {
5182 drm_hdmi_avi_infoframe_from_display_mode(&avi_frame, (struct drm_connector *)connector, mode_in);
5183 timing_out->vic = avi_frame.video_code;
5184 drm_hdmi_vendor_infoframe_from_display_mode(&hv_frame, (struct drm_connector *)connector, mode_in);
5185 timing_out->hdmi_vic = hv_frame.vic;
5188 if (is_freesync_video_mode(mode_in, aconnector)) {
5189 timing_out->h_addressable = mode_in->hdisplay;
5190 timing_out->h_total = mode_in->htotal;
5191 timing_out->h_sync_width = mode_in->hsync_end - mode_in->hsync_start;
5192 timing_out->h_front_porch = mode_in->hsync_start - mode_in->hdisplay;
5193 timing_out->v_total = mode_in->vtotal;
5194 timing_out->v_addressable = mode_in->vdisplay;
5195 timing_out->v_front_porch = mode_in->vsync_start - mode_in->vdisplay;
5196 timing_out->v_sync_width = mode_in->vsync_end - mode_in->vsync_start;
5197 timing_out->pix_clk_100hz = mode_in->clock * 10;
5199 timing_out->h_addressable = mode_in->crtc_hdisplay;
5200 timing_out->h_total = mode_in->crtc_htotal;
5201 timing_out->h_sync_width = mode_in->crtc_hsync_end - mode_in->crtc_hsync_start;
5202 timing_out->h_front_porch = mode_in->crtc_hsync_start - mode_in->crtc_hdisplay;
5203 timing_out->v_total = mode_in->crtc_vtotal;
5204 timing_out->v_addressable = mode_in->crtc_vdisplay;
5205 timing_out->v_front_porch = mode_in->crtc_vsync_start - mode_in->crtc_vdisplay;
5206 timing_out->v_sync_width = mode_in->crtc_vsync_end - mode_in->crtc_vsync_start;
5207 timing_out->pix_clk_100hz = mode_in->crtc_clock * 10;
5210 timing_out->aspect_ratio = get_aspect_ratio(mode_in);
5212 stream->output_color_space = get_output_color_space(timing_out);
5214 stream->out_transfer_func->type = TF_TYPE_PREDEFINED;
5215 stream->out_transfer_func->tf = TRANSFER_FUNCTION_SRGB;
5216 if (stream->signal == SIGNAL_TYPE_HDMI_TYPE_A) {
5217 if (!adjust_colour_depth_from_display_info(timing_out, info) &&
5218 drm_mode_is_420_also(info, mode_in) &&
5219 timing_out->pixel_encoding != PIXEL_ENCODING_YCBCR420) {
5220 timing_out->pixel_encoding = PIXEL_ENCODING_YCBCR420;
5221 adjust_colour_depth_from_display_info(timing_out, info);
5226 static void fill_audio_info(struct audio_info *audio_info,
5227 const struct drm_connector *drm_connector,
5228 const struct dc_sink *dc_sink)
5231 int cea_revision = 0;
5232 const struct dc_edid_caps *edid_caps = &dc_sink->edid_caps;
5234 audio_info->manufacture_id = edid_caps->manufacturer_id;
5235 audio_info->product_id = edid_caps->product_id;
5237 cea_revision = drm_connector->display_info.cea_rev;
5239 strscpy(audio_info->display_name,
5240 edid_caps->display_name,
5241 AUDIO_INFO_DISPLAY_NAME_SIZE_IN_CHARS);
5243 if (cea_revision >= 3) {
5244 audio_info->mode_count = edid_caps->audio_mode_count;
5246 for (i = 0; i < audio_info->mode_count; ++i) {
5247 audio_info->modes[i].format_code =
5248 (enum audio_format_code)
5249 (edid_caps->audio_modes[i].format_code);
5250 audio_info->modes[i].channel_count =
5251 edid_caps->audio_modes[i].channel_count;
5252 audio_info->modes[i].sample_rates.all =
5253 edid_caps->audio_modes[i].sample_rate;
5254 audio_info->modes[i].sample_size =
5255 edid_caps->audio_modes[i].sample_size;
5259 audio_info->flags.all = edid_caps->speaker_flags;
5261 /* TODO: We only check for the progressive mode; check for interlace mode too */
5262 if (drm_connector->latency_present[0]) {
5263 audio_info->video_latency = drm_connector->video_latency[0];
5264 audio_info->audio_latency = drm_connector->audio_latency[0];
5267 /* TODO: For DP, video and audio latency should be calculated from DPCD caps */
5272 copy_crtc_timing_for_drm_display_mode(const struct drm_display_mode *src_mode,
5273 struct drm_display_mode *dst_mode)
5275 dst_mode->crtc_hdisplay = src_mode->crtc_hdisplay;
5276 dst_mode->crtc_vdisplay = src_mode->crtc_vdisplay;
5277 dst_mode->crtc_clock = src_mode->crtc_clock;
5278 dst_mode->crtc_hblank_start = src_mode->crtc_hblank_start;
5279 dst_mode->crtc_hblank_end = src_mode->crtc_hblank_end;
5280 dst_mode->crtc_hsync_start = src_mode->crtc_hsync_start;
5281 dst_mode->crtc_hsync_end = src_mode->crtc_hsync_end;
5282 dst_mode->crtc_htotal = src_mode->crtc_htotal;
5283 dst_mode->crtc_hskew = src_mode->crtc_hskew;
5284 dst_mode->crtc_vblank_start = src_mode->crtc_vblank_start;
5285 dst_mode->crtc_vblank_end = src_mode->crtc_vblank_end;
5286 dst_mode->crtc_vsync_start = src_mode->crtc_vsync_start;
5287 dst_mode->crtc_vsync_end = src_mode->crtc_vsync_end;
5288 dst_mode->crtc_vtotal = src_mode->crtc_vtotal;
5292 decide_crtc_timing_for_drm_display_mode(struct drm_display_mode *drm_mode,
5293 const struct drm_display_mode *native_mode,
5296 if (scale_enabled) {
5297 copy_crtc_timing_for_drm_display_mode(native_mode, drm_mode);
5298 } else if (native_mode->clock == drm_mode->clock &&
5299 native_mode->htotal == drm_mode->htotal &&
5300 native_mode->vtotal == drm_mode->vtotal) {
5301 copy_crtc_timing_for_drm_display_mode(native_mode, drm_mode);
5303 /* no scaling and no amdgpu-inserted mode: nothing to patch */
5307 static struct dc_sink *
5308 create_fake_sink(struct amdgpu_dm_connector *aconnector)
5310 struct dc_sink_init_data sink_init_data = { 0 };
5311 struct dc_sink *sink = NULL;
5312 sink_init_data.link = aconnector->dc_link;
5313 sink_init_data.sink_signal = aconnector->dc_link->connector_signal;
5315 sink = dc_sink_create(&sink_init_data);
5317 DRM_ERROR("Failed to create sink!\n");
5320 sink->sink_signal = SIGNAL_TYPE_VIRTUAL;
5325 static void set_multisync_trigger_params(
5326 struct dc_stream_state *stream)
5328 struct dc_stream_state *master = NULL;
5330 if (stream->triggered_crtc_reset.enabled) {
5331 master = stream->triggered_crtc_reset.event_source;
5332 stream->triggered_crtc_reset.event =
5333 master->timing.flags.VSYNC_POSITIVE_POLARITY ?
5334 CRTC_EVENT_VSYNC_RISING : CRTC_EVENT_VSYNC_FALLING;
5335 stream->triggered_crtc_reset.delay = TRIGGER_DELAY_NEXT_PIXEL;
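/*
 * Pick the stream with the highest refresh rate as the event source
 * (master) that the other synchronized streams reset against.
 */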
5339 static void set_master_stream(struct dc_stream_state *stream_set[],
5342 int j, highest_rfr = 0, master_stream = 0;
5344 for (j = 0; j < stream_count; j++) {
5345 if (stream_set[j] && stream_set[j]->triggered_crtc_reset.enabled) {
5346 int refresh_rate = 0;
5348 refresh_rate = (stream_set[j]->timing.pix_clk_100hz*100)/
5349 (stream_set[j]->timing.h_total*stream_set[j]->timing.v_total);
5350 if (refresh_rate > highest_rfr) {
5351 highest_rfr = refresh_rate;
5356 for (j = 0; j < stream_count; j++) {
5358 stream_set[j]->triggered_crtc_reset.event_source = stream_set[master_stream];
5362 static void dm_enable_per_frame_crtc_master_sync(struct dc_state *context)
5365 struct dc_stream_state *stream;
5367 if (context->stream_count < 2)
5369 for (i = 0; i < context->stream_count ; i++) {
5370 if (!context->streams[i])
5373 * TODO: add a function to read AMD VSDB bits and set
5374 * crtc_sync_master.multi_sync_enabled flag
5375 * For now it's set to false
5379 set_master_stream(context->streams, context->stream_count);
5381 for (i = 0; i < context->stream_count ; i++) {
5382 stream = context->streams[i];
5387 set_multisync_trigger_params(stream);
5391 static struct drm_display_mode *
5392 get_highest_refresh_rate_mode(struct amdgpu_dm_connector *aconnector,
5393 bool use_probed_modes)
5395 struct drm_display_mode *m, *m_pref = NULL;
5396 u16 current_refresh, highest_refresh;
5397 struct list_head *list_head = use_probed_modes ?
5398 &aconnector->base.probed_modes :
5399 &aconnector->base.modes;
5401 if (aconnector->freesync_vid_base.clock != 0)
5402 return &aconnector->freesync_vid_base;
5404 /* Find the preferred mode */
5405 list_for_each_entry (m, list_head, head) {
5406 if (m->type & DRM_MODE_TYPE_PREFERRED) {
5413 /* Probably an EDID with no preferred mode. Fall back to the first entry */
5414 m_pref = list_first_entry_or_null(
5415 &aconnector->base.modes, struct drm_display_mode, head);
5417 DRM_DEBUG_DRIVER("No preferred mode found in EDID\n");
5422 highest_refresh = drm_mode_vrefresh(m_pref);
5425 * Find the mode with the highest refresh rate at the same resolution.
5426 * For some monitors, the preferred mode is not the mode with the
5427 * highest supported refresh rate.
5429 list_for_each_entry (m, list_head, head) {
5430 current_refresh = drm_mode_vrefresh(m);
5432 if (m->hdisplay == m_pref->hdisplay &&
5433 m->vdisplay == m_pref->vdisplay &&
5434 highest_refresh < current_refresh) {
5435 highest_refresh = current_refresh;
5440 aconnector->freesync_vid_base = *m_pref;
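/*
 * A mode counts as a "freesync video mode" if it only differs from the
 * highest-refresh base mode in its vertical blanking; every horizontal
 * parameter and the pixel clock must match exactly.
 */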
5444 static bool is_freesync_video_mode(const struct drm_display_mode *mode,
5445 struct amdgpu_dm_connector *aconnector)
5447 struct drm_display_mode *high_mode;
5450 high_mode = get_highest_refresh_rate_mode(aconnector, false);
5451 if (!high_mode || !mode)
5454 timing_diff = high_mode->vtotal - mode->vtotal;
5456 if (high_mode->clock == 0 || high_mode->clock != mode->clock ||
5457 high_mode->hdisplay != mode->hdisplay ||
5458 high_mode->vdisplay != mode->vdisplay ||
5459 high_mode->hsync_start != mode->hsync_start ||
5460 high_mode->hsync_end != mode->hsync_end ||
5461 high_mode->htotal != mode->htotal ||
5462 high_mode->hskew != mode->hskew ||
5463 high_mode->vscan != mode->vscan ||
5464 high_mode->vsync_start - mode->vsync_start != timing_diff ||
5465 high_mode->vsync_end - mode->vsync_end != timing_diff)
5471 static struct dc_stream_state *
5472 create_stream_for_sink(struct amdgpu_dm_connector *aconnector,
5473 const struct drm_display_mode *drm_mode,
5474 const struct dm_connector_state *dm_state,
5475 const struct dc_stream_state *old_stream,
5478 struct drm_display_mode *preferred_mode = NULL;
5479 struct drm_connector *drm_connector;
5480 const struct drm_connector_state *con_state =
5481 dm_state ? &dm_state->base : NULL;
5482 struct dc_stream_state *stream = NULL;
5483 struct drm_display_mode mode = *drm_mode;
5484 struct drm_display_mode saved_mode;
5485 struct drm_display_mode *freesync_mode = NULL;
5486 bool native_mode_found = false;
5487 bool recalculate_timing = dm_state ? (dm_state->scaling != RMX_OFF) : false;
5489 int preferred_refresh = 0;
5490 #if defined(CONFIG_DRM_AMD_DC_DCN)
5491 struct dsc_dec_dpcd_caps dsc_caps;
5492 uint32_t link_bandwidth_kbps;
5494 struct dc_sink *sink = NULL;
5496 memset(&saved_mode, 0, sizeof(saved_mode));
5498 if (aconnector == NULL) {
5499 DRM_ERROR("aconnector is NULL!\n");
5503 drm_connector = &aconnector->base;
5505 if (!aconnector->dc_sink) {
5506 sink = create_fake_sink(aconnector);
5510 sink = aconnector->dc_sink;
5511 dc_sink_retain(sink);
5514 stream = dc_create_stream_for_sink(sink);
5516 if (stream == NULL) {
5517 DRM_ERROR("Failed to create stream for sink!\n");
5521 stream->dm_stream_context = aconnector;
5523 stream->timing.flags.LTE_340MCSC_SCRAMBLE =
5524 drm_connector->display_info.hdmi.scdc.scrambling.low_rates;
5526 list_for_each_entry(preferred_mode, &aconnector->base.modes, head) {
5527 /* Search for preferred mode */
5528 if (preferred_mode->type & DRM_MODE_TYPE_PREFERRED) {
5529 native_mode_found = true;
5533 if (!native_mode_found)
5534 preferred_mode = list_first_entry_or_null(
5535 &aconnector->base.modes,
5536 struct drm_display_mode,
5539 mode_refresh = drm_mode_vrefresh(&mode);
5541 if (preferred_mode == NULL) {
5543 * This may not be an error; the use case is when we have no
5544 * usermode calls to reset and set mode upon hotplug. In this
5545 * case, we call set mode ourselves to restore the previous mode,
5546 * and the mode list may not be populated in time.
5548 DRM_DEBUG_DRIVER("No preferred mode found\n");
5550 recalculate_timing |= amdgpu_freesync_vid_mode &&
5551 is_freesync_video_mode(&mode, aconnector);
5552 if (recalculate_timing) {
5553 freesync_mode = get_highest_refresh_rate_mode(aconnector, false);
5555 mode = *freesync_mode;
5557 decide_crtc_timing_for_drm_display_mode(
5558 &mode, preferred_mode,
5559 dm_state ? (dm_state->scaling != RMX_OFF) : false);
5562 preferred_refresh = drm_mode_vrefresh(preferred_mode);
5565 if (recalculate_timing)
5566 drm_mode_set_crtcinfo(&saved_mode, 0);
5568 drm_mode_set_crtcinfo(&mode, 0);
5571 * If scaling is enabled and the refresh rate didn't change,
5572 * we copy the vic and polarities of the old timings.
5574 if (!recalculate_timing || mode_refresh != preferred_refresh)
5575 fill_stream_properties_from_drm_display_mode(
5576 stream, &mode, &aconnector->base, con_state, NULL,
5579 fill_stream_properties_from_drm_display_mode(
5580 stream, &mode, &aconnector->base, con_state, old_stream,
5583 stream->timing.flags.DSC = 0;
5585 if (aconnector->dc_link && sink->sink_signal == SIGNAL_TYPE_DISPLAY_PORT) {
5586 #if defined(CONFIG_DRM_AMD_DC_DCN)
5587 dc_dsc_parse_dsc_dpcd(aconnector->dc_link->ctx->dc,
5588 aconnector->dc_link->dpcd_caps.dsc_caps.dsc_basic_caps.raw,
5589 aconnector->dc_link->dpcd_caps.dsc_caps.dsc_branch_decoder_caps.raw,
5591 link_bandwidth_kbps = dc_link_bandwidth_kbps(aconnector->dc_link,
5592 dc_link_get_link_cap(aconnector->dc_link));
5594 if (aconnector->dsc_settings.dsc_force_enable != DSC_CLK_FORCE_DISABLE && dsc_caps.is_dsc_supported) {
5595 /* Set DSC policy according to dsc_clock_en */
5596 dc_dsc_policy_set_enable_dsc_when_not_needed(
5597 aconnector->dsc_settings.dsc_force_enable == DSC_CLK_FORCE_ENABLE);
5599 if (dc_dsc_compute_config(aconnector->dc_link->ctx->dc->res_pool->dscs[0],
5601 aconnector->dc_link->ctx->dc->debug.dsc_min_slice_height_override,
5603 link_bandwidth_kbps,
5605 &stream->timing.dsc_cfg))
5606 stream->timing.flags.DSC = 1;
5607 /* Overwrite the stream flag if DSC is enabled through debugfs */
5608 if (aconnector->dsc_settings.dsc_force_enable == DSC_CLK_FORCE_ENABLE)
5609 stream->timing.flags.DSC = 1;
5611 if (stream->timing.flags.DSC && aconnector->dsc_settings.dsc_num_slices_h)
5612 stream->timing.dsc_cfg.num_slices_h = aconnector->dsc_settings.dsc_num_slices_h;
5614 if (stream->timing.flags.DSC && aconnector->dsc_settings.dsc_num_slices_v)
5615 stream->timing.dsc_cfg.num_slices_v = aconnector->dsc_settings.dsc_num_slices_v;
5617 if (stream->timing.flags.DSC && aconnector->dsc_settings.dsc_bits_per_pixel)
5618 stream->timing.dsc_cfg.bits_per_pixel = aconnector->dsc_settings.dsc_bits_per_pixel;
5623 update_stream_scaling_settings(&mode, dm_state, stream);
5626 &stream->audio_info,
5630 update_stream_signal(stream, sink);
5632 if (stream->signal == SIGNAL_TYPE_HDMI_TYPE_A)
5633 mod_build_hf_vsif_infopacket(stream, &stream->vsp_infopacket);
5635 if (stream->link->psr_settings.psr_feature_enabled) {
5637 // Decide whether the stream supports VSC SDP colorimetry
5638 // before building the VSC info packet.
5640 stream->use_vsc_sdp_for_colorimetry = false;
5641 if (aconnector->dc_sink->sink_signal == SIGNAL_TYPE_DISPLAY_PORT_MST) {
5642 stream->use_vsc_sdp_for_colorimetry =
5643 aconnector->dc_sink->is_vsc_sdp_colorimetry_supported;
5645 if (stream->link->dpcd_caps.dprx_feature.bits.VSC_SDP_COLORIMETRY_SUPPORTED)
5646 stream->use_vsc_sdp_for_colorimetry = true;
5648 mod_build_vsc_infopacket(stream, &stream->vsc_infopacket);
5651 dc_sink_release(sink);
5656 static void amdgpu_dm_crtc_destroy(struct drm_crtc *crtc)
5658 drm_crtc_cleanup(crtc);
5662 static void dm_crtc_destroy_state(struct drm_crtc *crtc,
5663 struct drm_crtc_state *state)
5665 struct dm_crtc_state *cur = to_dm_crtc_state(state);
5667 /* TODO: Destroy dc_stream objects once the stream object is flattened */
5669 dc_stream_release(cur->stream);
5672 __drm_atomic_helper_crtc_destroy_state(state);
5678 static void dm_crtc_reset_state(struct drm_crtc *crtc)
5680 struct dm_crtc_state *state;
5683 dm_crtc_destroy_state(crtc, crtc->state);
5685 state = kzalloc(sizeof(*state), GFP_KERNEL);
5686 if (WARN_ON(!state))
5689 __drm_atomic_helper_crtc_reset(crtc, &state->base);
5692 static struct drm_crtc_state *
5693 dm_crtc_duplicate_state(struct drm_crtc *crtc)
5695 struct dm_crtc_state *state, *cur;
5697 cur = to_dm_crtc_state(crtc->state);
5699 if (WARN_ON(!crtc->state))
5702 state = kzalloc(sizeof(*state), GFP_KERNEL);
5706 __drm_atomic_helper_crtc_duplicate_state(crtc, &state->base);
5709 state->stream = cur->stream;
5710 dc_stream_retain(state->stream);
5713 state->active_planes = cur->active_planes;
5714 state->vrr_infopacket = cur->vrr_infopacket;
5715 state->abm_level = cur->abm_level;
5716 state->vrr_supported = cur->vrr_supported;
5717 state->freesync_config = cur->freesync_config;
5718 state->cm_has_degamma = cur->cm_has_degamma;
5719 state->cm_is_degamma_srgb = cur->cm_is_degamma_srgb;
5720 /* TODO: Duplicate dc_stream once the stream object is flattened */
5722 return &state->base;
5725 #ifdef CONFIG_DRM_AMD_SECURE_DISPLAY
5726 static int amdgpu_dm_crtc_late_register(struct drm_crtc *crtc)
5728 crtc_debugfs_init(crtc);
5734 static inline int dm_set_vupdate_irq(struct drm_crtc *crtc, bool enable)
5736 enum dc_irq_source irq_source;
5737 struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
5738 struct amdgpu_device *adev = drm_to_adev(crtc->dev);
5741 irq_source = IRQ_TYPE_VUPDATE + acrtc->otg_inst;
5743 rc = dc_interrupt_set(adev->dm.dc, irq_source, enable) ? 0 : -EBUSY;
5745 DRM_DEBUG_VBL("crtc %d - vupdate irq %sabling: r=%d\n",
5746 acrtc->crtc_id, enable ? "en" : "dis", rc);
5750 static inline int dm_set_vblank(struct drm_crtc *crtc, bool enable)
5752 enum dc_irq_source irq_source;
5753 struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
5754 struct amdgpu_device *adev = drm_to_adev(crtc->dev);
5755 struct dm_crtc_state *acrtc_state = to_dm_crtc_state(crtc->state);
5756 #if defined(CONFIG_DRM_AMD_DC_DCN)
5757 struct amdgpu_display_manager *dm = &adev->dm;
5758 unsigned long flags;
5763 /* vblank irq on -> Only need vupdate irq in vrr mode */
5764 if (amdgpu_dm_vrr_active(acrtc_state))
5765 rc = dm_set_vupdate_irq(crtc, true);
5767 /* vblank irq off -> vupdate irq off */
5768 rc = dm_set_vupdate_irq(crtc, false);
5774 irq_source = IRQ_TYPE_VBLANK + acrtc->otg_inst;
5776 if (!dc_interrupt_set(adev->dm.dc, irq_source, enable))
5779 if (amdgpu_in_reset(adev))
5782 #if defined(CONFIG_DRM_AMD_DC_DCN)
5783 spin_lock_irqsave(&dm->vblank_lock, flags);
5784 dm->vblank_workqueue->dm = dm;
5785 dm->vblank_workqueue->otg_inst = acrtc->otg_inst;
5786 dm->vblank_workqueue->enable = enable;
5787 spin_unlock_irqrestore(&dm->vblank_lock, flags);
5788 schedule_work(&dm->vblank_workqueue->mall_work);
5794 static int dm_enable_vblank(struct drm_crtc *crtc)
5796 return dm_set_vblank(crtc, true);
5799 static void dm_disable_vblank(struct drm_crtc *crtc)
5801 dm_set_vblank(crtc, false);
5804 /* Implement only the options currently available for the driver */
5805 static const struct drm_crtc_funcs amdgpu_dm_crtc_funcs = {
5806 .reset = dm_crtc_reset_state,
5807 .destroy = amdgpu_dm_crtc_destroy,
5808 .set_config = drm_atomic_helper_set_config,
5809 .page_flip = drm_atomic_helper_page_flip,
5810 .atomic_duplicate_state = dm_crtc_duplicate_state,
5811 .atomic_destroy_state = dm_crtc_destroy_state,
5812 .set_crc_source = amdgpu_dm_crtc_set_crc_source,
5813 .verify_crc_source = amdgpu_dm_crtc_verify_crc_source,
5814 .get_crc_sources = amdgpu_dm_crtc_get_crc_sources,
5815 .get_vblank_counter = amdgpu_get_vblank_counter_kms,
5816 .enable_vblank = dm_enable_vblank,
5817 .disable_vblank = dm_disable_vblank,
5818 .get_vblank_timestamp = drm_crtc_vblank_helper_get_vblank_timestamp,
5819 #if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
5820 .late_register = amdgpu_dm_crtc_late_register,
5824 static enum drm_connector_status
5825 amdgpu_dm_connector_detect(struct drm_connector *connector, bool force)
5828 struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
5832 * 1. This interface is NOT called in the context of an HPD irq.
5833 * 2. This interface *is called* in the context of a user-mode ioctl,
5834 * which makes it a bad place for *any* MST-related activity.
5837 if (aconnector->base.force == DRM_FORCE_UNSPECIFIED &&
5838 !aconnector->fake_enable)
5839 connected = (aconnector->dc_sink != NULL);
5841 connected = (aconnector->base.force == DRM_FORCE_ON);
5843 update_subconnector_property(aconnector);
5845 return (connected ? connector_status_connected :
5846 connector_status_disconnected);
5849 int amdgpu_dm_connector_atomic_set_property(struct drm_connector *connector,
5850 struct drm_connector_state *connector_state,
5851 struct drm_property *property,
5854 struct drm_device *dev = connector->dev;
5855 struct amdgpu_device *adev = drm_to_adev(dev);
5856 struct dm_connector_state *dm_old_state =
5857 to_dm_connector_state(connector->state);
5858 struct dm_connector_state *dm_new_state =
5859 to_dm_connector_state(connector_state);
5863 if (property == dev->mode_config.scaling_mode_property) {
5864 enum amdgpu_rmx_type rmx_type;
5867 case DRM_MODE_SCALE_CENTER:
5868 rmx_type = RMX_CENTER;
5870 case DRM_MODE_SCALE_ASPECT:
5871 rmx_type = RMX_ASPECT;
5873 case DRM_MODE_SCALE_FULLSCREEN:
5874 rmx_type = RMX_FULL;
5876 case DRM_MODE_SCALE_NONE:
5882 if (dm_old_state->scaling == rmx_type)
5885 dm_new_state->scaling = rmx_type;
5887 } else if (property == adev->mode_info.underscan_hborder_property) {
5888 dm_new_state->underscan_hborder = val;
5890 } else if (property == adev->mode_info.underscan_vborder_property) {
5891 dm_new_state->underscan_vborder = val;
5893 } else if (property == adev->mode_info.underscan_property) {
5894 dm_new_state->underscan_enable = val;
5896 } else if (property == adev->mode_info.abm_level_property) {
5897 dm_new_state->abm_level = val;
5904 int amdgpu_dm_connector_atomic_get_property(struct drm_connector *connector,
5905 const struct drm_connector_state *state,
5906 struct drm_property *property,
5909 struct drm_device *dev = connector->dev;
5910 struct amdgpu_device *adev = drm_to_adev(dev);
5911 struct dm_connector_state *dm_state =
5912 to_dm_connector_state(state);
5915 if (property == dev->mode_config.scaling_mode_property) {
5916 switch (dm_state->scaling) {
5918 *val = DRM_MODE_SCALE_CENTER;
5921 *val = DRM_MODE_SCALE_ASPECT;
5924 *val = DRM_MODE_SCALE_FULLSCREEN;
5928 *val = DRM_MODE_SCALE_NONE;
5932 } else if (property == adev->mode_info.underscan_hborder_property) {
5933 *val = dm_state->underscan_hborder;
5935 } else if (property == adev->mode_info.underscan_vborder_property) {
5936 *val = dm_state->underscan_vborder;
5938 } else if (property == adev->mode_info.underscan_property) {
5939 *val = dm_state->underscan_enable;
5941 } else if (property == adev->mode_info.abm_level_property) {
5942 *val = dm_state->abm_level;
5949 static void amdgpu_dm_connector_unregister(struct drm_connector *connector)
5951 struct amdgpu_dm_connector *amdgpu_dm_connector = to_amdgpu_dm_connector(connector);
5953 drm_dp_aux_unregister(&amdgpu_dm_connector->dm_dp_aux.aux);
5956 static void amdgpu_dm_connector_destroy(struct drm_connector *connector)
5958 struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
5959 const struct dc_link *link = aconnector->dc_link;
5960 struct amdgpu_device *adev = drm_to_adev(connector->dev);
5961 struct amdgpu_display_manager *dm = &adev->dm;
5964 * Call only if mst_mgr was initialized before, since it's not done
5965 * for all connector types.
5967 if (aconnector->mst_mgr.dev)
5968 drm_dp_mst_topology_mgr_destroy(&aconnector->mst_mgr);
5970 #if defined(CONFIG_BACKLIGHT_CLASS_DEVICE) ||\
5971 defined(CONFIG_BACKLIGHT_CLASS_DEVICE_MODULE)
5973 if ((link->connector_signal & (SIGNAL_TYPE_EDP | SIGNAL_TYPE_LVDS)) &&
5974 link->type != dc_connection_none &&
5975 dm->backlight_dev) {
5976 backlight_device_unregister(dm->backlight_dev);
5977 dm->backlight_dev = NULL;
5981 if (aconnector->dc_em_sink)
5982 dc_sink_release(aconnector->dc_em_sink);
5983 aconnector->dc_em_sink = NULL;
5984 if (aconnector->dc_sink)
5985 dc_sink_release(aconnector->dc_sink);
5986 aconnector->dc_sink = NULL;
5988 drm_dp_cec_unregister_connector(&aconnector->dm_dp_aux.aux);
5989 drm_connector_unregister(connector);
5990 drm_connector_cleanup(connector);
5991 if (aconnector->i2c) {
5992 i2c_del_adapter(&aconnector->i2c->base);
5993 kfree(aconnector->i2c);
5995 kfree(aconnector->dm_dp_aux.aux.name);
6000 void amdgpu_dm_connector_funcs_reset(struct drm_connector *connector)
6002 struct dm_connector_state *state =
6003 to_dm_connector_state(connector->state);
6005 if (connector->state)
6006 __drm_atomic_helper_connector_destroy_state(connector->state);
6010 state = kzalloc(sizeof(*state), GFP_KERNEL);
6013 state->scaling = RMX_OFF;
6014 state->underscan_enable = false;
6015 state->underscan_hborder = 0;
6016 state->underscan_vborder = 0;
6017 state->base.max_requested_bpc = 8;
6018 state->vcpi_slots = 0;
6020 if (connector->connector_type == DRM_MODE_CONNECTOR_eDP)
6021 state->abm_level = amdgpu_dm_abm_level;
6023 __drm_atomic_helper_connector_reset(connector, &state->base);
6027 struct drm_connector_state *
6028 amdgpu_dm_connector_atomic_duplicate_state(struct drm_connector *connector)
6030 struct dm_connector_state *state =
6031 to_dm_connector_state(connector->state);
6033 struct dm_connector_state *new_state =
6034 kmemdup(state, sizeof(*state), GFP_KERNEL);
6039 __drm_atomic_helper_connector_duplicate_state(connector, &new_state->base);
6041 new_state->freesync_capable = state->freesync_capable;
6042 new_state->abm_level = state->abm_level;
6043 new_state->scaling = state->scaling;
6044 new_state->underscan_enable = state->underscan_enable;
6045 new_state->underscan_hborder = state->underscan_hborder;
6046 new_state->underscan_vborder = state->underscan_vborder;
6047 new_state->vcpi_slots = state->vcpi_slots;
6048 new_state->pbn = state->pbn;
6049 return &new_state->base;
6053 amdgpu_dm_connector_late_register(struct drm_connector *connector)
6055 struct amdgpu_dm_connector *amdgpu_dm_connector =
6056 to_amdgpu_dm_connector(connector);
6059 if ((connector->connector_type == DRM_MODE_CONNECTOR_DisplayPort) ||
6060 (connector->connector_type == DRM_MODE_CONNECTOR_eDP)) {
6061 amdgpu_dm_connector->dm_dp_aux.aux.dev = connector->kdev;
6062 r = drm_dp_aux_register(&amdgpu_dm_connector->dm_dp_aux.aux);
6067 #if defined(CONFIG_DEBUG_FS)
6068 connector_debugfs_init(amdgpu_dm_connector);
6074 static const struct drm_connector_funcs amdgpu_dm_connector_funcs = {
6075 .reset = amdgpu_dm_connector_funcs_reset,
6076 .detect = amdgpu_dm_connector_detect,
6077 .fill_modes = drm_helper_probe_single_connector_modes,
6078 .destroy = amdgpu_dm_connector_destroy,
6079 .atomic_duplicate_state = amdgpu_dm_connector_atomic_duplicate_state,
6080 .atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
6081 .atomic_set_property = amdgpu_dm_connector_atomic_set_property,
6082 .atomic_get_property = amdgpu_dm_connector_atomic_get_property,
6083 .late_register = amdgpu_dm_connector_late_register,
6084 .early_unregister = amdgpu_dm_connector_unregister
6087 static int get_modes(struct drm_connector *connector)
6089 return amdgpu_dm_connector_get_modes(connector);
6092 static void create_eml_sink(struct amdgpu_dm_connector *aconnector)
6094 struct dc_sink_init_data init_params = {
6095 .link = aconnector->dc_link,
6096 .sink_signal = SIGNAL_TYPE_VIRTUAL
6100 if (!aconnector->base.edid_blob_ptr) {
6101 DRM_ERROR("No EDID firmware found on connector: %s ,forcing to OFF!\n",
6102 aconnector->base.name);
6104 aconnector->base.force = DRM_FORCE_OFF;
6105 aconnector->base.override_edid = false;
6109 edid = (struct edid *) aconnector->base.edid_blob_ptr->data;
6111 aconnector->edid = edid;
6113 aconnector->dc_em_sink = dc_link_add_remote_sink(
6114 aconnector->dc_link,
6116 (edid->extensions + 1) * EDID_LENGTH,
6119 if (aconnector->base.force == DRM_FORCE_ON) {
6120 aconnector->dc_sink = aconnector->dc_link->local_sink ?
6121 aconnector->dc_link->local_sink :
6122 aconnector->dc_em_sink;
6123 dc_sink_retain(aconnector->dc_sink);
6127 static void handle_edid_mgmt(struct amdgpu_dm_connector *aconnector)
6129 struct dc_link *link = (struct dc_link *)aconnector->dc_link;
6132 * In case of a headless boot with force-on for a DP managed connector,
6133 * these settings have to be != 0 to get an initial modeset.
6135 if (link->connector_signal == SIGNAL_TYPE_DISPLAY_PORT) {
6136 link->verified_link_cap.lane_count = LANE_COUNT_FOUR;
6137 link->verified_link_cap.link_rate = LINK_RATE_HIGH2;
6141 aconnector->base.override_edid = true;
6142 create_eml_sink(aconnector);
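/*
 * Create a stream for the sink and validate it with DC. On validation
 * failure, the requested bpc is lowered step by step (down to 6) and, as a
 * last resort, YCbCr 4:2:0 output is forced and the whole thing retried once.
 */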
6145 static struct dc_stream_state *
6146 create_validate_stream_for_sink(struct amdgpu_dm_connector *aconnector,
6147 const struct drm_display_mode *drm_mode,
6148 const struct dm_connector_state *dm_state,
6149 const struct dc_stream_state *old_stream)
6151 struct drm_connector *connector = &aconnector->base;
6152 struct amdgpu_device *adev = drm_to_adev(connector->dev);
6153 struct dc_stream_state *stream;
6154 const struct drm_connector_state *drm_state = dm_state ? &dm_state->base : NULL;
6155 int requested_bpc = drm_state ? drm_state->max_requested_bpc : 8;
6156 enum dc_status dc_result = DC_OK;
6159 stream = create_stream_for_sink(aconnector, drm_mode,
6160 dm_state, old_stream,
6162 if (stream == NULL) {
6163 DRM_ERROR("Failed to create stream for sink!\n");
6167 dc_result = dc_validate_stream(adev->dm.dc, stream);
6169 if (dc_result != DC_OK) {
6170 DRM_DEBUG_KMS("Mode %dx%d (clk %d) failed DC validation with error %d (%s)\n",
6175 dc_status_to_str(dc_result));
6177 dc_stream_release(stream);
6179 requested_bpc -= 2; /* lower bpc to retry validation */
6182 } while (stream == NULL && requested_bpc >= 6);
6184 if (dc_result == DC_FAIL_ENC_VALIDATE && !aconnector->force_yuv420_output) {
6185 DRM_DEBUG_KMS("Retry forcing YCbCr420 encoding\n");
6187 aconnector->force_yuv420_output = true;
6188 stream = create_validate_stream_for_sink(aconnector, drm_mode,
6189 dm_state, old_stream);
6190 aconnector->force_yuv420_output = false;
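/*
 * .mode_valid hook: reject interlaced and doublescan modes, make sure a
 * (possibly emulated) sink exists for forced connectors, then build a
 * throwaway stream for the candidate mode and let DC validate it.
 */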
6196 enum drm_mode_status amdgpu_dm_connector_mode_valid(struct drm_connector *connector,
6197 struct drm_display_mode *mode)
6199 int result = MODE_ERROR;
6200 struct dc_sink *dc_sink;
6201 /* TODO: Unhardcode stream count */
6202 struct dc_stream_state *stream;
6203 struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
6205 if ((mode->flags & DRM_MODE_FLAG_INTERLACE) ||
6206 (mode->flags & DRM_MODE_FLAG_DBLSCAN))
6210 * Only run this the first time mode_valid is called to initialize
6213 if (aconnector->base.force != DRM_FORCE_UNSPECIFIED &&
6214 !aconnector->dc_em_sink)
6215 handle_edid_mgmt(aconnector);
6217 dc_sink = to_amdgpu_dm_connector(connector)->dc_sink;
6219 if (dc_sink == NULL && aconnector->base.force != DRM_FORCE_ON_DIGITAL &&
6220 aconnector->base.force != DRM_FORCE_ON) {
6221 DRM_ERROR("dc_sink is NULL!\n");
6225 stream = create_validate_stream_for_sink(aconnector, mode, NULL, NULL);
6227 dc_stream_release(stream);
6232 /* TODO: error handling */
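/*
 * Translate the connector's hdr_output_metadata blob into a DC info packet:
 * the 26-byte HDR static metadata payload is packed with the DRM infoframe
 * helpers and then wrapped in either an HDMI InfoFrame header or a DP/eDP
 * SDP header, depending on the connector type.
 */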
6236 static int fill_hdr_info_packet(const struct drm_connector_state *state,
6237 struct dc_info_packet *out)
6239 struct hdmi_drm_infoframe frame;
6240 unsigned char buf[30]; /* 26 + 4 */
6244 memset(out, 0, sizeof(*out));
6246 if (!state->hdr_output_metadata)
6249 ret = drm_hdmi_infoframe_set_hdr_metadata(&frame, state);
6253 len = hdmi_drm_infoframe_pack_only(&frame, buf, sizeof(buf));
6257 /* Static metadata is a fixed 26 bytes + 4 byte header. */
6261 /* Prepare the infopacket for DC. */
6262 switch (state->connector->connector_type) {
6263 case DRM_MODE_CONNECTOR_HDMIA:
6264 out->hb0 = 0x87; /* type */
6265 out->hb1 = 0x01; /* version */
6266 out->hb2 = 0x1A; /* length */
6267 out->sb[0] = buf[3]; /* checksum */
6271 case DRM_MODE_CONNECTOR_DisplayPort:
6272 case DRM_MODE_CONNECTOR_eDP:
6273 out->hb0 = 0x00; /* sdp id, zero */
6274 out->hb1 = 0x87; /* type */
6275 out->hb2 = 0x1D; /* payload len - 1 */
6276 out->hb3 = (0x13 << 2); /* sdp version */
6277 out->sb[0] = 0x01; /* version */
6278 out->sb[1] = 0x1A; /* length */
6286 memcpy(&out->sb[i], &buf[4], 26);
6289 print_hex_dump(KERN_DEBUG, "HDR SB:", DUMP_PREFIX_NONE, 16, 1, out->sb,
6290 sizeof(out->sb), false);
6296 is_hdr_metadata_different(const struct drm_connector_state *old_state,
6297 const struct drm_connector_state *new_state)
6299 struct drm_property_blob *old_blob = old_state->hdr_output_metadata;
6300 struct drm_property_blob *new_blob = new_state->hdr_output_metadata;
6302 if (old_blob != new_blob) {
6303 if (old_blob && new_blob &&
6304 old_blob->length == new_blob->length)
6305 return memcmp(old_blob->data, new_blob->data,
6315 amdgpu_dm_connector_atomic_check(struct drm_connector *conn,
6316 struct drm_atomic_state *state)
6318 struct drm_connector_state *new_con_state =
6319 drm_atomic_get_new_connector_state(state, conn);
6320 struct drm_connector_state *old_con_state =
6321 drm_atomic_get_old_connector_state(state, conn);
6322 struct drm_crtc *crtc = new_con_state->crtc;
6323 struct drm_crtc_state *new_crtc_state;
6326 trace_amdgpu_dm_connector_atomic_check(new_con_state);
6331 if (is_hdr_metadata_different(old_con_state, new_con_state)) {
6332 struct dc_info_packet hdr_infopacket;
6334 ret = fill_hdr_info_packet(new_con_state, &hdr_infopacket);
6338 new_crtc_state = drm_atomic_get_crtc_state(state, crtc);
6339 if (IS_ERR(new_crtc_state))
6340 return PTR_ERR(new_crtc_state);
6343 * DC considers the stream backends changed if the
6344 * static metadata changes. Forcing the modeset also
6345 * gives a simple way for userspace to switch from
6346 * 8bpc to 10bpc when setting the metadata to enter
6349 * Changing the static metadata after it's been
6350 * set is permissible, however. So only force a
6351 * modeset if we're entering or exiting HDR.
6353 new_crtc_state->mode_changed =
6354 !old_con_state->hdr_output_metadata ||
6355 !new_con_state->hdr_output_metadata;
6361 static const struct drm_connector_helper_funcs
6362 amdgpu_dm_connector_helper_funcs = {
6364 * If a second, bigger display is hotplugged in fbcon mode, its higher
6365 * resolution modes will be filtered out by drm_mode_validate_size() and
6366 * will be missing after the user starts lightdm. So we need to renew the
6367 * modes list in the get_modes callback, not just return the modes count.
6369 .get_modes = get_modes,
6370 .mode_valid = amdgpu_dm_connector_mode_valid,
6371 .atomic_check = amdgpu_dm_connector_atomic_check,
6374 static void dm_crtc_helper_disable(struct drm_crtc *crtc)
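/*
 * Count the non-cursor planes that will be active on the CRTC after this
 * commit. Planes without a new state are assumed to stay enabled; planes
 * with a new state only count if they have a framebuffer attached.
 */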
6378 static int count_crtc_active_planes(struct drm_crtc_state *new_crtc_state)
6380 struct drm_atomic_state *state = new_crtc_state->state;
6381 struct drm_plane *plane;
6384 drm_for_each_plane_mask(plane, state->dev, new_crtc_state->plane_mask) {
6385 struct drm_plane_state *new_plane_state;
6387 /* Cursor planes are "fake". */
6388 if (plane->type == DRM_PLANE_TYPE_CURSOR)
6391 new_plane_state = drm_atomic_get_new_plane_state(state, plane);
6393 if (!new_plane_state) {
6395 * The plane is enabled on the CRTC and hasn't changed
6396 * state. This means that it previously passed
6397 * validation and is therefore enabled.
6403 /* We need a framebuffer to be considered enabled. */
6404 num_active += (new_plane_state->fb != NULL);
6410 static void dm_update_crtc_active_planes(struct drm_crtc *crtc,
6411 struct drm_crtc_state *new_crtc_state)
6413 struct dm_crtc_state *dm_new_crtc_state =
6414 to_dm_crtc_state(new_crtc_state);
6416 dm_new_crtc_state->active_planes = 0;
6418 if (!dm_new_crtc_state->stream)
6421 dm_new_crtc_state->active_planes =
6422 count_crtc_active_planes(new_crtc_state);
6425 static int dm_crtc_helper_atomic_check(struct drm_crtc *crtc,
6426 struct drm_atomic_state *state)
6428 struct drm_crtc_state *crtc_state = drm_atomic_get_new_crtc_state(state,
6430 struct amdgpu_device *adev = drm_to_adev(crtc->dev);
6431 struct dc *dc = adev->dm.dc;
6432 struct dm_crtc_state *dm_crtc_state = to_dm_crtc_state(crtc_state);
6435 trace_amdgpu_dm_crtc_atomic_check(crtc_state);
6437 dm_update_crtc_active_planes(crtc, crtc_state);
6439 if (unlikely(!dm_crtc_state->stream &&
6440 modeset_required(crtc_state, NULL, dm_crtc_state->stream))) {
6446 * We require the primary plane to be enabled whenever the CRTC is, otherwise
6447 * drm_mode_cursor_universal may end up trying to enable the cursor plane while all other
6448 * planes are disabled, which is not supported by the hardware. And there is legacy
6449 * userspace which stops using the HW cursor altogether in response to the resulting EINVAL.
6451 if (crtc_state->enable &&
6452 !(crtc_state->plane_mask & drm_plane_mask(crtc->primary))) {
6453 DRM_DEBUG_ATOMIC("Can't enable a CRTC without enabling the primary plane\n");
6457 /* In some use cases, like reset, no stream is attached */
6458 if (!dm_crtc_state->stream)
6461 if (dc_validate_stream(dc, dm_crtc_state->stream) == DC_OK)
6464 DRM_DEBUG_ATOMIC("Failed DC stream validation\n");
6468 static bool dm_crtc_helper_mode_fixup(struct drm_crtc *crtc,
6469 const struct drm_display_mode *mode,
6470 struct drm_display_mode *adjusted_mode)
6475 static const struct drm_crtc_helper_funcs amdgpu_dm_crtc_helper_funcs = {
6476 .disable = dm_crtc_helper_disable,
6477 .atomic_check = dm_crtc_helper_atomic_check,
6478 .mode_fixup = dm_crtc_helper_mode_fixup,
6479 .get_scanout_position = amdgpu_crtc_get_scanout_position,
6482 static void dm_encoder_helper_disable(struct drm_encoder *encoder)
6487 static int convert_dc_color_depth_into_bpc(enum dc_color_depth display_color_depth)
6489 switch (display_color_depth) {
6490 case COLOR_DEPTH_666:
6492 case COLOR_DEPTH_888:
6494 case COLOR_DEPTH_101010:
6496 case COLOR_DEPTH_121212:
6498 case COLOR_DEPTH_141414:
6500 case COLOR_DEPTH_161616:
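/*
 * Encoder atomic check for DP MST sinks: derive bpp and pixel clock from the
 * adjusted mode and the negotiated color depth, convert them to a PBN value
 * and atomically reserve VCPI slots on the MST topology.
 */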
6508 static int dm_encoder_helper_atomic_check(struct drm_encoder *encoder,
6509 struct drm_crtc_state *crtc_state,
6510 struct drm_connector_state *conn_state)
6512 struct drm_atomic_state *state = crtc_state->state;
6513 struct drm_connector *connector = conn_state->connector;
6514 struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
6515 struct dm_connector_state *dm_new_connector_state = to_dm_connector_state(conn_state);
6516 const struct drm_display_mode *adjusted_mode = &crtc_state->adjusted_mode;
6517 struct drm_dp_mst_topology_mgr *mst_mgr;
6518 struct drm_dp_mst_port *mst_port;
6519 enum dc_color_depth color_depth;
6521 bool is_y420 = false;
6523 if (!aconnector->port || !aconnector->dc_sink)
6526 mst_port = aconnector->port;
6527 mst_mgr = &aconnector->mst_port->mst_mgr;
6529 if (!crtc_state->connectors_changed && !crtc_state->mode_changed)
6532 if (!state->duplicated) {
6533 int max_bpc = conn_state->max_requested_bpc;
6534 is_y420 = drm_mode_is_420_also(&connector->display_info, adjusted_mode) &&
6535 aconnector->force_yuv420_output;
6536 color_depth = convert_color_depth_from_display_info(connector,
6539 bpp = convert_dc_color_depth_into_bpc(color_depth) * 3;
6540 clock = adjusted_mode->clock;
6541 dm_new_connector_state->pbn = drm_dp_calc_pbn_mode(clock, bpp, false);
6543 dm_new_connector_state->vcpi_slots = drm_dp_atomic_find_vcpi_slots(state,
6546 dm_new_connector_state->pbn,
6547 dm_mst_get_pbn_divider(aconnector->dc_link));
6548 if (dm_new_connector_state->vcpi_slots < 0) {
6549 DRM_DEBUG_ATOMIC("failed finding vcpi slots: %d\n", (int)dm_new_connector_state->vcpi_slots);
6550 return dm_new_connector_state->vcpi_slots;
6555 const struct drm_encoder_helper_funcs amdgpu_dm_encoder_helper_funcs = {
6556 .disable = dm_encoder_helper_disable,
6557 .atomic_check = dm_encoder_helper_atomic_check
6560 #if defined(CONFIG_DRM_AMD_DC_DCN)
6561 static int dm_update_mst_vcpi_slots_for_dsc(struct drm_atomic_state *state,
6562 struct dc_state *dc_state)
6564 struct dc_stream_state *stream = NULL;
6565 struct drm_connector *connector;
6566 struct drm_connector_state *new_con_state, *old_con_state;
6567 struct amdgpu_dm_connector *aconnector;
6568 struct dm_connector_state *dm_conn_state;
6569 int i, j, clock, bpp;
6570 int vcpi, pbn_div, pbn = 0;
6572 for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) {
6574 aconnector = to_amdgpu_dm_connector(connector);
6576 if (!aconnector->port)
6579 if (!new_con_state || !new_con_state->crtc)
6582 dm_conn_state = to_dm_connector_state(new_con_state);
6584 for (j = 0; j < dc_state->stream_count; j++) {
6585 stream = dc_state->streams[j];
6589 if ((struct amdgpu_dm_connector*)stream->dm_stream_context == aconnector)
6598 if (stream->timing.flags.DSC != 1) {
6599 drm_dp_mst_atomic_enable_dsc(state,
6607 pbn_div = dm_mst_get_pbn_divider(stream->link);
6608 bpp = stream->timing.dsc_cfg.bits_per_pixel;
6609 clock = stream->timing.pix_clk_100hz / 10;
6610 pbn = drm_dp_calc_pbn_mode(clock, bpp, true);
6611 vcpi = drm_dp_mst_atomic_enable_dsc(state,
6618 dm_conn_state->pbn = pbn;
6619 dm_conn_state->vcpi_slots = vcpi;
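/*
 * DM plane state handling: the helpers below wrap the DRM atomic helpers and
 * keep the embedded dc_plane_state reference counted across reset, duplicate
 * and destroy so DC and DRM agree on the plane state's lifetime.
 */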
6625 static void dm_drm_plane_reset(struct drm_plane *plane)
6627 struct dm_plane_state *amdgpu_state = NULL;
6630 plane->funcs->atomic_destroy_state(plane, plane->state);
6632 amdgpu_state = kzalloc(sizeof(*amdgpu_state), GFP_KERNEL);
6633 WARN_ON(amdgpu_state == NULL);
6636 __drm_atomic_helper_plane_reset(plane, &amdgpu_state->base);
6639 static struct drm_plane_state *
6640 dm_drm_plane_duplicate_state(struct drm_plane *plane)
6642 struct dm_plane_state *dm_plane_state, *old_dm_plane_state;
6644 old_dm_plane_state = to_dm_plane_state(plane->state);
6645 dm_plane_state = kzalloc(sizeof(*dm_plane_state), GFP_KERNEL);
6646 if (!dm_plane_state)
6649 __drm_atomic_helper_plane_duplicate_state(plane, &dm_plane_state->base);
6651 if (old_dm_plane_state->dc_state) {
6652 dm_plane_state->dc_state = old_dm_plane_state->dc_state;
6653 dc_plane_state_retain(dm_plane_state->dc_state);
6656 return &dm_plane_state->base;
6659 static void dm_drm_plane_destroy_state(struct drm_plane *plane,
6660 struct drm_plane_state *state)
6662 struct dm_plane_state *dm_plane_state = to_dm_plane_state(state);
6664 if (dm_plane_state->dc_state)
6665 dc_plane_state_release(dm_plane_state->dc_state);
6667 drm_atomic_helper_plane_destroy_state(plane, state);
6670 static const struct drm_plane_funcs dm_plane_funcs = {
6671 .update_plane = drm_atomic_helper_update_plane,
6672 .disable_plane = drm_atomic_helper_disable_plane,
6673 .destroy = drm_primary_helper_destroy,
6674 .reset = dm_drm_plane_reset,
6675 .atomic_duplicate_state = dm_drm_plane_duplicate_state,
6676 .atomic_destroy_state = dm_drm_plane_destroy_state,
6677 .format_mod_supported = dm_plane_format_mod_supported,
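/*
 * .prepare_fb hook: reserve and pin the framebuffer BO (VRAM only for cursor
 * planes), map it into GART, record the resulting GPU address in the
 * amdgpu_framebuffer and, for newly created plane states, fill in the DC
 * buffer attributes that depend on that address.
 */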
6680 static int dm_plane_helper_prepare_fb(struct drm_plane *plane,
6681 struct drm_plane_state *new_state)
6683 struct amdgpu_framebuffer *afb;
6684 struct drm_gem_object *obj;
6685 struct amdgpu_device *adev;
6686 struct amdgpu_bo *rbo;
6687 struct dm_plane_state *dm_plane_state_new, *dm_plane_state_old;
6688 struct list_head list;
6689 struct ttm_validate_buffer tv;
6690 struct ww_acquire_ctx ticket;
6694 if (!new_state->fb) {
6695 DRM_DEBUG_KMS("No FB bound\n");
6699 afb = to_amdgpu_framebuffer(new_state->fb);
6700 obj = new_state->fb->obj[0];
6701 rbo = gem_to_amdgpu_bo(obj);
6702 adev = amdgpu_ttm_adev(rbo->tbo.bdev);
6703 INIT_LIST_HEAD(&list);
6707 list_add(&tv.head, &list);
6709 r = ttm_eu_reserve_buffers(&ticket, &list, false, NULL);
6711 dev_err(adev->dev, "fail to reserve bo (%d)\n", r);
6715 if (plane->type != DRM_PLANE_TYPE_CURSOR)
6716 domain = amdgpu_display_supported_domains(adev, rbo->flags);
6718 domain = AMDGPU_GEM_DOMAIN_VRAM;
6720 r = amdgpu_bo_pin(rbo, domain);
6721 if (unlikely(r != 0)) {
6722 if (r != -ERESTARTSYS)
6723 DRM_ERROR("Failed to pin framebuffer with error %d\n", r);
6724 ttm_eu_backoff_reservation(&ticket, &list);
6728 r = amdgpu_ttm_alloc_gart(&rbo->tbo);
6729 if (unlikely(r != 0)) {
6730 amdgpu_bo_unpin(rbo);
6731 ttm_eu_backoff_reservation(&ticket, &list);
6732 DRM_ERROR("%p bind failed\n", rbo);
6736 ttm_eu_backoff_reservation(&ticket, &list);
6738 afb->address = amdgpu_bo_gpu_offset(rbo);
6743 * We don't do surface updates on planes that have been newly created,
6744 * but we also don't have the afb->address during atomic check.
6746 * Fill in buffer attributes depending on the address here, but only on
6747 * newly created planes since they're not being used by DC yet and this
6748 * won't modify global state.
6750 dm_plane_state_old = to_dm_plane_state(plane->state);
6751 dm_plane_state_new = to_dm_plane_state(new_state);
6753 if (dm_plane_state_new->dc_state &&
6754 dm_plane_state_old->dc_state != dm_plane_state_new->dc_state) {
6755 struct dc_plane_state *plane_state =
6756 dm_plane_state_new->dc_state;
6757 bool force_disable_dcc = !plane_state->dcc.enable;
6759 fill_plane_buffer_attributes(
6760 adev, afb, plane_state->format, plane_state->rotation,
6762 &plane_state->tiling_info, &plane_state->plane_size,
6763 &plane_state->dcc, &plane_state->address,
6764 afb->tmz_surface, force_disable_dcc);
6770 static void dm_plane_helper_cleanup_fb(struct drm_plane *plane,
6771 struct drm_plane_state *old_state)
6773 struct amdgpu_bo *rbo;
6779 rbo = gem_to_amdgpu_bo(old_state->fb->obj[0]);
6780 r = amdgpu_bo_reserve(rbo, false);
6782 DRM_ERROR("failed to reserve rbo before unpin\n");
6786 amdgpu_bo_unpin(rbo);
6787 amdgpu_bo_unreserve(rbo);
6788 amdgpu_bo_unref(&rbo);
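/*
 * Validate the plane's viewport against the CRTC mode (cursor planes are
 * exempt), then convert DC's per-plane scaling limits into the DRM 16.16
 * fixed-point min/max scale expected by drm_atomic_helper_check_plane_state().
 */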
6791 static int dm_plane_helper_check_state(struct drm_plane_state *state,
6792 struct drm_crtc_state *new_crtc_state)
6794 struct drm_framebuffer *fb = state->fb;
6795 int min_downscale, max_upscale;
6797 int max_scale = INT_MAX;
6799 /* Plane enabled? Validate viewport and get scaling factors from plane caps. */
6800 if (fb && state->crtc) {
6801 /* Validate viewport to cover the case when only the position changes */
6802 if (state->plane->type != DRM_PLANE_TYPE_CURSOR) {
6803 int viewport_width = state->crtc_w;
6804 int viewport_height = state->crtc_h;
6806 if (state->crtc_x < 0)
6807 viewport_width += state->crtc_x;
6808 else if (state->crtc_x + state->crtc_w > new_crtc_state->mode.crtc_hdisplay)
6809 viewport_width = new_crtc_state->mode.crtc_hdisplay - state->crtc_x;
6811 if (state->crtc_y < 0)
6812 viewport_height += state->crtc_y;
6813 else if (state->crtc_y + state->crtc_h > new_crtc_state->mode.crtc_vdisplay)
6814 viewport_height = new_crtc_state->mode.crtc_vdisplay - state->crtc_y;
6816 if (viewport_width < 0 || viewport_height < 0) {
6817 DRM_DEBUG_ATOMIC("Plane completely outside of screen\n");
6819 } else if (viewport_width < MIN_VIEWPORT_SIZE*2) { /* x2 for width is because of pipe-split. */
6820 DRM_DEBUG_ATOMIC("Viewport width %d smaller than %d\n", viewport_width, MIN_VIEWPORT_SIZE*2);
6822 } else if (viewport_height < MIN_VIEWPORT_SIZE) {
6823 DRM_DEBUG_ATOMIC("Viewport height %d smaller than %d\n", viewport_height, MIN_VIEWPORT_SIZE);
6829 /* Get min/max allowed scaling factors from plane caps. */
6830 get_min_max_dc_plane_scaling(state->crtc->dev, fb,
6831 &min_downscale, &max_upscale);
6833 * Convert to drm convention: 16.16 fixed point, instead of dc's
6834 * 1.0 == 1000. Also drm scaling is src/dst instead of dc's
6835 * dst/src, so min_scale = 1.0 / max_upscale, etc.
6837 min_scale = (1000 << 16) / max_upscale;
6838 max_scale = (1000 << 16) / min_downscale;
6841 return drm_atomic_helper_check_plane_state(
6842 state, new_crtc_state, min_scale, max_scale, true, true);
6845 static int dm_plane_atomic_check(struct drm_plane *plane,
6846 struct drm_atomic_state *state)
6848 struct drm_plane_state *new_plane_state = drm_atomic_get_new_plane_state(state,
6850 struct amdgpu_device *adev = drm_to_adev(plane->dev);
6851 struct dc *dc = adev->dm.dc;
6852 struct dm_plane_state *dm_plane_state;
6853 struct dc_scaling_info scaling_info;
6854 struct drm_crtc_state *new_crtc_state;
6857 trace_amdgpu_dm_plane_atomic_check(new_plane_state);
6859 dm_plane_state = to_dm_plane_state(new_plane_state);
6861 if (!dm_plane_state->dc_state)
6865 drm_atomic_get_new_crtc_state(state,
6866 new_plane_state->crtc);
6867 if (!new_crtc_state)
6870 ret = dm_plane_helper_check_state(new_plane_state, new_crtc_state);
6874 ret = fill_dc_scaling_info(new_plane_state, &scaling_info);
6878 if (dc_validate_plane(dc, dm_plane_state->dc_state) == DC_OK)
6884 static int dm_plane_atomic_async_check(struct drm_plane *plane,
6885 struct drm_atomic_state *state)
6887 /* Only support async updates on cursor planes. */
6888 if (plane->type != DRM_PLANE_TYPE_CURSOR)
6894 static void dm_plane_atomic_async_update(struct drm_plane *plane,
6895 struct drm_atomic_state *state)
6897 struct drm_plane_state *new_state = drm_atomic_get_new_plane_state(state,
6899 struct drm_plane_state *old_state =
6900 drm_atomic_get_old_plane_state(state, plane);
6902 trace_amdgpu_dm_atomic_update_cursor(new_state);
6904 swap(plane->state->fb, new_state->fb);
6906 plane->state->src_x = new_state->src_x;
6907 plane->state->src_y = new_state->src_y;
6908 plane->state->src_w = new_state->src_w;
6909 plane->state->src_h = new_state->src_h;
6910 plane->state->crtc_x = new_state->crtc_x;
6911 plane->state->crtc_y = new_state->crtc_y;
6912 plane->state->crtc_w = new_state->crtc_w;
6913 plane->state->crtc_h = new_state->crtc_h;
6915 handle_cursor_update(plane, old_state);
6918 static const struct drm_plane_helper_funcs dm_plane_helper_funcs = {
6919 .prepare_fb = dm_plane_helper_prepare_fb,
6920 .cleanup_fb = dm_plane_helper_cleanup_fb,
6921 .atomic_check = dm_plane_atomic_check,
6922 .atomic_async_check = dm_plane_atomic_async_check,
6923 .atomic_async_update = dm_plane_atomic_async_update
6927 * TODO: these are currently initialized to rgb formats only.
6928 * For future use cases we should either initialize them dynamically based on
6929 * plane capabilities, or initialize this array to all formats, so the internal
6930 * drm check will succeed, and let DC implement the proper check.
6932 static const uint32_t rgb_formats[] = {
6933 DRM_FORMAT_XRGB8888,
6934 DRM_FORMAT_ARGB8888,
6935 DRM_FORMAT_RGBA8888,
6936 DRM_FORMAT_XRGB2101010,
6937 DRM_FORMAT_XBGR2101010,
6938 DRM_FORMAT_ARGB2101010,
6939 DRM_FORMAT_ABGR2101010,
6940 DRM_FORMAT_XBGR8888,
6941 DRM_FORMAT_ABGR8888,
6945 static const uint32_t overlay_formats[] = {
6946 DRM_FORMAT_XRGB8888,
6947 DRM_FORMAT_ARGB8888,
6948 DRM_FORMAT_RGBA8888,
6949 DRM_FORMAT_XBGR8888,
6950 DRM_FORMAT_ABGR8888,
6954 static const u32 cursor_formats[] = {
6958 static int get_plane_formats(const struct drm_plane *plane,
6959 const struct dc_plane_cap *plane_cap,
6960 uint32_t *formats, int max_formats)
6962 int i, num_formats = 0;
6965 * TODO: Query support for each group of formats directly from
6966 * DC plane caps. This will require adding more formats to the
6970 switch (plane->type) {
6971 case DRM_PLANE_TYPE_PRIMARY:
6972 for (i = 0; i < ARRAY_SIZE(rgb_formats); ++i) {
6973 if (num_formats >= max_formats)
6976 formats[num_formats++] = rgb_formats[i];
6979 if (plane_cap && plane_cap->pixel_format_support.nv12)
6980 formats[num_formats++] = DRM_FORMAT_NV12;
6981 if (plane_cap && plane_cap->pixel_format_support.p010)
6982 formats[num_formats++] = DRM_FORMAT_P010;
6983 if (plane_cap && plane_cap->pixel_format_support.fp16) {
6984 formats[num_formats++] = DRM_FORMAT_XRGB16161616F;
6985 formats[num_formats++] = DRM_FORMAT_ARGB16161616F;
6986 formats[num_formats++] = DRM_FORMAT_XBGR16161616F;
6987 formats[num_formats++] = DRM_FORMAT_ABGR16161616F;
6991 case DRM_PLANE_TYPE_OVERLAY:
6992 for (i = 0; i < ARRAY_SIZE(overlay_formats); ++i) {
6993 if (num_formats >= max_formats)
6996 formats[num_formats++] = overlay_formats[i];
7000 case DRM_PLANE_TYPE_CURSOR:
7001 for (i = 0; i < ARRAY_SIZE(cursor_formats); ++i) {
7002 if (num_formats >= max_formats)
7005 formats[num_formats++] = cursor_formats[i];
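/*
 * Initialize a DRM plane for DC: gather the supported formats and modifiers,
 * register it as a universal plane and attach the alpha, blend mode, color
 * encoding/range and rotation properties that its capabilities allow.
 */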
7013 static int amdgpu_dm_plane_init(struct amdgpu_display_manager *dm,
7014 struct drm_plane *plane,
7015 unsigned long possible_crtcs,
7016 const struct dc_plane_cap *plane_cap)
7018 uint32_t formats[32];
7021 unsigned int supported_rotations;
7022 uint64_t *modifiers = NULL;
7024 num_formats = get_plane_formats(plane, plane_cap, formats,
7025 ARRAY_SIZE(formats));
7027 res = get_plane_modifiers(dm->adev, plane->type, &modifiers);
7031 res = drm_universal_plane_init(adev_to_drm(dm->adev), plane, possible_crtcs,
7032 &dm_plane_funcs, formats, num_formats,
7033 modifiers, plane->type, NULL);
7038 if (plane->type == DRM_PLANE_TYPE_OVERLAY &&
7039 plane_cap && plane_cap->per_pixel_alpha) {
7040 unsigned int blend_caps = BIT(DRM_MODE_BLEND_PIXEL_NONE) |
7041 BIT(DRM_MODE_BLEND_PREMULTI);
7043 drm_plane_create_alpha_property(plane);
7044 drm_plane_create_blend_mode_property(plane, blend_caps);
7047 if (plane->type == DRM_PLANE_TYPE_PRIMARY &&
7049 (plane_cap->pixel_format_support.nv12 ||
7050 plane_cap->pixel_format_support.p010)) {
7051 /* This only affects YUV formats. */
7052 drm_plane_create_color_properties(
7054 BIT(DRM_COLOR_YCBCR_BT601) |
7055 BIT(DRM_COLOR_YCBCR_BT709) |
7056 BIT(DRM_COLOR_YCBCR_BT2020),
7057 BIT(DRM_COLOR_YCBCR_LIMITED_RANGE) |
7058 BIT(DRM_COLOR_YCBCR_FULL_RANGE),
7059 DRM_COLOR_YCBCR_BT709, DRM_COLOR_YCBCR_LIMITED_RANGE);
7062 supported_rotations =
7063 DRM_MODE_ROTATE_0 | DRM_MODE_ROTATE_90 |
7064 DRM_MODE_ROTATE_180 | DRM_MODE_ROTATE_270;
7066 if (dm->adev->asic_type >= CHIP_BONAIRE &&
7067 plane->type != DRM_PLANE_TYPE_CURSOR)
7068 drm_plane_create_rotation_property(plane, DRM_MODE_ROTATE_0,
7069 supported_rotations);
7071 drm_plane_helper_add(plane, &dm_plane_helper_funcs);
7073 /* Create (reset) the plane state */
7074 if (plane->funcs->reset)
7075 plane->funcs->reset(plane);
7080 static int amdgpu_dm_crtc_init(struct amdgpu_display_manager *dm,
7081 struct drm_plane *plane,
7082 uint32_t crtc_index)
7084 struct amdgpu_crtc *acrtc = NULL;
7085 struct drm_plane *cursor_plane;
7089 cursor_plane = kzalloc(sizeof(*cursor_plane), GFP_KERNEL);
7093 cursor_plane->type = DRM_PLANE_TYPE_CURSOR;
7094 res = amdgpu_dm_plane_init(dm, cursor_plane, 0, NULL);
7096 acrtc = kzalloc(sizeof(struct amdgpu_crtc), GFP_KERNEL);
7100 res = drm_crtc_init_with_planes(
7105 &amdgpu_dm_crtc_funcs, NULL);
7110 drm_crtc_helper_add(&acrtc->base, &amdgpu_dm_crtc_helper_funcs);
7112 /* Create (reset) the plane state */
7113 if (acrtc->base.funcs->reset)
7114 acrtc->base.funcs->reset(&acrtc->base);
7116 acrtc->max_cursor_width = dm->adev->dm.dc->caps.max_cursor_size;
7117 acrtc->max_cursor_height = dm->adev->dm.dc->caps.max_cursor_size;
7119 acrtc->crtc_id = crtc_index;
7120 acrtc->base.enabled = false;
7121 acrtc->otg_inst = -1;
7123 dm->adev->mode_info.crtcs[crtc_index] = acrtc;
7124 drm_crtc_enable_color_mgmt(&acrtc->base, MAX_COLOR_LUT_ENTRIES,
7125 true, MAX_COLOR_LUT_ENTRIES);
7126 drm_mode_crtc_set_gamma_size(&acrtc->base, MAX_COLOR_LEGACY_LUT_ENTRIES);
7132 kfree(cursor_plane);
7137 static int to_drm_connector_type(enum signal_type st)
7140 case SIGNAL_TYPE_HDMI_TYPE_A:
7141 return DRM_MODE_CONNECTOR_HDMIA;
7142 case SIGNAL_TYPE_EDP:
7143 return DRM_MODE_CONNECTOR_eDP;
7144 case SIGNAL_TYPE_LVDS:
7145 return DRM_MODE_CONNECTOR_LVDS;
7146 case SIGNAL_TYPE_RGB:
7147 return DRM_MODE_CONNECTOR_VGA;
7148 case SIGNAL_TYPE_DISPLAY_PORT:
7149 case SIGNAL_TYPE_DISPLAY_PORT_MST:
7150 return DRM_MODE_CONNECTOR_DisplayPort;
7151 case SIGNAL_TYPE_DVI_DUAL_LINK:
7152 case SIGNAL_TYPE_DVI_SINGLE_LINK:
7153 return DRM_MODE_CONNECTOR_DVID;
7154 case SIGNAL_TYPE_VIRTUAL:
7155 return DRM_MODE_CONNECTOR_VIRTUAL;
7158 return DRM_MODE_CONNECTOR_Unknown;
7162 static struct drm_encoder *amdgpu_dm_connector_to_encoder(struct drm_connector *connector)
7164 struct drm_encoder *encoder;
7166 /* There is only one encoder per connector */
7167 drm_connector_for_each_possible_encoder(connector, encoder)
7173 static void amdgpu_dm_get_native_mode(struct drm_connector *connector)
7175 struct drm_encoder *encoder;
7176 struct amdgpu_encoder *amdgpu_encoder;
7178 encoder = amdgpu_dm_connector_to_encoder(connector);
7180 if (encoder == NULL)
7183 amdgpu_encoder = to_amdgpu_encoder(encoder);
7185 amdgpu_encoder->native_mode.clock = 0;
7187 if (!list_empty(&connector->probed_modes)) {
7188 struct drm_display_mode *preferred_mode = NULL;
7190 list_for_each_entry(preferred_mode,
7191 &connector->probed_modes,
7193 if (preferred_mode->type & DRM_MODE_TYPE_PREFERRED)
7194 amdgpu_encoder->native_mode = *preferred_mode;
7202 static struct drm_display_mode *
7203 amdgpu_dm_create_common_mode(struct drm_encoder *encoder,
7205 int hdisplay, int vdisplay)
7207 struct drm_device *dev = encoder->dev;
7208 struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
7209 struct drm_display_mode *mode = NULL;
7210 struct drm_display_mode *native_mode = &amdgpu_encoder->native_mode;
7212 mode = drm_mode_duplicate(dev, native_mode);
7217 mode->hdisplay = hdisplay;
7218 mode->vdisplay = vdisplay;
7219 mode->type &= ~DRM_MODE_TYPE_PREFERRED;
7220 strscpy(mode->name, name, DRM_DISPLAY_MODE_LEN);
7226 static void amdgpu_dm_connector_add_common_modes(struct drm_encoder *encoder,
7227 struct drm_connector *connector)
7229 struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
7230 struct drm_display_mode *mode = NULL;
7231 struct drm_display_mode *native_mode = &amdgpu_encoder->native_mode;
7232 struct amdgpu_dm_connector *amdgpu_dm_connector =
7233 to_amdgpu_dm_connector(connector);
7237 char name[DRM_DISPLAY_MODE_LEN];
7240 } common_modes[] = {
7241 { "640x480", 640, 480},
7242 { "800x600", 800, 600},
7243 { "1024x768", 1024, 768},
7244 { "1280x720", 1280, 720},
7245 { "1280x800", 1280, 800},
7246 {"1280x1024", 1280, 1024},
7247 { "1440x900", 1440, 900},
7248 {"1680x1050", 1680, 1050},
7249 {"1600x1200", 1600, 1200},
7250 {"1920x1080", 1920, 1080},
7251 {"1920x1200", 1920, 1200}
7254 n = ARRAY_SIZE(common_modes);
7256 for (i = 0; i < n; i++) {
7257 struct drm_display_mode *curmode = NULL;
7258 bool mode_existed = false;
7260 if (common_modes[i].w > native_mode->hdisplay ||
7261 common_modes[i].h > native_mode->vdisplay ||
7262 (common_modes[i].w == native_mode->hdisplay &&
7263 common_modes[i].h == native_mode->vdisplay))
7266 list_for_each_entry(curmode, &connector->probed_modes, head) {
7267 if (common_modes[i].w == curmode->hdisplay &&
7268 common_modes[i].h == curmode->vdisplay) {
7269 mode_existed = true;
7277 mode = amdgpu_dm_create_common_mode(encoder,
7278 common_modes[i].name, common_modes[i].w,
7280 drm_mode_probed_add(connector, mode);
7281 amdgpu_dm_connector->num_modes++;
7285 static void amdgpu_dm_connector_ddc_get_modes(struct drm_connector *connector,
7288 struct amdgpu_dm_connector *amdgpu_dm_connector =
7289 to_amdgpu_dm_connector(connector);
7292 /* empty probed_modes */
7293 INIT_LIST_HEAD(&connector->probed_modes);
7294 amdgpu_dm_connector->num_modes =
7295 drm_add_edid_modes(connector, edid);
7297 /* Sort the probed modes before calling
7298 * amdgpu_dm_get_native_mode(), since an EDID can have
7299 * more than one preferred mode. Modes that appear
7300 * later in the probed mode list could be of a higher,
7301 * preferred resolution: for example, a 3840x2160
7302 * preferred timing in the base EDID and a 4096x2160
7303 * preferred resolution in a later DID extension block.
7305 drm_mode_sort(&connector->probed_modes);
7306 amdgpu_dm_get_native_mode(connector);
7308 /* Freesync capabilities are reset by calling
7309 * drm_add_edid_modes() and need to be
7312 amdgpu_dm_update_freesync_caps(connector, edid);
7314 amdgpu_dm_connector->num_modes = 0;
7318 static bool is_duplicate_mode(struct amdgpu_dm_connector *aconnector,
7319 struct drm_display_mode *mode)
7321 struct drm_display_mode *m;
7323 list_for_each_entry (m, &aconnector->base.probed_modes, head) {
7324 if (drm_mode_equal(m, mode))
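/*
 * Synthesize "FreeSync video" modes: take the highest-refresh mode at the
 * preferred resolution and, for each common content rate within the panel's
 * VRR range, add a copy whose vtotal is stretched so that the unchanged pixel
 * clock yields that lower refresh rate.
 */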
7331 static uint add_fs_modes(struct amdgpu_dm_connector *aconnector)
7333 const struct drm_display_mode *m;
7334 struct drm_display_mode *new_mode;
7336 uint32_t new_modes_count = 0;
7338 /* Standard FPS values
7347 * 60 - Commonly used
7348 * 48,72,96 - Multiples of 24
7350 const uint32_t common_rates[] = { 23976, 24000, 25000, 29970, 30000,
7351 48000, 50000, 60000, 72000, 96000 };
7354 * Find mode with highest refresh rate with the same resolution
7355 * as the preferred mode. Some monitors report a preferred mode
7356 * with lower resolution than the highest refresh rate supported.
7359 m = get_highest_refresh_rate_mode(aconnector, true);
7363 for (i = 0; i < ARRAY_SIZE(common_rates); i++) {
7364 uint64_t target_vtotal, target_vtotal_diff;
7367 if (drm_mode_vrefresh(m) * 1000 < common_rates[i])
7370 if (common_rates[i] < aconnector->min_vfreq * 1000 ||
7371 common_rates[i] > aconnector->max_vfreq * 1000)
7374 num = (unsigned long long)m->clock * 1000 * 1000;
7375 den = common_rates[i] * (unsigned long long)m->htotal;
7376 target_vtotal = div_u64(num, den);
7377 target_vtotal_diff = target_vtotal - m->vtotal;
7379 /* Check for illegal modes */
7380 if (m->vsync_start + target_vtotal_diff < m->vdisplay ||
7381 m->vsync_end + target_vtotal_diff < m->vsync_start ||
7382 m->vtotal + target_vtotal_diff < m->vsync_end)
7385 new_mode = drm_mode_duplicate(aconnector->base.dev, m);
7389 new_mode->vtotal += (u16)target_vtotal_diff;
7390 new_mode->vsync_start += (u16)target_vtotal_diff;
7391 new_mode->vsync_end += (u16)target_vtotal_diff;
7392 new_mode->type &= ~DRM_MODE_TYPE_PREFERRED;
7393 new_mode->type |= DRM_MODE_TYPE_DRIVER;
7395 if (!is_duplicate_mode(aconnector, new_mode)) {
7396 drm_mode_probed_add(&aconnector->base, new_mode);
7397 new_modes_count += 1;
7399 drm_mode_destroy(aconnector->base.dev, new_mode);
7402 return new_modes_count;
7405 static void amdgpu_dm_connector_add_freesync_modes(struct drm_connector *connector,
7408 struct amdgpu_dm_connector *amdgpu_dm_connector =
7409 to_amdgpu_dm_connector(connector);
7411 if (!(amdgpu_freesync_vid_mode && edid))
7414 if (amdgpu_dm_connector->max_vfreq - amdgpu_dm_connector->min_vfreq > 10)
7415 amdgpu_dm_connector->num_modes +=
7416 add_fs_modes(amdgpu_dm_connector);
7419 static int amdgpu_dm_connector_get_modes(struct drm_connector *connector)
7421 struct amdgpu_dm_connector *amdgpu_dm_connector =
7422 to_amdgpu_dm_connector(connector);
7423 struct drm_encoder *encoder;
7424 struct edid *edid = amdgpu_dm_connector->edid;
7426 encoder = amdgpu_dm_connector_to_encoder(connector);
7428 if (!drm_edid_is_valid(edid)) {
7429 amdgpu_dm_connector->num_modes =
7430 drm_add_modes_noedid(connector, 640, 480);
7432 amdgpu_dm_connector_ddc_get_modes(connector, edid);
7433 amdgpu_dm_connector_add_common_modes(encoder, connector);
7434 amdgpu_dm_connector_add_freesync_modes(connector, edid);
7436 amdgpu_dm_fbc_init(connector);
7438 return amdgpu_dm_connector->num_modes;
7441 void amdgpu_dm_connector_init_helper(struct amdgpu_display_manager *dm,
7442 struct amdgpu_dm_connector *aconnector,
7444 struct dc_link *link,
7447 struct amdgpu_device *adev = drm_to_adev(dm->ddev);
7450 * Some of the properties below require access to state, like bpc.
7451 * Allocate some default initial connector state with our reset helper.
7453 if (aconnector->base.funcs->reset)
7454 aconnector->base.funcs->reset(&aconnector->base);
7456 aconnector->connector_id = link_index;
7457 aconnector->dc_link = link;
7458 aconnector->base.interlace_allowed = false;
7459 aconnector->base.doublescan_allowed = false;
7460 aconnector->base.stereo_allowed = false;
7461 aconnector->base.dpms = DRM_MODE_DPMS_OFF;
7462 aconnector->hpd.hpd = AMDGPU_HPD_NONE; /* not used */
7463 aconnector->audio_inst = -1;
7464 mutex_init(&aconnector->hpd_lock);
7467 * Configure HPD hot plug support: connector->polled defaults to 0,
7468 * which means HPD hot plug is not supported.
7470 switch (connector_type) {
7471 case DRM_MODE_CONNECTOR_HDMIA:
7472 aconnector->base.polled = DRM_CONNECTOR_POLL_HPD;
7473 aconnector->base.ycbcr_420_allowed =
7474 link->link_enc->features.hdmi_ycbcr420_supported ? true : false;
7476 case DRM_MODE_CONNECTOR_DisplayPort:
7477 aconnector->base.polled = DRM_CONNECTOR_POLL_HPD;
7478 aconnector->base.ycbcr_420_allowed =
7479 link->link_enc->features.dp_ycbcr420_supported ? true : false;
7481 case DRM_MODE_CONNECTOR_DVID:
7482 aconnector->base.polled = DRM_CONNECTOR_POLL_HPD;
7488 drm_object_attach_property(&aconnector->base.base,
7489 dm->ddev->mode_config.scaling_mode_property,
7490 DRM_MODE_SCALE_NONE);
7492 drm_object_attach_property(&aconnector->base.base,
7493 adev->mode_info.underscan_property,
7495 drm_object_attach_property(&aconnector->base.base,
7496 adev->mode_info.underscan_hborder_property,
7498 drm_object_attach_property(&aconnector->base.base,
7499 adev->mode_info.underscan_vborder_property,
7502 if (!aconnector->mst_port)
7503 drm_connector_attach_max_bpc_property(&aconnector->base, 8, 16);
7505 /* This defaults to the max in the range, but we want 8bpc for non-edp. */
7506 aconnector->base.state->max_bpc = (connector_type == DRM_MODE_CONNECTOR_eDP) ? 16 : 8;
7507 aconnector->base.state->max_requested_bpc = aconnector->base.state->max_bpc;
7509 if (connector_type == DRM_MODE_CONNECTOR_eDP &&
7510 (dc_is_dmcu_initialized(adev->dm.dc) || adev->dm.dc->ctx->dmub_srv)) {
7511 drm_object_attach_property(&aconnector->base.base,
7512 adev->mode_info.abm_level_property, 0);
7515 if (connector_type == DRM_MODE_CONNECTOR_HDMIA ||
7516 connector_type == DRM_MODE_CONNECTOR_DisplayPort ||
7517 connector_type == DRM_MODE_CONNECTOR_eDP) {
7518 drm_object_attach_property(
7519 &aconnector->base.base,
7520 dm->ddev->mode_config.hdr_output_metadata_property, 0);
7522 if (!aconnector->mst_port)
7523 drm_connector_attach_vrr_capable_property(&aconnector->base);
7525 #ifdef CONFIG_DRM_AMD_DC_HDCP
7526 if (adev->dm.hdcp_workqueue)
7527 drm_connector_attach_content_protection_property(&aconnector->base, true);
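/*
 * I2C transfer hook: translate the i2c_msg array into a DC i2c_command and
 * submit it over the DDC channel owned by this connector's link.
 */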
7532 static int amdgpu_dm_i2c_xfer(struct i2c_adapter *i2c_adap,
7533 struct i2c_msg *msgs, int num)
7535 struct amdgpu_i2c_adapter *i2c = i2c_get_adapdata(i2c_adap);
7536 struct ddc_service *ddc_service = i2c->ddc_service;
7537 struct i2c_command cmd;
7541 cmd.payloads = kcalloc(num, sizeof(struct i2c_payload), GFP_KERNEL);
7546 cmd.number_of_payloads = num;
7547 cmd.engine = I2C_COMMAND_ENGINE_DEFAULT;
7550 for (i = 0; i < num; i++) {
7551 cmd.payloads[i].write = !(msgs[i].flags & I2C_M_RD);
7552 cmd.payloads[i].address = msgs[i].addr;
7553 cmd.payloads[i].length = msgs[i].len;
7554 cmd.payloads[i].data = msgs[i].buf;
7558 ddc_service->ctx->dc,
7559 ddc_service->ddc_pin->hw_info.ddc_channel,
7563 kfree(cmd.payloads);
7567 static u32 amdgpu_dm_i2c_func(struct i2c_adapter *adap)
7569 return I2C_FUNC_I2C | I2C_FUNC_SMBUS_EMUL;
7572 static const struct i2c_algorithm amdgpu_dm_i2c_algo = {
7573 .master_xfer = amdgpu_dm_i2c_xfer,
7574 .functionality = amdgpu_dm_i2c_func,
7577 static struct amdgpu_i2c_adapter *
7578 create_i2c(struct ddc_service *ddc_service,
7582 struct amdgpu_device *adev = ddc_service->ctx->driver_context;
7583 struct amdgpu_i2c_adapter *i2c;
7585 i2c = kzalloc(sizeof(struct amdgpu_i2c_adapter), GFP_KERNEL);
7588 i2c->base.owner = THIS_MODULE;
7589 i2c->base.class = I2C_CLASS_DDC;
7590 i2c->base.dev.parent = &adev->pdev->dev;
7591 i2c->base.algo = &amdgpu_dm_i2c_algo;
7592 snprintf(i2c->base.name, sizeof(i2c->base.name), "AMDGPU DM i2c hw bus %d", link_index);
7593 i2c_set_adapdata(&i2c->base, i2c);
7594 i2c->ddc_service = ddc_service;
7595 i2c->ddc_service->ddc_pin->hw_info.ddc_channel = link_index;
7602 * Note: this function assumes that dc_link_detect() was called for the
7603 * dc_link which will be represented by this aconnector.
7605 static int amdgpu_dm_connector_init(struct amdgpu_display_manager *dm,
7606 struct amdgpu_dm_connector *aconnector,
7607 uint32_t link_index,
7608 struct amdgpu_encoder *aencoder)
7612 struct dc *dc = dm->dc;
7613 struct dc_link *link = dc_get_link_at_index(dc, link_index);
7614 struct amdgpu_i2c_adapter *i2c;
7616 link->priv = aconnector;
7618 DRM_DEBUG_DRIVER("%s()\n", __func__);
7620 i2c = create_i2c(link->ddc, link->link_index, &res);
7622 DRM_ERROR("Failed to create i2c adapter data\n");
7626 aconnector->i2c = i2c;
7627 res = i2c_add_adapter(&i2c->base);
7630 DRM_ERROR("Failed to register hw i2c %d\n", link->link_index);
7634 connector_type = to_drm_connector_type(link->connector_signal);
7636 res = drm_connector_init_with_ddc(
7639 &amdgpu_dm_connector_funcs,
7644 DRM_ERROR("connector_init failed\n");
7645 aconnector->connector_id = -1;
7649 drm_connector_helper_add(
7651 &amdgpu_dm_connector_helper_funcs);
7653 amdgpu_dm_connector_init_helper(
7660 drm_connector_attach_encoder(
7661 &aconnector->base, &aencoder->base);
7663 if (connector_type == DRM_MODE_CONNECTOR_DisplayPort
7664 || connector_type == DRM_MODE_CONNECTOR_eDP)
7665 amdgpu_dm_initialize_dp_connector(dm, aconnector, link->link_index);
7670 aconnector->i2c = NULL;
7675 int amdgpu_dm_get_encoder_crtc_mask(struct amdgpu_device *adev)
7677 switch (adev->mode_info.num_crtc) {
7694 static int amdgpu_dm_encoder_init(struct drm_device *dev,
7695 struct amdgpu_encoder *aencoder,
7696 uint32_t link_index)
7698 struct amdgpu_device *adev = drm_to_adev(dev);
7700 int res = drm_encoder_init(dev,
7702 &amdgpu_dm_encoder_funcs,
7703 DRM_MODE_ENCODER_TMDS,
7706 aencoder->base.possible_crtcs = amdgpu_dm_get_encoder_crtc_mask(adev);
7709 aencoder->encoder_id = link_index;
7711 aencoder->encoder_id = -1;
7713 drm_encoder_helper_add(&aencoder->base, &amdgpu_dm_encoder_helper_funcs);
7718 static void manage_dm_interrupts(struct amdgpu_device *adev,
7719 struct amdgpu_crtc *acrtc,
7723 * We have no guarantee that the frontend index maps to the same
7724 * backend index - some even map to more than one.
7726 * TODO: Use a different interrupt or check DC itself for the mapping.
7729 amdgpu_display_crtc_idx_to_irq_type(
7734 drm_crtc_vblank_on(&acrtc->base);
7737 &adev->pageflip_irq,
7739 #if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
7746 #if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
7754 &adev->pageflip_irq,
7756 drm_crtc_vblank_off(&acrtc->base);
7760 static void dm_update_pflip_irq_state(struct amdgpu_device *adev,
7761 struct amdgpu_crtc *acrtc)
7764 amdgpu_display_crtc_idx_to_irq_type(adev, acrtc->crtc_id);
7767 * This reads the current state for the IRQ and forcibly reapplies
7768 * the setting to hardware.
7770 amdgpu_irq_update(adev, &adev->pageflip_irq, irq_type);
7774 is_scaling_state_different(const struct dm_connector_state *dm_state,
7775 const struct dm_connector_state *old_dm_state)
7777 if (dm_state->scaling != old_dm_state->scaling)
7779 if (!dm_state->underscan_enable && old_dm_state->underscan_enable) {
7780 if (old_dm_state->underscan_hborder != 0 && old_dm_state->underscan_vborder != 0)
7782 } else if (dm_state->underscan_enable && !old_dm_state->underscan_enable) {
7783 if (dm_state->underscan_hborder != 0 && dm_state->underscan_vborder != 0)
7785 } else if (dm_state->underscan_hborder != old_dm_state->underscan_hborder ||
7786 dm_state->underscan_vborder != old_dm_state->underscan_vborder)
7791 #ifdef CONFIG_DRM_AMD_DC_HDCP
7792 static bool is_content_protection_different(struct drm_connector_state *state,
7793 const struct drm_connector_state *old_state,
7794 const struct drm_connector *connector, struct hdcp_workqueue *hdcp_w)
7796 struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
7797 struct dm_connector_state *dm_con_state = to_dm_connector_state(connector->state);
7799 /* Handle: Type0/1 change */
7800 if (old_state->hdcp_content_type != state->hdcp_content_type &&
7801 state->content_protection != DRM_MODE_CONTENT_PROTECTION_UNDESIRED) {
7802 state->content_protection = DRM_MODE_CONTENT_PROTECTION_DESIRED;
7806 /* CP is being re-enabled, ignore this.
7808 * Handles: ENABLED -> DESIRED
7810 if (old_state->content_protection == DRM_MODE_CONTENT_PROTECTION_ENABLED &&
7811 state->content_protection == DRM_MODE_CONTENT_PROTECTION_DESIRED) {
7812 state->content_protection = DRM_MODE_CONTENT_PROTECTION_ENABLED;
7816 /* S3 resume case, since old state will always be 0 (UNDESIRED) and the restored state will be ENABLED
7818 * Handles: UNDESIRED -> ENABLED
7820 if (old_state->content_protection == DRM_MODE_CONTENT_PROTECTION_UNDESIRED &&
7821 state->content_protection == DRM_MODE_CONTENT_PROTECTION_ENABLED)
7822 state->content_protection = DRM_MODE_CONTENT_PROTECTION_DESIRED;
7824 /* Check if something is connected/enabled; otherwise we would start HDCP with nothing connected/enabled
7825 * (hot-plug, headless S3, DPMS).
7827 * Handles: DESIRED -> DESIRED (Special case)
7829 if (dm_con_state->update_hdcp && state->content_protection == DRM_MODE_CONTENT_PROTECTION_DESIRED &&
7830 connector->dpms == DRM_MODE_DPMS_ON && aconnector->dc_sink != NULL) {
7831 dm_con_state->update_hdcp = false;
7836 * Handles: UNDESIRED -> UNDESIRED
7837 * DESIRED -> DESIRED
7838 * ENABLED -> ENABLED
7840 if (old_state->content_protection == state->content_protection)
7844 * Handles: UNDESIRED -> DESIRED
7845 * DESIRED -> UNDESIRED
7846 * ENABLED -> UNDESIRED
7848 if (state->content_protection != DRM_MODE_CONTENT_PROTECTION_ENABLED)
7852 * Handles: DESIRED -> ENABLED
7858 static void remove_stream(struct amdgpu_device *adev,
7859 struct amdgpu_crtc *acrtc,
7860 struct dc_stream_state *stream)
7862 /* this is the update mode case */
7864 acrtc->otg_inst = -1;
7865 acrtc->enabled = false;
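/*
 * Compute the DC cursor position from the plane state: reject cursors larger
 * than the hardware maximum, leave fully off-screen cursors disabled, and
 * fold negative coordinates into the hotspot so the cursor can be partially
 * scrolled off the top/left edge of the screen.
 */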
7868 static int get_cursor_position(struct drm_plane *plane, struct drm_crtc *crtc,
7869 struct dc_cursor_position *position)
7871 struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
7873 int xorigin = 0, yorigin = 0;
7875 if (!crtc || !plane->state->fb)
7878 if ((plane->state->crtc_w > amdgpu_crtc->max_cursor_width) ||
7879 (plane->state->crtc_h > amdgpu_crtc->max_cursor_height)) {
7880 DRM_ERROR("%s: bad cursor width or height %d x %d\n",
7882 plane->state->crtc_w,
7883 plane->state->crtc_h);
7887 x = plane->state->crtc_x;
7888 y = plane->state->crtc_y;
7890 if (x <= -amdgpu_crtc->max_cursor_width ||
7891 y <= -amdgpu_crtc->max_cursor_height)
7895 xorigin = min(-x, amdgpu_crtc->max_cursor_width - 1);
7899 yorigin = min(-y, amdgpu_crtc->max_cursor_height - 1);
7902 position->enable = true;
7903 position->translate_by_source = true;
7906 position->x_hotspot = xorigin;
7907 position->y_hotspot = yorigin;
7912 static void handle_cursor_update(struct drm_plane *plane,
7913 struct drm_plane_state *old_plane_state)
7915 struct amdgpu_device *adev = drm_to_adev(plane->dev);
7916 struct amdgpu_framebuffer *afb = to_amdgpu_framebuffer(plane->state->fb);
7917 struct drm_crtc *crtc = afb ? plane->state->crtc : old_plane_state->crtc;
7918 struct dm_crtc_state *crtc_state = crtc ? to_dm_crtc_state(crtc->state) : NULL;
7919 struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
7920 uint64_t address = afb ? afb->address : 0;
7921 struct dc_cursor_position position = {0};
7922 struct dc_cursor_attributes attributes;
7925 if (!plane->state->fb && !old_plane_state->fb)
7928 DC_LOG_CURSOR("%s: crtc_id=%d with size %d to %d\n",
7930 amdgpu_crtc->crtc_id,
7931 plane->state->crtc_w,
7932 plane->state->crtc_h);
7934 ret = get_cursor_position(plane, crtc, &position);
7938 if (!position.enable) {
7939 /* turn off cursor */
7940 if (crtc_state && crtc_state->stream) {
7941 mutex_lock(&adev->dm.dc_lock);
7942 dc_stream_set_cursor_position(crtc_state->stream,
7944 mutex_unlock(&adev->dm.dc_lock);
7949 amdgpu_crtc->cursor_width = plane->state->crtc_w;
7950 amdgpu_crtc->cursor_height = plane->state->crtc_h;
7952 memset(&attributes, 0, sizeof(attributes));
7953 attributes.address.high_part = upper_32_bits(address);
7954 attributes.address.low_part = lower_32_bits(address);
7955 attributes.width = plane->state->crtc_w;
7956 attributes.height = plane->state->crtc_h;
7957 attributes.color_format = CURSOR_MODE_COLOR_PRE_MULTIPLIED_ALPHA;
7958 attributes.rotation_angle = 0;
7959 attributes.attribute_flags.value = 0;
7961 attributes.pitch = afb->base.pitches[0] / afb->base.format->cpp[0];
7963 if (crtc_state->stream) {
7964 mutex_lock(&adev->dm.dc_lock);
7965 if (!dc_stream_set_cursor_attributes(crtc_state->stream,
7967 DRM_ERROR("DC failed to set cursor attributes\n");
7969 if (!dc_stream_set_cursor_position(crtc_state->stream,
7971 DRM_ERROR("DC failed to set cursor position\n");
7972 mutex_unlock(&adev->dm.dc_lock);
7976 static void prepare_flip_isr(struct amdgpu_crtc *acrtc)
7979 assert_spin_locked(&acrtc->base.dev->event_lock);
7980 WARN_ON(acrtc->event);
7982 acrtc->event = acrtc->base.state->event;
7984 /* Set the flip status */
7985 acrtc->pflip_status = AMDGPU_FLIP_SUBMITTED;
7987 /* Mark this event as consumed */
7988 acrtc->base.state->event = NULL;
7990 DC_LOG_PFLIP("crtc:%d, pflip_stat:AMDGPU_FLIP_SUBMITTED\n",
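/*
 * Per-flip VRR bookkeeping: under the event lock, let the freesync module
 * process the flip, rebuild the VRR infopacket, note whether the timing or
 * infopacket changed, and mirror the results into the stream and the
 * per-CRTC IRQ parameters used by the interrupt handlers.
 */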
7994 static void update_freesync_state_on_stream(
7995 struct amdgpu_display_manager *dm,
7996 struct dm_crtc_state *new_crtc_state,
7997 struct dc_stream_state *new_stream,
7998 struct dc_plane_state *surface,
7999 u32 flip_timestamp_in_us)
8001 struct mod_vrr_params vrr_params;
8002 struct dc_info_packet vrr_infopacket = {0};
8003 struct amdgpu_device *adev = dm->adev;
8004 struct amdgpu_crtc *acrtc = to_amdgpu_crtc(new_crtc_state->base.crtc);
8005 unsigned long flags;
8006 bool pack_sdp_v1_3 = false;
8012 * TODO: Determine why min/max totals and vrefresh can be 0 here.
8013 * For now it's sufficient to just guard against these conditions.
8016 if (!new_stream->timing.h_total || !new_stream->timing.v_total)
8019 spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);
8020 vrr_params = acrtc->dm_irq_params.vrr_params;
8023 mod_freesync_handle_preflip(
8024 dm->freesync_module,
8027 flip_timestamp_in_us,
8030 if (adev->family < AMDGPU_FAMILY_AI &&
8031 amdgpu_dm_vrr_active(new_crtc_state)) {
8032 mod_freesync_handle_v_update(dm->freesync_module,
8033 new_stream, &vrr_params);
8035 /* Need to call this before the frame ends. */
8036 dc_stream_adjust_vmin_vmax(dm->dc,
8037 new_crtc_state->stream,
8038 &vrr_params.adjust);
8042 mod_freesync_build_vrr_infopacket(
8043 dm->freesync_module,
8047 TRANSFER_FUNC_UNKNOWN,
8051 new_crtc_state->freesync_timing_changed |=
8052 (memcmp(&acrtc->dm_irq_params.vrr_params.adjust,
8054 sizeof(vrr_params.adjust)) != 0);
8056 new_crtc_state->freesync_vrr_info_changed |=
8057 (memcmp(&new_crtc_state->vrr_infopacket,
8059 sizeof(vrr_infopacket)) != 0);
8061 acrtc->dm_irq_params.vrr_params = vrr_params;
8062 new_crtc_state->vrr_infopacket = vrr_infopacket;
8064 new_stream->adjust = acrtc->dm_irq_params.vrr_params.adjust;
8065 new_stream->vrr_infopacket = vrr_infopacket;
8067 if (new_crtc_state->freesync_vrr_info_changed)
8068 DRM_DEBUG_KMS("VRR packet update: crtc=%u enabled=%d state=%d",
8069 new_crtc_state->base.crtc->base.id,
8070 (int)new_crtc_state->base.vrr_enabled,
8071 (int)vrr_params.state);
8073 spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
8076 static void update_stream_irq_parameters(
8077 struct amdgpu_display_manager *dm,
8078 struct dm_crtc_state *new_crtc_state)
8080 struct dc_stream_state *new_stream = new_crtc_state->stream;
8081 struct mod_vrr_params vrr_params;
8082 struct mod_freesync_config config = new_crtc_state->freesync_config;
8083 struct amdgpu_device *adev = dm->adev;
8084 struct amdgpu_crtc *acrtc = to_amdgpu_crtc(new_crtc_state->base.crtc);
8085 unsigned long flags;
8091 * TODO: Determine why min/max totals and vrefresh can be 0 here.
8092 * For now it's sufficient to just guard against these conditions.
8094 if (!new_stream->timing.h_total || !new_stream->timing.v_total)
8097 spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);
8098 vrr_params = acrtc->dm_irq_params.vrr_params;
8100 if (new_crtc_state->vrr_supported &&
8101 config.min_refresh_in_uhz &&
8102 config.max_refresh_in_uhz) {
8104 * if freesync compatible mode was set, config.state will be set
8107 if (config.state == VRR_STATE_ACTIVE_FIXED && config.fixed_refresh_in_uhz &&
8108 (!drm_atomic_crtc_needs_modeset(&new_crtc_state->base) ||
8109 new_crtc_state->freesync_config.state == VRR_STATE_ACTIVE_FIXED)) {
8110 vrr_params.max_refresh_in_uhz = config.max_refresh_in_uhz;
8111 vrr_params.min_refresh_in_uhz = config.min_refresh_in_uhz;
8112 vrr_params.fixed_refresh_in_uhz = config.fixed_refresh_in_uhz;
8113 vrr_params.state = VRR_STATE_ACTIVE_FIXED;
8115 config.state = new_crtc_state->base.vrr_enabled ?
8116 VRR_STATE_ACTIVE_VARIABLE :
8120 config.state = VRR_STATE_UNSUPPORTED;
8123 mod_freesync_build_vrr_params(dm->freesync_module,
8125 &config, &vrr_params);
8127 new_crtc_state->freesync_timing_changed |=
8128 (memcmp(&acrtc->dm_irq_params.vrr_params.adjust,
8129 &vrr_params.adjust, sizeof(vrr_params.adjust)) != 0);
8131 new_crtc_state->freesync_config = config;
8132 /* Copy state for access from DM IRQ handler */
8133 acrtc->dm_irq_params.freesync_config = config;
8134 acrtc->dm_irq_params.active_planes = new_crtc_state->active_planes;
8135 acrtc->dm_irq_params.vrr_params = vrr_params;
8136 spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
8139 static void amdgpu_dm_handle_vrr_transition(struct dm_crtc_state *old_state,
8140 struct dm_crtc_state *new_state)
8142 bool old_vrr_active = amdgpu_dm_vrr_active(old_state);
8143 bool new_vrr_active = amdgpu_dm_vrr_active(new_state);
8145 if (!old_vrr_active && new_vrr_active) {
8146 /* Transition VRR inactive -> active:
8147 * While VRR is active, we must not disable the vblank irq, as
8148 * re-enabling it after a disable would compute bogus vblank/pflip
8149 * timestamps if that happened inside the display front-porch.
8151 * We also need vupdate irq for the actual core vblank handling
8154 dm_set_vupdate_irq(new_state->base.crtc, true);
8155 drm_crtc_vblank_get(new_state->base.crtc);
8156 DRM_DEBUG_DRIVER("%s: crtc=%u VRR off->on: Get vblank ref\n",
8157 __func__, new_state->base.crtc->base.id);
8158 } else if (old_vrr_active && !new_vrr_active) {
8159 /* Transition VRR active -> inactive:
8160 * Allow vblank irq disable again for fixed refresh rate.
8162 dm_set_vupdate_irq(new_state->base.crtc, false);
8163 drm_crtc_vblank_put(new_state->base.crtc);
8164 DRM_DEBUG_DRIVER("%s: crtc=%u VRR on->off: Drop vblank ref\n",
8165 __func__, new_state->base.crtc->base.id);
8169 static void amdgpu_dm_commit_cursors(struct drm_atomic_state *state)
8171 struct drm_plane *plane;
8172 struct drm_plane_state *old_plane_state, *new_plane_state;
8176 * TODO: Make this per-stream so we don't issue redundant updates for
8177 * commits with multiple streams.
8179 for_each_oldnew_plane_in_state(state, plane, old_plane_state,
8181 if (plane->type == DRM_PLANE_TYPE_CURSOR)
8182 handle_cursor_update(plane, old_plane_state);
8185 static void amdgpu_dm_commit_planes(struct drm_atomic_state *state,
8186 struct dc_state *dc_state,
8187 struct drm_device *dev,
8188 struct amdgpu_display_manager *dm,
8189 struct drm_crtc *pcrtc,
8190 bool wait_for_vblank)
8193 uint64_t timestamp_ns;
8194 struct drm_plane *plane;
8195 struct drm_plane_state *old_plane_state, *new_plane_state;
8196 struct amdgpu_crtc *acrtc_attach = to_amdgpu_crtc(pcrtc);
8197 struct drm_crtc_state *new_pcrtc_state =
8198 drm_atomic_get_new_crtc_state(state, pcrtc);
8199 struct dm_crtc_state *acrtc_state = to_dm_crtc_state(new_pcrtc_state);
8200 struct dm_crtc_state *dm_old_crtc_state =
8201 to_dm_crtc_state(drm_atomic_get_old_crtc_state(state, pcrtc));
8202 int planes_count = 0, vpos, hpos;
8204 unsigned long flags;
8205 struct amdgpu_bo *abo;
8206 uint32_t target_vblank, last_flip_vblank;
8207 bool vrr_active = amdgpu_dm_vrr_active(acrtc_state);
8208 bool pflip_present = false;
8210 struct dc_surface_update surface_updates[MAX_SURFACES];
8211 struct dc_plane_info plane_infos[MAX_SURFACES];
8212 struct dc_scaling_info scaling_infos[MAX_SURFACES];
8213 struct dc_flip_addrs flip_addrs[MAX_SURFACES];
8214 struct dc_stream_update stream_update;
8217 bundle = kzalloc(sizeof(*bundle), GFP_KERNEL);
8220 dm_error("Failed to allocate update bundle\n");
8225 * Disable the cursor first if we're disabling all the planes.
8226 * It'll remain on the screen after the planes are re-enabled
8229 if (acrtc_state->active_planes == 0)
8230 amdgpu_dm_commit_cursors(state);
8232 /* update planes when needed */
8233 for_each_oldnew_plane_in_state(state, plane, old_plane_state, new_plane_state, i) {
8234 struct drm_crtc *crtc = new_plane_state->crtc;
8235 struct drm_crtc_state *new_crtc_state;
8236 struct drm_framebuffer *fb = new_plane_state->fb;
8237 struct amdgpu_framebuffer *afb = (struct amdgpu_framebuffer *)fb;
8238 bool plane_needs_flip;
8239 struct dc_plane_state *dc_plane;
8240 struct dm_plane_state *dm_new_plane_state = to_dm_plane_state(new_plane_state);
8242 /* Cursor plane is handled after stream updates */
8243 if (plane->type == DRM_PLANE_TYPE_CURSOR)
8246 if (!fb || !crtc || pcrtc != crtc)
8249 new_crtc_state = drm_atomic_get_new_crtc_state(state, crtc);
8250 if (!new_crtc_state->active)
8253 dc_plane = dm_new_plane_state->dc_state;
8255 bundle->surface_updates[planes_count].surface = dc_plane;
8256 if (new_pcrtc_state->color_mgmt_changed) {
8257 bundle->surface_updates[planes_count].gamma = dc_plane->gamma_correction;
8258 bundle->surface_updates[planes_count].in_transfer_func = dc_plane->in_transfer_func;
8259 bundle->surface_updates[planes_count].gamut_remap_matrix = &dc_plane->gamut_remap_matrix;
8262 fill_dc_scaling_info(new_plane_state,
8263 &bundle->scaling_infos[planes_count]);
8265 bundle->surface_updates[planes_count].scaling_info =
8266 &bundle->scaling_infos[planes_count];
8268 plane_needs_flip = old_plane_state->fb && new_plane_state->fb;
8270 pflip_present = pflip_present || plane_needs_flip;
8272 if (!plane_needs_flip) {
8277 abo = gem_to_amdgpu_bo(fb->obj[0]);
8280 * Wait for all fences on this FB. Do a limited wait to avoid
8281 * deadlock during GPU reset when this fence will not signal
8282 * but we hold the reservation lock for the BO.
8284 r = dma_resv_wait_timeout_rcu(abo->tbo.base.resv, true,
8286 msecs_to_jiffies(5000));
8287 if (unlikely(r <= 0))
8288 DRM_ERROR("Waiting for fences timed out!");
8290 fill_dc_plane_info_and_addr(
8291 dm->adev, new_plane_state,
8293 &bundle->plane_infos[planes_count],
8294 &bundle->flip_addrs[planes_count].address,
8295 afb->tmz_surface, false);
8297 DRM_DEBUG_ATOMIC("plane: id=%d dcc_en=%d\n",
8298 new_plane_state->plane->index,
8299 bundle->plane_infos[planes_count].dcc.enable);
8301 bundle->surface_updates[planes_count].plane_info =
8302 &bundle->plane_infos[planes_count];
8305 * Only allow immediate flips for fast updates that don't
8306 * change FB pitch, DCC state, rotation or mirroring.
8308 bundle->flip_addrs[planes_count].flip_immediate =
8309 crtc->state->async_flip &&
8310 acrtc_state->update_type == UPDATE_TYPE_FAST;
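/* DC consumes the flip timestamp in microseconds, so convert from ktime's nanoseconds. */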
8312 timestamp_ns = ktime_get_ns();
8313 bundle->flip_addrs[planes_count].flip_timestamp_in_us = div_u64(timestamp_ns, 1000);
8314 bundle->surface_updates[planes_count].flip_addr = &bundle->flip_addrs[planes_count];
8315 bundle->surface_updates[planes_count].surface = dc_plane;
8317 if (!bundle->surface_updates[planes_count].surface) {
8318 DRM_ERROR("No surface for CRTC: id=%d\n",
8319 acrtc_attach->crtc_id);
8323 if (plane == pcrtc->primary)
8324 update_freesync_state_on_stream(
8327 acrtc_state->stream,
8329 bundle->flip_addrs[planes_count].flip_timestamp_in_us);
8331 DRM_DEBUG_ATOMIC("%s Flipping to hi: 0x%x, low: 0x%x\n",
8333 bundle->flip_addrs[planes_count].address.grph.addr.high_part,
8334 bundle->flip_addrs[planes_count].address.grph.addr.low_part);
8340 if (pflip_present) {
8342 /* Use old throttling in non-vrr fixed refresh rate mode
8343 * to keep flip scheduling based on target vblank counts
8344 * working in a backwards compatible way, e.g., for
8345 * clients using the GLX_OML_sync_control extension or
8346 * DRI3/Present extension with defined target_msc.
8348 last_flip_vblank = amdgpu_get_vblank_counter_kms(pcrtc);
8351 /* For variable refresh rate mode only:
8352 * Get vblank of last completed flip to avoid > 1 vrr
8353 * flips per video frame by use of throttling, but allow
8354 * flip programming anywhere in the possibly large
8355 * variable vrr vblank interval for fine-grained flip
8356 * timing control and more opportunity to avoid stutter
8357 * on late submission of flips.
8359 spin_lock_irqsave(&pcrtc->dev->event_lock, flags);
8360 last_flip_vblank = acrtc_attach->dm_irq_params.last_flip_vblank;
8361 spin_unlock_irqrestore(&pcrtc->dev->event_lock, flags);
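/*
 * wait_for_vblank is a bool, so the target is either the vblank right
 * after the last completed flip (throttled) or the last one itself
 * (when an async flip was requested).
 */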
8364 target_vblank = last_flip_vblank + wait_for_vblank;
8367 * Wait until we're out of the vertical blank period before the one
8368 * targeted by the flip
8370 while ((acrtc_attach->enabled &&
8371 (amdgpu_display_get_crtc_scanoutpos(dm->ddev, acrtc_attach->crtc_id,
8372 0, &vpos, &hpos, NULL,
8373 NULL, &pcrtc->hwmode)
8374 & (DRM_SCANOUTPOS_VALID | DRM_SCANOUTPOS_IN_VBLANK)) ==
8375 (DRM_SCANOUTPOS_VALID | DRM_SCANOUTPOS_IN_VBLANK) &&
8376 (int)(target_vblank -
8377 amdgpu_get_vblank_counter_kms(pcrtc)) > 0)) {
8378 usleep_range(1000, 1100);
8382 * Prepare the flip event for the pageflip interrupt to handle.
8384 * This only works in the case where we've already turned on the
8385 * appropriate hardware blocks (e.g. HUBP) so in the transition case
8386 * from 0 -> n planes we have to skip a hardware generated event
8387 * and rely on sending it from software.
8389 if (acrtc_attach->base.state->event &&
8390 acrtc_state->active_planes > 0) {
8391 drm_crtc_vblank_get(pcrtc);
8393 spin_lock_irqsave(&pcrtc->dev->event_lock, flags);
8395 WARN_ON(acrtc_attach->pflip_status != AMDGPU_FLIP_NONE);
8396 prepare_flip_isr(acrtc_attach);
8398 spin_unlock_irqrestore(&pcrtc->dev->event_lock, flags);
8401 if (acrtc_state->stream) {
8402 if (acrtc_state->freesync_vrr_info_changed)
8403 bundle->stream_update.vrr_infopacket =
8404 &acrtc_state->stream->vrr_infopacket;
8408 /* Update the planes if changed or disable if we don't have any. */
8409 if ((planes_count || acrtc_state->active_planes == 0) &&
8410 acrtc_state->stream) {
8411 bundle->stream_update.stream = acrtc_state->stream;
8412 if (new_pcrtc_state->mode_changed) {
8413 bundle->stream_update.src = acrtc_state->stream->src;
8414 bundle->stream_update.dst = acrtc_state->stream->dst;
8417 if (new_pcrtc_state->color_mgmt_changed) {
8419 * TODO: This isn't fully correct since we've actually
8420 * already modified the stream in place.
8422 bundle->stream_update.gamut_remap =
8423 &acrtc_state->stream->gamut_remap_matrix;
8424 bundle->stream_update.output_csc_transform =
8425 &acrtc_state->stream->csc_color_matrix;
8426 bundle->stream_update.out_transfer_func =
8427 acrtc_state->stream->out_transfer_func;
8430 acrtc_state->stream->abm_level = acrtc_state->abm_level;
8431 if (acrtc_state->abm_level != dm_old_crtc_state->abm_level)
8432 bundle->stream_update.abm_level = &acrtc_state->abm_level;
8435 * If FreeSync state on the stream has changed then we need to
8436 * re-adjust the min/max bounds now that DC doesn't handle this
8437 * as part of commit.
8439 if (is_dc_timing_adjust_needed(dm_old_crtc_state, acrtc_state)) {
8440 spin_lock_irqsave(&pcrtc->dev->event_lock, flags);
8441 dc_stream_adjust_vmin_vmax(
8442 dm->dc, acrtc_state->stream,
8443 &acrtc_attach->dm_irq_params.vrr_params.adjust);
8444 spin_unlock_irqrestore(&pcrtc->dev->event_lock, flags);
8446 mutex_lock(&dm->dc_lock);
8447 if ((acrtc_state->update_type > UPDATE_TYPE_FAST) &&
8448 acrtc_state->stream->link->psr_settings.psr_allow_active)
8449 amdgpu_dm_psr_disable(acrtc_state->stream);
8451 dc_commit_updates_for_stream(dm->dc,
8452 bundle->surface_updates,
8454 acrtc_state->stream,
8455 &bundle->stream_update,
8459 * Enable or disable the interrupts on the backend.
8461 * Most pipes are put into power gating when unused.
8463 * When a pipe is power gated we lose its interrupt
8464 * enablement state by the time power gating is disabled again.
8466 * So we need to update the IRQ control state in hardware
8467 * whenever the pipe turns on (since it could be previously
8468 * power gated) or off (since some pipes can't be power gated
8471 if (dm_old_crtc_state->active_planes != acrtc_state->active_planes)
8472 dm_update_pflip_irq_state(drm_to_adev(dev),
8475 if ((acrtc_state->update_type > UPDATE_TYPE_FAST) &&
8476 acrtc_state->stream->link->psr_settings.psr_version != DC_PSR_VERSION_UNSUPPORTED &&
8477 !acrtc_state->stream->link->psr_settings.psr_feature_enabled)
8478 amdgpu_dm_link_setup_psr(acrtc_state->stream);
8479 else if ((acrtc_state->update_type == UPDATE_TYPE_FAST) &&
8480 acrtc_state->stream->link->psr_settings.psr_feature_enabled &&
8481 !acrtc_state->stream->link->psr_settings.psr_allow_active) {
8482 amdgpu_dm_psr_enable(acrtc_state->stream);
8485 mutex_unlock(&dm->dc_lock);
8489 * Update cursor state *after* programming all the planes.
8490 * This avoids redundant programming in the case where we're going
8491 * to be disabling a single plane - those pipes are being disabled.
8493 if (acrtc_state->active_planes)
8494 amdgpu_dm_commit_cursors(state);
8500 static void amdgpu_dm_commit_audio(struct drm_device *dev,
8501 struct drm_atomic_state *state)
8503 struct amdgpu_device *adev = drm_to_adev(dev);
8504 struct amdgpu_dm_connector *aconnector;
8505 struct drm_connector *connector;
8506 struct drm_connector_state *old_con_state, *new_con_state;
8507 struct drm_crtc_state *new_crtc_state;
8508 struct dm_crtc_state *new_dm_crtc_state;
8509 const struct dc_stream_status *status;
8512 /* Notify device removals. */
8513 for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) {
8514 if (old_con_state->crtc != new_con_state->crtc) {
8515 /* CRTC changes require notification. */
8519 if (!new_con_state->crtc)
8522 new_crtc_state = drm_atomic_get_new_crtc_state(
8523 state, new_con_state->crtc);
8525 if (!new_crtc_state)
8528 if (!drm_atomic_crtc_needs_modeset(new_crtc_state))
8532 aconnector = to_amdgpu_dm_connector(connector);
8534 mutex_lock(&adev->dm.audio_lock);
8535 inst = aconnector->audio_inst;
8536 aconnector->audio_inst = -1;
8537 mutex_unlock(&adev->dm.audio_lock);
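/*
 * Notify with the previous instance; the connector's own instance was
 * cleared above, presumably so the audio component drops the stale ELD.
 */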
8539 amdgpu_dm_audio_eld_notify(adev, inst);
8542 /* Notify audio device additions. */
8543 for_each_new_connector_in_state(state, connector, new_con_state, i) {
8544 if (!new_con_state->crtc)
8547 new_crtc_state = drm_atomic_get_new_crtc_state(
8548 state, new_con_state->crtc);
8550 if (!new_crtc_state)
8553 if (!drm_atomic_crtc_needs_modeset(new_crtc_state))
8556 new_dm_crtc_state = to_dm_crtc_state(new_crtc_state);
8557 if (!new_dm_crtc_state->stream)
8560 status = dc_stream_get_status(new_dm_crtc_state->stream);
8564 aconnector = to_amdgpu_dm_connector(connector);
8566 mutex_lock(&adev->dm.audio_lock);
8567 inst = status->audio_inst;
8568 aconnector->audio_inst = inst;
8569 mutex_unlock(&adev->dm.audio_lock);
8571 amdgpu_dm_audio_eld_notify(adev, inst);
8576 * amdgpu_dm_crtc_copy_transient_flags - copy mirrored flags from DRM to DC
8577 * @crtc_state: the DRM CRTC state
8578 * @stream_state: the DC stream state.
8580 * Copy the mirrored transient state flags from DRM to DC. It is used to bring
8581 * a dc_stream_state's flags in sync with a drm_crtc_state's flags.
8583 static void amdgpu_dm_crtc_copy_transient_flags(struct drm_crtc_state *crtc_state,
8584 struct dc_stream_state *stream_state)
8586 stream_state->mode_changed = drm_atomic_crtc_needs_modeset(crtc_state);
8590 * amdgpu_dm_atomic_commit_tail() - AMDgpu DM's commit tail implementation.
8591 * @state: The atomic state to commit
8593 * This will tell DC to commit the constructed DC state from atomic_check,
8594 * programming the hardware. Any failure here implies a hardware failure, since
8595 * atomic check should have filtered anything non-kosher.
8597 static void amdgpu_dm_atomic_commit_tail(struct drm_atomic_state *state)
8599 struct drm_device *dev = state->dev;
8600 struct amdgpu_device *adev = drm_to_adev(dev);
8601 struct amdgpu_display_manager *dm = &adev->dm;
8602 struct dm_atomic_state *dm_state;
8603 struct dc_state *dc_state = NULL, *dc_state_temp = NULL;
8605 struct drm_crtc *crtc;
8606 struct drm_crtc_state *old_crtc_state, *new_crtc_state;
8607 unsigned long flags;
8608 bool wait_for_vblank = true;
8609 struct drm_connector *connector;
8610 struct drm_connector_state *old_con_state, *new_con_state;
8611 struct dm_crtc_state *dm_old_crtc_state, *dm_new_crtc_state;
8612 int crtc_disable_count = 0;
8613 bool mode_set_reset_required = false;
8615 trace_amdgpu_dm_atomic_commit_tail_begin(state);
8617 drm_atomic_helper_update_legacy_modeset_state(dev, state);
8619 dm_state = dm_atomic_get_new_state(state);
8620 if (dm_state && dm_state->context) {
8621 dc_state = dm_state->context;
8623 /* No state changes, retain current state. */
8624 dc_state_temp = dc_create_state(dm->dc);
8625 ASSERT(dc_state_temp);
8626 dc_state = dc_state_temp;
8627 dc_resource_state_copy_construct_current(dm->dc, dc_state);
8630 for_each_oldnew_crtc_in_state (state, crtc, old_crtc_state,
8631 new_crtc_state, i) {
8632 struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
8634 dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
8636 if (old_crtc_state->active &&
8637 (!new_crtc_state->active ||
8638 drm_atomic_crtc_needs_modeset(new_crtc_state))) {
8639 manage_dm_interrupts(adev, acrtc, false);
8640 dc_stream_release(dm_old_crtc_state->stream);
8644 drm_atomic_helper_calc_timestamping_constants(state);
8646 /* update changed items */
8647 for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
8648 struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
8650 dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
8651 dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
8654 "amdgpu_crtc id:%d crtc_state_flags: enable:%d, active:%d, "
8655 "planes_changed:%d, mode_changed:%d, active_changed:%d,"
8656 "connectors_changed:%d\n",
8658 new_crtc_state->enable,
8659 new_crtc_state->active,
8660 new_crtc_state->planes_changed,
8661 new_crtc_state->mode_changed,
8662 new_crtc_state->active_changed,
8663 new_crtc_state->connectors_changed);
8665 /* Disable cursor if disabling crtc */
8666 if (old_crtc_state->active && !new_crtc_state->active) {
8667 struct dc_cursor_position position;
8669 memset(&position, 0, sizeof(position));
8670 mutex_lock(&dm->dc_lock);
8671 dc_stream_set_cursor_position(dm_old_crtc_state->stream, &position);
8672 mutex_unlock(&dm->dc_lock);
8675 /* Copy all transient state flags into dc state */
8676 if (dm_new_crtc_state->stream) {
8677 amdgpu_dm_crtc_copy_transient_flags(&dm_new_crtc_state->base,
8678 dm_new_crtc_state->stream);
8681 /* handles headless hotplug case, updating new_state and
8682 * aconnector as needed
8685 if (modeset_required(new_crtc_state, dm_new_crtc_state->stream, dm_old_crtc_state->stream)) {
8687 DRM_DEBUG_ATOMIC("Atomic commit: SET crtc id %d: [%p]\n", acrtc->crtc_id, acrtc);
8689 if (!dm_new_crtc_state->stream) {
8691 * This could happen because of issues with
8692 * userspace notification delivery.
8693 * In this case userspace tries to set a mode on
8694 * a display which is in fact disconnected.
8695 * dc_sink is NULL in this case on aconnector.
8696 * We expect a mode reset to come soon.
8698 * This can also happen when an unplug is done
8699 * during the resume sequence.
8701 * In this case, we want to pretend we still
8702 * have a sink to keep the pipe running so that
8703 * hw state is consistent with the sw state.
8705 DRM_DEBUG_DRIVER("%s: Failed to create new stream for crtc %d\n",
8706 __func__, acrtc->base.base.id);
8710 if (dm_old_crtc_state->stream)
8711 remove_stream(adev, acrtc, dm_old_crtc_state->stream);
8713 pm_runtime_get_noresume(dev->dev);
8715 acrtc->enabled = true;
8716 acrtc->hw_mode = new_crtc_state->mode;
8717 crtc->hwmode = new_crtc_state->mode;
8718 mode_set_reset_required = true;
8719 } else if (modereset_required(new_crtc_state)) {
8720 DRM_DEBUG_ATOMIC("Atomic commit: RESET. crtc id %d:[%p]\n", acrtc->crtc_id, acrtc);
8721 /* i.e. reset mode */
8722 if (dm_old_crtc_state->stream)
8723 remove_stream(adev, acrtc, dm_old_crtc_state->stream);
8725 mode_set_reset_required = true;
8727 } /* for_each_crtc_in_state() */
8730 /* if there is a mode set or reset, disable eDP PSR */
8731 if (mode_set_reset_required)
8732 amdgpu_dm_psr_disable_all(dm);
8734 dm_enable_per_frame_crtc_master_sync(dc_state);
8735 mutex_lock(&dm->dc_lock);
8736 WARN_ON(!dc_commit_state(dm->dc, dc_state));
8737 #if defined(CONFIG_DRM_AMD_DC_DCN)
8738 /* Allow idle optimization when vblank count is 0 for display off */
8739 if (dm->active_vblank_irq_count == 0)
8740 dc_allow_idle_optimizations(dm->dc, true);
8742 mutex_unlock(&dm->dc_lock);
8745 for_each_new_crtc_in_state(state, crtc, new_crtc_state, i) {
8746 struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
8748 dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
8750 if (dm_new_crtc_state->stream != NULL) {
8751 const struct dc_stream_status *status =
8752 dc_stream_get_status(dm_new_crtc_state->stream);
8755 status = dc_stream_get_status_from_state(dc_state,
8756 dm_new_crtc_state->stream);
8758 DC_ERR("got no status for stream %p on acrtc%p\n", dm_new_crtc_state->stream, acrtc);
8760 acrtc->otg_inst = status->primary_otg_inst;
8763 #ifdef CONFIG_DRM_AMD_DC_HDCP
8764 for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) {
8765 struct dm_connector_state *dm_new_con_state = to_dm_connector_state(new_con_state);
8766 struct amdgpu_crtc *acrtc = to_amdgpu_crtc(dm_new_con_state->base.crtc);
8767 struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
8769 new_crtc_state = NULL;
8772 new_crtc_state = drm_atomic_get_new_crtc_state(state, &acrtc->base);
8774 dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
8776 if (dm_new_crtc_state && dm_new_crtc_state->stream == NULL &&
8777 connector->state->content_protection == DRM_MODE_CONTENT_PROTECTION_ENABLED) {
8778 hdcp_reset_display(adev->dm.hdcp_workqueue, aconnector->dc_link->link_index);
8779 new_con_state->content_protection = DRM_MODE_CONTENT_PROTECTION_DESIRED;
8780 dm_new_con_state->update_hdcp = true;
8784 if (is_content_protection_different(new_con_state, old_con_state, connector, adev->dm.hdcp_workqueue))
8785 hdcp_update_display(
8786 adev->dm.hdcp_workqueue, aconnector->dc_link->link_index, aconnector,
8787 new_con_state->hdcp_content_type,
8788 new_con_state->content_protection == DRM_MODE_CONTENT_PROTECTION_DESIRED);
8792 /* Handle connector state changes */
8793 for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) {
8794 struct dm_connector_state *dm_new_con_state = to_dm_connector_state(new_con_state);
8795 struct dm_connector_state *dm_old_con_state = to_dm_connector_state(old_con_state);
8796 struct amdgpu_crtc *acrtc = to_amdgpu_crtc(dm_new_con_state->base.crtc);
8797 struct dc_surface_update dummy_updates[MAX_SURFACES];
8798 struct dc_stream_update stream_update;
8799 struct dc_info_packet hdr_packet;
8800 struct dc_stream_status *status = NULL;
8801 bool abm_changed, hdr_changed, scaling_changed;
8803 memset(&dummy_updates, 0, sizeof(dummy_updates));
8804 memset(&stream_update, 0, sizeof(stream_update));
8807 new_crtc_state = drm_atomic_get_new_crtc_state(state, &acrtc->base);
8808 old_crtc_state = drm_atomic_get_old_crtc_state(state, &acrtc->base);
8811 /* Skip any modesets/resets */
8812 if (!acrtc || drm_atomic_crtc_needs_modeset(new_crtc_state))
8815 dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
8816 dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
8818 scaling_changed = is_scaling_state_different(dm_new_con_state,
8821 abm_changed = dm_new_crtc_state->abm_level !=
8822 dm_old_crtc_state->abm_level;
8825 is_hdr_metadata_different(old_con_state, new_con_state);
8827 if (!scaling_changed && !abm_changed && !hdr_changed)
8830 stream_update.stream = dm_new_crtc_state->stream;
8831 if (scaling_changed) {
8832 update_stream_scaling_settings(&dm_new_con_state->base.crtc->mode,
8833 dm_new_con_state, dm_new_crtc_state->stream);
8835 stream_update.src = dm_new_crtc_state->stream->src;
8836 stream_update.dst = dm_new_crtc_state->stream->dst;
8840 dm_new_crtc_state->stream->abm_level = dm_new_crtc_state->abm_level;
8842 stream_update.abm_level = &dm_new_crtc_state->abm_level;
8846 fill_hdr_info_packet(new_con_state, &hdr_packet);
8847 stream_update.hdr_static_metadata = &hdr_packet;
8850 status = dc_stream_get_status(dm_new_crtc_state->stream);
8852 WARN_ON(!status->plane_count);
8855 * TODO: DC refuses to perform stream updates without a dc_surface_update.
8856 * Here we create an empty update on each plane.
8857 * To fix this, DC should permit updating only stream properties.
8859 for (j = 0; j < status->plane_count; j++)
8860 dummy_updates[j].surface = status->plane_states[0];
8863 mutex_lock(&dm->dc_lock);
8864 dc_commit_updates_for_stream(dm->dc,
8866 status->plane_count,
8867 dm_new_crtc_state->stream,
8870 mutex_unlock(&dm->dc_lock);
8873 /* Count number of newly disabled CRTCs for dropping PM refs later. */
8874 for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state,
8875 new_crtc_state, i) {
8876 if (old_crtc_state->active && !new_crtc_state->active)
8877 crtc_disable_count++;
8879 dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
8880 dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
8882 /* For freesync config update on crtc state and params for irq */
8883 update_stream_irq_parameters(dm, dm_new_crtc_state);
8885 /* Handle vrr on->off / off->on transitions */
8886 amdgpu_dm_handle_vrr_transition(dm_old_crtc_state,
8891 * Enable interrupts for CRTCs that are newly enabled or went through
8892 * a modeset. It was intentionally deferred until after the front end
8893 * state was modified to wait until the OTG was on and so the IRQ
8894 * handlers didn't access stale or invalid state.
8896 for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
8897 struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
8898 #ifdef CONFIG_DEBUG_FS
8899 bool configure_crc = false;
8900 enum amdgpu_dm_pipe_crc_source cur_crc_src;
8902 dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
8904 if (new_crtc_state->active &&
8905 (!old_crtc_state->active ||
8906 drm_atomic_crtc_needs_modeset(new_crtc_state))) {
8907 dc_stream_retain(dm_new_crtc_state->stream);
8908 acrtc->dm_irq_params.stream = dm_new_crtc_state->stream;
8909 manage_dm_interrupts(adev, acrtc, true);
8911 #ifdef CONFIG_DEBUG_FS
8913 * Frontend may have changed so reapply the CRC capture
8914 * settings for the stream.
8916 dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
8917 spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);
8918 cur_crc_src = acrtc->dm_irq_params.crc_src;
8919 spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
8921 if (amdgpu_dm_is_valid_crc_source(cur_crc_src)) {
8922 configure_crc = true;
8923 #if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
8924 if (amdgpu_dm_crc_window_is_activated(crtc))
8925 configure_crc = false;
8930 amdgpu_dm_crtc_configure_crc_source(
8931 crtc, dm_new_crtc_state, cur_crc_src);
8936 for_each_new_crtc_in_state(state, crtc, new_crtc_state, j)
8937 if (new_crtc_state->async_flip)
8938 wait_for_vblank = false;
8940 /* update planes when needed per crtc */
8941 for_each_new_crtc_in_state(state, crtc, new_crtc_state, j) {
8942 dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
8944 if (dm_new_crtc_state->stream)
8945 amdgpu_dm_commit_planes(state, dc_state, dev,
8946 dm, crtc, wait_for_vblank);
8949 /* Update audio instances for each connector. */
8950 amdgpu_dm_commit_audio(dev, state);
8953 * send a vblank event for any event not handled in the flip path and
8954 * mark the event consumed for drm_atomic_helper_commit_hw_done
8956 spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);
8957 for_each_new_crtc_in_state(state, crtc, new_crtc_state, i) {
8959 if (new_crtc_state->event)
8960 drm_send_event_locked(dev, &new_crtc_state->event->base);
8962 new_crtc_state->event = NULL;
8964 spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
8966 /* Signal HW programming completion */
8967 drm_atomic_helper_commit_hw_done(state);
8969 if (wait_for_vblank)
8970 drm_atomic_helper_wait_for_flip_done(dev, state);
8972 drm_atomic_helper_cleanup_planes(dev, state);
8974 /* return the stolen VGA memory to VRAM */
8975 if (!adev->mman.keep_stolen_vga_memory)
8976 amdgpu_bo_free_kernel(&adev->mman.stolen_vga_memory, NULL, NULL);
8977 amdgpu_bo_free_kernel(&adev->mman.stolen_extended_memory, NULL, NULL);
8980 * Finally, drop a runtime PM reference for each newly disabled CRTC,
8981 * so we can put the GPU into runtime suspend if we're not driving any
8984 for (i = 0; i < crtc_disable_count; i++)
8985 pm_runtime_put_autosuspend(dev->dev);
8986 pm_runtime_mark_last_busy(dev->dev);
8989 dc_release_state(dc_state_temp);
8993 static int dm_force_atomic_commit(struct drm_connector *connector)
8996 struct drm_device *ddev = connector->dev;
8997 struct drm_atomic_state *state = drm_atomic_state_alloc(ddev);
8998 struct amdgpu_crtc *disconnected_acrtc = to_amdgpu_crtc(connector->encoder->crtc);
8999 struct drm_plane *plane = disconnected_acrtc->base.primary;
9000 struct drm_connector_state *conn_state;
9001 struct drm_crtc_state *crtc_state;
9002 struct drm_plane_state *plane_state;
9007 state->acquire_ctx = ddev->mode_config.acquire_ctx;
9009 /* Construct an atomic state to restore previous display setting */
9012 * Attach connectors to drm_atomic_state
9014 conn_state = drm_atomic_get_connector_state(state, connector);
9016 ret = PTR_ERR_OR_ZERO(conn_state);
9020 /* Attach crtc to drm_atomic_state*/
9021 crtc_state = drm_atomic_get_crtc_state(state, &disconnected_acrtc->base);
9023 ret = PTR_ERR_OR_ZERO(crtc_state);
9027 /* force a restore */
9028 crtc_state->mode_changed = true;
9030 /* Attach plane to drm_atomic_state */
9031 plane_state = drm_atomic_get_plane_state(state, plane);
9033 ret = PTR_ERR_OR_ZERO(plane_state);
9037 /* Call commit internally with the state we just constructed */
9038 ret = drm_atomic_commit(state);
9041 drm_atomic_state_put(state);
9043 DRM_ERROR("Restoring old state failed with %i\n", ret);
9049 * This function handles all cases where a set mode does not come upon hotplug.
9050 * This includes when a display is unplugged then plugged back into the
9051 * same port and when running without usermode desktop manager support.
9053 void dm_restore_drm_connector_state(struct drm_device *dev,
9054 struct drm_connector *connector)
9056 struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
9057 struct amdgpu_crtc *disconnected_acrtc;
9058 struct dm_crtc_state *acrtc_state;
9060 if (!aconnector->dc_sink || !connector->state || !connector->encoder)
9063 disconnected_acrtc = to_amdgpu_crtc(connector->encoder->crtc);
9064 if (!disconnected_acrtc)
9067 acrtc_state = to_dm_crtc_state(disconnected_acrtc->base.state);
9068 if (!acrtc_state->stream)
9072 * If the previous sink is not released and different from the current,
9073 * we deduce we are in a state where we cannot rely on a usermode call
9074 * to turn on the display, so we do it here.
9076 if (acrtc_state->stream->sink != aconnector->dc_sink)
9077 dm_force_atomic_commit(&aconnector->base);
9081 * Grabs all modesetting locks to serialize against any blocking commits and
9082 * waits for completion of all non-blocking commits.
9084 static int do_aquire_global_lock(struct drm_device *dev,
9085 struct drm_atomic_state *state)
9087 struct drm_crtc *crtc;
9088 struct drm_crtc_commit *commit;
9092 * Adding all modeset locks to acquire_ctx will
9093 * ensure that when the framework releases it, the
9094 * extra locks we are locking here will get released too.
9096 ret = drm_modeset_lock_all_ctx(dev, state->acquire_ctx);
9100 list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
9101 spin_lock(&crtc->commit_lock);
9102 commit = list_first_entry_or_null(&crtc->commit_list,
9103 struct drm_crtc_commit, commit_entry);
9105 drm_crtc_commit_get(commit);
9106 spin_unlock(&crtc->commit_lock);
9112 * Make sure all pending HW programming completed and
9115 ret = wait_for_completion_interruptible_timeout(&commit->hw_done, 10*HZ);
9118 ret = wait_for_completion_interruptible_timeout(
9119 &commit->flip_done, 10*HZ);
9122 DRM_ERROR("[CRTC:%d:%s] hw_done or flip_done "
9123 "timed out\n", crtc->base.id, crtc->name);
9125 drm_crtc_commit_put(commit);
9128 return ret < 0 ? ret : 0;
9131 static void get_freesync_config_for_crtc(
9132 struct dm_crtc_state *new_crtc_state,
9133 struct dm_connector_state *new_con_state)
9135 struct mod_freesync_config config = {0};
9136 struct amdgpu_dm_connector *aconnector =
9137 to_amdgpu_dm_connector(new_con_state->base.connector);
9138 struct drm_display_mode *mode = &new_crtc_state->base.mode;
9139 int vrefresh = drm_mode_vrefresh(mode);
9140 bool fs_vid_mode = false;
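/*
 * VRR is only reported as supported when the mode's nominal refresh rate
 * falls inside the connector's FreeSync range (min_vfreq..max_vfreq,
 * parsed earlier from the display's capabilities).
 */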
9142 new_crtc_state->vrr_supported = new_con_state->freesync_capable &&
9143 vrefresh >= aconnector->min_vfreq &&
9144 vrefresh <= aconnector->max_vfreq;
9146 if (new_crtc_state->vrr_supported) {
9147 new_crtc_state->stream->ignore_msa_timing_param = true;
9148 fs_vid_mode = new_crtc_state->freesync_config.state == VRR_STATE_ACTIVE_FIXED;
9150 config.min_refresh_in_uhz = aconnector->min_vfreq * 1000000;
9151 config.max_refresh_in_uhz = aconnector->max_vfreq * 1000000;
9152 config.vsif_supported = true;
9156 config.state = VRR_STATE_ACTIVE_FIXED;
9157 config.fixed_refresh_in_uhz = new_crtc_state->freesync_config.fixed_refresh_in_uhz;
9159 } else if (new_crtc_state->base.vrr_enabled) {
9160 config.state = VRR_STATE_ACTIVE_VARIABLE;
9162 config.state = VRR_STATE_INACTIVE;
9166 new_crtc_state->freesync_config = config;
9169 static void reset_freesync_config_for_crtc(
9170 struct dm_crtc_state *new_crtc_state)
9172 new_crtc_state->vrr_supported = false;
9174 memset(&new_crtc_state->vrr_infopacket, 0,
9175 sizeof(new_crtc_state->vrr_infopacket));
9179 is_timing_unchanged_for_freesync(struct drm_crtc_state *old_crtc_state,
9180 struct drm_crtc_state *new_crtc_state)
9182 struct drm_display_mode old_mode, new_mode;
9184 if (!old_crtc_state || !new_crtc_state)
9187 old_mode = old_crtc_state->mode;
9188 new_mode = new_crtc_state->mode;
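/*
 * "Unchanged" here means the modes match except for vertical blanking:
 * vtotal/vsync_start/vsync_end may differ as long as the vsync pulse
 * width stays the same, i.e. the front porch stretching used by
 * FreeSync video modes.
 */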
9190 if (old_mode.clock == new_mode.clock &&
9191 old_mode.hdisplay == new_mode.hdisplay &&
9192 old_mode.vdisplay == new_mode.vdisplay &&
9193 old_mode.htotal == new_mode.htotal &&
9194 old_mode.vtotal != new_mode.vtotal &&
9195 old_mode.hsync_start == new_mode.hsync_start &&
9196 old_mode.vsync_start != new_mode.vsync_start &&
9197 old_mode.hsync_end == new_mode.hsync_end &&
9198 old_mode.vsync_end != new_mode.vsync_end &&
9199 old_mode.hskew == new_mode.hskew &&
9200 old_mode.vscan == new_mode.vscan &&
9201 (old_mode.vsync_end - old_mode.vsync_start) ==
9202 (new_mode.vsync_end - new_mode.vsync_start))
9208 static void set_freesync_fixed_config(struct dm_crtc_state *dm_new_crtc_state)
{
9209 uint64_t num, den, res;
9210 struct drm_crtc_state *new_crtc_state = &dm_new_crtc_state->base;
9212 dm_new_crtc_state->freesync_config.state = VRR_STATE_ACTIVE_FIXED;
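/*
 * fixed_refresh_in_uhz = (pixel clock in kHz * 1000 * 1000000) /
 * (htotal * vtotal), e.g. 148500 kHz with a 2200x1125 total gives
 * 60000000 uHz (60 Hz).
 */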
9214 num = (unsigned long long)new_crtc_state->mode.clock * 1000 * 1000000;
9215 den = (unsigned long long)new_crtc_state->mode.htotal *
9216 (unsigned long long)new_crtc_state->mode.vtotal;
9218 res = div_u64(num, den);
9219 dm_new_crtc_state->freesync_config.fixed_refresh_in_uhz = res;
9222 static int dm_update_crtc_state(struct amdgpu_display_manager *dm,
9223 struct drm_atomic_state *state,
9224 struct drm_crtc *crtc,
9225 struct drm_crtc_state *old_crtc_state,
9226 struct drm_crtc_state *new_crtc_state,
9228 bool *lock_and_validation_needed)
9230 struct dm_atomic_state *dm_state = NULL;
9231 struct dm_crtc_state *dm_old_crtc_state, *dm_new_crtc_state;
9232 struct dc_stream_state *new_stream;
9236 * TODO Move this code into dm_crtc_atomic_check once we get rid of dc_validation_set
9237 * update changed items
9239 struct amdgpu_crtc *acrtc = NULL;
9240 struct amdgpu_dm_connector *aconnector = NULL;
9241 struct drm_connector_state *drm_new_conn_state = NULL, *drm_old_conn_state = NULL;
9242 struct dm_connector_state *dm_new_conn_state = NULL, *dm_old_conn_state = NULL;
9246 dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
9247 dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
9248 acrtc = to_amdgpu_crtc(crtc);
9249 aconnector = amdgpu_dm_find_first_crtc_matching_connector(state, crtc);
9251 /* TODO This hack should go away */
9252 if (aconnector && enable) {
9253 /* Make sure fake sink is created in plug-in scenario */
9254 drm_new_conn_state = drm_atomic_get_new_connector_state(state,
9256 drm_old_conn_state = drm_atomic_get_old_connector_state(state,
9259 if (IS_ERR(drm_new_conn_state)) {
9260 ret = PTR_ERR_OR_ZERO(drm_new_conn_state);
9264 dm_new_conn_state = to_dm_connector_state(drm_new_conn_state);
9265 dm_old_conn_state = to_dm_connector_state(drm_old_conn_state);
9267 if (!drm_atomic_crtc_needs_modeset(new_crtc_state))
9270 new_stream = create_validate_stream_for_sink(aconnector,
9271 &new_crtc_state->mode,
9273 dm_old_crtc_state->stream);
9276 * We can have no stream on ACTION_SET if a display
9277 * was disconnected during S3; in this case it is not an
9278 * error, the OS will be updated after detection, and
9279 * will do the right thing on next atomic commit.
9283 DRM_DEBUG_DRIVER("%s: Failed to create new stream for crtc %d\n",
9284 __func__, acrtc->base.base.id);
9290 * TODO: Check VSDB bits to decide whether this should
9291 * be enabled or not.
9293 new_stream->triggered_crtc_reset.enabled =
9294 dm->force_timing_sync;
9296 dm_new_crtc_state->abm_level = dm_new_conn_state->abm_level;
9298 ret = fill_hdr_info_packet(drm_new_conn_state,
9299 &new_stream->hdr_static_metadata);
9304 * If we already removed the old stream from the context
9305 * (and set the new stream to NULL) then we can't reuse
9306 * the old stream even if the stream and scaling are unchanged.
9307 * We'll hit the BUG_ON and get a black screen.
9309 * TODO: Refactor this function to allow this check to work
9310 * in all conditions.
9312 if (amdgpu_freesync_vid_mode &&
9313 dm_new_crtc_state->stream &&
9314 is_timing_unchanged_for_freesync(new_crtc_state, old_crtc_state))
9317 if (dm_new_crtc_state->stream &&
9318 dc_is_stream_unchanged(new_stream, dm_old_crtc_state->stream) &&
9319 dc_is_stream_scaling_unchanged(new_stream, dm_old_crtc_state->stream)) {
9320 new_crtc_state->mode_changed = false;
9321 DRM_DEBUG_DRIVER("Mode change not required, setting mode_changed to %d",
9322 new_crtc_state->mode_changed);
9326 /* mode_changed flag may get updated above, need to check again */
9327 if (!drm_atomic_crtc_needs_modeset(new_crtc_state))
9331 "amdgpu_crtc id:%d crtc_state_flags: enable:%d, active:%d, "
9332 "planes_changed:%d, mode_changed:%d, active_changed:%d,"
9333 "connectors_changed:%d\n",
9335 new_crtc_state->enable,
9336 new_crtc_state->active,
9337 new_crtc_state->planes_changed,
9338 new_crtc_state->mode_changed,
9339 new_crtc_state->active_changed,
9340 new_crtc_state->connectors_changed);
9342 /* Remove stream for any changed/disabled CRTC */
9345 if (!dm_old_crtc_state->stream)
9348 if (amdgpu_freesync_vid_mode && dm_new_crtc_state->stream &&
9349 is_timing_unchanged_for_freesync(new_crtc_state,
9351 new_crtc_state->mode_changed = false;
9353 "Mode change not required for front porch change, "
9354 "setting mode_changed to %d",
9355 new_crtc_state->mode_changed);
9357 set_freesync_fixed_config(dm_new_crtc_state);
9360 } else if (amdgpu_freesync_vid_mode && aconnector &&
9361 is_freesync_video_mode(&new_crtc_state->mode,
9363 set_freesync_fixed_config(dm_new_crtc_state);
9366 ret = dm_atomic_get_state(state, &dm_state);
9370 DRM_DEBUG_DRIVER("Disabling DRM crtc: %d\n",
9373 /* i.e. reset mode */
9374 if (dc_remove_stream_from_ctx(
9377 dm_old_crtc_state->stream) != DC_OK) {
9382 dc_stream_release(dm_old_crtc_state->stream);
9383 dm_new_crtc_state->stream = NULL;
9385 reset_freesync_config_for_crtc(dm_new_crtc_state);
9387 *lock_and_validation_needed = true;
9389 } else { /* Add stream for any updated/enabled CRTC */
9391 * Quick fix to prevent a NULL pointer dereference on new_stream when
9392 * added MST connectors are not found in the existing crtc_state in chained mode.
9393 * TODO: need to dig out the root cause of this.
9395 if (!aconnector || (!aconnector->dc_sink && aconnector->mst_port))
9398 if (modereset_required(new_crtc_state))
9401 if (modeset_required(new_crtc_state, new_stream,
9402 dm_old_crtc_state->stream)) {
9404 WARN_ON(dm_new_crtc_state->stream);
9406 ret = dm_atomic_get_state(state, &dm_state);
9410 dm_new_crtc_state->stream = new_stream;
9412 dc_stream_retain(new_stream);
9414 DRM_DEBUG_ATOMIC("Enabling DRM crtc: %d\n",
9417 if (dc_add_stream_to_ctx(
9420 dm_new_crtc_state->stream) != DC_OK) {
9425 *lock_and_validation_needed = true;
9430 /* Release extra reference */
9432 dc_stream_release(new_stream);
9435 * We want to do dc stream updates that do not require a
9436 * full modeset below.
9438 if (!(enable && aconnector && new_crtc_state->active))
9441 * Given above conditions, the dc state cannot be NULL because:
9442 * 1. We're in the process of enabling CRTCs (the stream has just been
9443 * added to the dc context, or is already in the context)
9444 * 2. Has a valid connector attached, and
9445 * 3. Is currently active and enabled.
9446 * => The dc stream state currently exists.
9448 BUG_ON(dm_new_crtc_state->stream == NULL);
9450 /* Scaling or underscan settings */
9451 if (is_scaling_state_different(dm_old_conn_state, dm_new_conn_state))
9452 update_stream_scaling_settings(
9453 &new_crtc_state->mode, dm_new_conn_state, dm_new_crtc_state->stream);
9456 dm_new_crtc_state->abm_level = dm_new_conn_state->abm_level;
9459 * Color management settings. We also update color properties
9460 * when a modeset is needed, to ensure it gets reprogrammed.
9462 if (dm_new_crtc_state->base.color_mgmt_changed ||
9463 drm_atomic_crtc_needs_modeset(new_crtc_state)) {
9464 ret = amdgpu_dm_update_crtc_color_mgmt(dm_new_crtc_state);
9469 /* Update Freesync settings. */
9470 get_freesync_config_for_crtc(dm_new_crtc_state,
9477 dc_stream_release(new_stream);
9481 static bool should_reset_plane(struct drm_atomic_state *state,
9482 struct drm_plane *plane,
9483 struct drm_plane_state *old_plane_state,
9484 struct drm_plane_state *new_plane_state)
9486 struct drm_plane *other;
9487 struct drm_plane_state *old_other_state, *new_other_state;
9488 struct drm_crtc_state *new_crtc_state;
9492 * TODO: Remove this hack once the checks below are sufficient
9493 * to determine when we need to reset all the planes on
9496 if (state->allow_modeset)
9499 /* Exit early if we know that we're adding or removing the plane. */
9500 if (old_plane_state->crtc != new_plane_state->crtc)
9503 /* old crtc == new_crtc == NULL, plane not in context. */
9504 if (!new_plane_state->crtc)
9508 drm_atomic_get_new_crtc_state(state, new_plane_state->crtc);
9510 if (!new_crtc_state)
9513 /* CRTC Degamma changes currently require us to recreate planes. */
9514 if (new_crtc_state->color_mgmt_changed)
9517 if (drm_atomic_crtc_needs_modeset(new_crtc_state))
9521 * If there are any new primary or overlay planes being added or
9522 * removed then the z-order can potentially change. To ensure
9523 * correct z-order and pipe acquisition the current DC architecture
9524 * requires us to remove and recreate all existing planes.
9526 * TODO: Come up with a more elegant solution for this.
9528 for_each_oldnew_plane_in_state(state, other, old_other_state, new_other_state, i) {
9529 struct amdgpu_framebuffer *old_afb, *new_afb;
9530 if (other->type == DRM_PLANE_TYPE_CURSOR)
9533 if (old_other_state->crtc != new_plane_state->crtc &&
9534 new_other_state->crtc != new_plane_state->crtc)
9537 if (old_other_state->crtc != new_other_state->crtc)
9540 /* Src/dst size and scaling updates. */
9541 if (old_other_state->src_w != new_other_state->src_w ||
9542 old_other_state->src_h != new_other_state->src_h ||
9543 old_other_state->crtc_w != new_other_state->crtc_w ||
9544 old_other_state->crtc_h != new_other_state->crtc_h)
9547 /* Rotation / mirroring updates. */
9548 if (old_other_state->rotation != new_other_state->rotation)
9551 /* Blending updates. */
9552 if (old_other_state->pixel_blend_mode !=
9553 new_other_state->pixel_blend_mode)
9556 /* Alpha updates. */
9557 if (old_other_state->alpha != new_other_state->alpha)
9560 /* Colorspace changes. */
9561 if (old_other_state->color_range != new_other_state->color_range ||
9562 old_other_state->color_encoding != new_other_state->color_encoding)
9565 /* Framebuffer checks fall at the end. */
9566 if (!old_other_state->fb || !new_other_state->fb)
9569 /* Pixel format changes can require bandwidth updates. */
9570 if (old_other_state->fb->format != new_other_state->fb->format)
9573 old_afb = (struct amdgpu_framebuffer *)old_other_state->fb;
9574 new_afb = (struct amdgpu_framebuffer *)new_other_state->fb;
9576 /* Tiling and DCC changes also require bandwidth updates. */
9577 if (old_afb->tiling_flags != new_afb->tiling_flags ||
9578 old_afb->base.modifier != new_afb->base.modifier)
9585 static int dm_check_cursor_fb(struct amdgpu_crtc *new_acrtc,
9586 struct drm_plane_state *new_plane_state,
9587 struct drm_framebuffer *fb)
9589 struct amdgpu_device *adev = drm_to_adev(new_acrtc->base.dev);
9590 struct amdgpu_framebuffer *afb = to_amdgpu_framebuffer(fb);
9594 if (fb->width > new_acrtc->max_cursor_width ||
9595 fb->height > new_acrtc->max_cursor_height) {
9596 DRM_DEBUG_ATOMIC("Bad cursor FB size %dx%d\n",
9597 new_plane_state->fb->width,
9598 new_plane_state->fb->height);
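/* Plane src coordinates are in 16.16 fixed point, hence the << 16. */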
9601 if (new_plane_state->src_w != fb->width << 16 ||
9602 new_plane_state->src_h != fb->height << 16) {
9603 DRM_DEBUG_ATOMIC("Cropping not supported for cursor plane\n");
9607 /* Pitch in pixels */
9608 pitch = fb->pitches[0] / fb->format->cpp[0];
9610 if (fb->width != pitch) {
9611 DRM_DEBUG_ATOMIC("Cursor FB width %d doesn't match pitch %d",
9620 /* FB pitch is supported by cursor plane */
9623 DRM_DEBUG_ATOMIC("Bad cursor FB pitch %d px\n", pitch);
9627 /* Core DRM takes care of checking FB modifiers, so we only need to
9628 * check tiling flags when the FB doesn't have a modifier. */
9629 if (!(fb->flags & DRM_MODE_FB_MODIFIERS)) {
9630 if (adev->family < AMDGPU_FAMILY_AI) {
9631 linear = AMDGPU_TILING_GET(afb->tiling_flags, ARRAY_MODE) != DC_ARRAY_2D_TILED_THIN1 &&
9632 AMDGPU_TILING_GET(afb->tiling_flags, ARRAY_MODE) != DC_ARRAY_1D_TILED_THIN1 &&
9633 AMDGPU_TILING_GET(afb->tiling_flags, MICRO_TILE_MODE) == 0;
9635 linear = AMDGPU_TILING_GET(afb->tiling_flags, SWIZZLE_MODE) == 0;
9638 DRM_DEBUG_ATOMIC("Cursor FB not linear");
9646 static int dm_update_plane_state(struct dc *dc,
9647 struct drm_atomic_state *state,
9648 struct drm_plane *plane,
9649 struct drm_plane_state *old_plane_state,
9650 struct drm_plane_state *new_plane_state,
9652 bool *lock_and_validation_needed)
9655 struct dm_atomic_state *dm_state = NULL;
9656 struct drm_crtc *new_plane_crtc, *old_plane_crtc;
9657 struct drm_crtc_state *old_crtc_state, *new_crtc_state;
9658 struct dm_crtc_state *dm_new_crtc_state, *dm_old_crtc_state;
9659 struct dm_plane_state *dm_new_plane_state, *dm_old_plane_state;
9660 struct amdgpu_crtc *new_acrtc;
9665 new_plane_crtc = new_plane_state->crtc;
9666 old_plane_crtc = old_plane_state->crtc;
9667 dm_new_plane_state = to_dm_plane_state(new_plane_state);
9668 dm_old_plane_state = to_dm_plane_state(old_plane_state);
9670 if (plane->type == DRM_PLANE_TYPE_CURSOR) {
9671 if (!enable || !new_plane_crtc ||
9672 drm_atomic_plane_disabling(plane->state, new_plane_state))
9675 new_acrtc = to_amdgpu_crtc(new_plane_crtc);
9677 if (new_plane_state->src_x != 0 || new_plane_state->src_y != 0) {
9678 DRM_DEBUG_ATOMIC("Cropping not supported for cursor plane\n");
9682 if (new_plane_state->fb) {
9683 ret = dm_check_cursor_fb(new_acrtc, new_plane_state,
9684 new_plane_state->fb);
9692 needs_reset = should_reset_plane(state, plane, old_plane_state,
9695 /* Remove any changed/removed planes */
9700 if (!old_plane_crtc)
9703 old_crtc_state = drm_atomic_get_old_crtc_state(
9704 state, old_plane_crtc);
9705 dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
9707 if (!dm_old_crtc_state->stream)
9710 DRM_DEBUG_ATOMIC("Disabling DRM plane: %d on DRM crtc %d\n",
9711 plane->base.id, old_plane_crtc->base.id);
9713 ret = dm_atomic_get_state(state, &dm_state);
9717 if (!dc_remove_plane_from_context(
9719 dm_old_crtc_state->stream,
9720 dm_old_plane_state->dc_state,
9721 dm_state->context)) {
9727 dc_plane_state_release(dm_old_plane_state->dc_state);
9728 dm_new_plane_state->dc_state = NULL;
9730 *lock_and_validation_needed = true;
9732 } else { /* Add new planes */
9733 struct dc_plane_state *dc_new_plane_state;
9735 if (drm_atomic_plane_disabling(plane->state, new_plane_state))
9738 if (!new_plane_crtc)
9741 new_crtc_state = drm_atomic_get_new_crtc_state(state, new_plane_crtc);
9742 dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
9744 if (!dm_new_crtc_state->stream)
9750 ret = dm_plane_helper_check_state(new_plane_state, new_crtc_state);
9754 WARN_ON(dm_new_plane_state->dc_state);
9756 dc_new_plane_state = dc_create_plane_state(dc);
9757 if (!dc_new_plane_state)
9760 DRM_DEBUG_ATOMIC("Enabling DRM plane: %d on DRM crtc %d\n",
9761 plane->base.id, new_plane_crtc->base.id);
9763 ret = fill_dc_plane_attributes(
9764 drm_to_adev(new_plane_crtc->dev),
9769 dc_plane_state_release(dc_new_plane_state);
9773 ret = dm_atomic_get_state(state, &dm_state);
9775 dc_plane_state_release(dc_new_plane_state);
9780 * Any atomic check errors that occur after this will
9781 * not need a release. The plane state will be attached
9782 * to the stream, and therefore part of the atomic
9783 * state. It'll be released when the atomic state is
9786 if (!dc_add_plane_to_context(
9788 dm_new_crtc_state->stream,
9790 dm_state->context)) {
9792 dc_plane_state_release(dc_new_plane_state);
9796 dm_new_plane_state->dc_state = dc_new_plane_state;
9798 /* Tell DC to do a full surface update every time there
9799 * is a plane change. Inefficient, but works for now.
9801 dm_new_plane_state->dc_state->update_flags.bits.full_update = 1;
9803 *lock_and_validation_needed = true;
9810 static int dm_check_crtc_cursor(struct drm_atomic_state *state,
9811 struct drm_crtc *crtc,
9812 struct drm_crtc_state *new_crtc_state)
9814 struct drm_plane_state *new_cursor_state, *new_primary_state;
9815 int cursor_scale_w, cursor_scale_h, primary_scale_w, primary_scale_h;
9817 /* On DCE and DCN there is no dedicated hardware cursor plane. We get a
9818 * cursor per pipe but it's going to inherit the scaling and
9819 * positioning from the underlying pipe. Check that the cursor plane's
9820 * scaling matches the primary plane's. */
9822 new_cursor_state = drm_atomic_get_new_plane_state(state, crtc->cursor);
9823 new_primary_state = drm_atomic_get_new_plane_state(state, crtc->primary);
9824 if (!new_cursor_state || !new_primary_state ||
9825 !new_cursor_state->fb || !new_primary_state->fb) {
9829 cursor_scale_w = new_cursor_state->crtc_w * 1000 /
9830 (new_cursor_state->src_w >> 16);
9831 cursor_scale_h = new_cursor_state->crtc_h * 1000 /
9832 (new_cursor_state->src_h >> 16);
9834 primary_scale_w = new_primary_state->crtc_w * 1000 /
9835 (new_primary_state->src_w >> 16);
9836 primary_scale_h = new_primary_state->crtc_h * 1000 /
9837 (new_primary_state->src_h >> 16);
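/*
 * Scales are compared in units of 1/1000 to avoid floating point,
 * e.g. a 64x64 cursor FB scanned out at 64x64 gives a scale of 1000.
 */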
9839 if (cursor_scale_w != primary_scale_w ||
9840 cursor_scale_h != primary_scale_h) {
9841 DRM_DEBUG_ATOMIC("Cursor plane scaling doesn't match primary plane\n");
9848 #if defined(CONFIG_DRM_AMD_DC_DCN)
9849 static int add_affected_mst_dsc_crtcs(struct drm_atomic_state *state, struct drm_crtc *crtc)
9851 struct drm_connector *connector;
9852 struct drm_connector_state *conn_state;
9853 struct amdgpu_dm_connector *aconnector = NULL;
9855 for_each_new_connector_in_state(state, connector, conn_state, i) {
9856 if (conn_state->crtc != crtc)
9859 aconnector = to_amdgpu_dm_connector(connector);
9860 if (!aconnector->port || !aconnector->mst_port)
9869 return drm_dp_mst_add_affected_dsc_crtcs(state, &aconnector->mst_port->mst_mgr);
9873 static int validate_overlay(struct drm_atomic_state *state)
9876 struct drm_plane *plane;
9877 struct drm_plane_state *old_plane_state, *new_plane_state;
9878 struct drm_plane_state *primary_state, *overlay_state = NULL;
9880 /* Check if primary plane is contained inside overlay */
9881 for_each_oldnew_plane_in_state_reverse(state, plane, old_plane_state, new_plane_state, i) {
9882 if (plane->type == DRM_PLANE_TYPE_OVERLAY) {
9883 if (drm_atomic_plane_disabling(plane->state, new_plane_state))
9886 overlay_state = new_plane_state;
9891 /* check if we're making changes to the overlay plane */
9895 /* check if overlay plane is enabled */
9896 if (!overlay_state->crtc)
9899 /* find the primary plane for the CRTC that the overlay is enabled on */
9900 primary_state = drm_atomic_get_plane_state(state, overlay_state->crtc->primary);
9901 if (IS_ERR(primary_state))
9902 return PTR_ERR(primary_state);
9904 /* check if primary plane is enabled */
9905 if (!primary_state->crtc)
9908 /* Perform the bounds check to ensure the overlay plane covers the primary */
9909 if (primary_state->crtc_x < overlay_state->crtc_x ||
9910 primary_state->crtc_y < overlay_state->crtc_y ||
9911 primary_state->crtc_x + primary_state->crtc_w > overlay_state->crtc_x + overlay_state->crtc_w ||
9912 primary_state->crtc_y + primary_state->crtc_h > overlay_state->crtc_y + overlay_state->crtc_h) {
9913 DRM_DEBUG_ATOMIC("Overlay plane is enabled with hardware cursor but does not fully cover primary plane\n");
9921 * amdgpu_dm_atomic_check() - Atomic check implementation for AMDgpu DM.
9922 * @dev: The DRM device
9923 * @state: The atomic state to commit
9925 * Validate that the given atomic state is programmable by DC into hardware.
9926 * This involves constructing a &struct dc_state reflecting the new hardware
9927 * state we wish to commit, then querying DC to see if it is programmable. It's
9928 * important not to modify the existing DC state. Otherwise, atomic_check
9929 * may unexpectedly commit hardware changes.
9931 * When validating the DC state, it's important that the right locks are
9932 * acquired. For the full update case, which removes/adds/updates streams on one
9933 * CRTC while flipping on another CRTC, acquiring the global lock will guarantee
9934 * that any such full update commit will wait for completion of any outstanding
9935 * flip using DRM's synchronization events.
9937 * Note that DM adds the affected connectors for all CRTCs in state, when that
9938 * might not seem necessary. This is because DC stream creation requires the
9939 * DC sink, which is tied to the DRM connector state. Cleaning this up should
9940 * be possible but non-trivial - a possible TODO item.
9942 * Return: 0 on success, or a negative error code if validation failed.
9944 static int amdgpu_dm_atomic_check(struct drm_device *dev,
9945 struct drm_atomic_state *state)
9947 struct amdgpu_device *adev = drm_to_adev(dev);
9948 struct dm_atomic_state *dm_state = NULL;
9949 struct dc *dc = adev->dm.dc;
9950 struct drm_connector *connector;
9951 struct drm_connector_state *old_con_state, *new_con_state;
9952 struct drm_crtc *crtc;
9953 struct drm_crtc_state *old_crtc_state, *new_crtc_state;
9954 struct drm_plane *plane;
9955 struct drm_plane_state *old_plane_state, *new_plane_state;
9956 enum dc_status status;
9958 bool lock_and_validation_needed = false;
9959 struct dm_crtc_state *dm_old_crtc_state;
9961 trace_amdgpu_dm_atomic_check_begin(state);
9963 ret = drm_atomic_helper_check_modeset(dev, state);
9967 /* Check connector changes */
9968 for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) {
9969 struct dm_connector_state *dm_old_con_state = to_dm_connector_state(old_con_state);
9970 struct dm_connector_state *dm_new_con_state = to_dm_connector_state(new_con_state);
9972 /* Skip connectors that are disabled or part of modeset already. */
9973 if (!old_con_state->crtc && !new_con_state->crtc)
9976 if (!new_con_state->crtc)
9979 new_crtc_state = drm_atomic_get_crtc_state(state, new_con_state->crtc);
9980 if (IS_ERR(new_crtc_state)) {
9981 ret = PTR_ERR(new_crtc_state);
9985 if (dm_old_con_state->abm_level !=
9986 dm_new_con_state->abm_level)
9987 new_crtc_state->connectors_changed = true;
9990 #if defined(CONFIG_DRM_AMD_DC_DCN)
9991 if (dc_resource_is_dsc_encoding_supported(dc)) {
9992 for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
9993 if (drm_atomic_crtc_needs_modeset(new_crtc_state)) {
9994 ret = add_affected_mst_dsc_crtcs(state, crtc);
10001 for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
10002 dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
10004 if (!drm_atomic_crtc_needs_modeset(new_crtc_state) &&
10005 !new_crtc_state->color_mgmt_changed &&
10006 old_crtc_state->vrr_enabled == new_crtc_state->vrr_enabled &&
10007 dm_old_crtc_state->dsc_force_changed == false)
10010 if (!new_crtc_state->enable)
10013 ret = drm_atomic_add_affected_connectors(state, crtc);
10017 ret = drm_atomic_add_affected_planes(state, crtc);
10021 if (dm_old_crtc_state->dsc_force_changed)
10022 new_crtc_state->mode_changed = true;
10026 * Add all primary and overlay planes on the CRTC to the state
10027 * whenever a plane is enabled to maintain correct z-ordering
10028 * and to enable fast surface updates.
10030 drm_for_each_crtc(crtc, dev) {
10031 bool modified = false;
10033 for_each_oldnew_plane_in_state(state, plane, old_plane_state, new_plane_state, i) {
10034 if (plane->type == DRM_PLANE_TYPE_CURSOR)
10037 if (new_plane_state->crtc == crtc ||
10038 old_plane_state->crtc == crtc) {
10047 drm_for_each_plane_mask(plane, state->dev, crtc->state->plane_mask) {
10048 if (plane->type == DRM_PLANE_TYPE_CURSOR)
10052 drm_atomic_get_plane_state(state, plane);
10054 if (IS_ERR(new_plane_state)) {
10055 ret = PTR_ERR(new_plane_state);
10061 /* Remove existing planes if they are modified */
10062 for_each_oldnew_plane_in_state_reverse(state, plane, old_plane_state, new_plane_state, i) {
10063 ret = dm_update_plane_state(dc, state, plane,
10067 &lock_and_validation_needed);
10072 /* Disable all crtcs which require disable */
10073 for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
10074 ret = dm_update_crtc_state(&adev->dm, state, crtc,
10078 &lock_and_validation_needed);
10083 /* Enable all crtcs which require enable */
10084 for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
10085 ret = dm_update_crtc_state(&adev->dm, state, crtc,
10089 &lock_and_validation_needed);
10094 ret = validate_overlay(state);
10098 /* Add new/modified planes */
10099 for_each_oldnew_plane_in_state_reverse(state, plane, old_plane_state, new_plane_state, i) {
10100 ret = dm_update_plane_state(dc, state, plane,
10104 &lock_and_validation_needed);
10109 /* Run this here since we want to validate the streams we created */
10110 ret = drm_atomic_helper_check_planes(dev, state);
10114 /* Check cursor planes scaling */
10115 for_each_new_crtc_in_state(state, crtc, new_crtc_state, i) {
10116 ret = dm_check_crtc_cursor(state, crtc, new_crtc_state);
10121 if (state->legacy_cursor_update) {
10123 * This is a fast cursor update coming from the plane update
10124 * helper, check if it can be done asynchronously for better
10127 state->async_update =
10128 !drm_atomic_helper_async_check(dev, state);
10131 * Skip the remaining global validation if this is an async
10132 * update. Cursor updates can be done without affecting
10133 * state or bandwidth calcs and this avoids the performance
10134 * penalty of locking the private state object and
10135 * allocating a new dc_state.
10137 if (state->async_update)
10141 /* Check scaling and underscan changes */
10142 /* TODO Removed scaling changes validation due to inability to commit
10143 * new stream into context w/o causing full reset. Need to
10144 * decide how to handle.
10146 for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) {
10147 struct dm_connector_state *dm_old_con_state = to_dm_connector_state(old_con_state);
10148 struct dm_connector_state *dm_new_con_state = to_dm_connector_state(new_con_state);
10149 struct amdgpu_crtc *acrtc = to_amdgpu_crtc(dm_new_con_state->base.crtc);
10151 /* Skip any modesets/resets */
10152 if (!acrtc || drm_atomic_crtc_needs_modeset(
10153 drm_atomic_get_new_crtc_state(state, &acrtc->base)))
10156 /* Skip anything that is not a scaling or underscan change */
10157 if (!is_scaling_state_different(dm_new_con_state, dm_old_con_state))
10160 lock_and_validation_needed = true;
10164 * Streams and planes are reset when there are changes that affect
10165 * bandwidth. Anything that affects bandwidth needs to go through
10166 * DC global validation to ensure that the configuration can be applied
10169 * We have to currently stall out here in atomic_check for outstanding
10170 * commits to finish in this case because our IRQ handlers reference
10171 * DRM state directly - we can end up disabling interrupts too early
10174 * TODO: Remove this stall and drop DM state private objects.
10176 if (lock_and_validation_needed) {
10177 ret = dm_atomic_get_state(state, &dm_state);
10181 ret = do_aquire_global_lock(dev, state);
10185 #if defined(CONFIG_DRM_AMD_DC_DCN)
10186 if (!compute_mst_dsc_configs_for_state(state, dm_state->context))
10189 ret = dm_update_mst_vcpi_slots_for_dsc(state, dm_state->context);
10195 * Perform validation of MST topology in the state:
10196 * We need to perform MST atomic check before calling
10197 * dc_validate_global_state(), or we risk getting stuck in
10198 * an infinite loop and eventually hanging.
10200 ret = drm_dp_mst_atomic_check(state);
10203 status = dc_validate_global_state(dc, dm_state->context, false);
10204 if (status != DC_OK) {
10205 DC_LOG_WARNING("DC global validation failure: %s (%d)",
10206 dc_status_to_str(status), status);
		/*
		 * The commit is a fast update. Fast updates shouldn't change
		 * the DC context, affect global validation, and can have their
		 * commit work done in parallel with other commits not touching
		 * the same resource. If we have a new DC context as part of
		 * the DM atomic state from validation we need to free it and
		 * retain the existing one instead.
		 *
		 * Furthermore, since the DM atomic state only contains the DC
		 * context and can safely be annulled, we can free the state
		 * and clear the associated private object now to free
		 * some memory and avoid a possible use-after-free later.
		 */
		for (i = 0; i < state->num_private_objs; i++) {
			struct drm_private_obj *obj = state->private_objs[i].ptr;

			if (obj->funcs == adev->dm.atomic_obj.funcs) {
				int j = state->num_private_objs-1;

				dm_atomic_destroy_state(obj,
						state->private_objs[i].state);

				/* If i is not at the end of the array then the
				 * last element needs to be moved to where i was
				 * before the array can safely be truncated.
				 */
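				/*
				 * Illustrative example (not from the original
				 * code): with num_private_objs == 3 and the DM
				 * object at i == 0, entry 2 is copied into
				 * slot 0, slot 2 is cleared and the count
				 * drops to 2 - a swap-with-last removal.
				 */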
				if (i != j)
					state->private_objs[i] =
						state->private_objs[j];

				state->private_objs[j].ptr = NULL;
				state->private_objs[j].state = NULL;
				state->private_objs[j].old_state = NULL;
				state->private_objs[j].new_state = NULL;

				state->num_private_objs = j;
				break;
			}
		}
	}
	/* Store the overall update type for use later in atomic check. */
	for_each_new_crtc_in_state(state, crtc, new_crtc_state, i) {
		struct dm_crtc_state *dm_new_crtc_state =
			to_dm_crtc_state(new_crtc_state);

		dm_new_crtc_state->update_type = lock_and_validation_needed ?
							 UPDATE_TYPE_FULL :
							 UPDATE_TYPE_FAST;
	}

	/* Must be success */
	WARN_ON(ret);

	trace_amdgpu_dm_atomic_check_finish(state, ret);

	return ret;

fail:
	if (ret == -EDEADLK)
		DRM_DEBUG_DRIVER("Atomic check stopped to avoid deadlock.\n");
	else if (ret == -EINTR || ret == -EAGAIN || ret == -ERESTARTSYS)
		DRM_DEBUG_DRIVER("Atomic check stopped due to signal.\n");
	else
		DRM_DEBUG_DRIVER("Atomic check failed with err: %d\n", ret);

	trace_amdgpu_dm_atomic_check_finish(state, ret);

	return ret;
}
static bool is_dp_capable_without_timing_msa(struct dc *dc,
					     struct amdgpu_dm_connector *amdgpu_dm_connector)
{
	uint8_t dpcd_data;
	bool capable = false;
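	/*
	 * DP_DOWN_STREAM_PORT_COUNT (DPCD 0x007) carries the
	 * MSA_TIMING_PAR_IGNORED bit: the sink can regenerate video timing
	 * without the MSA parameters, which the FreeSync path below uses as
	 * a prerequisite.
	 */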
	if (amdgpu_dm_connector->dc_link &&
		dm_helpers_dp_read_dpcd(
				NULL,
				amdgpu_dm_connector->dc_link,
				DP_DOWN_STREAM_PORT_COUNT,
				&dpcd_data,
				sizeof(dpcd_data))) {
		capable = (dpcd_data & DP_MSA_TIMING_PAR_IGNORED) ? true : false;
	}

	return capable;
}
static bool parse_edid_cea(struct amdgpu_dm_connector *aconnector,
		uint8_t *edid_ext, int len,
		struct amdgpu_hdmi_vsdb_info *vsdb_info)
{
	int i;
	struct amdgpu_device *adev = drm_to_adev(aconnector->base.dev);
	struct dc *dc = adev->dm.dc;

	/* send extension block to DMCU for parsing */
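	/*
	 * The extension block is streamed to the parser 8 bytes at a time:
	 * every chunk except the last is acknowledged via
	 * dc_edid_parser_recv_cea_ack(); after the final chunk the parsed
	 * AMD VSDB result, if any, is read back with
	 * dc_edid_parser_recv_amd_vsdb().
	 */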
	for (i = 0; i < len; i += 8) {
		bool res;
		int offset;

		/* send 8 bytes a time */
		if (!dc_edid_parser_send_cea(dc, i, len, &edid_ext[i], 8))
			return false;

		if (i + 8 == len) {
			/* EDID block sent completed, expect result */
			int version, min_rate, max_rate;

			res = dc_edid_parser_recv_amd_vsdb(dc, &version, &min_rate, &max_rate);
			if (res) {
				/* amd vsdb found */
				vsdb_info->freesync_supported = 1;
				vsdb_info->amd_vsdb_version = version;
				vsdb_info->min_refresh_rate_hz = min_rate;
				vsdb_info->max_refresh_rate_hz = max_rate;
				return true;
			}
			/* not amd vsdb */
			return false;
		}

		/* check for ack */
		res = dc_edid_parser_recv_cea_ack(dc, &offset);
		if (!res)
			return false;
	}

	return false;
}
static int parse_hdmi_amd_vsdb(struct amdgpu_dm_connector *aconnector,
		struct edid *edid, struct amdgpu_hdmi_vsdb_info *vsdb_info)
{
	uint8_t *edid_ext = NULL;
	int i;
	bool valid_vsdb_found = false;

	/*----- drm_find_cea_extension() -----*/
	/* No EDID or EDID extensions */
	if (edid == NULL || edid->extensions == 0)
		return -ENODEV;

	/* Find CEA extension */
	for (i = 0; i < edid->extensions; i++) {
		edid_ext = (uint8_t *)edid + EDID_LENGTH * (i + 1);
		if (edid_ext[0] == CEA_EXT)
			break;
	}

	if (i == edid->extensions)
		return -ENODEV;

	/*----- cea_db_offsets() -----*/
	if (edid_ext[0] != CEA_EXT)
		return -ENODEV;

	valid_vsdb_found = parse_edid_cea(aconnector, edid_ext, EDID_LENGTH, vsdb_info);

	return valid_vsdb_found ? i : -ENODEV;
}
void amdgpu_dm_update_freesync_caps(struct drm_connector *connector,
					struct edid *edid)
{
	int i = 0;
	struct detailed_timing *timing;
	struct detailed_non_pixel *data;
	struct detailed_data_monitor_range *range;
	struct amdgpu_dm_connector *amdgpu_dm_connector =
			to_amdgpu_dm_connector(connector);
	struct dm_connector_state *dm_con_state = NULL;

	struct drm_device *dev = connector->dev;
	struct amdgpu_device *adev = drm_to_adev(dev);
	bool freesync_capable = false;
	struct amdgpu_hdmi_vsdb_info vsdb_info = {0};
	if (!connector->state) {
		DRM_ERROR("%s - Connector has no state", __func__);
		goto update;
	}

	if (!edid) {
		dm_con_state = to_dm_connector_state(connector->state);
		amdgpu_dm_connector->min_vfreq = 0;
		amdgpu_dm_connector->max_vfreq = 0;
		amdgpu_dm_connector->pixel_clock_mhz = 0;
		goto update;
	}

	dm_con_state = to_dm_connector_state(connector->state);
	if (!amdgpu_dm_connector->dc_sink) {
		DRM_ERROR("dc_sink NULL, could not add free_sync module.\n");
		goto update;
	}
	if (!adev->dm.freesync_module)
		goto update;

	if (amdgpu_dm_connector->dc_sink->sink_signal == SIGNAL_TYPE_DISPLAY_PORT
			|| amdgpu_dm_connector->dc_sink->sink_signal == SIGNAL_TYPE_EDP) {
		bool edid_check_required = false;

		if (edid) {
			edid_check_required = is_dp_capable_without_timing_msa(
							adev->dm.dc,
							amdgpu_dm_connector);
		}

		if (edid_check_required == true && (edid->version > 1 ||
				(edid->version == 1 && edid->revision > 1))) {
			for (i = 0; i < 4; i++) {

				timing = &edid->detailed_timings[i];
				data = &timing->data.other_data;
				range = &data->data.range;
				/*
				 * Check if monitor has continuous frequency mode
				 */
				if (data->type != EDID_DETAIL_MONITOR_RANGE)
					continue;
				/*
				 * Check for flag range limits only. If flag == 1 then
				 * no additional timing information provided.
				 * Default GTF, GTF Secondary curve and CVT are not
				 * supported.
				 */
				if (range->flags != 1)
					continue;

				amdgpu_dm_connector->min_vfreq = range->min_vfreq;
				amdgpu_dm_connector->max_vfreq = range->max_vfreq;
				amdgpu_dm_connector->pixel_clock_mhz =
					range->pixel_clock_mhz * 10;

				connector->display_info.monitor_range.min_vfreq = range->min_vfreq;
				connector->display_info.monitor_range.max_vfreq = range->max_vfreq;

				break;
			}
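			/*
			 * FreeSync is only reported when the advertised range
			 * spans more than 10 Hz; e.g. a 48-144 Hz descriptor
			 * qualifies, a 59-61 Hz one does not.
			 */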
			if (amdgpu_dm_connector->max_vfreq -
			    amdgpu_dm_connector->min_vfreq > 10) {
				freesync_capable = true;
			}
		}
	} else if (edid && amdgpu_dm_connector->dc_sink->sink_signal == SIGNAL_TYPE_HDMI_TYPE_A) {
		i = parse_hdmi_amd_vsdb(amdgpu_dm_connector, edid, &vsdb_info);
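		/*
		 * A non-negative return value is the index of the CEA
		 * extension block that carried the AMD VSDB; the refresh
		 * range comes straight from the VSDB rather than from an
		 * EDID range-limits descriptor.
		 */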
		if (i >= 0 && vsdb_info.freesync_supported) {
			timing = &edid->detailed_timings[i];
			data = &timing->data.other_data;

			amdgpu_dm_connector->min_vfreq = vsdb_info.min_refresh_rate_hz;
			amdgpu_dm_connector->max_vfreq = vsdb_info.max_refresh_rate_hz;
			if (amdgpu_dm_connector->max_vfreq - amdgpu_dm_connector->min_vfreq > 10)
				freesync_capable = true;

			connector->display_info.monitor_range.min_vfreq = vsdb_info.min_refresh_rate_hz;
			connector->display_info.monitor_range.max_vfreq = vsdb_info.max_refresh_rate_hz;
		}
	}
update:
	if (dm_con_state)
		dm_con_state->freesync_capable = freesync_capable;

	if (connector->vrr_capable_property)
		drm_connector_set_vrr_capable_property(connector,
						       freesync_capable);
}
static void amdgpu_dm_set_psr_caps(struct dc_link *link)
{
	uint8_t dpcd_data[EDP_PSR_RECEIVER_CAP_SIZE];

	if (!(link->connector_signal & SIGNAL_TYPE_EDP))
		return;
	if (link->type == dc_connection_none)
		return;
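	/*
	 * DP_PSR_SUPPORT (DPCD 0x070) reports the PSR version the panel
	 * supports; 0 means PSR is not available. DM only ever enables PSR1
	 * here, whatever version the sink advertises.
	 */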
	if (dm_helpers_dp_read_dpcd(NULL, link, DP_PSR_SUPPORT,
				dpcd_data, sizeof(dpcd_data))) {
		link->dpcd_caps.psr_caps.psr_version = dpcd_data[0];

		if (dpcd_data[0] == 0) {
			link->psr_settings.psr_version = DC_PSR_VERSION_UNSUPPORTED;
			link->psr_settings.psr_feature_enabled = false;
		} else {
			link->psr_settings.psr_version = DC_PSR_VERSION_1;
			link->psr_settings.psr_feature_enabled = true;
		}

		DRM_INFO("PSR support:%d\n", link->psr_settings.psr_feature_enabled);
	}
}
/*
 * amdgpu_dm_link_setup_psr() - configure psr link
 * @stream: stream state
 *
 * Return: true if success
 */
static bool amdgpu_dm_link_setup_psr(struct dc_stream_state *stream)
{
	struct dc_link *link = NULL;
	struct psr_config psr_config = {0};
	struct psr_context psr_context = {0};
	bool ret = false;

	if (stream == NULL)
		return false;

	link = stream->link;

	psr_config.psr_version = link->dpcd_caps.psr_caps.psr_version;

	if (psr_config.psr_version > 0) {
		psr_config.psr_exit_link_training_required = 0x1;
		psr_config.psr_frame_capture_indication_req = 0;
		psr_config.psr_rfb_setup_time = 0x37;
		psr_config.psr_sdp_transmit_line_num_deadline = 0x20;
		psr_config.allow_smu_optimizations = 0x0;

		ret = dc_link_setup_psr(link, stream, &psr_config, &psr_context);
	}
	DRM_DEBUG_DRIVER("PSR link: %d\n", link->psr_settings.psr_feature_enabled);

	return ret;
}
/*
 * amdgpu_dm_psr_enable() - enable psr f/w
 * @stream: stream state
 *
 * Return: true if success
 */
bool amdgpu_dm_psr_enable(struct dc_stream_state *stream)
{
	struct dc_link *link = stream->link;
	unsigned int vsync_rate_hz = 0;
	struct dc_static_screen_params params = {0};
	/* Calculate number of static frames before generating interrupt to
	 * enter PSR.
	 */
	/* Init fail safe of 2 frames static */
	unsigned int num_frames_static = 2;

	DRM_DEBUG_DRIVER("Enabling psr...\n");
	vsync_rate_hz = div64_u64(div64_u64((
			stream->timing.pix_clk_100hz * 100),
			stream->timing.v_total),
			stream->timing.h_total);
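	/*
	 * pix_clk_100hz * 100 / v_total / h_total is the refresh rate in Hz;
	 * e.g. a 148.5 MHz pixel clock with a 2200 x 1125 total gives 60 Hz
	 * (illustrative numbers, not taken from the original code).
	 */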
	/*
	 * Calculate number of frames such that at least 30 ms of time has
	 * passed.
	 */
	if (vsync_rate_hz != 0) {
		unsigned int frame_time_microsec = 1000000 / vsync_rate_hz;
		num_frames_static = (30000 / frame_time_microsec) + 1;
	}
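	/*
	 * Worked example: at 60 Hz the frame time is ~16666 us, so
	 * num_frames_static = 30000 / 16666 + 1 = 2, i.e. roughly 33 ms of
	 * static screen before PSR entry.
	 */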
	params.triggers.cursor_update = true;
	params.triggers.overlay_update = true;
	params.triggers.surface_update = true;
	params.num_frames = num_frames_static;
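	/*
	 * Cursor, overlay and surface updates count as activity that restarts
	 * the static-screen frame counter; PSR is engaged only after
	 * num_frames_static consecutive frames without any of them (a sketch
	 * of the intent - the counting itself is done by DC/DMCU firmware).
	 */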
	dc_stream_set_static_screen_params(link->ctx->dc, &stream, 1, &params);

	return dc_link_set_psr_allow_active(link, true, false, false);
}
/*
 * amdgpu_dm_psr_disable() - disable psr f/w
 * @stream: stream state
 *
 * Return: true if success
 */
static bool amdgpu_dm_psr_disable(struct dc_stream_state *stream)
{
	DRM_DEBUG_DRIVER("Disabling psr...\n");

	return dc_link_set_psr_allow_active(stream->link, false, true, false);
}
/*
 * amdgpu_dm_psr_disable_all() - disable psr f/w if psr is enabled on any stream
 * @dm: display manager
 *
 * Return: true if success
 */
static bool amdgpu_dm_psr_disable_all(struct amdgpu_display_manager *dm)
{
	DRM_DEBUG_DRIVER("Disabling psr if psr is enabled on any stream\n");

	return dc_set_psr_allow_active(dm->dc, false);
}
void amdgpu_dm_trigger_timing_sync(struct drm_device *dev)
{
	struct amdgpu_device *adev = drm_to_adev(dev);
	struct dc *dc = adev->dm.dc;
	int i;

	mutex_lock(&adev->dm.dc_lock);
	if (dc->current_state) {
		for (i = 0; i < dc->current_state->stream_count; ++i)
			dc->current_state->streams[i]
				->triggered_crtc_reset.enabled =
				adev->dm.force_timing_sync;

		dm_enable_per_frame_crtc_master_sync(dc->current_state);
		dc_trigger_sync(dc, dc->current_state);
	}
	mutex_unlock(&adev->dm.dc_lock);
}
void dm_write_reg_func(const struct dc_context *ctx, uint32_t address,
		       uint32_t value, const char *func_name)
{
#ifdef DM_CHECK_ADDR_0
	if (address == 0) {
		DC_ERR("invalid register write. address = 0");
		return;
	}
#endif
	cgs_write_register(ctx->cgs_device, address, value);
	trace_amdgpu_dc_wreg(&ctx->perf_trace->write_count, address, value);
}
uint32_t dm_read_reg_func(const struct dc_context *ctx, uint32_t address,
			  const char *func_name)
{
	uint32_t value;
#ifdef DM_CHECK_ADDR_0
	if (address == 0) {
		DC_ERR("invalid register read; address = 0\n");
		return 0;
	}
#endif
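	/*
	 * While a DMUB register-write gather is in progress (and burst
	 * writes are not expected), a read at this point is flagged and
	 * returns 0 - reads cannot be deferred into the offload stream the
	 * way writes can, presumably why the check exists.
	 */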
	if (ctx->dmub_srv &&
	    ctx->dmub_srv->reg_helper_offload.gather_in_progress &&
	    !ctx->dmub_srv->reg_helper_offload.should_burst_write) {
		ASSERT(false);
		return 0;
	}

	value = cgs_read_register(ctx->cgs_device, address);

	trace_amdgpu_dc_rreg(&ctx->perf_trace->read_count, address, value);