2 * Copyright 2015 Advanced Micro Devices, Inc.
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
26 /* The caprices of the preprocessor require that this be declared right here */
27 #define CREATE_TRACE_POINTS
29 #include "dm_services_types.h"
31 #include "dc/inc/core_types.h"
32 #include "dal_asic_id.h"
33 #include "dmub/dmub_srv.h"
34 #include "dc/inc/hw/dmcu.h"
35 #include "dc/inc/hw/abm.h"
36 #include "dc/dc_dmub_srv.h"
37 #include "dc/dc_edid_parser.h"
38 #include "amdgpu_dm_trace.h"
42 #include "amdgpu_display.h"
43 #include "amdgpu_ucode.h"
45 #include "amdgpu_dm.h"
46 #ifdef CONFIG_DRM_AMD_DC_HDCP
47 #include "amdgpu_dm_hdcp.h"
48 #include <drm/drm_hdcp.h>
50 #include "amdgpu_pm.h"
52 #include "amd_shared.h"
53 #include "amdgpu_dm_irq.h"
54 #include "dm_helpers.h"
55 #include "amdgpu_dm_mst_types.h"
56 #if defined(CONFIG_DEBUG_FS)
57 #include "amdgpu_dm_debugfs.h"
60 #include "ivsrcid/ivsrcid_vislands30.h"
62 #include <linux/module.h>
63 #include <linux/moduleparam.h>
64 #include <linux/types.h>
65 #include <linux/pm_runtime.h>
66 #include <linux/pci.h>
67 #include <linux/firmware.h>
68 #include <linux/component.h>
70 #include <drm/drm_atomic.h>
71 #include <drm/drm_atomic_uapi.h>
72 #include <drm/drm_atomic_helper.h>
73 #include <drm/drm_dp_mst_helper.h>
74 #include <drm/drm_fb_helper.h>
75 #include <drm/drm_fourcc.h>
76 #include <drm/drm_edid.h>
77 #include <drm/drm_vblank.h>
78 #include <drm/drm_audio_component.h>
80 #if defined(CONFIG_DRM_AMD_DC_DCN)
81 #include "ivsrcid/dcn/irqsrcs_dcn_1_0.h"
83 #include "dcn/dcn_1_0_offset.h"
84 #include "dcn/dcn_1_0_sh_mask.h"
85 #include "soc15_hw_ip.h"
86 #include "vega10_ip_offset.h"
88 #include "soc15_common.h"
91 #include "modules/inc/mod_freesync.h"
92 #include "modules/power/power_helpers.h"
93 #include "modules/inc/mod_info_packet.h"
95 #define FIRMWARE_RENOIR_DMUB "amdgpu/renoir_dmcub.bin"
96 MODULE_FIRMWARE(FIRMWARE_RENOIR_DMUB);
97 #define FIRMWARE_SIENNA_CICHLID_DMUB "amdgpu/sienna_cichlid_dmcub.bin"
98 MODULE_FIRMWARE(FIRMWARE_SIENNA_CICHLID_DMUB);
99 #define FIRMWARE_NAVY_FLOUNDER_DMUB "amdgpu/navy_flounder_dmcub.bin"
100 MODULE_FIRMWARE(FIRMWARE_NAVY_FLOUNDER_DMUB);
101 #define FIRMWARE_GREEN_SARDINE_DMUB "amdgpu/green_sardine_dmcub.bin"
102 MODULE_FIRMWARE(FIRMWARE_GREEN_SARDINE_DMUB);
103 #define FIRMWARE_VANGOGH_DMUB "amdgpu/vangogh_dmcub.bin"
104 MODULE_FIRMWARE(FIRMWARE_VANGOGH_DMUB);
105 #define FIRMWARE_DIMGREY_CAVEFISH_DMUB "amdgpu/dimgrey_cavefish_dmcub.bin"
106 MODULE_FIRMWARE(FIRMWARE_DIMGREY_CAVEFISH_DMUB);
108 #define FIRMWARE_RAVEN_DMCU "amdgpu/raven_dmcu.bin"
109 MODULE_FIRMWARE(FIRMWARE_RAVEN_DMCU);
111 #define FIRMWARE_NAVI12_DMCU "amdgpu/navi12_dmcu.bin"
112 MODULE_FIRMWARE(FIRMWARE_NAVI12_DMCU);
114 /* Number of bytes in PSP header for firmware. */
115 #define PSP_HEADER_BYTES 0x100
117 /* Number of bytes in PSP footer for firmware. */
118 #define PSP_FOOTER_BYTES 0x100
123 * The AMDgpu display manager, **amdgpu_dm** (or even simpler,
124 * **dm**) sits between DRM and DC. It acts as a liaison, converting DRM
125 * requests into DC requests, and DC responses into DRM responses.
127 * The root control structure is &struct amdgpu_display_manager.
130 /* basic init/fini API */
131 static int amdgpu_dm_init(struct amdgpu_device *adev);
132 static void amdgpu_dm_fini(struct amdgpu_device *adev);
133 static bool is_freesync_video_mode(const struct drm_display_mode *mode, struct amdgpu_dm_connector *aconnector);
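/*
 * Map the dongle type reported in the link's DPCD caps to the DRM
 * subconnector property value exposed to userspace.
 */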
135 static enum drm_mode_subconnector get_subconnector_type(struct dc_link *link)
137 switch (link->dpcd_caps.dongle_type) {
138 case DISPLAY_DONGLE_NONE:
139 return DRM_MODE_SUBCONNECTOR_Native;
140 case DISPLAY_DONGLE_DP_VGA_CONVERTER:
141 return DRM_MODE_SUBCONNECTOR_VGA;
142 case DISPLAY_DONGLE_DP_DVI_CONVERTER:
143 case DISPLAY_DONGLE_DP_DVI_DONGLE:
144 return DRM_MODE_SUBCONNECTOR_DVID;
145 case DISPLAY_DONGLE_DP_HDMI_CONVERTER:
146 case DISPLAY_DONGLE_DP_HDMI_DONGLE:
147 return DRM_MODE_SUBCONNECTOR_HDMIA;
148 case DISPLAY_DONGLE_DP_HDMI_MISMATCHED_DONGLE:
150 return DRM_MODE_SUBCONNECTOR_Unknown;
154 static void update_subconnector_property(struct amdgpu_dm_connector *aconnector)
156 struct dc_link *link = aconnector->dc_link;
157 struct drm_connector *connector = &aconnector->base;
158 enum drm_mode_subconnector subconnector = DRM_MODE_SUBCONNECTOR_Unknown;
160 if (connector->connector_type != DRM_MODE_CONNECTOR_DisplayPort)
163 if (aconnector->dc_sink)
164 subconnector = get_subconnector_type(link);
166 drm_object_property_set_value(&connector->base,
167 connector->dev->mode_config.dp_subconnector_property,
172 * initializes drm_device display related structures, based on the information
173 provided by DAL. The drm structures are: drm_crtc, drm_connector,
174 * drm_encoder, drm_mode_config
176 * Returns 0 on success
178 static int amdgpu_dm_initialize_drm_device(struct amdgpu_device *adev);
179 /* removes and deallocates the drm structures, created by the above function */
180 static void amdgpu_dm_destroy_drm_device(struct amdgpu_display_manager *dm);
182 static int amdgpu_dm_plane_init(struct amdgpu_display_manager *dm,
183 struct drm_plane *plane,
184 unsigned long possible_crtcs,
185 const struct dc_plane_cap *plane_cap);
186 static int amdgpu_dm_crtc_init(struct amdgpu_display_manager *dm,
187 struct drm_plane *plane,
188 uint32_t link_index);
189 static int amdgpu_dm_connector_init(struct amdgpu_display_manager *dm,
190 struct amdgpu_dm_connector *amdgpu_dm_connector,
192 struct amdgpu_encoder *amdgpu_encoder);
193 static int amdgpu_dm_encoder_init(struct drm_device *dev,
194 struct amdgpu_encoder *aencoder,
195 uint32_t link_index);
197 static int amdgpu_dm_connector_get_modes(struct drm_connector *connector);
199 static void amdgpu_dm_atomic_commit_tail(struct drm_atomic_state *state);
201 static int amdgpu_dm_atomic_check(struct drm_device *dev,
202 struct drm_atomic_state *state);
204 static void handle_cursor_update(struct drm_plane *plane,
205 struct drm_plane_state *old_plane_state);
207 static void amdgpu_dm_set_psr_caps(struct dc_link *link);
208 static bool amdgpu_dm_psr_enable(struct dc_stream_state *stream);
209 static bool amdgpu_dm_link_setup_psr(struct dc_stream_state *stream);
210 static bool amdgpu_dm_psr_disable(struct dc_stream_state *stream);
211 static bool amdgpu_dm_psr_disable_all(struct amdgpu_display_manager *dm);
213 static const struct drm_format_info *
214 amd_get_format_info(const struct drm_mode_fb_cmd2 *cmd);
217 is_timing_unchanged_for_freesync(struct drm_crtc_state *old_crtc_state,
218 struct drm_crtc_state *new_crtc_state);
220 * dm_vblank_get_counter
223 * Get counter for number of vertical blanks
226 * struct amdgpu_device *adev - [in] desired amdgpu device
227 int crtc - [in] which CRTC to get the counter from
230 * Counter for vertical blanks
232 static u32 dm_vblank_get_counter(struct amdgpu_device *adev, int crtc)
234 if (crtc >= adev->mode_info.num_crtc)
237 struct amdgpu_crtc *acrtc = adev->mode_info.crtcs[crtc];
239 if (acrtc->dm_irq_params.stream == NULL) {
240 DRM_ERROR("dc_stream_state is NULL for crtc '%d'!\n",
245 return dc_stream_get_vblank_counter(acrtc->dm_irq_params.stream);
249 static int dm_crtc_get_scanoutpos(struct amdgpu_device *adev, int crtc,
250 u32 *vbl, u32 *position)
252 uint32_t v_blank_start, v_blank_end, h_position, v_position;
254 if ((crtc < 0) || (crtc >= adev->mode_info.num_crtc))
257 struct amdgpu_crtc *acrtc = adev->mode_info.crtcs[crtc];
259 if (acrtc->dm_irq_params.stream == NULL) {
260 DRM_ERROR("dc_stream_state is NULL for crtc '%d'!\n",
266 * TODO rework base driver to use values directly.
267 * for now parse it back into reg-format
269 dc_stream_get_scanoutpos(acrtc->dm_irq_params.stream,
275 *position = v_position | (h_position << 16);
276 *vbl = v_blank_start | (v_blank_end << 16);
282 static bool dm_is_idle(void *handle)
288 static int dm_wait_for_idle(void *handle)
294 static bool dm_check_soft_reset(void *handle)
299 static int dm_soft_reset(void *handle)
305 static struct amdgpu_crtc *
306 get_crtc_by_otg_inst(struct amdgpu_device *adev,
309 struct drm_device *dev = adev_to_drm(adev);
310 struct drm_crtc *crtc;
311 struct amdgpu_crtc *amdgpu_crtc;
313 if (otg_inst == -1) {
315 return adev->mode_info.crtcs[0];
318 list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
319 amdgpu_crtc = to_amdgpu_crtc(crtc);
321 if (amdgpu_crtc->otg_inst == otg_inst)
328 static inline bool amdgpu_dm_vrr_active_irq(struct amdgpu_crtc *acrtc)
330 return acrtc->dm_irq_params.freesync_config.state ==
331 VRR_STATE_ACTIVE_VARIABLE ||
332 acrtc->dm_irq_params.freesync_config.state ==
333 VRR_STATE_ACTIVE_FIXED;
336 static inline bool amdgpu_dm_vrr_active(struct dm_crtc_state *dm_state)
338 return dm_state->freesync_config.state == VRR_STATE_ACTIVE_VARIABLE ||
339 dm_state->freesync_config.state == VRR_STATE_ACTIVE_FIXED;
342 static inline bool is_dc_timing_adjust_needed(struct dm_crtc_state *old_state,
343 struct dm_crtc_state *new_state)
345 if (new_state->freesync_config.state == VRR_STATE_ACTIVE_FIXED)
347 else if (amdgpu_dm_vrr_active(old_state) != amdgpu_dm_vrr_active(new_state))
354 * dm_pflip_high_irq() - Handle pageflip interrupt
355 * @interrupt_params: ignored
357 * Handles the pageflip interrupt by notifying all interested parties
358 * that the pageflip has been completed.
360 static void dm_pflip_high_irq(void *interrupt_params)
362 struct amdgpu_crtc *amdgpu_crtc;
363 struct common_irq_params *irq_params = interrupt_params;
364 struct amdgpu_device *adev = irq_params->adev;
366 struct drm_pending_vblank_event *e;
367 uint32_t vpos, hpos, v_blank_start, v_blank_end;
370 amdgpu_crtc = get_crtc_by_otg_inst(adev, irq_params->irq_src - IRQ_TYPE_PFLIP);
372 /* IRQ could occur when in initial stage */
373 /* TODO work and BO cleanup */
374 if (amdgpu_crtc == NULL) {
375 DC_LOG_PFLIP("CRTC is null, returning.\n");
379 spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);
381 if (amdgpu_crtc->pflip_status != AMDGPU_FLIP_SUBMITTED) {
382 DC_LOG_PFLIP("amdgpu_crtc->pflip_status = %d !=AMDGPU_FLIP_SUBMITTED(%d) on crtc:%d[%p] \n",
383 amdgpu_crtc->pflip_status,
384 AMDGPU_FLIP_SUBMITTED,
385 amdgpu_crtc->crtc_id,
387 spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
391 /* page flip completed. */
392 e = amdgpu_crtc->event;
393 amdgpu_crtc->event = NULL;
398 vrr_active = amdgpu_dm_vrr_active_irq(amdgpu_crtc);
400 /* Fixed refresh rate, or VRR scanout position outside front-porch? */
402 !dc_stream_get_scanoutpos(amdgpu_crtc->dm_irq_params.stream, &v_blank_start,
403 &v_blank_end, &hpos, &vpos) ||
404 (vpos < v_blank_start)) {
405 /* Update to correct count and vblank timestamp if racing with
406 * vblank irq. This also updates to the correct vblank timestamp
407 * even in VRR mode, as scanout is past the front-porch atm.
409 drm_crtc_accurate_vblank_count(&amdgpu_crtc->base);
411 /* Wake up userspace by sending the pageflip event with proper
412 * count and timestamp of vblank of flip completion.
415 drm_crtc_send_vblank_event(&amdgpu_crtc->base, e);
417 /* Event sent, so done with vblank for this flip */
418 drm_crtc_vblank_put(&amdgpu_crtc->base);
421 /* VRR active and inside front-porch: vblank count and
422 * timestamp for pageflip event will only be up to date after
423 * drm_crtc_handle_vblank() has been executed from late vblank
424 * irq handler after start of back-porch (vline 0). We queue the
425 * pageflip event for send-out by drm_crtc_handle_vblank() with
426 * updated timestamp and count, once it runs after us.
428 * We need to open-code this instead of using the helper
429 * drm_crtc_arm_vblank_event(), as that helper would
430 * call drm_crtc_accurate_vblank_count(), which we must
431 * not call in VRR mode while we are in front-porch!
434 /* sequence will be replaced by real count during send-out. */
435 e->sequence = drm_crtc_vblank_count(&amdgpu_crtc->base);
436 e->pipe = amdgpu_crtc->crtc_id;
438 list_add_tail(&e->base.link, &adev_to_drm(adev)->vblank_event_list);
442 /* Keep track of vblank of this flip for flip throttling. We use the
443 * cooked hw counter, as that one is incremented at the start of the vblank
444 * in which the pageflip completed, so last_flip_vblank is the forbidden count
445 * for queueing new pageflips if vsync + VRR is enabled.
447 amdgpu_crtc->dm_irq_params.last_flip_vblank =
448 amdgpu_get_vblank_counter_kms(&amdgpu_crtc->base);
450 amdgpu_crtc->pflip_status = AMDGPU_FLIP_NONE;
451 spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
453 DC_LOG_PFLIP("crtc:%d[%p], pflip_stat:AMDGPU_FLIP_NONE, vrr[%d]-fp %d\n",
454 amdgpu_crtc->crtc_id, amdgpu_crtc,
455 vrr_active, (int) !e);
458 static void dm_vupdate_high_irq(void *interrupt_params)
460 struct common_irq_params *irq_params = interrupt_params;
461 struct amdgpu_device *adev = irq_params->adev;
462 struct amdgpu_crtc *acrtc;
463 struct drm_device *drm_dev;
464 struct drm_vblank_crtc *vblank;
465 ktime_t frame_duration_ns, previous_timestamp;
469 acrtc = get_crtc_by_otg_inst(adev, irq_params->irq_src - IRQ_TYPE_VUPDATE);
472 vrr_active = amdgpu_dm_vrr_active_irq(acrtc);
473 drm_dev = acrtc->base.dev;
474 vblank = &drm_dev->vblank[acrtc->base.index];
475 previous_timestamp = atomic64_read(&irq_params->previous_timestamp);
476 frame_duration_ns = vblank->time - previous_timestamp;
478 if (frame_duration_ns > 0) {
479 trace_amdgpu_refresh_rate_track(acrtc->base.index,
481 ktime_divns(NSEC_PER_SEC, frame_duration_ns));
482 atomic64_set(&irq_params->previous_timestamp, vblank->time);
485 DC_LOG_VBLANK("crtc:%d, vupdate-vrr:%d\n",
489 /* Core vblank handling is done here after end of front-porch in
490 * vrr mode, as vblank timestamping only gives valid results
491 * now that the front-porch has ended. This will also deliver
492 * page-flip completion events that have been queued to us
493 * if a pageflip happened inside front-porch.
496 drm_crtc_handle_vblank(&acrtc->base);
498 /* BTR processing for pre-DCE12 ASICs */
499 if (acrtc->dm_irq_params.stream &&
500 adev->family < AMDGPU_FAMILY_AI) {
501 spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);
502 mod_freesync_handle_v_update(
503 adev->dm.freesync_module,
504 acrtc->dm_irq_params.stream,
505 &acrtc->dm_irq_params.vrr_params);
507 dc_stream_adjust_vmin_vmax(
509 acrtc->dm_irq_params.stream,
510 &acrtc->dm_irq_params.vrr_params.adjust);
511 spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
518 * dm_crtc_high_irq() - Handles CRTC interrupt
519 * @interrupt_params: used for determining the CRTC instance
521 * Handles the CRTC/VSYNC interrupt by notifying DRM's VBLANK
524 static void dm_crtc_high_irq(void *interrupt_params)
526 struct common_irq_params *irq_params = interrupt_params;
527 struct amdgpu_device *adev = irq_params->adev;
528 struct amdgpu_crtc *acrtc;
532 acrtc = get_crtc_by_otg_inst(adev, irq_params->irq_src - IRQ_TYPE_VBLANK);
536 vrr_active = amdgpu_dm_vrr_active_irq(acrtc);
538 DC_LOG_VBLANK("crtc:%d, vupdate-vrr:%d, planes:%d\n", acrtc->crtc_id,
539 vrr_active, acrtc->dm_irq_params.active_planes);
542 * Core vblank handling at start of front-porch is only possible
543 * in non-vrr mode, as only then does vblank timestamping give
544 * valid results when done in the front-porch. Otherwise defer it
545 * to dm_vupdate_high_irq after end of front-porch.
548 drm_crtc_handle_vblank(&acrtc->base);
551 * Following stuff must happen at start of vblank, for crc
552 * computation and below-the-range btr support in vrr mode.
554 amdgpu_dm_crtc_handle_crc_irq(&acrtc->base);
556 /* BTR updates need to happen before VUPDATE on Vega and above. */
557 if (adev->family < AMDGPU_FAMILY_AI)
560 spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);
562 if (acrtc->dm_irq_params.stream &&
563 acrtc->dm_irq_params.vrr_params.supported &&
564 acrtc->dm_irq_params.freesync_config.state ==
565 VRR_STATE_ACTIVE_VARIABLE) {
566 mod_freesync_handle_v_update(adev->dm.freesync_module,
567 acrtc->dm_irq_params.stream,
568 &acrtc->dm_irq_params.vrr_params);
570 dc_stream_adjust_vmin_vmax(adev->dm.dc, acrtc->dm_irq_params.stream,
571 &acrtc->dm_irq_params.vrr_params.adjust);
575 * If there aren't any active_planes then DCH HUBP may be clock-gated.
576 * In that case, pageflip completion interrupts won't fire and pageflip
577 * completion events won't get delivered. Prevent this by sending
578 * pending pageflip events from here if a flip is still pending.
580 * If any planes are enabled, use dm_pflip_high_irq() instead, to
581 * avoid race conditions between flip programming and completion,
582 * which could cause too early flip completion events.
584 if (adev->family >= AMDGPU_FAMILY_RV &&
585 acrtc->pflip_status == AMDGPU_FLIP_SUBMITTED &&
586 acrtc->dm_irq_params.active_planes == 0) {
588 drm_crtc_send_vblank_event(&acrtc->base, acrtc->event);
590 drm_crtc_vblank_put(&acrtc->base);
592 acrtc->pflip_status = AMDGPU_FLIP_NONE;
595 spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
598 #if defined(CONFIG_DRM_AMD_DC_DCN)
600 * dm_dcn_vertical_interrupt0_high_irq() - Handles OTG Vertical interrupt0 for
601 * DCN generation ASICs
602 * @interrupt_params: interrupt parameters
604 * Used to set crc window/read out crc value at vertical line 0 position
606 #if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
607 static void dm_dcn_vertical_interrupt0_high_irq(void *interrupt_params)
609 struct common_irq_params *irq_params = interrupt_params;
610 struct amdgpu_device *adev = irq_params->adev;
611 struct amdgpu_crtc *acrtc;
613 acrtc = get_crtc_by_otg_inst(adev, irq_params->irq_src - IRQ_TYPE_VLINE0);
618 amdgpu_dm_crtc_handle_crc_window_irq(&acrtc->base);
623 static int dm_set_clockgating_state(void *handle,
624 enum amd_clockgating_state state)
629 static int dm_set_powergating_state(void *handle,
630 enum amd_powergating_state state)
635 /* Prototypes of private functions */
636 static int dm_early_init(void* handle);
638 /* Allocate memory for FBC compressed data */
639 static void amdgpu_dm_fbc_init(struct drm_connector *connector)
641 struct drm_device *dev = connector->dev;
642 struct amdgpu_device *adev = drm_to_adev(dev);
643 struct dm_compressor_info *compressor = &adev->dm.compressor;
644 struct amdgpu_dm_connector *aconn = to_amdgpu_dm_connector(connector);
645 struct drm_display_mode *mode;
646 unsigned long max_size = 0;
648 if (adev->dm.dc->fbc_compressor == NULL)
651 if (aconn->dc_link->connector_signal != SIGNAL_TYPE_EDP)
654 if (compressor->bo_ptr)
658 list_for_each_entry(mode, &connector->modes, head) {
659 if (max_size < mode->htotal * mode->vtotal)
660 max_size = mode->htotal * mode->vtotal;
664 int r = amdgpu_bo_create_kernel(adev, max_size * 4, PAGE_SIZE,
665 AMDGPU_GEM_DOMAIN_GTT, &compressor->bo_ptr,
666 &compressor->gpu_addr, &compressor->cpu_addr);
669 DRM_ERROR("DM: Failed to initialize FBC\n");
671 adev->dm.dc->ctx->fbc_gpu_addr = compressor->gpu_addr;
672 DRM_INFO("DM: FBC alloc %lu\n", max_size*4);
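/*
 * Audio component glue: allows the HDA driver to query the ELD of the
 * connector whose audio instance matches the requested port.
 */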
679 static int amdgpu_dm_audio_component_get_eld(struct device *kdev, int port,
680 int pipe, bool *enabled,
681 unsigned char *buf, int max_bytes)
683 struct drm_device *dev = dev_get_drvdata(kdev);
684 struct amdgpu_device *adev = drm_to_adev(dev);
685 struct drm_connector *connector;
686 struct drm_connector_list_iter conn_iter;
687 struct amdgpu_dm_connector *aconnector;
692 mutex_lock(&adev->dm.audio_lock);
694 drm_connector_list_iter_begin(dev, &conn_iter);
695 drm_for_each_connector_iter(connector, &conn_iter) {
696 aconnector = to_amdgpu_dm_connector(connector);
697 if (aconnector->audio_inst != port)
701 ret = drm_eld_size(connector->eld);
702 memcpy(buf, connector->eld, min(max_bytes, ret));
706 drm_connector_list_iter_end(&conn_iter);
708 mutex_unlock(&adev->dm.audio_lock);
710 DRM_DEBUG_KMS("Get ELD : idx=%d ret=%d en=%d\n", port, ret, *enabled);
715 static const struct drm_audio_component_ops amdgpu_dm_audio_component_ops = {
716 .get_eld = amdgpu_dm_audio_component_get_eld,
719 static int amdgpu_dm_audio_component_bind(struct device *kdev,
720 struct device *hda_kdev, void *data)
722 struct drm_device *dev = dev_get_drvdata(kdev);
723 struct amdgpu_device *adev = drm_to_adev(dev);
724 struct drm_audio_component *acomp = data;
726 acomp->ops = &amdgpu_dm_audio_component_ops;
728 adev->dm.audio_component = acomp;
733 static void amdgpu_dm_audio_component_unbind(struct device *kdev,
734 struct device *hda_kdev, void *data)
736 struct drm_device *dev = dev_get_drvdata(kdev);
737 struct amdgpu_device *adev = drm_to_adev(dev);
738 struct drm_audio_component *acomp = data;
742 adev->dm.audio_component = NULL;
745 static const struct component_ops amdgpu_dm_audio_component_bind_ops = {
746 .bind = amdgpu_dm_audio_component_bind,
747 .unbind = amdgpu_dm_audio_component_unbind,
750 static int amdgpu_dm_audio_init(struct amdgpu_device *adev)
757 adev->mode_info.audio.enabled = true;
759 adev->mode_info.audio.num_pins = adev->dm.dc->res_pool->audio_count;
761 for (i = 0; i < adev->mode_info.audio.num_pins; i++) {
762 adev->mode_info.audio.pin[i].channels = -1;
763 adev->mode_info.audio.pin[i].rate = -1;
764 adev->mode_info.audio.pin[i].bits_per_sample = -1;
765 adev->mode_info.audio.pin[i].status_bits = 0;
766 adev->mode_info.audio.pin[i].category_code = 0;
767 adev->mode_info.audio.pin[i].connected = false;
768 adev->mode_info.audio.pin[i].id =
769 adev->dm.dc->res_pool->audios[i]->inst;
770 adev->mode_info.audio.pin[i].offset = 0;
773 ret = component_add(adev->dev, &amdgpu_dm_audio_component_bind_ops);
777 adev->dm.audio_registered = true;
782 static void amdgpu_dm_audio_fini(struct amdgpu_device *adev)
787 if (!adev->mode_info.audio.enabled)
790 if (adev->dm.audio_registered) {
791 component_del(adev->dev, &amdgpu_dm_audio_component_bind_ops);
792 adev->dm.audio_registered = false;
795 /* TODO: Disable audio? */
797 adev->mode_info.audio.enabled = false;
800 static void amdgpu_dm_audio_eld_notify(struct amdgpu_device *adev, int pin)
802 struct drm_audio_component *acomp = adev->dm.audio_component;
804 if (acomp && acomp->audio_ops && acomp->audio_ops->pin_eld_notify) {
805 DRM_DEBUG_KMS("Notify ELD: %d\n", pin);
807 acomp->audio_ops->pin_eld_notify(acomp->audio_ops->audio_ptr,
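/*
 * Bring up the DMUB microcontroller: copy the firmware and VBIOS images
 * into their framebuffer windows, program the hardware parameters and
 * wait for the firmware auto-load to complete.
 */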
812 static int dm_dmub_hw_init(struct amdgpu_device *adev)
814 const struct dmcub_firmware_header_v1_0 *hdr;
815 struct dmub_srv *dmub_srv = adev->dm.dmub_srv;
816 struct dmub_srv_fb_info *fb_info = adev->dm.dmub_fb_info;
817 const struct firmware *dmub_fw = adev->dm.dmub_fw;
818 struct dmcu *dmcu = adev->dm.dc->res_pool->dmcu;
819 struct abm *abm = adev->dm.dc->res_pool->abm;
820 struct dmub_srv_hw_params hw_params;
821 enum dmub_status status;
822 const unsigned char *fw_inst_const, *fw_bss_data;
823 uint32_t i, fw_inst_const_size, fw_bss_data_size;
827 /* DMUB isn't supported on the ASIC. */
831 DRM_ERROR("No framebuffer info for DMUB service.\n");
836 /* Firmware required for DMUB support. */
837 DRM_ERROR("No firmware provided for DMUB.\n");
841 status = dmub_srv_has_hw_support(dmub_srv, &has_hw_support);
842 if (status != DMUB_STATUS_OK) {
843 DRM_ERROR("Error checking HW support for DMUB: %d\n", status);
847 if (!has_hw_support) {
848 DRM_INFO("DMUB unsupported on ASIC\n");
852 hdr = (const struct dmcub_firmware_header_v1_0 *)dmub_fw->data;
854 fw_inst_const = dmub_fw->data +
855 le32_to_cpu(hdr->header.ucode_array_offset_bytes) +
858 fw_bss_data = dmub_fw->data +
859 le32_to_cpu(hdr->header.ucode_array_offset_bytes) +
860 le32_to_cpu(hdr->inst_const_bytes);
862 /* Copy firmware and bios info into FB memory. */
863 fw_inst_const_size = le32_to_cpu(hdr->inst_const_bytes) -
864 PSP_HEADER_BYTES - PSP_FOOTER_BYTES;
866 fw_bss_data_size = le32_to_cpu(hdr->bss_data_bytes);
868 /* if adev->firmware.load_type == AMDGPU_FW_LOAD_PSP,
869 * amdgpu_ucode_init_single_fw will load dmub firmware
870 * fw_inst_const part to cw0; otherwise, the firmware back door load
871 * will be done by dm_dmub_hw_init
873 if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP) {
874 memcpy(fb_info->fb[DMUB_WINDOW_0_INST_CONST].cpu_addr, fw_inst_const,
878 if (fw_bss_data_size)
879 memcpy(fb_info->fb[DMUB_WINDOW_2_BSS_DATA].cpu_addr,
880 fw_bss_data, fw_bss_data_size);
882 /* Copy firmware bios info into FB memory. */
883 memcpy(fb_info->fb[DMUB_WINDOW_3_VBIOS].cpu_addr, adev->bios,
886 /* Reset regions that need to be reset. */
887 memset(fb_info->fb[DMUB_WINDOW_4_MAILBOX].cpu_addr, 0,
888 fb_info->fb[DMUB_WINDOW_4_MAILBOX].size);
890 memset(fb_info->fb[DMUB_WINDOW_5_TRACEBUFF].cpu_addr, 0,
891 fb_info->fb[DMUB_WINDOW_5_TRACEBUFF].size);
893 memset(fb_info->fb[DMUB_WINDOW_6_FW_STATE].cpu_addr, 0,
894 fb_info->fb[DMUB_WINDOW_6_FW_STATE].size);
896 /* Initialize hardware. */
897 memset(&hw_params, 0, sizeof(hw_params));
898 hw_params.fb_base = adev->gmc.fb_start;
899 hw_params.fb_offset = adev->gmc.aper_base;
901 /* backdoor load firmware and trigger dmub running */
902 if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP)
903 hw_params.load_inst_const = true;
906 hw_params.psp_version = dmcu->psp_version;
908 for (i = 0; i < fb_info->num_fb; ++i)
909 hw_params.fb[i] = &fb_info->fb[i];
911 status = dmub_srv_hw_init(dmub_srv, &hw_params);
912 if (status != DMUB_STATUS_OK) {
913 DRM_ERROR("Error initializing DMUB HW: %d\n", status);
917 /* Wait for firmware load to finish. */
918 status = dmub_srv_wait_for_auto_load(dmub_srv, 100000);
919 if (status != DMUB_STATUS_OK)
920 DRM_WARN("Wait for DMUB auto-load failed: %d\n", status);
922 /* Init DMCU and ABM if available. */
924 dmcu->funcs->dmcu_init(dmcu);
925 abm->dmcu_is_running = dmcu->funcs->is_dmcu_initialized(dmcu);
928 adev->dm.dc->ctx->dmub_srv = dc_dmub_srv_create(adev->dm.dc, dmub_srv);
929 if (!adev->dm.dc->ctx->dmub_srv) {
930 DRM_ERROR("Couldn't allocate DC DMUB server!\n");
934 DRM_INFO("DMUB hardware initialized: version=0x%08X\n",
935 adev->dm.dmcub_fw_version);
940 #if defined(CONFIG_DRM_AMD_DC_DCN)
941 #define DMUB_TRACE_MAX_READ 64
942 static void dm_dmub_trace_high_irq(void *interrupt_params)
944 struct common_irq_params *irq_params = interrupt_params;
945 struct amdgpu_device *adev = irq_params->adev;
946 struct amdgpu_display_manager *dm = &adev->dm;
947 struct dmcub_trace_buf_entry entry = { 0 };
951 if (dc_dmub_srv_get_dmub_outbox0_msg(dm->dc, &entry)) {
952 trace_amdgpu_dmub_trace_high_irq(entry.trace_code, entry.tick_count,
953 entry.param0, entry.param1);
955 DRM_DEBUG_DRIVER("trace_code:%u, tick_count:%u, param0:%u, param1:%u\n",
956 entry.trace_code, entry.tick_count, entry.param0, entry.param1);
962 } while (count <= DMUB_TRACE_MAX_READ);
964 ASSERT(count <= DMUB_TRACE_MAX_READ);
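/*
 * Translate the GMC view of the address space into a DC physical address
 * space config. System aperture addresses are in 256KB units (>> 18),
 * AGP addresses in 16MB units (>> 24) and GART page table addresses in
 * 4KB pages (>> 12).
 */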
967 static void mmhub_read_system_context(struct amdgpu_device *adev, struct dc_phy_addr_space_config *pa_config)
970 uint32_t logical_addr_low;
971 uint32_t logical_addr_high;
972 uint32_t agp_base, agp_bot, agp_top;
973 PHYSICAL_ADDRESS_LOC page_table_start, page_table_end, page_table_base;
975 logical_addr_low = min(adev->gmc.fb_start, adev->gmc.agp_start) >> 18;
976 pt_base = amdgpu_gmc_pd_addr(adev->gart.bo);
978 if (adev->apu_flags & AMD_APU_IS_RAVEN2)
980 * Raven2 has a HW issue: it cannot use vram that lies above
981 * MC_VM_SYSTEM_APERTURE_HIGH_ADDR. As a workaround, increase the
982 * system aperture high address (add 1) to avoid the VM fault and
983 * hardware hang.
985 logical_addr_high = max((adev->gmc.fb_end >> 18) + 0x1, adev->gmc.agp_end >> 18);
987 logical_addr_high = max(adev->gmc.fb_end, adev->gmc.agp_end) >> 18;
990 agp_bot = adev->gmc.agp_start >> 24;
991 agp_top = adev->gmc.agp_end >> 24;
994 page_table_start.high_part = (u32)(adev->gmc.gart_start >> 44) & 0xF;
995 page_table_start.low_part = (u32)(adev->gmc.gart_start >> 12);
996 page_table_end.high_part = (u32)(adev->gmc.gart_end >> 44) & 0xF;
997 page_table_end.low_part = (u32)(adev->gmc.gart_end >> 12);
998 page_table_base.high_part = upper_32_bits(pt_base) & 0xF;
999 page_table_base.low_part = lower_32_bits(pt_base);
1001 pa_config->system_aperture.start_addr = (uint64_t)logical_addr_low << 18;
1002 pa_config->system_aperture.end_addr = (uint64_t)logical_addr_high << 18;
1004 pa_config->system_aperture.agp_base = (uint64_t)agp_base << 24;
1005 pa_config->system_aperture.agp_bot = (uint64_t)agp_bot << 24;
1006 pa_config->system_aperture.agp_top = (uint64_t)agp_top << 24;
1008 pa_config->system_aperture.fb_base = adev->gmc.fb_start;
1009 pa_config->system_aperture.fb_offset = adev->gmc.aper_base;
1010 pa_config->system_aperture.fb_top = adev->gmc.fb_end;
1012 pa_config->gart_config.page_table_start_addr = page_table_start.quad_part << 12;
1013 pa_config->gart_config.page_table_end_addr = page_table_end.quad_part << 12;
1014 pa_config->gart_config.page_table_base_addr = page_table_base.quad_part;
1016 pa_config->is_hvm_enabled = 0;
1020 #if defined(CONFIG_DRM_AMD_DC_DCN)
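/*
 * Track how many CRTCs currently have vblank interrupts enabled; DC idle
 * optimizations (MALL) are only allowed once that count drops to zero.
 */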
1021 static void event_mall_stutter(struct work_struct *work)
1024 struct vblank_workqueue *vblank_work = container_of(work, struct vblank_workqueue, mall_work);
1025 struct amdgpu_display_manager *dm = vblank_work->dm;
1027 mutex_lock(&dm->dc_lock);
1029 if (vblank_work->enable)
1030 dm->active_vblank_irq_count++;
1031 else if (dm->active_vblank_irq_count)
1032 dm->active_vblank_irq_count--;
1034 dc_allow_idle_optimizations(dm->dc, dm->active_vblank_irq_count == 0);
1036 DRM_DEBUG_KMS("Allow idle optimizations (MALL): %d\n", dm->active_vblank_irq_count == 0);
1038 mutex_unlock(&dm->dc_lock);
1041 static struct vblank_workqueue *vblank_create_workqueue(struct amdgpu_device *adev, struct dc *dc)
1044 int max_caps = dc->caps.max_links;
1045 struct vblank_workqueue *vblank_work;
1048 vblank_work = kcalloc(max_caps, sizeof(*vblank_work), GFP_KERNEL);
1049 if (ZERO_OR_NULL_PTR(vblank_work)) {
1054 for (i = 0; i < max_caps; i++)
1055 INIT_WORK(&vblank_work[i].mall_work, event_mall_stutter);
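/*
 * Top-level DM bring-up: initialize DC, DMUB, freesync and (optionally)
 * HDCP support, then create the DRM-facing display objects.
 */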
1060 static int amdgpu_dm_init(struct amdgpu_device *adev)
1062 struct dc_init_data init_data;
1063 #ifdef CONFIG_DRM_AMD_DC_HDCP
1064 struct dc_callback_init init_params;
1068 adev->dm.ddev = adev_to_drm(adev);
1069 adev->dm.adev = adev;
1071 /* Zero all the fields */
1072 memset(&init_data, 0, sizeof(init_data));
1073 #ifdef CONFIG_DRM_AMD_DC_HDCP
1074 memset(&init_params, 0, sizeof(init_params));
1077 mutex_init(&adev->dm.dc_lock);
1078 mutex_init(&adev->dm.audio_lock);
1079 #if defined(CONFIG_DRM_AMD_DC_DCN)
1080 spin_lock_init(&adev->dm.vblank_lock);
1083 if (amdgpu_dm_irq_init(adev)) {
1084 DRM_ERROR("amdgpu: failed to initialize DM IRQ support.\n");
1088 init_data.asic_id.chip_family = adev->family;
1090 init_data.asic_id.pci_revision_id = adev->pdev->revision;
1091 init_data.asic_id.hw_internal_rev = adev->external_rev_id;
1093 init_data.asic_id.vram_width = adev->gmc.vram_width;
1094 /* TODO: initialize init_data.asic_id.vram_type here!!!! */
1095 init_data.asic_id.atombios_base_address =
1096 adev->mode_info.atom_context->bios;
1098 init_data.driver = adev;
1100 adev->dm.cgs_device = amdgpu_cgs_create_device(adev);
1102 if (!adev->dm.cgs_device) {
1103 DRM_ERROR("amdgpu: failed to create cgs device.\n");
1107 init_data.cgs_device = adev->dm.cgs_device;
1109 init_data.dce_environment = DCE_ENV_PRODUCTION_DRV;
1111 switch (adev->asic_type) {
1116 init_data.flags.gpu_vm_support = true;
1117 if (ASICREV_IS_GREEN_SARDINE(adev->external_rev_id))
1118 init_data.flags.disable_dmcu = true;
1120 #if defined(CONFIG_DRM_AMD_DC_DCN)
1122 init_data.flags.gpu_vm_support = true;
1129 if (amdgpu_dc_feature_mask & DC_FBC_MASK)
1130 init_data.flags.fbc_support = true;
1132 if (amdgpu_dc_feature_mask & DC_MULTI_MON_PP_MCLK_SWITCH_MASK)
1133 init_data.flags.multi_mon_pp_mclk_switch = true;
1135 if (amdgpu_dc_feature_mask & DC_DISABLE_FRACTIONAL_PWM_MASK)
1136 init_data.flags.disable_fractional_pwm = true;
1138 init_data.flags.power_down_display_on_boot = true;
1140 INIT_LIST_HEAD(&adev->dm.da_list);
1141 /* Display Core create. */
1142 adev->dm.dc = dc_create(&init_data);
1145 DRM_INFO("Display Core initialized with v%s!\n", DC_VER);
1147 DRM_INFO("Display Core failed to initialize with v%s!\n", DC_VER);
1151 if (amdgpu_dc_debug_mask & DC_DISABLE_PIPE_SPLIT) {
1152 adev->dm.dc->debug.force_single_disp_pipe_split = false;
1153 adev->dm.dc->debug.pipe_split_policy = MPC_SPLIT_AVOID;
1156 if (adev->asic_type != CHIP_CARRIZO && adev->asic_type != CHIP_STONEY)
1157 adev->dm.dc->debug.disable_stutter = amdgpu_pp_feature_mask & PP_STUTTER_MODE ? false : true;
1159 if (amdgpu_dc_debug_mask & DC_DISABLE_STUTTER)
1160 adev->dm.dc->debug.disable_stutter = true;
1162 if (amdgpu_dc_debug_mask & DC_DISABLE_DSC)
1163 adev->dm.dc->debug.disable_dsc = true;
1165 if (amdgpu_dc_debug_mask & DC_DISABLE_CLOCK_GATING)
1166 adev->dm.dc->debug.disable_clock_gate = true;
1168 r = dm_dmub_hw_init(adev);
1170 DRM_ERROR("DMUB interface failed to initialize: status=%d\n", r);
1174 dc_hardware_init(adev->dm.dc);
1176 #if defined(CONFIG_DRM_AMD_DC_DCN)
1177 if (adev->apu_flags) {
1178 struct dc_phy_addr_space_config pa_config;
1180 mmhub_read_system_context(adev, &pa_config);
1182 // Call the DC init_memory func
1183 dc_setup_system_context(adev->dm.dc, &pa_config);
1187 adev->dm.freesync_module = mod_freesync_create(adev->dm.dc);
1188 if (!adev->dm.freesync_module) {
1190 "amdgpu: failed to initialize freesync_module.\n");
1192 DRM_DEBUG_DRIVER("amdgpu: freesync_module init done %p.\n",
1193 adev->dm.freesync_module);
1195 amdgpu_dm_init_color_mod();
1197 #if defined(CONFIG_DRM_AMD_DC_DCN)
1198 if (adev->dm.dc->caps.max_links > 0) {
1199 adev->dm.vblank_workqueue = vblank_create_workqueue(adev, adev->dm.dc);
1201 if (!adev->dm.vblank_workqueue)
1202 DRM_ERROR("amdgpu: failed to initialize vblank_workqueue.\n");
1204 DRM_DEBUG_DRIVER("amdgpu: vblank_workqueue init done %p.\n", adev->dm.vblank_workqueue);
1208 #ifdef CONFIG_DRM_AMD_DC_HDCP
1209 if (adev->dm.dc->caps.max_links > 0 && adev->asic_type >= CHIP_RAVEN) {
1210 adev->dm.hdcp_workqueue = hdcp_create_workqueue(adev, &init_params.cp_psp, adev->dm.dc);
1212 if (!adev->dm.hdcp_workqueue)
1213 DRM_ERROR("amdgpu: failed to initialize hdcp_workqueue.\n");
1215 DRM_DEBUG_DRIVER("amdgpu: hdcp_workqueue init done %p.\n", adev->dm.hdcp_workqueue);
1217 dc_init_callbacks(adev->dm.dc, &init_params);
1220 #if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
1221 adev->dm.crc_rd_wrk = amdgpu_dm_crtc_secure_display_create_work();
1223 if (amdgpu_dm_initialize_drm_device(adev)) {
1225 "amdgpu: failed to initialize sw for display support.\n");
1229 /* create fake encoders for MST */
1230 dm_dp_create_fake_mst_encoders(adev);
1232 /* TODO: Add_display_info? */
1234 /* TODO use dynamic cursor width */
1235 adev_to_drm(adev)->mode_config.cursor_width = adev->dm.dc->caps.max_cursor_size;
1236 adev_to_drm(adev)->mode_config.cursor_height = adev->dm.dc->caps.max_cursor_size;
1238 if (drm_vblank_init(adev_to_drm(adev), adev->dm.display_indexes_num)) {
1240 "amdgpu: failed to initialize sw for display support.\n");
1245 DRM_DEBUG_DRIVER("KMS initialized.\n");
1249 amdgpu_dm_fini(adev);
1254 static void amdgpu_dm_fini(struct amdgpu_device *adev)
1258 for (i = 0; i < adev->dm.display_indexes_num; i++) {
1259 drm_encoder_cleanup(&adev->dm.mst_encoders[i].base);
1262 amdgpu_dm_audio_fini(adev);
1264 amdgpu_dm_destroy_drm_device(&adev->dm);
1266 #if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
1267 if (adev->dm.crc_rd_wrk) {
1268 flush_work(&adev->dm.crc_rd_wrk->notify_ta_work);
1269 kfree(adev->dm.crc_rd_wrk);
1270 adev->dm.crc_rd_wrk = NULL;
1273 #ifdef CONFIG_DRM_AMD_DC_HDCP
1274 if (adev->dm.hdcp_workqueue) {
1275 hdcp_destroy(&adev->dev->kobj, adev->dm.hdcp_workqueue);
1276 adev->dm.hdcp_workqueue = NULL;
1280 dc_deinit_callbacks(adev->dm.dc);
1283 #if defined(CONFIG_DRM_AMD_DC_DCN)
1284 if (adev->dm.vblank_workqueue) {
1285 adev->dm.vblank_workqueue->dm = NULL;
1286 kfree(adev->dm.vblank_workqueue);
1287 adev->dm.vblank_workqueue = NULL;
1291 if (adev->dm.dc->ctx->dmub_srv) {
1292 dc_dmub_srv_destroy(&adev->dm.dc->ctx->dmub_srv);
1293 adev->dm.dc->ctx->dmub_srv = NULL;
1296 if (adev->dm.dmub_bo)
1297 amdgpu_bo_free_kernel(&adev->dm.dmub_bo,
1298 &adev->dm.dmub_bo_gpu_addr,
1299 &adev->dm.dmub_bo_cpu_addr);
1301 /* DC Destroy TODO: Replace destroy DAL */
1303 dc_destroy(&adev->dm.dc);
1305 * TODO: pageflip, vblank interrupt
1307 * amdgpu_dm_irq_fini(adev);
1310 if (adev->dm.cgs_device) {
1311 amdgpu_cgs_destroy_device(adev->dm.cgs_device);
1312 adev->dm.cgs_device = NULL;
1314 if (adev->dm.freesync_module) {
1315 mod_freesync_destroy(adev->dm.freesync_module);
1316 adev->dm.freesync_module = NULL;
1319 mutex_destroy(&adev->dm.audio_lock);
1320 mutex_destroy(&adev->dm.dc_lock);
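/*
 * Select and request the DMCU firmware for the current ASIC. A missing
 * DMCU image is not fatal; the driver simply continues without it.
 */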
1325 static int load_dmcu_fw(struct amdgpu_device *adev)
1327 const char *fw_name_dmcu = NULL;
1329 const struct dmcu_firmware_header_v1_0 *hdr;
1331 switch (adev->asic_type) {
1332 #if defined(CONFIG_DRM_AMD_DC_SI)
1347 case CHIP_POLARIS11:
1348 case CHIP_POLARIS10:
1349 case CHIP_POLARIS12:
1357 case CHIP_SIENNA_CICHLID:
1358 case CHIP_NAVY_FLOUNDER:
1359 case CHIP_DIMGREY_CAVEFISH:
1363 fw_name_dmcu = FIRMWARE_NAVI12_DMCU;
1366 if (ASICREV_IS_PICASSO(adev->external_rev_id))
1367 fw_name_dmcu = FIRMWARE_RAVEN_DMCU;
1368 else if (ASICREV_IS_RAVEN2(adev->external_rev_id))
1369 fw_name_dmcu = FIRMWARE_RAVEN_DMCU;
1374 DRM_ERROR("Unsupported ASIC type: 0x%X\n", adev->asic_type);
1378 if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP) {
1379 DRM_DEBUG_KMS("dm: DMCU firmware not supported on direct or SMU loading\n");
1383 r = request_firmware_direct(&adev->dm.fw_dmcu, fw_name_dmcu, adev->dev);
1385 /* DMCU firmware is not necessary, so don't raise a fuss if it's missing */
1386 DRM_DEBUG_KMS("dm: DMCU firmware not found\n");
1387 adev->dm.fw_dmcu = NULL;
1391 dev_err(adev->dev, "amdgpu_dm: Can't load firmware \"%s\"\n",
1396 r = amdgpu_ucode_validate(adev->dm.fw_dmcu);
1398 dev_err(adev->dev, "amdgpu_dm: Can't validate firmware \"%s\"\n",
1400 release_firmware(adev->dm.fw_dmcu);
1401 adev->dm.fw_dmcu = NULL;
1405 hdr = (const struct dmcu_firmware_header_v1_0 *)adev->dm.fw_dmcu->data;
1406 adev->firmware.ucode[AMDGPU_UCODE_ID_DMCU_ERAM].ucode_id = AMDGPU_UCODE_ID_DMCU_ERAM;
1407 adev->firmware.ucode[AMDGPU_UCODE_ID_DMCU_ERAM].fw = adev->dm.fw_dmcu;
1408 adev->firmware.fw_size +=
1409 ALIGN(le32_to_cpu(hdr->header.ucode_size_bytes) - le32_to_cpu(hdr->intv_size_bytes), PAGE_SIZE);
1411 adev->firmware.ucode[AMDGPU_UCODE_ID_DMCU_INTV].ucode_id = AMDGPU_UCODE_ID_DMCU_INTV;
1412 adev->firmware.ucode[AMDGPU_UCODE_ID_DMCU_INTV].fw = adev->dm.fw_dmcu;
1413 adev->firmware.fw_size +=
1414 ALIGN(le32_to_cpu(hdr->intv_size_bytes), PAGE_SIZE);
1416 adev->dm.dmcu_fw_version = le32_to_cpu(hdr->header.ucode_version);
1418 DRM_DEBUG_KMS("PSP loading DMCU firmware\n");
1423 static uint32_t amdgpu_dm_dmub_reg_read(void *ctx, uint32_t address)
1425 struct amdgpu_device *adev = ctx;
1427 return dm_read_reg(adev->dm.dc->ctx, address);
1430 static void amdgpu_dm_dmub_reg_write(void *ctx, uint32_t address,
1433 struct amdgpu_device *adev = ctx;
1435 return dm_write_reg(adev->dm.dc->ctx, address, value);
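/*
 * Software-side DMUB setup: request and validate the firmware, create the
 * dmub_srv instance, compute the region layout and allocate the backing
 * framebuffer for it.
 */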
1438 static int dm_dmub_sw_init(struct amdgpu_device *adev)
1440 struct dmub_srv_create_params create_params;
1441 struct dmub_srv_region_params region_params;
1442 struct dmub_srv_region_info region_info;
1443 struct dmub_srv_fb_params fb_params;
1444 struct dmub_srv_fb_info *fb_info;
1445 struct dmub_srv *dmub_srv;
1446 const struct dmcub_firmware_header_v1_0 *hdr;
1447 const char *fw_name_dmub;
1448 enum dmub_asic dmub_asic;
1449 enum dmub_status status;
1452 switch (adev->asic_type) {
1454 dmub_asic = DMUB_ASIC_DCN21;
1455 fw_name_dmub = FIRMWARE_RENOIR_DMUB;
1456 if (ASICREV_IS_GREEN_SARDINE(adev->external_rev_id))
1457 fw_name_dmub = FIRMWARE_GREEN_SARDINE_DMUB;
1459 case CHIP_SIENNA_CICHLID:
1460 dmub_asic = DMUB_ASIC_DCN30;
1461 fw_name_dmub = FIRMWARE_SIENNA_CICHLID_DMUB;
1463 case CHIP_NAVY_FLOUNDER:
1464 dmub_asic = DMUB_ASIC_DCN30;
1465 fw_name_dmub = FIRMWARE_NAVY_FLOUNDER_DMUB;
1468 dmub_asic = DMUB_ASIC_DCN301;
1469 fw_name_dmub = FIRMWARE_VANGOGH_DMUB;
1471 case CHIP_DIMGREY_CAVEFISH:
1472 dmub_asic = DMUB_ASIC_DCN302;
1473 fw_name_dmub = FIRMWARE_DIMGREY_CAVEFISH_DMUB;
1477 /* ASIC doesn't support DMUB. */
1481 r = request_firmware_direct(&adev->dm.dmub_fw, fw_name_dmub, adev->dev);
1483 DRM_ERROR("DMUB firmware loading failed: %d\n", r);
1487 r = amdgpu_ucode_validate(adev->dm.dmub_fw);
1489 DRM_ERROR("Couldn't validate DMUB firmware: %d\n", r);
1493 hdr = (const struct dmcub_firmware_header_v1_0 *)adev->dm.dmub_fw->data;
1495 if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) {
1496 adev->firmware.ucode[AMDGPU_UCODE_ID_DMCUB].ucode_id =
1497 AMDGPU_UCODE_ID_DMCUB;
1498 adev->firmware.ucode[AMDGPU_UCODE_ID_DMCUB].fw =
1500 adev->firmware.fw_size +=
1501 ALIGN(le32_to_cpu(hdr->inst_const_bytes), PAGE_SIZE);
1503 DRM_INFO("Loading DMUB firmware via PSP: version=0x%08X\n",
1504 adev->dm.dmcub_fw_version);
1507 adev->dm.dmcub_fw_version = le32_to_cpu(hdr->header.ucode_version);
1509 adev->dm.dmub_srv = kzalloc(sizeof(*adev->dm.dmub_srv), GFP_KERNEL);
1510 dmub_srv = adev->dm.dmub_srv;
1513 DRM_ERROR("Failed to allocate DMUB service!\n");
1517 memset(&create_params, 0, sizeof(create_params));
1518 create_params.user_ctx = adev;
1519 create_params.funcs.reg_read = amdgpu_dm_dmub_reg_read;
1520 create_params.funcs.reg_write = amdgpu_dm_dmub_reg_write;
1521 create_params.asic = dmub_asic;
1523 /* Create the DMUB service. */
1524 status = dmub_srv_create(dmub_srv, &create_params);
1525 if (status != DMUB_STATUS_OK) {
1526 DRM_ERROR("Error creating DMUB service: %d\n", status);
1530 /* Calculate the size of all the regions for the DMUB service. */
1531 memset(&region_params, 0, sizeof(region_params));
1533 region_params.inst_const_size = le32_to_cpu(hdr->inst_const_bytes) -
1534 PSP_HEADER_BYTES - PSP_FOOTER_BYTES;
1535 region_params.bss_data_size = le32_to_cpu(hdr->bss_data_bytes);
1536 region_params.vbios_size = adev->bios_size;
1537 region_params.fw_bss_data = region_params.bss_data_size ?
1538 adev->dm.dmub_fw->data +
1539 le32_to_cpu(hdr->header.ucode_array_offset_bytes) +
1540 le32_to_cpu(hdr->inst_const_bytes) : NULL;
1541 region_params.fw_inst_const =
1542 adev->dm.dmub_fw->data +
1543 le32_to_cpu(hdr->header.ucode_array_offset_bytes) +
1546 status = dmub_srv_calc_region_info(dmub_srv, &region_params,
1549 if (status != DMUB_STATUS_OK) {
1550 DRM_ERROR("Error calculating DMUB region info: %d\n", status);
1555 * Allocate a framebuffer based on the total size of all the regions.
1556 * TODO: Move this into GART.
1558 r = amdgpu_bo_create_kernel(adev, region_info.fb_size, PAGE_SIZE,
1559 AMDGPU_GEM_DOMAIN_VRAM, &adev->dm.dmub_bo,
1560 &adev->dm.dmub_bo_gpu_addr,
1561 &adev->dm.dmub_bo_cpu_addr);
1565 /* Rebase the regions on the framebuffer address. */
1566 memset(&fb_params, 0, sizeof(fb_params));
1567 fb_params.cpu_addr = adev->dm.dmub_bo_cpu_addr;
1568 fb_params.gpu_addr = adev->dm.dmub_bo_gpu_addr;
1569 fb_params.region_info = &region_info;
1571 adev->dm.dmub_fb_info =
1572 kzalloc(sizeof(*adev->dm.dmub_fb_info), GFP_KERNEL);
1573 fb_info = adev->dm.dmub_fb_info;
1577 "Failed to allocate framebuffer info for DMUB service!\n");
1581 status = dmub_srv_calc_fb_info(dmub_srv, &fb_params, fb_info);
1582 if (status != DMUB_STATUS_OK) {
1583 DRM_ERROR("Error calculating DMUB FB info: %d\n", status);
1590 static int dm_sw_init(void *handle)
1592 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1595 r = dm_dmub_sw_init(adev);
1599 return load_dmcu_fw(adev);
1602 static int dm_sw_fini(void *handle)
1604 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1606 kfree(adev->dm.dmub_fb_info);
1607 adev->dm.dmub_fb_info = NULL;
1609 if (adev->dm.dmub_srv) {
1610 dmub_srv_destroy(adev->dm.dmub_srv);
1611 adev->dm.dmub_srv = NULL;
1614 release_firmware(adev->dm.dmub_fw);
1615 adev->dm.dmub_fw = NULL;
1617 release_firmware(adev->dm.fw_dmcu);
1618 adev->dm.fw_dmcu = NULL;
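/*
 * Start topology management on every connector whose DC link was detected
 * as an MST branch device.
 */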
1623 static int detect_mst_link_for_all_connectors(struct drm_device *dev)
1625 struct amdgpu_dm_connector *aconnector;
1626 struct drm_connector *connector;
1627 struct drm_connector_list_iter iter;
1630 drm_connector_list_iter_begin(dev, &iter);
1631 drm_for_each_connector_iter(connector, &iter) {
1632 aconnector = to_amdgpu_dm_connector(connector);
1633 if (aconnector->dc_link->type == dc_connection_mst_branch &&
1634 aconnector->mst_mgr.aux) {
1635 DRM_DEBUG_DRIVER("DM_MST: starting TM on aconnector: %p [id: %d]\n",
1637 aconnector->base.base.id);
1639 ret = drm_dp_mst_topology_mgr_set_mst(&aconnector->mst_mgr, true);
1641 DRM_ERROR("DM_MST: Failed to start MST\n");
1642 aconnector->dc_link->type =
1643 dc_connection_single;
1648 drm_connector_list_iter_end(&iter);
1653 static int dm_late_init(void *handle)
1655 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1657 struct dmcu_iram_parameters params;
1658 unsigned int linear_lut[16];
1660 struct dmcu *dmcu = NULL;
1663 dmcu = adev->dm.dc->res_pool->dmcu;
1665 for (i = 0; i < 16; i++)
1666 linear_lut[i] = 0xFFFF * i / 15;
1669 params.backlight_ramping_start = 0xCCCC;
1670 params.backlight_ramping_reduction = 0xCCCCCCCC;
1671 params.backlight_lut_array_size = 16;
1672 params.backlight_lut_array = linear_lut;
1674 /* Min backlight level after ABM reduction. Don't allow below 1%:
1675 * 0xFFFF * 0.01 = 0x28F
1677 params.min_abm_backlight = 0x28F;
1679 /* In the case where abm is implemented on dmcub,
1680 * dmcu object will be null.
1681 * ABM 2.4 and up are implemented on dmcub.
1684 ret = dmcu_load_iram(dmcu, params);
1685 else if (adev->dm.dc->ctx->dmub_srv)
1686 ret = dmub_init_abm_config(adev->dm.dc->res_pool, params);
1691 return detect_mst_link_for_all_connectors(adev_to_drm(adev));
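/*
 * Suspend or resume the MST topology managers across S3. If a manager
 * fails to resume, MST is torn down on that connector and a hotplug
 * event is generated so userspace reprobes it.
 */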
1694 static void s3_handle_mst(struct drm_device *dev, bool suspend)
1696 struct amdgpu_dm_connector *aconnector;
1697 struct drm_connector *connector;
1698 struct drm_connector_list_iter iter;
1699 struct drm_dp_mst_topology_mgr *mgr;
1701 bool need_hotplug = false;
1703 drm_connector_list_iter_begin(dev, &iter);
1704 drm_for_each_connector_iter(connector, &iter) {
1705 aconnector = to_amdgpu_dm_connector(connector);
1706 if (aconnector->dc_link->type != dc_connection_mst_branch ||
1707 aconnector->mst_port)
1710 mgr = &aconnector->mst_mgr;
1713 drm_dp_mst_topology_mgr_suspend(mgr);
1715 ret = drm_dp_mst_topology_mgr_resume(mgr, true);
1717 drm_dp_mst_topology_mgr_set_mst(mgr, false);
1718 need_hotplug = true;
1722 drm_connector_list_iter_end(&iter);
1725 drm_kms_helper_hotplug_event(dev);
1728 static int amdgpu_dm_smu_write_watermarks_table(struct amdgpu_device *adev)
1730 struct smu_context *smu = &adev->smu;
1733 if (!is_support_sw_smu(adev))
1736 /* This interface is for dGPU Navi1x. The Linux dc-pplib interface depends
1737 * on the Windows driver dc implementation.
1738 * For Navi1x, clock settings of dcn watermarks are fixed. the settings
1739 * should be passed to smu during boot up and resume from s3.
1740 * boot up: dc calculate dcn watermark clock settings within dc_create,
1741 * dcn20_resource_construct
1742 * then call pplib functions below to pass the settings to smu:
1743 * smu_set_watermarks_for_clock_ranges
1744 * smu_set_watermarks_table
1745 * navi10_set_watermarks_table
1746 * smu_write_watermarks_table
1748 * For Renoir, clock settings of dcn watermark are also fixed values.
1749 * dc has implemented a different flow for the Windows driver:
1750 * dc_hardware_init / dc_set_power_state
1755 * smu_set_watermarks_for_clock_ranges
1756 * renoir_set_watermarks_table
1757 * smu_write_watermarks_table
1760 * dc_hardware_init -> amdgpu_dm_init
1761 * dc_set_power_state --> dm_resume
1763 * therefore, this function applies to navi10/12/14 but not Renoir
1766 switch (adev->asic_type) {
1775 ret = smu_write_watermarks_table(smu);
1777 DRM_ERROR("Failed to update WMTABLE!\n");
1785 * dm_hw_init() - Initialize DC device
1786 * @handle: The base driver device containing the amdgpu_dm device.
1788 * Initialize the &struct amdgpu_display_manager device. This involves calling
1789 * the initializers of each DM component, then populating the struct with them.
1791 * Although the function implies hardware initialization, both hardware and
1792 * software are initialized here. Splitting them out to their relevant init
1793 * hooks is a future TODO item.
1795 * Some notable things that are initialized here:
1797 * - Display Core, both software and hardware
1798 * - DC modules that we need (freesync and color management)
1799 * - DRM software states
1800 * - Interrupt sources and handlers
1802 * - Debug FS entries, if enabled
1804 static int dm_hw_init(void *handle)
1806 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1807 /* Create DAL display manager */
1808 amdgpu_dm_init(adev);
1809 amdgpu_dm_hpd_init(adev);
1815 * dm_hw_fini() - Teardown DC device
1816 * @handle: The base driver device containing the amdgpu_dm device.
1818 * Teardown components within &struct amdgpu_display_manager that require
1819 * cleanup. This involves cleaning up the DRM device, DC, and any modules that
1820 * were loaded. Also flush IRQ workqueues and disable them.
1822 static int dm_hw_fini(void *handle)
1824 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1826 amdgpu_dm_hpd_fini(adev);
1828 amdgpu_dm_irq_fini(adev);
1829 amdgpu_dm_fini(adev);
1834 static int dm_enable_vblank(struct drm_crtc *crtc);
1835 static void dm_disable_vblank(struct drm_crtc *crtc);
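/*
 * Enable or disable pageflip and vblank interrupts for every stream in
 * the given DC state; used when quiescing and restoring the display
 * around a GPU reset.
 */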
1837 static void dm_gpureset_toggle_interrupts(struct amdgpu_device *adev,
1838 struct dc_state *state, bool enable)
1840 enum dc_irq_source irq_source;
1841 struct amdgpu_crtc *acrtc;
1845 for (i = 0; i < state->stream_count; i++) {
1846 acrtc = get_crtc_by_otg_inst(
1847 adev, state->stream_status[i].primary_otg_inst);
1849 if (acrtc && state->stream_status[i].plane_count != 0) {
1850 irq_source = IRQ_TYPE_PFLIP + acrtc->otg_inst;
1851 rc = dc_interrupt_set(adev->dm.dc, irq_source, enable) ? 0 : -EBUSY;
1852 DRM_DEBUG_VBL("crtc %d - vupdate irq %sabling: r=%d\n",
1853 acrtc->crtc_id, enable ? "en" : "dis", rc);
1855 DRM_WARN("Failed to %s pflip interrupts\n",
1856 enable ? "enable" : "disable");
1859 rc = dm_enable_vblank(&acrtc->base);
1861 DRM_WARN("Failed to enable vblank interrupts\n");
1863 dm_disable_vblank(&acrtc->base);
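/*
 * Commit a copy of the current DC state with every stream (and its
 * planes) removed, leaving the hardware driving no outputs.
 */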
1871 static enum dc_status amdgpu_dm_commit_zero_streams(struct dc *dc)
1873 struct dc_state *context = NULL;
1874 enum dc_status res = DC_ERROR_UNEXPECTED;
1876 struct dc_stream_state *del_streams[MAX_PIPES];
1877 int del_streams_count = 0;
1879 memset(del_streams, 0, sizeof(del_streams));
1881 context = dc_create_state(dc);
1882 if (context == NULL)
1883 goto context_alloc_fail;
1885 dc_resource_state_copy_construct_current(dc, context);
1887 /* First remove from context all streams */
1888 for (i = 0; i < context->stream_count; i++) {
1889 struct dc_stream_state *stream = context->streams[i];
1891 del_streams[del_streams_count++] = stream;
1894 /* Remove all planes for removed streams and then remove the streams */
1895 for (i = 0; i < del_streams_count; i++) {
1896 if (!dc_rem_all_planes_for_stream(dc, del_streams[i], context)) {
1897 res = DC_FAIL_DETACH_SURFACES;
1901 res = dc_remove_stream_from_ctx(dc, context, del_streams[i]);
1907 res = dc_validate_global_state(dc, context, false);
1910 DRM_ERROR("%s:resource validation failed, dc_status:%d\n", __func__, res);
1914 res = dc_commit_state(dc, context);
1917 dc_release_state(context);
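/*
 * Suspend path: during GPU reset the current DC state is cached and all
 * streams are torn down; otherwise the atomic state is saved, MST and
 * IRQs are suspended and DC is put into D3.
 */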
1923 static int dm_suspend(void *handle)
1925 struct amdgpu_device *adev = handle;
1926 struct amdgpu_display_manager *dm = &adev->dm;
1929 if (amdgpu_in_reset(adev)) {
1930 mutex_lock(&dm->dc_lock);
1932 #if defined(CONFIG_DRM_AMD_DC_DCN)
1933 dc_allow_idle_optimizations(adev->dm.dc, false);
1936 dm->cached_dc_state = dc_copy_state(dm->dc->current_state);
1938 dm_gpureset_toggle_interrupts(adev, dm->cached_dc_state, false);
1940 amdgpu_dm_commit_zero_streams(dm->dc);
1942 amdgpu_dm_irq_suspend(adev);
1947 #ifdef CONFIG_DRM_AMD_SECURE_DISPLAY
1948 amdgpu_dm_crtc_secure_display_suspend(adev);
1950 WARN_ON(adev->dm.cached_state);
1951 adev->dm.cached_state = drm_atomic_helper_suspend(adev_to_drm(adev));
1953 s3_handle_mst(adev_to_drm(adev), true);
1955 amdgpu_dm_irq_suspend(adev);
1958 dc_set_power_state(dm->dc, DC_ACPI_CM_POWER_STATE_D3);
1963 static struct amdgpu_dm_connector *
1964 amdgpu_dm_find_first_crtc_matching_connector(struct drm_atomic_state *state,
1965 struct drm_crtc *crtc)
1968 struct drm_connector_state *new_con_state;
1969 struct drm_connector *connector;
1970 struct drm_crtc *crtc_from_state;
1972 for_each_new_connector_in_state(state, connector, new_con_state, i) {
1973 crtc_from_state = new_con_state->crtc;
1975 if (crtc_from_state == crtc)
1976 return to_amdgpu_dm_connector(connector);
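/*
 * For connectors forced on without a physically detected sink: create a
 * sink matching the connector signal type and attempt to read a local
 * EDID.
 */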
1982 static void emulated_link_detect(struct dc_link *link)
1984 struct dc_sink_init_data sink_init_data = { 0 };
1985 struct display_sink_capability sink_caps = { 0 };
1986 enum dc_edid_status edid_status;
1987 struct dc_context *dc_ctx = link->ctx;
1988 struct dc_sink *sink = NULL;
1989 struct dc_sink *prev_sink = NULL;
1991 link->type = dc_connection_none;
1992 prev_sink = link->local_sink;
1995 dc_sink_release(prev_sink);
1997 switch (link->connector_signal) {
1998 case SIGNAL_TYPE_HDMI_TYPE_A: {
1999 sink_caps.transaction_type = DDC_TRANSACTION_TYPE_I2C;
2000 sink_caps.signal = SIGNAL_TYPE_HDMI_TYPE_A;
2004 case SIGNAL_TYPE_DVI_SINGLE_LINK: {
2005 sink_caps.transaction_type = DDC_TRANSACTION_TYPE_I2C;
2006 sink_caps.signal = SIGNAL_TYPE_DVI_SINGLE_LINK;
2010 case SIGNAL_TYPE_DVI_DUAL_LINK: {
2011 sink_caps.transaction_type = DDC_TRANSACTION_TYPE_I2C;
2012 sink_caps.signal = SIGNAL_TYPE_DVI_DUAL_LINK;
2016 case SIGNAL_TYPE_LVDS: {
2017 sink_caps.transaction_type = DDC_TRANSACTION_TYPE_I2C;
2018 sink_caps.signal = SIGNAL_TYPE_LVDS;
2022 case SIGNAL_TYPE_EDP: {
2023 sink_caps.transaction_type =
2024 DDC_TRANSACTION_TYPE_I2C_OVER_AUX;
2025 sink_caps.signal = SIGNAL_TYPE_EDP;
2029 case SIGNAL_TYPE_DISPLAY_PORT: {
2030 sink_caps.transaction_type =
2031 DDC_TRANSACTION_TYPE_I2C_OVER_AUX;
2032 sink_caps.signal = SIGNAL_TYPE_VIRTUAL;
2037 DC_ERROR("Invalid connector type! signal:%d\n",
2038 link->connector_signal);
2042 sink_init_data.link = link;
2043 sink_init_data.sink_signal = sink_caps.signal;
2045 sink = dc_sink_create(&sink_init_data);
2047 DC_ERROR("Failed to create sink!\n");
2051 /* dc_sink_create returns a new reference */
2052 link->local_sink = sink;
2054 edid_status = dm_helpers_read_local_edid(
2059 if (edid_status != EDID_OK)
2060 DC_ERROR("Failed to read EDID");
2064 static void dm_gpureset_commit_state(struct dc_state *dc_state,
2065 struct amdgpu_display_manager *dm)
2068 struct dc_surface_update surface_updates[MAX_SURFACES];
2069 struct dc_plane_info plane_infos[MAX_SURFACES];
2070 struct dc_scaling_info scaling_infos[MAX_SURFACES];
2071 struct dc_flip_addrs flip_addrs[MAX_SURFACES];
2072 struct dc_stream_update stream_update;
2076 bundle = kzalloc(sizeof(*bundle), GFP_KERNEL);
2079 dm_error("Failed to allocate update bundle\n");
2083 for (k = 0; k < dc_state->stream_count; k++) {
2084 bundle->stream_update.stream = dc_state->streams[k];
2086 for (m = 0; m < dc_state->stream_status->plane_count; m++) {
2087 bundle->surface_updates[m].surface =
2088 dc_state->stream_status->plane_states[m];
2089 bundle->surface_updates[m].surface->force_full_update =
2092 dc_commit_updates_for_stream(
2093 dm->dc, bundle->surface_updates,
2094 dc_state->stream_status->plane_count,
2095 dc_state->streams[k], &bundle->stream_update, dc_state);
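/*
 * Send a minimal stream update with dpms_off = true for the stream
 * currently driven by this link, turning the output off.
 */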
2104 static void dm_set_dpms_off(struct dc_link *link)
2106 struct dc_stream_state *stream_state;
2107 struct amdgpu_dm_connector *aconnector = link->priv;
2108 struct amdgpu_device *adev = drm_to_adev(aconnector->base.dev);
2109 struct dc_stream_update stream_update;
2110 bool dpms_off = true;
2112 memset(&stream_update, 0, sizeof(stream_update));
2113 stream_update.dpms_off = &dpms_off;
2115 mutex_lock(&adev->dm.dc_lock);
2116 stream_state = dc_stream_find_from_link(link);
2118 if (stream_state == NULL) {
2119 DRM_DEBUG_DRIVER("Error finding stream state associated with link!\n");
2120 mutex_unlock(&adev->dm.dc_lock);
2124 stream_update.stream = stream_state;
2125 dc_commit_updates_for_stream(stream_state->ctx->dc, NULL, 0,
2126 stream_state, &stream_update,
2127 stream_state->ctx->dc->current_state);
2128 mutex_unlock(&adev->dm.dc_lock);
2131 static int dm_resume(void *handle)
2133 struct amdgpu_device *adev = handle;
2134 struct drm_device *ddev = adev_to_drm(adev);
2135 struct amdgpu_display_manager *dm = &adev->dm;
2136 struct amdgpu_dm_connector *aconnector;
2137 struct drm_connector *connector;
2138 struct drm_connector_list_iter iter;
2139 struct drm_crtc *crtc;
2140 struct drm_crtc_state *new_crtc_state;
2141 struct dm_crtc_state *dm_new_crtc_state;
2142 struct drm_plane *plane;
2143 struct drm_plane_state *new_plane_state;
2144 struct dm_plane_state *dm_new_plane_state;
2145 struct dm_atomic_state *dm_state = to_dm_atomic_state(dm->atomic_obj.state);
2146 enum dc_connection_type new_connection_type = dc_connection_none;
2147 struct dc_state *dc_state;
2150 if (amdgpu_in_reset(adev)) {
2151 dc_state = dm->cached_dc_state;
2153 r = dm_dmub_hw_init(adev);
2155 DRM_ERROR("DMUB interface failed to initialize: status=%d\n", r);
2157 dc_set_power_state(dm->dc, DC_ACPI_CM_POWER_STATE_D0);
2160 amdgpu_dm_irq_resume_early(adev);
2162 for (i = 0; i < dc_state->stream_count; i++) {
2163 dc_state->streams[i]->mode_changed = true;
2164 for (j = 0; j < dc_state->stream_status->plane_count; j++) {
2165 dc_state->stream_status->plane_states[j]->update_flags.raw
2170 WARN_ON(!dc_commit_state(dm->dc, dc_state));
2172 dm_gpureset_commit_state(dm->cached_dc_state, dm);
2174 dm_gpureset_toggle_interrupts(adev, dm->cached_dc_state, true);
2176 dc_release_state(dm->cached_dc_state);
2177 dm->cached_dc_state = NULL;
2179 amdgpu_dm_irq_resume_late(adev);
2181 mutex_unlock(&dm->dc_lock);
2185 /* Recreate dc_state - DC invalidates it when setting power state to S3. */
2186 dc_release_state(dm_state->context);
2187 dm_state->context = dc_create_state(dm->dc);
2188 /* TODO: Remove dc_state->dccg, use dc->dccg directly. */
2189 dc_resource_state_construct(dm->dc, dm_state->context);
2191 /* Before powering on DC we need to re-initialize DMUB. */
2192 r = dm_dmub_hw_init(adev);
2194 DRM_ERROR("DMUB interface failed to initialize: status=%d\n", r);
2196 /* power on hardware */
2197 dc_set_power_state(dm->dc, DC_ACPI_CM_POWER_STATE_D0);
2199 /* program HPD filter */
2203 * Early enable HPD Rx IRQ; this should be done before setting the mode, as short
2204 * pulse interrupts are used for MST.
2206 amdgpu_dm_irq_resume_early(adev);
2208 /* On resume we need to rewrite the MSTM control bits to enable MST */
2209 s3_handle_mst(ddev, false);
2212 drm_connector_list_iter_begin(ddev, &iter);
2213 drm_for_each_connector_iter(connector, &iter) {
2214 aconnector = to_amdgpu_dm_connector(connector);
2217 * This is the case when traversing through already-created
2218 * MST connectors; they should be skipped.
2220 if (aconnector->mst_port)
2223 mutex_lock(&aconnector->hpd_lock);
2224 if (!dc_link_detect_sink(aconnector->dc_link, &new_connection_type))
2225 DRM_ERROR("KMS: Failed to detect connector\n");
2227 if (aconnector->base.force && new_connection_type == dc_connection_none)
2228 emulated_link_detect(aconnector->dc_link);
2230 dc_link_detect(aconnector->dc_link, DETECT_REASON_HPD);
2232 if (aconnector->fake_enable && aconnector->dc_link->local_sink)
2233 aconnector->fake_enable = false;
2235 if (aconnector->dc_sink)
2236 dc_sink_release(aconnector->dc_sink);
2237 aconnector->dc_sink = NULL;
2238 amdgpu_dm_update_connector_after_detect(aconnector);
2239 mutex_unlock(&aconnector->hpd_lock);
2241 drm_connector_list_iter_end(&iter);
2243 /* Force mode set in atomic commit */
2244 for_each_new_crtc_in_state(dm->cached_state, crtc, new_crtc_state, i)
2245 new_crtc_state->active_changed = true;
2248 * atomic_check is expected to create the dc states. We need to release
2249 * them here, since they were duplicated as part of the suspend
2252 for_each_new_crtc_in_state(dm->cached_state, crtc, new_crtc_state, i) {
2253 dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
2254 if (dm_new_crtc_state->stream) {
2255 WARN_ON(kref_read(&dm_new_crtc_state->stream->refcount) > 1);
2256 dc_stream_release(dm_new_crtc_state->stream);
2257 dm_new_crtc_state->stream = NULL;
2261 for_each_new_plane_in_state(dm->cached_state, plane, new_plane_state, i) {
2262 dm_new_plane_state = to_dm_plane_state(new_plane_state);
2263 if (dm_new_plane_state->dc_state) {
2264 WARN_ON(kref_read(&dm_new_plane_state->dc_state->refcount) > 1);
2265 dc_plane_state_release(dm_new_plane_state->dc_state);
2266 dm_new_plane_state->dc_state = NULL;
2270 drm_atomic_helper_resume(ddev, dm->cached_state);
2272 dm->cached_state = NULL;
2274 #ifdef CONFIG_DRM_AMD_SECURE_DISPLAY
2275 amdgpu_dm_crtc_secure_display_resume(adev);
2278 amdgpu_dm_irq_resume_late(adev);
2280 amdgpu_dm_smu_write_watermarks_table(adev);
2288 * DM (and consequently DC) is registered in the amdgpu base driver as an IP
2289 * block. When CONFIG_DRM_AMD_DC is enabled, the DM device IP block is added to
2290 * the base driver's device list to be initialized and torn down accordingly.
2292 * The functions to do so are provided as hooks in &struct amd_ip_funcs.
2295 static const struct amd_ip_funcs amdgpu_dm_funcs = {
2297 .early_init = dm_early_init,
2298 .late_init = dm_late_init,
2299 .sw_init = dm_sw_init,
2300 .sw_fini = dm_sw_fini,
2301 .hw_init = dm_hw_init,
2302 .hw_fini = dm_hw_fini,
2303 .suspend = dm_suspend,
2304 .resume = dm_resume,
2305 .is_idle = dm_is_idle,
2306 .wait_for_idle = dm_wait_for_idle,
2307 .check_soft_reset = dm_check_soft_reset,
2308 .soft_reset = dm_soft_reset,
2309 .set_clockgating_state = dm_set_clockgating_state,
2310 .set_powergating_state = dm_set_powergating_state,
2313 const struct amdgpu_ip_block_version dm_ip_block =
2315 .type = AMD_IP_BLOCK_TYPE_DCE,
2319 .funcs = &amdgpu_dm_funcs,
2329 static const struct drm_mode_config_funcs amdgpu_dm_mode_funcs = {
2330 .fb_create = amdgpu_display_user_framebuffer_create,
2331 .get_format_info = amd_get_format_info,
2332 .output_poll_changed = drm_fb_helper_output_poll_changed,
2333 .atomic_check = amdgpu_dm_atomic_check,
2334 .atomic_commit = drm_atomic_helper_commit,
2337 static struct drm_mode_config_helper_funcs amdgpu_dm_mode_config_helperfuncs = {
2338 .atomic_commit_tail = amdgpu_dm_atomic_commit_tail
2341 static void update_connector_ext_caps(struct amdgpu_dm_connector *aconnector)
2343 u32 max_cll, min_cll, max, min, q, r;
2344 struct amdgpu_dm_backlight_caps *caps;
2345 struct amdgpu_display_manager *dm;
2346 struct drm_connector *conn_base;
2347 struct amdgpu_device *adev;
2348 struct dc_link *link = NULL;
2349 static const u8 pre_computed_values[] = {
2350 50, 51, 52, 53, 55, 56, 57, 58, 59, 61, 62, 63, 65, 66, 68, 69,
2351 71, 72, 74, 75, 77, 79, 81, 82, 84, 86, 88, 90, 92, 94, 96, 98};
2353 if (!aconnector || !aconnector->dc_link)
2356 link = aconnector->dc_link;
2357 if (link->connector_signal != SIGNAL_TYPE_EDP)
2360 conn_base = &aconnector->base;
2361 adev = drm_to_adev(conn_base->dev);
2363 caps = &dm->backlight_caps;
2364 caps->ext_caps = &aconnector->dc_link->dpcd_sink_ext_caps;
2365 caps->aux_support = false;
2366 max_cll = conn_base->hdr_sink_metadata.hdmi_type1.max_cll;
2367 min_cll = conn_base->hdr_sink_metadata.hdmi_type1.min_cll;
2369 if (caps->ext_caps->bits.oled == 1 ||
2370 caps->ext_caps->bits.sdr_aux_backlight_control == 1 ||
2371 caps->ext_caps->bits.hdr_aux_backlight_control == 1)
2372 caps->aux_support = true;
2374 if (amdgpu_backlight == 0)
2375 caps->aux_support = false;
2376 else if (amdgpu_backlight == 1)
2377 caps->aux_support = true;
2379 /* From the specification (CTA-861-G), for calculating the maximum
2380 * luminance we need to use:
2381 * Luminance = 50*2**(CV/32)
2382 * Where CV is a one-byte value.
2383 * Calculating this expression would normally require floating-point precision;
2384 * to avoid that complexity, we take advantage of the fact that CV is divided
2385 * by a constant. From Euclid's division algorithm, we know that CV
2386 * can be written as: CV = 32*q + r. Next, we replace CV in the
2387 * Luminance expression and get 50*(2**q)*(2**(r/32)), hence we just
2388 * need to pre-compute 50*(2**(r/32)). For pre-computing these values
2389 * we used the following Ruby line:
2390 * (0...32).each {|cv| puts (50*2**(cv/32.0)).round}
2391 * The results of the above expressions can be verified at
2392 * pre_computed_values.
2396 max = (1 << q) * pre_computed_values[r];
2398 // min luminance: maxLum * (CV/255)^2 / 100
2399 q = DIV_ROUND_CLOSEST(min_cll, 255);
2400 min = max * DIV_ROUND_CLOSEST((q * q), 100);
2402 caps->aux_max_input_signal = max;
2403 caps->aux_min_input_signal = min;
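/*
 * Illustrative example (not from the original source): for a hypothetical
 * max_cll of 70, CV = 32*q + r gives q = 2 and r = 6, so
 * max = (1 << 2) * pre_computed_values[6] = 4 * 57 = 228, which closely
 * matches 50 * 2**(70/32) ~= 227.8 without any floating-point math.
 */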
2406 void amdgpu_dm_update_connector_after_detect(
2407 struct amdgpu_dm_connector *aconnector)
2409 struct drm_connector *connector = &aconnector->base;
2410 struct drm_device *dev = connector->dev;
2411 struct dc_sink *sink;
2413 /* MST handled by drm_mst framework */
2414 if (aconnector->mst_mgr.mst_state == true)
2417 sink = aconnector->dc_link->local_sink;
2419 dc_sink_retain(sink);
2422 * The EDID-managed connector gets its first update only in the mode_valid hook, and then
2423 * the connector sink is set to either a fake or a physical sink depending on link status.
2424 * Skip if this was already done during boot.
2426 if (aconnector->base.force != DRM_FORCE_UNSPECIFIED
2427 && aconnector->dc_em_sink) {
2430 * For S3 resume with headless, use the emulated sink (dc_em_sink) to fake a stream
2431 * because on resume connector->sink is set to NULL.
2433 mutex_lock(&dev->mode_config.mutex);
2436 if (aconnector->dc_sink) {
2437 amdgpu_dm_update_freesync_caps(connector, NULL);
2439 * The retain and release below are used to
2440 * bump up the refcount for the sink because the link doesn't point
2441 * to it anymore after disconnect; otherwise, on the next crtc-to-connector
2442 * reshuffle by the UMD we would run into an unwanted dc_sink release.
2444 dc_sink_release(aconnector->dc_sink);
2446 aconnector->dc_sink = sink;
2447 dc_sink_retain(aconnector->dc_sink);
2448 amdgpu_dm_update_freesync_caps(connector,
2451 amdgpu_dm_update_freesync_caps(connector, NULL);
2452 if (!aconnector->dc_sink) {
2453 aconnector->dc_sink = aconnector->dc_em_sink;
2454 dc_sink_retain(aconnector->dc_sink);
2458 mutex_unlock(&dev->mode_config.mutex);
2461 dc_sink_release(sink);
2466 * TODO: temporary guard while looking for a proper fix.
2467 * If this sink is an MST sink, we should not do anything.
2469 if (sink && sink->sink_signal == SIGNAL_TYPE_DISPLAY_PORT_MST) {
2470 dc_sink_release(sink);
2474 if (aconnector->dc_sink == sink) {
2476 * We got a DP short pulse (Link Loss, DP CTS, etc...).
2479 DRM_DEBUG_DRIVER("DCHPD: connector_id=%d: dc_sink didn't change.\n",
2480 aconnector->connector_id);
2482 dc_sink_release(sink);
2486 DRM_DEBUG_DRIVER("DCHPD: connector_id=%d: Old sink=%p New sink=%p\n",
2487 aconnector->connector_id, aconnector->dc_sink, sink);
2489 mutex_lock(&dev->mode_config.mutex);
2492 * 1. Update status of the drm connector
2493 * 2. Send an event and let userspace tell us what to do
2497 * TODO: check if we still need the S3 mode update workaround.
2498 * If yes, put it here.
2500 if (aconnector->dc_sink) {
2501 amdgpu_dm_update_freesync_caps(connector, NULL);
2502 dc_sink_release(aconnector->dc_sink);
2505 aconnector->dc_sink = sink;
2506 dc_sink_retain(aconnector->dc_sink);
2507 if (sink->dc_edid.length == 0) {
2508 aconnector->edid = NULL;
2509 if (aconnector->dc_link->aux_mode) {
2510 drm_dp_cec_unset_edid(
2511 &aconnector->dm_dp_aux.aux);
2515 (struct edid *)sink->dc_edid.raw_edid;
2517 drm_connector_update_edid_property(connector,
2519 if (aconnector->dc_link->aux_mode)
2520 drm_dp_cec_set_edid(&aconnector->dm_dp_aux.aux,
2524 amdgpu_dm_update_freesync_caps(connector, aconnector->edid);
2525 update_connector_ext_caps(aconnector);
2527 drm_dp_cec_unset_edid(&aconnector->dm_dp_aux.aux);
2528 amdgpu_dm_update_freesync_caps(connector, NULL);
2529 drm_connector_update_edid_property(connector, NULL);
2530 aconnector->num_modes = 0;
2531 dc_sink_release(aconnector->dc_sink);
2532 aconnector->dc_sink = NULL;
2533 aconnector->edid = NULL;
2534 #ifdef CONFIG_DRM_AMD_DC_HDCP
2535 /* Set CP to DESIRED if it was ENABLED, so we can re-enable it on hotplug */
2536 if (connector->state->content_protection == DRM_MODE_CONTENT_PROTECTION_ENABLED)
2537 connector->state->content_protection = DRM_MODE_CONTENT_PROTECTION_DESIRED;
2541 mutex_unlock(&dev->mode_config.mutex);
2543 update_subconnector_property(aconnector);
2546 dc_sink_release(sink);
2549 static void handle_hpd_irq(void *param)
2551 struct amdgpu_dm_connector *aconnector = (struct amdgpu_dm_connector *)param;
2552 struct drm_connector *connector = &aconnector->base;
2553 struct drm_device *dev = connector->dev;
2554 enum dc_connection_type new_connection_type = dc_connection_none;
2555 #ifdef CONFIG_DRM_AMD_DC_HDCP
2556 struct amdgpu_device *adev = drm_to_adev(dev);
2557 struct dm_connector_state *dm_con_state = to_dm_connector_state(connector->state);
2561 * In case of failure or MST, there is no need to update the connector status or notify the OS,
2562 * since (in the MST case) MST handles this in its own context.
2564 mutex_lock(&aconnector->hpd_lock);
2566 #ifdef CONFIG_DRM_AMD_DC_HDCP
2567 if (adev->dm.hdcp_workqueue) {
2568 hdcp_reset_display(adev->dm.hdcp_workqueue, aconnector->dc_link->link_index);
2569 dm_con_state->update_hdcp = true;
2572 if (aconnector->fake_enable)
2573 aconnector->fake_enable = false;
2575 if (!dc_link_detect_sink(aconnector->dc_link, &new_connection_type))
2576 DRM_ERROR("KMS: Failed to detect connector\n");
2578 if (aconnector->base.force && new_connection_type == dc_connection_none) {
2579 emulated_link_detect(aconnector->dc_link);
2582 drm_modeset_lock_all(dev);
2583 dm_restore_drm_connector_state(dev, connector);
2584 drm_modeset_unlock_all(dev);
2586 if (aconnector->base.force == DRM_FORCE_UNSPECIFIED)
2587 drm_kms_helper_hotplug_event(dev);
2589 } else if (dc_link_detect(aconnector->dc_link, DETECT_REASON_HPD)) {
2590 if (new_connection_type == dc_connection_none &&
2591 aconnector->dc_link->type == dc_connection_none)
2592 dm_set_dpms_off(aconnector->dc_link);
2594 amdgpu_dm_update_connector_after_detect(aconnector);
2596 drm_modeset_lock_all(dev);
2597 dm_restore_drm_connector_state(dev, connector);
2598 drm_modeset_unlock_all(dev);
2600 if (aconnector->base.force == DRM_FORCE_UNSPECIFIED)
2601 drm_kms_helper_hotplug_event(dev);
2603 mutex_unlock(&aconnector->hpd_lock);
2607 static void dm_handle_hpd_rx_irq(struct amdgpu_dm_connector *aconnector)
2609 uint8_t esi[DP_PSR_ERROR_STATUS - DP_SINK_COUNT_ESI] = { 0 };
2611 bool new_irq_handled = false;
2613 int dpcd_bytes_to_read;
2615 const int max_process_count = 30;
2616 int process_count = 0;
2618 const struct dc_link_status *link_status = dc_link_get_status(aconnector->dc_link);
2620 if (link_status->dpcd_caps->dpcd_rev.raw < 0x12) {
2621 dpcd_bytes_to_read = DP_LANE0_1_STATUS - DP_SINK_COUNT;
2622 /* DPCD 0x200 - 0x201 for downstream IRQ */
2623 dpcd_addr = DP_SINK_COUNT;
2625 dpcd_bytes_to_read = DP_PSR_ERROR_STATUS - DP_SINK_COUNT_ESI;
2626 /* DPCD 0x2002 - 0x2005 for downstream IRQ */
2627 dpcd_addr = DP_SINK_COUNT_ESI;
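/*
 * For reference (illustrative note, assuming the standard DPCD register
 * layout from drm_dp_helper.h): DP_SINK_COUNT is 0x200 and
 * DP_LANE0_1_STATUS is 0x202, so the pre-1.2 path above reads 2 bytes
 * (0x200-0x201), while DP_SINK_COUNT_ESI is 0x2002 and
 * DP_PSR_ERROR_STATUS is 0x2006, so the ESI path reads 4 bytes
 * (0x2002-0x2005), matching the size of the esi[] buffer.
 */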
2630 dret = drm_dp_dpcd_read(
2631 &aconnector->dm_dp_aux.aux,
2634 dpcd_bytes_to_read);
2636 while (dret == dpcd_bytes_to_read &&
2637 process_count < max_process_count) {
2643 DRM_DEBUG_DRIVER("ESI %02x %02x %02x\n", esi[0], esi[1], esi[2]);
2644 /* handle HPD short pulse irq */
2645 if (aconnector->mst_mgr.mst_state)
2647 &aconnector->mst_mgr,
2651 if (new_irq_handled) {
2652 /* ACK at DPCD to notify the downstream device */
2653 const int ack_dpcd_bytes_to_write =
2654 dpcd_bytes_to_read - 1;
2656 for (retry = 0; retry < 3; retry++) {
2659 wret = drm_dp_dpcd_write(
2660 &aconnector->dm_dp_aux.aux,
2663 ack_dpcd_bytes_to_write);
2664 if (wret == ack_dpcd_bytes_to_write)
2668 /* check if there is new irq to be handled */
2669 dret = drm_dp_dpcd_read(
2670 &aconnector->dm_dp_aux.aux,
2673 dpcd_bytes_to_read);
2675 new_irq_handled = false;
2681 if (process_count == max_process_count)
2682 DRM_DEBUG_DRIVER("Loop exceeded max iterations\n");
2685 static void handle_hpd_rx_irq(void *param)
2687 struct amdgpu_dm_connector *aconnector = (struct amdgpu_dm_connector *)param;
2688 struct drm_connector *connector = &aconnector->base;
2689 struct drm_device *dev = connector->dev;
2690 struct dc_link *dc_link = aconnector->dc_link;
2691 bool is_mst_root_connector = aconnector->mst_mgr.mst_state;
2692 bool result = false;
2693 enum dc_connection_type new_connection_type = dc_connection_none;
2694 struct amdgpu_device *adev = drm_to_adev(dev);
2695 union hpd_irq_data hpd_irq_data;
2697 memset(&hpd_irq_data, 0, sizeof(hpd_irq_data));
2700 * TODO: Temporarily add a mutex so the HPD interrupt does not run into a GPIO
2701 * conflict; once the i2c helper is implemented, this mutex should be
2704 if (dc_link->type != dc_connection_mst_branch)
2705 mutex_lock(&aconnector->hpd_lock);
2707 read_hpd_rx_irq_data(dc_link, &hpd_irq_data);
2709 if ((dc_link->cur_link_settings.lane_count != LANE_COUNT_UNKNOWN) ||
2710 (dc_link->type == dc_connection_mst_branch)) {
2711 if (hpd_irq_data.bytes.device_service_irq.bits.UP_REQ_MSG_RDY) {
2713 dm_handle_hpd_rx_irq(aconnector);
2715 } else if (hpd_irq_data.bytes.device_service_irq.bits.DOWN_REP_MSG_RDY) {
2717 dm_handle_hpd_rx_irq(aconnector);
2722 mutex_lock(&adev->dm.dc_lock);
2723 #ifdef CONFIG_DRM_AMD_DC_HDCP
2724 result = dc_link_handle_hpd_rx_irq(dc_link, &hpd_irq_data, NULL);
2726 result = dc_link_handle_hpd_rx_irq(dc_link, NULL, NULL);
2728 mutex_unlock(&adev->dm.dc_lock);
2731 if (result && !is_mst_root_connector) {
2732 /* Downstream Port status changed. */
2733 if (!dc_link_detect_sink(dc_link, &new_connection_type))
2734 DRM_ERROR("KMS: Failed to detect connector\n");
2736 if (aconnector->base.force && new_connection_type == dc_connection_none) {
2737 emulated_link_detect(dc_link);
2739 if (aconnector->fake_enable)
2740 aconnector->fake_enable = false;
2742 amdgpu_dm_update_connector_after_detect(aconnector);
2745 drm_modeset_lock_all(dev);
2746 dm_restore_drm_connector_state(dev, connector);
2747 drm_modeset_unlock_all(dev);
2749 drm_kms_helper_hotplug_event(dev);
2750 } else if (dc_link_detect(dc_link, DETECT_REASON_HPDRX)) {
2752 if (aconnector->fake_enable)
2753 aconnector->fake_enable = false;
2755 amdgpu_dm_update_connector_after_detect(aconnector);
2758 drm_modeset_lock_all(dev);
2759 dm_restore_drm_connector_state(dev, connector);
2760 drm_modeset_unlock_all(dev);
2762 drm_kms_helper_hotplug_event(dev);
2765 #ifdef CONFIG_DRM_AMD_DC_HDCP
2766 if (hpd_irq_data.bytes.device_service_irq.bits.CP_IRQ) {
2767 if (adev->dm.hdcp_workqueue)
2768 hdcp_handle_cpirq(adev->dm.hdcp_workqueue, aconnector->base.index);
2772 if (dc_link->type != dc_connection_mst_branch) {
2773 drm_dp_cec_irq(&aconnector->dm_dp_aux.aux);
2774 mutex_unlock(&aconnector->hpd_lock);
2778 static void register_hpd_handlers(struct amdgpu_device *adev)
2780 struct drm_device *dev = adev_to_drm(adev);
2781 struct drm_connector *connector;
2782 struct amdgpu_dm_connector *aconnector;
2783 const struct dc_link *dc_link;
2784 struct dc_interrupt_params int_params = {0};
2786 int_params.requested_polarity = INTERRUPT_POLARITY_DEFAULT;
2787 int_params.current_polarity = INTERRUPT_POLARITY_DEFAULT;
2789 list_for_each_entry(connector,
2790 &dev->mode_config.connector_list, head) {
2792 aconnector = to_amdgpu_dm_connector(connector);
2793 dc_link = aconnector->dc_link;
2795 if (DC_IRQ_SOURCE_INVALID != dc_link->irq_source_hpd) {
2796 int_params.int_context = INTERRUPT_LOW_IRQ_CONTEXT;
2797 int_params.irq_source = dc_link->irq_source_hpd;
2799 amdgpu_dm_irq_register_interrupt(adev, &int_params,
2801 (void *) aconnector);
2804 if (DC_IRQ_SOURCE_INVALID != dc_link->irq_source_hpd_rx) {
2806 /* Also register for DP short pulse (hpd_rx). */
2807 int_params.int_context = INTERRUPT_LOW_IRQ_CONTEXT;
2808 int_params.irq_source = dc_link->irq_source_hpd_rx;
2810 amdgpu_dm_irq_register_interrupt(adev, &int_params,
2812 (void *) aconnector);
2817 #if defined(CONFIG_DRM_AMD_DC_SI)
2818 /* Register IRQ sources and initialize IRQ callbacks */
2819 static int dce60_register_irq_handlers(struct amdgpu_device *adev)
2821 struct dc *dc = adev->dm.dc;
2822 struct common_irq_params *c_irq_params;
2823 struct dc_interrupt_params int_params = {0};
2826 unsigned client_id = AMDGPU_IRQ_CLIENTID_LEGACY;
2828 int_params.requested_polarity = INTERRUPT_POLARITY_DEFAULT;
2829 int_params.current_polarity = INTERRUPT_POLARITY_DEFAULT;
2832 * Actions of amdgpu_irq_add_id():
2833 * 1. Register a set() function with base driver.
2834 * Base driver will call set() function to enable/disable an
2835 * interrupt in DC hardware.
2836 * 2. Register amdgpu_dm_irq_handler().
2837 * Base driver will call amdgpu_dm_irq_handler() for ALL interrupts
2838 * coming from DC hardware.
2839 * amdgpu_dm_irq_handler() will re-direct the interrupt to DC
2840 * for acknowledging and handling. */
2842 /* Use VBLANK interrupt */
2843 for (i = 0; i < adev->mode_info.num_crtc; i++) {
2844 r = amdgpu_irq_add_id(adev, client_id, i + 1, &adev->crtc_irq);
2846 DRM_ERROR("Failed to add crtc irq id!\n");
2850 int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
2851 int_params.irq_source =
2852 dc_interrupt_to_irq_source(dc, i + 1, 0);
2854 c_irq_params = &adev->dm.vblank_params[int_params.irq_source - DC_IRQ_SOURCE_VBLANK1];
2856 c_irq_params->adev = adev;
2857 c_irq_params->irq_src = int_params.irq_source;
2859 amdgpu_dm_irq_register_interrupt(adev, &int_params,
2860 dm_crtc_high_irq, c_irq_params);
2863 /* Use GRPH_PFLIP interrupt */
2864 for (i = VISLANDS30_IV_SRCID_D1_GRPH_PFLIP;
2865 i <= VISLANDS30_IV_SRCID_D6_GRPH_PFLIP; i += 2) {
2866 r = amdgpu_irq_add_id(adev, client_id, i, &adev->pageflip_irq);
2868 DRM_ERROR("Failed to add page flip irq id!\n");
2872 int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
2873 int_params.irq_source =
2874 dc_interrupt_to_irq_source(dc, i, 0);
2876 c_irq_params = &adev->dm.pflip_params[int_params.irq_source - DC_IRQ_SOURCE_PFLIP_FIRST];
2878 c_irq_params->adev = adev;
2879 c_irq_params->irq_src = int_params.irq_source;
2881 amdgpu_dm_irq_register_interrupt(adev, &int_params,
2882 dm_pflip_high_irq, c_irq_params);
2887 r = amdgpu_irq_add_id(adev, client_id,
2888 VISLANDS30_IV_SRCID_HOTPLUG_DETECT_A, &adev->hpd_irq);
2890 DRM_ERROR("Failed to add hpd irq id!\n");
2894 register_hpd_handlers(adev);
2900 /* Register IRQ sources and initialize IRQ callbacks */
2901 static int dce110_register_irq_handlers(struct amdgpu_device *adev)
2903 struct dc *dc = adev->dm.dc;
2904 struct common_irq_params *c_irq_params;
2905 struct dc_interrupt_params int_params = {0};
2908 unsigned client_id = AMDGPU_IRQ_CLIENTID_LEGACY;
2910 if (adev->asic_type >= CHIP_VEGA10)
2911 client_id = SOC15_IH_CLIENTID_DCE;
2913 int_params.requested_polarity = INTERRUPT_POLARITY_DEFAULT;
2914 int_params.current_polarity = INTERRUPT_POLARITY_DEFAULT;
2917 * Actions of amdgpu_irq_add_id():
2918 * 1. Register a set() function with base driver.
2919 * Base driver will call set() function to enable/disable an
2920 * interrupt in DC hardware.
2921 * 2. Register amdgpu_dm_irq_handler().
2922 * Base driver will call amdgpu_dm_irq_handler() for ALL interrupts
2923 * coming from DC hardware.
2924 * amdgpu_dm_irq_handler() will re-direct the interrupt to DC
2925 * for acknowledging and handling. */
2927 /* Use VBLANK interrupt */
2928 for (i = VISLANDS30_IV_SRCID_D1_VERTICAL_INTERRUPT0; i <= VISLANDS30_IV_SRCID_D6_VERTICAL_INTERRUPT0; i++) {
2929 r = amdgpu_irq_add_id(adev, client_id, i, &adev->crtc_irq);
2931 DRM_ERROR("Failed to add crtc irq id!\n");
2935 int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
2936 int_params.irq_source =
2937 dc_interrupt_to_irq_source(dc, i, 0);
2939 c_irq_params = &adev->dm.vblank_params[int_params.irq_source - DC_IRQ_SOURCE_VBLANK1];
2941 c_irq_params->adev = adev;
2942 c_irq_params->irq_src = int_params.irq_source;
2944 amdgpu_dm_irq_register_interrupt(adev, &int_params,
2945 dm_crtc_high_irq, c_irq_params);
2948 /* Use VUPDATE interrupt */
2949 for (i = VISLANDS30_IV_SRCID_D1_V_UPDATE_INT; i <= VISLANDS30_IV_SRCID_D6_V_UPDATE_INT; i += 2) {
2950 r = amdgpu_irq_add_id(adev, client_id, i, &adev->vupdate_irq);
2952 DRM_ERROR("Failed to add vupdate irq id!\n");
2956 int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
2957 int_params.irq_source =
2958 dc_interrupt_to_irq_source(dc, i, 0);
2960 c_irq_params = &adev->dm.vupdate_params[int_params.irq_source - DC_IRQ_SOURCE_VUPDATE1];
2962 c_irq_params->adev = adev;
2963 c_irq_params->irq_src = int_params.irq_source;
2965 amdgpu_dm_irq_register_interrupt(adev, &int_params,
2966 dm_vupdate_high_irq, c_irq_params);
2969 /* Use GRPH_PFLIP interrupt */
2970 for (i = VISLANDS30_IV_SRCID_D1_GRPH_PFLIP;
2971 i <= VISLANDS30_IV_SRCID_D6_GRPH_PFLIP; i += 2) {
2972 r = amdgpu_irq_add_id(adev, client_id, i, &adev->pageflip_irq);
2974 DRM_ERROR("Failed to add page flip irq id!\n");
2978 int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
2979 int_params.irq_source =
2980 dc_interrupt_to_irq_source(dc, i, 0);
2982 c_irq_params = &adev->dm.pflip_params[int_params.irq_source - DC_IRQ_SOURCE_PFLIP_FIRST];
2984 c_irq_params->adev = adev;
2985 c_irq_params->irq_src = int_params.irq_source;
2987 amdgpu_dm_irq_register_interrupt(adev, &int_params,
2988 dm_pflip_high_irq, c_irq_params);
2993 r = amdgpu_irq_add_id(adev, client_id,
2994 VISLANDS30_IV_SRCID_HOTPLUG_DETECT_A, &adev->hpd_irq);
2996 DRM_ERROR("Failed to add hpd irq id!\n");
3000 register_hpd_handlers(adev);
3005 #if defined(CONFIG_DRM_AMD_DC_DCN)
3006 /* Register IRQ sources and initialize IRQ callbacks */
3007 static int dcn10_register_irq_handlers(struct amdgpu_device *adev)
3009 struct dc *dc = adev->dm.dc;
3010 struct common_irq_params *c_irq_params;
3011 struct dc_interrupt_params int_params = {0};
3014 #if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
3015 static const unsigned int vrtl_int_srcid[] = {
3016 DCN_1_0__SRCID__OTG1_VERTICAL_INTERRUPT0_CONTROL,
3017 DCN_1_0__SRCID__OTG2_VERTICAL_INTERRUPT0_CONTROL,
3018 DCN_1_0__SRCID__OTG3_VERTICAL_INTERRUPT0_CONTROL,
3019 DCN_1_0__SRCID__OTG4_VERTICAL_INTERRUPT0_CONTROL,
3020 DCN_1_0__SRCID__OTG5_VERTICAL_INTERRUPT0_CONTROL,
3021 DCN_1_0__SRCID__OTG6_VERTICAL_INTERRUPT0_CONTROL
3025 int_params.requested_polarity = INTERRUPT_POLARITY_DEFAULT;
3026 int_params.current_polarity = INTERRUPT_POLARITY_DEFAULT;
3029 * Actions of amdgpu_irq_add_id():
3030 * 1. Register a set() function with base driver.
3031 * Base driver will call set() function to enable/disable an
3032 * interrupt in DC hardware.
3033 * 2. Register amdgpu_dm_irq_handler().
3034 * Base driver will call amdgpu_dm_irq_handler() for ALL interrupts
3035 * coming from DC hardware.
3036 * amdgpu_dm_irq_handler() will re-direct the interrupt to DC
3037 * for acknowledging and handling.
3040 /* Use VSTARTUP interrupt */
3041 for (i = DCN_1_0__SRCID__DC_D1_OTG_VSTARTUP;
3042 i <= DCN_1_0__SRCID__DC_D1_OTG_VSTARTUP + adev->mode_info.num_crtc - 1;
3044 r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_DCE, i, &adev->crtc_irq);
3047 DRM_ERROR("Failed to add crtc irq id!\n");
3051 int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
3052 int_params.irq_source =
3053 dc_interrupt_to_irq_source(dc, i, 0);
3055 c_irq_params = &adev->dm.vblank_params[int_params.irq_source - DC_IRQ_SOURCE_VBLANK1];
3057 c_irq_params->adev = adev;
3058 c_irq_params->irq_src = int_params.irq_source;
3060 amdgpu_dm_irq_register_interrupt(
3061 adev, &int_params, dm_crtc_high_irq, c_irq_params);
3064 /* Use otg vertical line interrupt */
3065 #if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
3066 for (i = 0; i <= adev->mode_info.num_crtc - 1; i++) {
3067 r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_DCE,
3068 vrtl_int_srcid[i], &adev->vline0_irq);
3071 DRM_ERROR("Failed to add vline0 irq id!\n");
3075 int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
3076 int_params.irq_source =
3077 dc_interrupt_to_irq_source(dc, vrtl_int_srcid[i], 0);
3079 if (int_params.irq_source == DC_IRQ_SOURCE_INVALID) {
3080 DRM_ERROR("Failed to register vline0 irq %d!\n", vrtl_int_srcid[i]);
3084 c_irq_params = &adev->dm.vline0_params[int_params.irq_source
3085 - DC_IRQ_SOURCE_DC1_VLINE0];
3087 c_irq_params->adev = adev;
3088 c_irq_params->irq_src = int_params.irq_source;
3090 amdgpu_dm_irq_register_interrupt(adev, &int_params,
3091 dm_dcn_vertical_interrupt0_high_irq, c_irq_params);
3095 /* Use VUPDATE_NO_LOCK interrupt on DCN, which seems to correspond to
3096 * the regular VUPDATE interrupt on DCE. We want DC_IRQ_SOURCE_VUPDATEx
3097 * to trigger at end of each vblank, regardless of state of the lock,
3098 * matching DCE behaviour.
3100 for (i = DCN_1_0__SRCID__OTG0_IHC_V_UPDATE_NO_LOCK_INTERRUPT;
3101 i <= DCN_1_0__SRCID__OTG0_IHC_V_UPDATE_NO_LOCK_INTERRUPT + adev->mode_info.num_crtc - 1;
3103 r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_DCE, i, &adev->vupdate_irq);
3106 DRM_ERROR("Failed to add vupdate irq id!\n");
3110 int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
3111 int_params.irq_source =
3112 dc_interrupt_to_irq_source(dc, i, 0);
3114 c_irq_params = &adev->dm.vupdate_params[int_params.irq_source - DC_IRQ_SOURCE_VUPDATE1];
3116 c_irq_params->adev = adev;
3117 c_irq_params->irq_src = int_params.irq_source;
3119 amdgpu_dm_irq_register_interrupt(adev, &int_params,
3120 dm_vupdate_high_irq, c_irq_params);
3123 /* Use GRPH_PFLIP interrupt */
3124 for (i = DCN_1_0__SRCID__HUBP0_FLIP_INTERRUPT;
3125 i <= DCN_1_0__SRCID__HUBP0_FLIP_INTERRUPT + adev->mode_info.num_crtc - 1;
3127 r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_DCE, i, &adev->pageflip_irq);
3129 DRM_ERROR("Failed to add page flip irq id!\n");
3133 int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
3134 int_params.irq_source =
3135 dc_interrupt_to_irq_source(dc, i, 0);
3137 c_irq_params = &adev->dm.pflip_params[int_params.irq_source - DC_IRQ_SOURCE_PFLIP_FIRST];
3139 c_irq_params->adev = adev;
3140 c_irq_params->irq_src = int_params.irq_source;
3142 amdgpu_dm_irq_register_interrupt(adev, &int_params,
3143 dm_pflip_high_irq, c_irq_params);
3147 if (dc->ctx->dmub_srv) {
3148 i = DCN_1_0__SRCID__DMCUB_OUTBOX_HIGH_PRIORITY_READY_INT;
3149 r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_DCE, i, &adev->dmub_trace_irq);
3152 DRM_ERROR("Failed to add dmub trace irq id!\n");
3156 int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
3157 int_params.irq_source =
3158 dc_interrupt_to_irq_source(dc, i, 0);
3160 c_irq_params = &adev->dm.dmub_trace_params[0];
3162 c_irq_params->adev = adev;
3163 c_irq_params->irq_src = int_params.irq_source;
3165 amdgpu_dm_irq_register_interrupt(adev, &int_params,
3166 dm_dmub_trace_high_irq, c_irq_params);
3170 r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_DCE, DCN_1_0__SRCID__DC_HPD1_INT,
3173 DRM_ERROR("Failed to add hpd irq id!\n");
3177 register_hpd_handlers(adev);
3184 * Acquires the lock for the atomic state object and returns
3185 * the new atomic state.
3187 * This should only be called during atomic check.
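*
* Illustrative usage sketch (added for clarity; a hypothetical caller in an
* atomic check hook, not code taken from this file):
*
*	struct dm_atomic_state *dm_state;
*	int ret = dm_atomic_get_state(state, &dm_state);
*
*	if (ret)
*		return ret;
*	dm_state->context can then be used to build the global DC state.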
3189 static int dm_atomic_get_state(struct drm_atomic_state *state,
3190 struct dm_atomic_state **dm_state)
3192 struct drm_device *dev = state->dev;
3193 struct amdgpu_device *adev = drm_to_adev(dev);
3194 struct amdgpu_display_manager *dm = &adev->dm;
3195 struct drm_private_state *priv_state;
3200 priv_state = drm_atomic_get_private_obj_state(state, &dm->atomic_obj);
3201 if (IS_ERR(priv_state))
3202 return PTR_ERR(priv_state);
3204 *dm_state = to_dm_atomic_state(priv_state);
3209 static struct dm_atomic_state *
3210 dm_atomic_get_new_state(struct drm_atomic_state *state)
3212 struct drm_device *dev = state->dev;
3213 struct amdgpu_device *adev = drm_to_adev(dev);
3214 struct amdgpu_display_manager *dm = &adev->dm;
3215 struct drm_private_obj *obj;
3216 struct drm_private_state *new_obj_state;
3219 for_each_new_private_obj_in_state(state, obj, new_obj_state, i) {
3220 if (obj->funcs == dm->atomic_obj.funcs)
3221 return to_dm_atomic_state(new_obj_state);
3227 static struct drm_private_state *
3228 dm_atomic_duplicate_state(struct drm_private_obj *obj)
3230 struct dm_atomic_state *old_state, *new_state;
3232 new_state = kzalloc(sizeof(*new_state), GFP_KERNEL);
3236 __drm_atomic_helper_private_obj_duplicate_state(obj, &new_state->base);
3238 old_state = to_dm_atomic_state(obj->state);
3240 if (old_state && old_state->context)
3241 new_state->context = dc_copy_state(old_state->context);
3243 if (!new_state->context) {
3248 return &new_state->base;
3251 static void dm_atomic_destroy_state(struct drm_private_obj *obj,
3252 struct drm_private_state *state)
3254 struct dm_atomic_state *dm_state = to_dm_atomic_state(state);
3256 if (dm_state && dm_state->context)
3257 dc_release_state(dm_state->context);
3262 static struct drm_private_state_funcs dm_atomic_state_funcs = {
3263 .atomic_duplicate_state = dm_atomic_duplicate_state,
3264 .atomic_destroy_state = dm_atomic_destroy_state,
3267 static int amdgpu_dm_mode_config_init(struct amdgpu_device *adev)
3269 struct dm_atomic_state *state;
3272 adev->mode_info.mode_config_initialized = true;
3274 adev_to_drm(adev)->mode_config.funcs = (void *)&amdgpu_dm_mode_funcs;
3275 adev_to_drm(adev)->mode_config.helper_private = &amdgpu_dm_mode_config_helperfuncs;
3277 adev_to_drm(adev)->mode_config.max_width = 16384;
3278 adev_to_drm(adev)->mode_config.max_height = 16384;
3280 adev_to_drm(adev)->mode_config.preferred_depth = 24;
3281 adev_to_drm(adev)->mode_config.prefer_shadow = 1;
3282 /* indicates support for immediate flip */
3283 adev_to_drm(adev)->mode_config.async_page_flip = true;
3285 adev_to_drm(adev)->mode_config.fb_base = adev->gmc.aper_base;
3287 state = kzalloc(sizeof(*state), GFP_KERNEL);
3291 state->context = dc_create_state(adev->dm.dc);
3292 if (!state->context) {
3297 dc_resource_state_copy_construct_current(adev->dm.dc, state->context);
3299 drm_atomic_private_obj_init(adev_to_drm(adev),
3300 &adev->dm.atomic_obj,
3302 &dm_atomic_state_funcs);
3304 r = amdgpu_display_modeset_create_props(adev);
3306 dc_release_state(state->context);
3311 r = amdgpu_dm_audio_init(adev);
3313 dc_release_state(state->context);
3321 #define AMDGPU_DM_DEFAULT_MIN_BACKLIGHT 12
3322 #define AMDGPU_DM_DEFAULT_MAX_BACKLIGHT 255
3323 #define AUX_BL_DEFAULT_TRANSITION_TIME_MS 50
3325 #if defined(CONFIG_BACKLIGHT_CLASS_DEVICE) ||\
3326 defined(CONFIG_BACKLIGHT_CLASS_DEVICE_MODULE)
3328 static void amdgpu_dm_update_backlight_caps(struct amdgpu_display_manager *dm)
3330 #if defined(CONFIG_ACPI)
3331 struct amdgpu_dm_backlight_caps caps;
3333 memset(&caps, 0, sizeof(caps));
3335 if (dm->backlight_caps.caps_valid)
3338 amdgpu_acpi_get_backlight_caps(dm->adev, &caps);
3339 if (caps.caps_valid) {
3340 dm->backlight_caps.caps_valid = true;
3341 if (caps.aux_support)
3343 dm->backlight_caps.min_input_signal = caps.min_input_signal;
3344 dm->backlight_caps.max_input_signal = caps.max_input_signal;
3346 dm->backlight_caps.min_input_signal =
3347 AMDGPU_DM_DEFAULT_MIN_BACKLIGHT;
3348 dm->backlight_caps.max_input_signal =
3349 AMDGPU_DM_DEFAULT_MAX_BACKLIGHT;
3352 if (dm->backlight_caps.aux_support)
3355 dm->backlight_caps.min_input_signal = AMDGPU_DM_DEFAULT_MIN_BACKLIGHT;
3356 dm->backlight_caps.max_input_signal = AMDGPU_DM_DEFAULT_MAX_BACKLIGHT;
3360 static int get_brightness_range(const struct amdgpu_dm_backlight_caps *caps,
3361 unsigned *min, unsigned *max)
3366 if (caps->aux_support) {
3367 // Firmware limits are in nits, DC API wants millinits.
3368 *max = 1000 * caps->aux_max_input_signal;
3369 *min = 1000 * caps->aux_min_input_signal;
3371 // Firmware limits are 8-bit, PWM control is 16-bit.
3372 *max = 0x101 * caps->max_input_signal;
3373 *min = 0x101 * caps->min_input_signal;
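/*
 * Illustrative example (not from the original source): with the default
 * 8-bit limits of 12 and 255 defined above, this yields
 * min = 0x101 * 12 = 3084 and max = 0x101 * 255 = 65535, i.e. the 8-bit
 * firmware range is spread across the full 16-bit PWM range
 * (0xFF * 0x101 == 0xFFFF).
 */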
3378 static u32 convert_brightness_from_user(const struct amdgpu_dm_backlight_caps *caps,
3379 uint32_t brightness)
3383 if (!get_brightness_range(caps, &min, &max))
3386 // Rescale 0..255 to min..max
3387 return min + DIV_ROUND_CLOSEST((max - min) * brightness,
3388 AMDGPU_MAX_BL_LEVEL);
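/*
 * Illustrative example (not from the original source), assuming
 * AMDGPU_MAX_BL_LEVEL is 255 and the default PWM range from above
 * (min = 3084, max = 65535): a user brightness of 128 maps to
 * 3084 + DIV_ROUND_CLOSEST((65535 - 3084) * 128, 255) = 34432.
 */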
3391 static u32 convert_brightness_to_user(const struct amdgpu_dm_backlight_caps *caps,
3392 uint32_t brightness)
3396 if (!get_brightness_range(caps, &min, &max))
3399 if (brightness < min)
3401 // Rescale min..max to 0..255
3402 return DIV_ROUND_CLOSEST(AMDGPU_MAX_BL_LEVEL * (brightness - min),
3406 static int amdgpu_dm_backlight_update_status(struct backlight_device *bd)
3408 struct amdgpu_display_manager *dm = bl_get_data(bd);
3409 struct amdgpu_dm_backlight_caps caps;
3410 struct dc_link *link = NULL;
3414 amdgpu_dm_update_backlight_caps(dm);
3415 caps = dm->backlight_caps;
3417 link = (struct dc_link *)dm->backlight_link;
3419 brightness = convert_brightness_from_user(&caps, bd->props.brightness);
3420 // Change brightness based on AUX property
3421 if (caps.aux_support)
3422 rc = dc_link_set_backlight_level_nits(link, true, brightness,
3423 AUX_BL_DEFAULT_TRANSITION_TIME_MS);
3425 rc = dc_link_set_backlight_level(dm->backlight_link, brightness, 0);
3430 static int amdgpu_dm_backlight_get_brightness(struct backlight_device *bd)
3432 struct amdgpu_display_manager *dm = bl_get_data(bd);
3433 struct amdgpu_dm_backlight_caps caps;
3435 amdgpu_dm_update_backlight_caps(dm);
3436 caps = dm->backlight_caps;
3438 if (caps.aux_support) {
3439 struct dc_link *link = (struct dc_link *)dm->backlight_link;
3443 rc = dc_link_get_backlight_level_nits(link, &avg, &peak);
3445 return bd->props.brightness;
3446 return convert_brightness_to_user(&caps, avg);
3448 int ret = dc_link_get_backlight_level(dm->backlight_link);
3450 if (ret == DC_ERROR_UNEXPECTED)
3451 return bd->props.brightness;
3452 return convert_brightness_to_user(&caps, ret);
3456 static const struct backlight_ops amdgpu_dm_backlight_ops = {
3457 .options = BL_CORE_SUSPENDRESUME,
3458 .get_brightness = amdgpu_dm_backlight_get_brightness,
3459 .update_status = amdgpu_dm_backlight_update_status,
3463 amdgpu_dm_register_backlight_device(struct amdgpu_display_manager *dm)
3466 struct backlight_properties props = { 0 };
3468 amdgpu_dm_update_backlight_caps(dm);
3470 props.max_brightness = AMDGPU_MAX_BL_LEVEL;
3471 props.brightness = AMDGPU_MAX_BL_LEVEL;
3472 props.type = BACKLIGHT_RAW;
3474 snprintf(bl_name, sizeof(bl_name), "amdgpu_bl%d",
3475 adev_to_drm(dm->adev)->primary->index);
3477 dm->backlight_dev = backlight_device_register(bl_name,
3478 adev_to_drm(dm->adev)->dev,
3480 &amdgpu_dm_backlight_ops,
3483 if (IS_ERR(dm->backlight_dev))
3484 DRM_ERROR("DM: Backlight registration failed!\n");
3486 DRM_DEBUG_DRIVER("DM: Registered Backlight device: %s\n", bl_name);
3491 static int initialize_plane(struct amdgpu_display_manager *dm,
3492 struct amdgpu_mode_info *mode_info, int plane_id,
3493 enum drm_plane_type plane_type,
3494 const struct dc_plane_cap *plane_cap)
3496 struct drm_plane *plane;
3497 unsigned long possible_crtcs;
3500 plane = kzalloc(sizeof(struct drm_plane), GFP_KERNEL);
3502 DRM_ERROR("KMS: Failed to allocate plane\n");
3505 plane->type = plane_type;
3508 * HACK: IGT tests expect that the primary plane for a CRTC
3509 * can only have that single CRTC as a possible CRTC. Only expose support for
3510 * any CRTC on planes that are not going to be used as a primary plane
3511 * for a CRTC - i.e. overlay or underlay planes.
3513 possible_crtcs = 1 << plane_id;
3514 if (plane_id >= dm->dc->caps.max_streams)
3515 possible_crtcs = 0xff;
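/*
 * Illustrative example (not from the original source): with a hypothetical
 * dc->caps.max_streams of 4, primary planes 0-3 get possible_crtcs of
 * 0x1, 0x2, 0x4 and 0x8 respectively, while overlay planes
 * (plane_id >= 4) advertise 0xff and may be placed on any CRTC.
 */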
3517 ret = amdgpu_dm_plane_init(dm, plane, possible_crtcs, plane_cap);
3520 DRM_ERROR("KMS: Failed to initialize plane\n");
3526 mode_info->planes[plane_id] = plane;
3532 static void register_backlight_device(struct amdgpu_display_manager *dm,
3533 struct dc_link *link)
3535 #if defined(CONFIG_BACKLIGHT_CLASS_DEVICE) ||\
3536 defined(CONFIG_BACKLIGHT_CLASS_DEVICE_MODULE)
3538 if ((link->connector_signal & (SIGNAL_TYPE_EDP | SIGNAL_TYPE_LVDS)) &&
3539 link->type != dc_connection_none) {
3541 * Even if registration fails, we should continue with
3542 * DM initialization because not having backlight control
3543 * is better than a black screen.
3545 amdgpu_dm_register_backlight_device(dm);
3547 if (dm->backlight_dev)
3548 dm->backlight_link = link;
3555 * In this architecture, the association
3556 * connector -> encoder -> crtc
3557 * is not really required. The crtc and connector will hold the
3558 * display_index as an abstraction to use with the DAL component.
3560 * Returns 0 on success
3562 static int amdgpu_dm_initialize_drm_device(struct amdgpu_device *adev)
3564 struct amdgpu_display_manager *dm = &adev->dm;
3566 struct amdgpu_dm_connector *aconnector = NULL;
3567 struct amdgpu_encoder *aencoder = NULL;
3568 struct amdgpu_mode_info *mode_info = &adev->mode_info;
3570 int32_t primary_planes;
3571 enum dc_connection_type new_connection_type = dc_connection_none;
3572 const struct dc_plane_cap *plane;
3574 dm->display_indexes_num = dm->dc->caps.max_streams;
3575 /* Update the actual number of CRTCs in use */
3576 adev->mode_info.num_crtc = adev->dm.display_indexes_num;
3578 link_cnt = dm->dc->caps.max_links;
3579 if (amdgpu_dm_mode_config_init(dm->adev)) {
3580 DRM_ERROR("DM: Failed to initialize mode config\n");
3584 /* There is one primary plane per CRTC */
3585 primary_planes = dm->dc->caps.max_streams;
3586 ASSERT(primary_planes <= AMDGPU_MAX_PLANES);
3589 * Initialize primary planes, the implicit planes for legacy IOCTLs.
3590 * Order is reversed to match iteration order in atomic check.
3592 for (i = (primary_planes - 1); i >= 0; i--) {
3593 plane = &dm->dc->caps.planes[i];
3595 if (initialize_plane(dm, mode_info, i,
3596 DRM_PLANE_TYPE_PRIMARY, plane)) {
3597 DRM_ERROR("KMS: Failed to initialize primary plane\n");
3603 * Initialize overlay planes, index starting after primary planes.
3604 * These planes have a higher DRM index than the primary planes since
3605 * they should be considered as having a higher z-order.
3606 * Order is reversed to match iteration order in atomic check.
3608 * Only support DCN for now, and only expose one so we don't encourage
3609 * userspace to use up all the pipes.
3611 for (i = 0; i < dm->dc->caps.max_planes; ++i) {
3612 struct dc_plane_cap *plane = &dm->dc->caps.planes[i];
3614 if (plane->type != DC_PLANE_TYPE_DCN_UNIVERSAL)
3617 if (!plane->blends_with_above || !plane->blends_with_below)
3620 if (!plane->pixel_format_support.argb8888)
3623 if (initialize_plane(dm, NULL, primary_planes + i,
3624 DRM_PLANE_TYPE_OVERLAY, plane)) {
3625 DRM_ERROR("KMS: Failed to initialize overlay plane\n");
3629 /* Only create one overlay plane. */
3633 for (i = 0; i < dm->dc->caps.max_streams; i++)
3634 if (amdgpu_dm_crtc_init(dm, mode_info->planes[i], i)) {
3635 DRM_ERROR("KMS: Failed to initialize crtc\n");
3639 /* loop over all connectors on the board */
3640 for (i = 0; i < link_cnt; i++) {
3641 struct dc_link *link = NULL;
3643 if (i > AMDGPU_DM_MAX_DISPLAY_INDEX) {
3645 "KMS: Cannot support more than %d display indexes\n",
3646 AMDGPU_DM_MAX_DISPLAY_INDEX);
3650 aconnector = kzalloc(sizeof(*aconnector), GFP_KERNEL);
3654 aencoder = kzalloc(sizeof(*aencoder), GFP_KERNEL);
3658 if (amdgpu_dm_encoder_init(dm->ddev, aencoder, i)) {
3659 DRM_ERROR("KMS: Failed to initialize encoder\n");
3663 if (amdgpu_dm_connector_init(dm, aconnector, i, aencoder)) {
3664 DRM_ERROR("KMS: Failed to initialize connector\n");
3668 link = dc_get_link_at_index(dm->dc, i);
3670 if (!dc_link_detect_sink(link, &new_connection_type))
3671 DRM_ERROR("KMS: Failed to detect connector\n");
3673 if (aconnector->base.force && new_connection_type == dc_connection_none) {
3674 emulated_link_detect(link);
3675 amdgpu_dm_update_connector_after_detect(aconnector);
3677 } else if (dc_link_detect(link, DETECT_REASON_BOOT)) {
3678 amdgpu_dm_update_connector_after_detect(aconnector);
3679 register_backlight_device(dm, link);
3680 if (amdgpu_dc_feature_mask & DC_PSR_MASK)
3681 amdgpu_dm_set_psr_caps(link);
3687 /* Software is initialized. Now we can register interrupt handlers. */
3688 switch (adev->asic_type) {
3689 #if defined(CONFIG_DRM_AMD_DC_SI)
3694 if (dce60_register_irq_handlers(dm->adev)) {
3695 DRM_ERROR("DM: Failed to initialize IRQ\n");
3709 case CHIP_POLARIS11:
3710 case CHIP_POLARIS10:
3711 case CHIP_POLARIS12:
3716 if (dce110_register_irq_handlers(dm->adev)) {
3717 DRM_ERROR("DM: Failed to initialize IRQ\n");
3721 #if defined(CONFIG_DRM_AMD_DC_DCN)
3727 case CHIP_SIENNA_CICHLID:
3728 case CHIP_NAVY_FLOUNDER:
3729 case CHIP_DIMGREY_CAVEFISH:
3731 if (dcn10_register_irq_handlers(dm->adev)) {
3732 DRM_ERROR("DM: Failed to initialize IRQ\n");
3738 DRM_ERROR("Unsupported ASIC type: 0x%X\n", adev->asic_type);
3750 static void amdgpu_dm_destroy_drm_device(struct amdgpu_display_manager *dm)
3752 drm_mode_config_cleanup(dm->ddev);
3753 drm_atomic_private_obj_fini(&dm->atomic_obj);
3757 /******************************************************************************
3758 * amdgpu_display_funcs functions
3759 *****************************************************************************/
3762 * dm_bandwidth_update - program display watermarks
3764 * @adev: amdgpu_device pointer
3766 * Calculate and program the display watermarks and line buffer allocation.
3768 static void dm_bandwidth_update(struct amdgpu_device *adev)
3770 /* TODO: implement later */
3773 static const struct amdgpu_display_funcs dm_display_funcs = {
3774 .bandwidth_update = dm_bandwidth_update, /* called unconditionally */
3775 .vblank_get_counter = dm_vblank_get_counter,/* called unconditionally */
3776 .backlight_set_level = NULL, /* never called for DC */
3777 .backlight_get_level = NULL, /* never called for DC */
3778 .hpd_sense = NULL,/* called unconditionally */
3779 .hpd_set_polarity = NULL, /* called unconditionally */
3780 .hpd_get_gpio_reg = NULL, /* VBIOS parsing. DAL does it. */
3781 .page_flip_get_scanoutpos =
3782 dm_crtc_get_scanoutpos,/* called unconditionally */
3783 .add_encoder = NULL, /* VBIOS parsing. DAL does it. */
3784 .add_connector = NULL, /* VBIOS parsing. DAL does it. */
3787 #if defined(CONFIG_DEBUG_KERNEL_DC)
3789 static ssize_t s3_debug_store(struct device *device,
3790 struct device_attribute *attr,
3796 struct drm_device *drm_dev = dev_get_drvdata(device);
3797 struct amdgpu_device *adev = drm_to_adev(drm_dev);
3799 ret = kstrtoint(buf, 0, &s3_state);
3804 drm_kms_helper_hotplug_event(adev_to_drm(adev));
3809 return ret == 0 ? count : 0;
3812 DEVICE_ATTR_WO(s3_debug);
3816 static int dm_early_init(void *handle)
3818 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
3820 switch (adev->asic_type) {
3821 #if defined(CONFIG_DRM_AMD_DC_SI)
3825 adev->mode_info.num_crtc = 6;
3826 adev->mode_info.num_hpd = 6;
3827 adev->mode_info.num_dig = 6;
3830 adev->mode_info.num_crtc = 2;
3831 adev->mode_info.num_hpd = 2;
3832 adev->mode_info.num_dig = 2;
3837 adev->mode_info.num_crtc = 6;
3838 adev->mode_info.num_hpd = 6;
3839 adev->mode_info.num_dig = 6;
3842 adev->mode_info.num_crtc = 4;
3843 adev->mode_info.num_hpd = 6;
3844 adev->mode_info.num_dig = 7;
3848 adev->mode_info.num_crtc = 2;
3849 adev->mode_info.num_hpd = 6;
3850 adev->mode_info.num_dig = 6;
3854 adev->mode_info.num_crtc = 6;
3855 adev->mode_info.num_hpd = 6;
3856 adev->mode_info.num_dig = 7;
3859 adev->mode_info.num_crtc = 3;
3860 adev->mode_info.num_hpd = 6;
3861 adev->mode_info.num_dig = 9;
3864 adev->mode_info.num_crtc = 2;
3865 adev->mode_info.num_hpd = 6;
3866 adev->mode_info.num_dig = 9;
3868 case CHIP_POLARIS11:
3869 case CHIP_POLARIS12:
3870 adev->mode_info.num_crtc = 5;
3871 adev->mode_info.num_hpd = 5;
3872 adev->mode_info.num_dig = 5;
3874 case CHIP_POLARIS10:
3876 adev->mode_info.num_crtc = 6;
3877 adev->mode_info.num_hpd = 6;
3878 adev->mode_info.num_dig = 6;
3883 adev->mode_info.num_crtc = 6;
3884 adev->mode_info.num_hpd = 6;
3885 adev->mode_info.num_dig = 6;
3887 #if defined(CONFIG_DRM_AMD_DC_DCN)
3891 adev->mode_info.num_crtc = 4;
3892 adev->mode_info.num_hpd = 4;
3893 adev->mode_info.num_dig = 4;
3897 case CHIP_SIENNA_CICHLID:
3898 case CHIP_NAVY_FLOUNDER:
3899 adev->mode_info.num_crtc = 6;
3900 adev->mode_info.num_hpd = 6;
3901 adev->mode_info.num_dig = 6;
3904 case CHIP_DIMGREY_CAVEFISH:
3905 adev->mode_info.num_crtc = 5;
3906 adev->mode_info.num_hpd = 5;
3907 adev->mode_info.num_dig = 5;
3911 DRM_ERROR("Unsupported ASIC type: 0x%X\n", adev->asic_type);
3915 amdgpu_dm_set_irq_funcs(adev);
3917 if (adev->mode_info.funcs == NULL)
3918 adev->mode_info.funcs = &dm_display_funcs;
3921 * Note: Do NOT change adev->audio_endpt_rreg and
3922 * adev->audio_endpt_wreg because they are initialised in
3923 * amdgpu_device_init()
3925 #if defined(CONFIG_DEBUG_KERNEL_DC)
3927 adev_to_drm(adev)->dev,
3928 &dev_attr_s3_debug);
3934 static bool modeset_required(struct drm_crtc_state *crtc_state,
3935 struct dc_stream_state *new_stream,
3936 struct dc_stream_state *old_stream)
3938 return crtc_state->active && drm_atomic_crtc_needs_modeset(crtc_state);
3941 static bool modereset_required(struct drm_crtc_state *crtc_state)
3943 return !crtc_state->active && drm_atomic_crtc_needs_modeset(crtc_state);
3946 static void amdgpu_dm_encoder_destroy(struct drm_encoder *encoder)
3948 drm_encoder_cleanup(encoder);
3952 static const struct drm_encoder_funcs amdgpu_dm_encoder_funcs = {
3953 .destroy = amdgpu_dm_encoder_destroy,
3957 static void get_min_max_dc_plane_scaling(struct drm_device *dev,
3958 struct drm_framebuffer *fb,
3959 int *min_downscale, int *max_upscale)
3961 struct amdgpu_device *adev = drm_to_adev(dev);
3962 struct dc *dc = adev->dm.dc;
3963 /* Caps for all supported planes are the same on DCE and DCN 1 - 3 */
3964 struct dc_plane_cap *plane_cap = &dc->caps.planes[0];
3966 switch (fb->format->format) {
3967 case DRM_FORMAT_P010:
3968 case DRM_FORMAT_NV12:
3969 case DRM_FORMAT_NV21:
3970 *max_upscale = plane_cap->max_upscale_factor.nv12;
3971 *min_downscale = plane_cap->max_downscale_factor.nv12;
3974 case DRM_FORMAT_XRGB16161616F:
3975 case DRM_FORMAT_ARGB16161616F:
3976 case DRM_FORMAT_XBGR16161616F:
3977 case DRM_FORMAT_ABGR16161616F:
3978 *max_upscale = plane_cap->max_upscale_factor.fp16;
3979 *min_downscale = plane_cap->max_downscale_factor.fp16;
3983 *max_upscale = plane_cap->max_upscale_factor.argb8888;
3984 *min_downscale = plane_cap->max_downscale_factor.argb8888;
3989 * A factor of 1 in the plane_cap means scaling is not allowed, i.e. use a
3990 * scaling factor of 1.0 == 1000 units.
3992 if (*max_upscale == 1)
3993 *max_upscale = 1000;
3995 if (*min_downscale == 1)
3996 *min_downscale = 1000;
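/*
 * Illustrative example (not from the original source): a plane cap that
 * reports a max_upscale_factor of 16000 allows up to 16x upscaling, while
 * a reported factor of 1 is normalized to 1000 here, meaning the plane
 * must be presented at its native 1:1 size.
 */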
4000 static int fill_dc_scaling_info(const struct drm_plane_state *state,
4001 struct dc_scaling_info *scaling_info)
4003 int scale_w, scale_h, min_downscale, max_upscale;
4005 memset(scaling_info, 0, sizeof(*scaling_info));
4007 /* Source is fixed 16.16 but we ignore mantissa for now... */
4008 scaling_info->src_rect.x = state->src_x >> 16;
4009 scaling_info->src_rect.y = state->src_y >> 16;
4011 scaling_info->src_rect.width = state->src_w >> 16;
4012 if (scaling_info->src_rect.width == 0)
4015 scaling_info->src_rect.height = state->src_h >> 16;
4016 if (scaling_info->src_rect.height == 0)
4019 scaling_info->dst_rect.x = state->crtc_x;
4020 scaling_info->dst_rect.y = state->crtc_y;
4022 if (state->crtc_w == 0)
4025 scaling_info->dst_rect.width = state->crtc_w;
4027 if (state->crtc_h == 0)
4030 scaling_info->dst_rect.height = state->crtc_h;
4032 /* DRM doesn't specify clipping on destination output. */
4033 scaling_info->clip_rect = scaling_info->dst_rect;
4035 /* Validate scaling per-format with DC plane caps */
4036 if (state->plane && state->plane->dev && state->fb) {
4037 get_min_max_dc_plane_scaling(state->plane->dev, state->fb,
4038 &min_downscale, &max_upscale);
4040 min_downscale = 250;
4041 max_upscale = 16000;
4044 scale_w = scaling_info->dst_rect.width * 1000 /
4045 scaling_info->src_rect.width;
4047 if (scale_w < min_downscale || scale_w > max_upscale)
4050 scale_h = scaling_info->dst_rect.height * 1000 /
4051 scaling_info->src_rect.height;
4053 if (scale_h < min_downscale || scale_h > max_upscale)
4057 * The "scaling_quality" can be ignored for now; quality = 0 makes DC
4058 * assume reasonable defaults based on the format.
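*
* Illustrative example (added for clarity, not part of the original comment):
* a 1920-pixel-wide source scanned out to a 960-pixel-wide destination gives
* scale_w = 960 * 1000 / 1920 = 500, which is accepted as long as the
* format's min_downscale is 500 or lower (the fallback limits above allow
* 250..16000).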
4065 fill_gfx8_tiling_info_from_flags(union dc_tiling_info *tiling_info,
4066 uint64_t tiling_flags)
4068 /* Fill GFX8 params */
4069 if (AMDGPU_TILING_GET(tiling_flags, ARRAY_MODE) == DC_ARRAY_2D_TILED_THIN1) {
4070 unsigned int bankw, bankh, mtaspect, tile_split, num_banks;
4072 bankw = AMDGPU_TILING_GET(tiling_flags, BANK_WIDTH);
4073 bankh = AMDGPU_TILING_GET(tiling_flags, BANK_HEIGHT);
4074 mtaspect = AMDGPU_TILING_GET(tiling_flags, MACRO_TILE_ASPECT);
4075 tile_split = AMDGPU_TILING_GET(tiling_flags, TILE_SPLIT);
4076 num_banks = AMDGPU_TILING_GET(tiling_flags, NUM_BANKS);
4078 /* XXX fix me for VI */
4079 tiling_info->gfx8.num_banks = num_banks;
4080 tiling_info->gfx8.array_mode =
4081 DC_ARRAY_2D_TILED_THIN1;
4082 tiling_info->gfx8.tile_split = tile_split;
4083 tiling_info->gfx8.bank_width = bankw;
4084 tiling_info->gfx8.bank_height = bankh;
4085 tiling_info->gfx8.tile_aspect = mtaspect;
4086 tiling_info->gfx8.tile_mode =
4087 DC_ADDR_SURF_MICRO_TILING_DISPLAY;
4088 } else if (AMDGPU_TILING_GET(tiling_flags, ARRAY_MODE)
4089 == DC_ARRAY_1D_TILED_THIN1) {
4090 tiling_info->gfx8.array_mode = DC_ARRAY_1D_TILED_THIN1;
4093 tiling_info->gfx8.pipe_config =
4094 AMDGPU_TILING_GET(tiling_flags, PIPE_CONFIG);
4098 fill_gfx9_tiling_info_from_device(const struct amdgpu_device *adev,
4099 union dc_tiling_info *tiling_info)
4101 tiling_info->gfx9.num_pipes =
4102 adev->gfx.config.gb_addr_config_fields.num_pipes;
4103 tiling_info->gfx9.num_banks =
4104 adev->gfx.config.gb_addr_config_fields.num_banks;
4105 tiling_info->gfx9.pipe_interleave =
4106 adev->gfx.config.gb_addr_config_fields.pipe_interleave_size;
4107 tiling_info->gfx9.num_shader_engines =
4108 adev->gfx.config.gb_addr_config_fields.num_se;
4109 tiling_info->gfx9.max_compressed_frags =
4110 adev->gfx.config.gb_addr_config_fields.max_compress_frags;
4111 tiling_info->gfx9.num_rb_per_se =
4112 adev->gfx.config.gb_addr_config_fields.num_rb_per_se;
4113 tiling_info->gfx9.shaderEnable = 1;
4114 if (adev->asic_type == CHIP_SIENNA_CICHLID ||
4115 adev->asic_type == CHIP_NAVY_FLOUNDER ||
4116 adev->asic_type == CHIP_DIMGREY_CAVEFISH ||
4117 adev->asic_type == CHIP_VANGOGH)
4118 tiling_info->gfx9.num_pkrs = adev->gfx.config.gb_addr_config_fields.num_pkrs;
4122 validate_dcc(struct amdgpu_device *adev,
4123 const enum surface_pixel_format format,
4124 const enum dc_rotation_angle rotation,
4125 const union dc_tiling_info *tiling_info,
4126 const struct dc_plane_dcc_param *dcc,
4127 const struct dc_plane_address *address,
4128 const struct plane_size *plane_size)
4130 struct dc *dc = adev->dm.dc;
4131 struct dc_dcc_surface_param input;
4132 struct dc_surface_dcc_cap output;
4134 memset(&input, 0, sizeof(input));
4135 memset(&output, 0, sizeof(output));
4140 if (format >= SURFACE_PIXEL_FORMAT_VIDEO_BEGIN ||
4141 !dc->cap_funcs.get_dcc_compression_cap)
4144 input.format = format;
4145 input.surface_size.width = plane_size->surface_size.width;
4146 input.surface_size.height = plane_size->surface_size.height;
4147 input.swizzle_mode = tiling_info->gfx9.swizzle;
4149 if (rotation == ROTATION_ANGLE_0 || rotation == ROTATION_ANGLE_180)
4150 input.scan = SCAN_DIRECTION_HORIZONTAL;
4151 else if (rotation == ROTATION_ANGLE_90 || rotation == ROTATION_ANGLE_270)
4152 input.scan = SCAN_DIRECTION_VERTICAL;
4154 if (!dc->cap_funcs.get_dcc_compression_cap(dc, &input, &output))
4157 if (!output.capable)
4160 if (dcc->independent_64b_blks == 0 &&
4161 output.grph.rgb.independent_64b_blks != 0)
4168 modifier_has_dcc(uint64_t modifier)
4170 return IS_AMD_FMT_MOD(modifier) && AMD_FMT_MOD_GET(DCC, modifier);
4174 modifier_gfx9_swizzle_mode(uint64_t modifier)
4176 if (modifier == DRM_FORMAT_MOD_LINEAR)
4179 return AMD_FMT_MOD_GET(TILE, modifier);
4182 static const struct drm_format_info *
4183 amd_get_format_info(const struct drm_mode_fb_cmd2 *cmd)
4185 return amdgpu_lookup_format_info(cmd->pixel_format, cmd->modifier[0]);
4189 fill_gfx9_tiling_info_from_modifier(const struct amdgpu_device *adev,
4190 union dc_tiling_info *tiling_info,
4193 unsigned int mod_bank_xor_bits = AMD_FMT_MOD_GET(BANK_XOR_BITS, modifier);
4194 unsigned int mod_pipe_xor_bits = AMD_FMT_MOD_GET(PIPE_XOR_BITS, modifier);
4195 unsigned int pkrs_log2 = AMD_FMT_MOD_GET(PACKERS, modifier);
4196 unsigned int pipes_log2 = min(4u, mod_pipe_xor_bits);
4198 fill_gfx9_tiling_info_from_device(adev, tiling_info);
4200 if (!IS_AMD_FMT_MOD(modifier))
4203 tiling_info->gfx9.num_pipes = 1u << pipes_log2;
4204 tiling_info->gfx9.num_shader_engines = 1u << (mod_pipe_xor_bits - pipes_log2);
4206 if (adev->family >= AMDGPU_FAMILY_NV) {
4207 tiling_info->gfx9.num_pkrs = 1u << pkrs_log2;
4209 tiling_info->gfx9.num_banks = 1u << mod_bank_xor_bits;
4211 /* for DCC we know it isn't rb aligned, so rb_per_se doesn't matter. */
4215 enum dm_micro_swizzle {
4216 MICRO_SWIZZLE_Z = 0,
4217 MICRO_SWIZZLE_S = 1,
4218 MICRO_SWIZZLE_D = 2,
4222 static bool dm_plane_format_mod_supported(struct drm_plane *plane,
4226 struct amdgpu_device *adev = drm_to_adev(plane->dev);
4227 const struct drm_format_info *info = drm_format_info(format);
4229 enum dm_micro_swizzle microtile = modifier_gfx9_swizzle_mode(modifier) & 3;
4235 * We always have to allow this modifier, because core DRM still
4236 * checks LINEAR support if userspace does not provide modifiers.
4238 if (modifier == DRM_FORMAT_MOD_LINEAR)
4242 * The arbitrary tiling support for multiplane formats has not been hooked up.
4245 if (info->num_planes > 1)
4249 * For D swizzle the canonical modifier depends on the bpp, so check it here.
4252 if (AMD_FMT_MOD_GET(TILE_VERSION, modifier) == AMD_FMT_MOD_TILE_VER_GFX9 &&
4253 adev->family >= AMDGPU_FAMILY_NV) {
4254 if (microtile == MICRO_SWIZZLE_D && info->cpp[0] == 4)
4258 if (adev->family >= AMDGPU_FAMILY_RV && microtile == MICRO_SWIZZLE_D &&
4262 if (modifier_has_dcc(modifier)) {
4263 /* Per radeonsi comments 16/64 bpp are more complicated. */
4264 if (info->cpp[0] != 4)
4272 add_modifier(uint64_t **mods, uint64_t *size, uint64_t *cap, uint64_t mod)
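/*
 * Grow the modifier array geometrically (capacity doubling) so repeated
 * add_modifier() calls stay amortized O(1).
 */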
4277 if (*cap - *size < 1) {
4278 uint64_t new_cap = *cap * 2;
4279 uint64_t *new_mods = kmalloc(new_cap * sizeof(uint64_t), GFP_KERNEL);
4287 memcpy(new_mods, *mods, sizeof(uint64_t) * *size);
4293 (*mods)[*size] = mod;
4298 add_gfx9_modifiers(const struct amdgpu_device *adev,
4299 uint64_t **mods, uint64_t *size, uint64_t *capacity)
4301 int pipes = ilog2(adev->gfx.config.gb_addr_config_fields.num_pipes);
4302 int pipe_xor_bits = min(8, pipes +
4303 ilog2(adev->gfx.config.gb_addr_config_fields.num_se));
4304 int bank_xor_bits = min(8 - pipe_xor_bits,
4305 ilog2(adev->gfx.config.gb_addr_config_fields.num_banks));
4306 int rb = ilog2(adev->gfx.config.gb_addr_config_fields.num_se) +
4307 ilog2(adev->gfx.config.gb_addr_config_fields.num_rb_per_se);
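/*
 * Illustrative example with a hypothetical gfx9 config: 4 pipes, 1 shader
 * engine and 8 banks give pipes = 2, pipe_xor_bits = min(8, 2 + 0) = 2 and
 * bank_xor_bits = min(8 - 2, 3) = 3.
 */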
4310 if (adev->family == AMDGPU_FAMILY_RV) {
4311 /* Raven2 and later */
4312 bool has_constant_encode = adev->asic_type > CHIP_RAVEN || adev->external_rev_id >= 0x81;
4315 * No _D DCC swizzles yet because we only allow 32bpp, which
4316 * doesn't support _D on DCN
4319 if (has_constant_encode) {
4320 add_modifier(mods, size, capacity, AMD_FMT_MOD |
4321 AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S_X) |
4322 AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9) |
4323 AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
4324 AMD_FMT_MOD_SET(BANK_XOR_BITS, bank_xor_bits) |
4325 AMD_FMT_MOD_SET(DCC, 1) |
4326 AMD_FMT_MOD_SET(DCC_INDEPENDENT_64B, 1) |
4327 AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_64B) |
4328 AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 1));
4331 add_modifier(mods, size, capacity, AMD_FMT_MOD |
4332 AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S_X) |
4333 AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9) |
4334 AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
4335 AMD_FMT_MOD_SET(BANK_XOR_BITS, bank_xor_bits) |
4336 AMD_FMT_MOD_SET(DCC, 1) |
4337 AMD_FMT_MOD_SET(DCC_INDEPENDENT_64B, 1) |
4338 AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_64B) |
4339 AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 0));
4341 if (has_constant_encode) {
4342 add_modifier(mods, size, capacity, AMD_FMT_MOD |
4343 AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S_X) |
4344 AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9) |
4345 AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
4346 AMD_FMT_MOD_SET(BANK_XOR_BITS, bank_xor_bits) |
4347 AMD_FMT_MOD_SET(DCC, 1) |
4348 AMD_FMT_MOD_SET(DCC_RETILE, 1) |
4349 AMD_FMT_MOD_SET(DCC_INDEPENDENT_64B, 1) |
4350 AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_64B) |
4352 AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 1) |
4353 AMD_FMT_MOD_SET(RB, rb) |
4354 AMD_FMT_MOD_SET(PIPE, pipes));
4357 add_modifier(mods, size, capacity, AMD_FMT_MOD |
4358 AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S_X) |
4359 AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9) |
4360 AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
4361 AMD_FMT_MOD_SET(BANK_XOR_BITS, bank_xor_bits) |
4362 AMD_FMT_MOD_SET(DCC, 1) |
4363 AMD_FMT_MOD_SET(DCC_RETILE, 1) |
4364 AMD_FMT_MOD_SET(DCC_INDEPENDENT_64B, 1) |
4365 AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_64B) |
4366 AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 0) |
4367 AMD_FMT_MOD_SET(RB, rb) |
4368 AMD_FMT_MOD_SET(PIPE, pipes));
4372 * Only supported for 64bpp on Raven, will be filtered on format in
4373 * dm_plane_format_mod_supported.
4375 add_modifier(mods, size, capacity, AMD_FMT_MOD |
4376 AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_D_X) |
4377 AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9) |
4378 AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
4379 AMD_FMT_MOD_SET(BANK_XOR_BITS, bank_xor_bits));
4381 if (adev->family == AMDGPU_FAMILY_RV) {
4382 add_modifier(mods, size, capacity, AMD_FMT_MOD |
4383 AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S_X) |
4384 AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9) |
4385 AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
4386 AMD_FMT_MOD_SET(BANK_XOR_BITS, bank_xor_bits));
4390 * Only supported for 64bpp on Raven, will be filtered on format in
4391 * dm_plane_format_mod_supported.
4393 add_modifier(mods, size, capacity, AMD_FMT_MOD |
4394 AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_D) |
4395 AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9));
4397 if (adev->family == AMDGPU_FAMILY_RV) {
4398 add_modifier(mods, size, capacity, AMD_FMT_MOD |
4399 AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S) |
4400 AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9));
4405 add_gfx10_1_modifiers(const struct amdgpu_device *adev,
4406 uint64_t **mods, uint64_t *size, uint64_t *capacity)
4408 int pipe_xor_bits = ilog2(adev->gfx.config.gb_addr_config_fields.num_pipes);
4410 add_modifier(mods, size, capacity, AMD_FMT_MOD |
4411 AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_R_X) |
4412 AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10) |
4413 AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
4414 AMD_FMT_MOD_SET(DCC, 1) |
4415 AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 1) |
4416 AMD_FMT_MOD_SET(DCC_INDEPENDENT_64B, 1) |
4417 AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_64B));
4419 add_modifier(mods, size, capacity, AMD_FMT_MOD |
4420 AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_R_X) |
4421 AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10) |
4422 AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
4423 AMD_FMT_MOD_SET(DCC, 1) |
4424 AMD_FMT_MOD_SET(DCC_RETILE, 1) |
4425 AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 1) |
4426 AMD_FMT_MOD_SET(DCC_INDEPENDENT_64B, 1) |
4427 AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_64B));
4429 add_modifier(mods, size, capacity, AMD_FMT_MOD |
4430 AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_R_X) |
4431 AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10) |
4432 AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits));
4434 add_modifier(mods, size, capacity, AMD_FMT_MOD |
4435 AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S_X) |
4436 AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10) |
4437 AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits));
4440 /* Only supported for 64bpp, will be filtered in dm_plane_format_mod_supported */
4441 add_modifier(mods, size, capacity, AMD_FMT_MOD |
4442 AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_D) |
4443 AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9));
4445 add_modifier(mods, size, capacity, AMD_FMT_MOD |
4446 AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S) |
4447 AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9));
4451 add_gfx10_3_modifiers(const struct amdgpu_device *adev,
4452 uint64_t **mods, uint64_t *size, uint64_t *capacity)
4454 int pipe_xor_bits = ilog2(adev->gfx.config.gb_addr_config_fields.num_pipes);
4455 int pkrs = ilog2(adev->gfx.config.gb_addr_config_fields.num_pkrs);
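/*
 * gfx10.3 (RB+) modifiers additionally encode the packer count and use
 * 64B+128B independent DCC blocks with a 128B max compressed block, as
 * set via the PACKERS / DCC_INDEPENDENT_128B fields below.
 */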
4457 add_modifier(mods, size, capacity, AMD_FMT_MOD |
4458 AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_R_X) |
4459 AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10_RBPLUS) |
4460 AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
4461 AMD_FMT_MOD_SET(PACKERS, pkrs) |
4462 AMD_FMT_MOD_SET(DCC, 1) |
4463 AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 1) |
4464 AMD_FMT_MOD_SET(DCC_INDEPENDENT_64B, 1) |
4465 AMD_FMT_MOD_SET(DCC_INDEPENDENT_128B, 1) |
4466 AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_128B));
4468 add_modifier(mods, size, capacity, AMD_FMT_MOD |
4469 AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_R_X) |
4470 AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10_RBPLUS) |
4471 AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
4472 AMD_FMT_MOD_SET(PACKERS, pkrs) |
4473 AMD_FMT_MOD_SET(DCC, 1) |
4474 AMD_FMT_MOD_SET(DCC_RETILE, 1) |
4475 AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 1) |
4476 AMD_FMT_MOD_SET(DCC_INDEPENDENT_64B, 1) |
4477 AMD_FMT_MOD_SET(DCC_INDEPENDENT_128B, 1) |
4478 AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_128B));
4480 add_modifier(mods, size, capacity, AMD_FMT_MOD |
4481 AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_R_X) |
4482 AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10_RBPLUS) |
4483 AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
4484 AMD_FMT_MOD_SET(PACKERS, pkrs));
4486 add_modifier(mods, size, capacity, AMD_FMT_MOD |
4487 AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S_X) |
4488 AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10_RBPLUS) |
4489 AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
4490 AMD_FMT_MOD_SET(PACKERS, pkrs));
4492 /* Only supported for 64bpp, will be filtered in dm_plane_format_mod_supported */
4493 add_modifier(mods, size, capacity, AMD_FMT_MOD |
4494 AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_D) |
4495 AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9));
4497 add_modifier(mods, size, capacity, AMD_FMT_MOD |
4498 AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S) |
4499 AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9));
4503 get_plane_modifiers(const struct amdgpu_device *adev, unsigned int plane_type, uint64_t **mods)
4505 uint64_t size = 0, capacity = 128;
4508 /* We have not hooked up any pre-GFX9 modifiers. */
4509 if (adev->family < AMDGPU_FAMILY_AI)
4512 *mods = kmalloc(capacity * sizeof(uint64_t), GFP_KERNEL);
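/* Cursor planes only get LINEAR; the INVALID entry terminates the list. */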
4514 if (plane_type == DRM_PLANE_TYPE_CURSOR) {
4515 add_modifier(mods, &size, &capacity, DRM_FORMAT_MOD_LINEAR);
4516 add_modifier(mods, &size, &capacity, DRM_FORMAT_MOD_INVALID);
4517 return *mods ? 0 : -ENOMEM;
4520 switch (adev->family) {
4521 case AMDGPU_FAMILY_AI:
4522 case AMDGPU_FAMILY_RV:
4523 add_gfx9_modifiers(adev, mods, &size, &capacity);
4525 case AMDGPU_FAMILY_NV:
4526 case AMDGPU_FAMILY_VGH:
4527 if (adev->asic_type >= CHIP_SIENNA_CICHLID)
4528 add_gfx10_3_modifiers(adev, mods, &size, &capacity);
4530 add_gfx10_1_modifiers(adev, mods, &size, &capacity);
4534 add_modifier(mods, &size, &capacity, DRM_FORMAT_MOD_LINEAR);
4536 /* INVALID marks the end of the list. */
4537 add_modifier(mods, &size, &capacity, DRM_FORMAT_MOD_INVALID);
4546 fill_gfx9_plane_attributes_from_modifiers(struct amdgpu_device *adev,
4547 const struct amdgpu_framebuffer *afb,
4548 const enum surface_pixel_format format,
4549 const enum dc_rotation_angle rotation,
4550 const struct plane_size *plane_size,
4551 union dc_tiling_info *tiling_info,
4552 struct dc_plane_dcc_param *dcc,
4553 struct dc_plane_address *address,
4554 const bool force_disable_dcc)
4556 const uint64_t modifier = afb->base.modifier;
4559 fill_gfx9_tiling_info_from_modifier(adev, tiling_info, modifier);
4560 tiling_info->gfx9.swizzle = modifier_gfx9_swizzle_mode(modifier);
4562 if (modifier_has_dcc(modifier) && !force_disable_dcc) {
4563 uint64_t dcc_address = afb->address + afb->base.offsets[1];
4566 dcc->meta_pitch = afb->base.pitches[1];
4567 dcc->independent_64b_blks = AMD_FMT_MOD_GET(DCC_INDEPENDENT_64B, modifier);
4569 address->grph.meta_addr.low_part = lower_32_bits(dcc_address);
4570 address->grph.meta_addr.high_part = upper_32_bits(dcc_address);
4573 ret = validate_dcc(adev, format, rotation, tiling_info, dcc, address, plane_size);
4581 fill_plane_buffer_attributes(struct amdgpu_device *adev,
4582 const struct amdgpu_framebuffer *afb,
4583 const enum surface_pixel_format format,
4584 const enum dc_rotation_angle rotation,
4585 const uint64_t tiling_flags,
4586 union dc_tiling_info *tiling_info,
4587 struct plane_size *plane_size,
4588 struct dc_plane_dcc_param *dcc,
4589 struct dc_plane_address *address,
4591 bool force_disable_dcc)
4593 const struct drm_framebuffer *fb = &afb->base;
4596 memset(tiling_info, 0, sizeof(*tiling_info));
4597 memset(plane_size, 0, sizeof(*plane_size));
4598 memset(dcc, 0, sizeof(*dcc));
4599 memset(address, 0, sizeof(*address));
4601 address->tmz_surface = tmz_surface;
4603 if (format < SURFACE_PIXEL_FORMAT_VIDEO_BEGIN) {
4604 uint64_t addr = afb->address + fb->offsets[0];
4606 plane_size->surface_size.x = 0;
4607 plane_size->surface_size.y = 0;
4608 plane_size->surface_size.width = fb->width;
4609 plane_size->surface_size.height = fb->height;
4610 plane_size->surface_pitch =
4611 fb->pitches[0] / fb->format->cpp[0];
4613 address->type = PLN_ADDR_TYPE_GRAPHICS;
4614 address->grph.addr.low_part = lower_32_bits(addr);
4615 address->grph.addr.high_part = upper_32_bits(addr);
4616 } else if (format < SURFACE_PIXEL_FORMAT_INVALID) {
4617 uint64_t luma_addr = afb->address + fb->offsets[0];
4618 uint64_t chroma_addr = afb->address + fb->offsets[1];
4620 plane_size->surface_size.x = 0;
4621 plane_size->surface_size.y = 0;
4622 plane_size->surface_size.width = fb->width;
4623 plane_size->surface_size.height = fb->height;
4624 plane_size->surface_pitch =
4625 fb->pitches[0] / fb->format->cpp[0];
4627 plane_size->chroma_size.x = 0;
4628 plane_size->chroma_size.y = 0;
4629 /* TODO: set these based on surface format */
4630 plane_size->chroma_size.width = fb->width / 2;
4631 plane_size->chroma_size.height = fb->height / 2;
4633 plane_size->chroma_pitch =
4634 fb->pitches[1] / fb->format->cpp[1];
4636 address->type = PLN_ADDR_TYPE_VIDEO_PROGRESSIVE;
4637 address->video_progressive.luma_addr.low_part =
4638 lower_32_bits(luma_addr);
4639 address->video_progressive.luma_addr.high_part =
4640 upper_32_bits(luma_addr);
4641 address->video_progressive.chroma_addr.low_part =
4642 lower_32_bits(chroma_addr);
4643 address->video_progressive.chroma_addr.high_part =
4644 upper_32_bits(chroma_addr);
4647 if (adev->family >= AMDGPU_FAMILY_AI) {
4648 ret = fill_gfx9_plane_attributes_from_modifiers(adev, afb, format,
4649 rotation, plane_size,
4656 fill_gfx8_tiling_info_from_flags(tiling_info, tiling_flags);
4663 fill_blending_from_plane_state(const struct drm_plane_state *plane_state,
4664 bool *per_pixel_alpha, bool *global_alpha,
4665 int *global_alpha_value)
4667 *per_pixel_alpha = false;
4668 *global_alpha = false;
4669 *global_alpha_value = 0xff;
4671 if (plane_state->plane->type != DRM_PLANE_TYPE_OVERLAY)
4674 if (plane_state->pixel_blend_mode == DRM_MODE_BLEND_PREMULTI) {
4675 static const uint32_t alpha_formats[] = {
4676 DRM_FORMAT_ARGB8888,
4677 DRM_FORMAT_RGBA8888,
4678 DRM_FORMAT_ABGR8888,
4680 uint32_t format = plane_state->fb->format->format;
4683 for (i = 0; i < ARRAY_SIZE(alpha_formats); ++i) {
4684 if (format == alpha_formats[i]) {
4685 *per_pixel_alpha = true;
4691 if (plane_state->alpha < 0xffff) {
4692 *global_alpha = true;
4693 *global_alpha_value = plane_state->alpha >> 8;
4698 fill_plane_color_attributes(const struct drm_plane_state *plane_state,
4699 const enum surface_pixel_format format,
4700 enum dc_color_space *color_space)
4704 *color_space = COLOR_SPACE_SRGB;
4706 /* DRM color properties only affect non-RGB formats. */
4707 if (format < SURFACE_PIXEL_FORMAT_VIDEO_BEGIN)
4710 full_range = (plane_state->color_range == DRM_COLOR_YCBCR_FULL_RANGE);
4712 switch (plane_state->color_encoding) {
4713 case DRM_COLOR_YCBCR_BT601:
4715 *color_space = COLOR_SPACE_YCBCR601;
4717 *color_space = COLOR_SPACE_YCBCR601_LIMITED;
4720 case DRM_COLOR_YCBCR_BT709:
4722 *color_space = COLOR_SPACE_YCBCR709;
4724 *color_space = COLOR_SPACE_YCBCR709_LIMITED;
4727 case DRM_COLOR_YCBCR_BT2020:
4729 *color_space = COLOR_SPACE_2020_YCBCR;
4742 fill_dc_plane_info_and_addr(struct amdgpu_device *adev,
4743 const struct drm_plane_state *plane_state,
4744 const uint64_t tiling_flags,
4745 struct dc_plane_info *plane_info,
4746 struct dc_plane_address *address,
4748 bool force_disable_dcc)
4750 const struct drm_framebuffer *fb = plane_state->fb;
4751 const struct amdgpu_framebuffer *afb =
4752 to_amdgpu_framebuffer(plane_state->fb);
4755 memset(plane_info, 0, sizeof(*plane_info));
4757 switch (fb->format->format) {
4759 plane_info->format =
4760 SURFACE_PIXEL_FORMAT_GRPH_PALETA_256_COLORS;
4762 case DRM_FORMAT_RGB565:
4763 plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_RGB565;
4765 case DRM_FORMAT_XRGB8888:
4766 case DRM_FORMAT_ARGB8888:
4767 plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ARGB8888;
4769 case DRM_FORMAT_XRGB2101010:
4770 case DRM_FORMAT_ARGB2101010:
4771 plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ARGB2101010;
4773 case DRM_FORMAT_XBGR2101010:
4774 case DRM_FORMAT_ABGR2101010:
4775 plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ABGR2101010;
4777 case DRM_FORMAT_XBGR8888:
4778 case DRM_FORMAT_ABGR8888:
4779 plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ABGR8888;
4781 case DRM_FORMAT_NV21:
4782 plane_info->format = SURFACE_PIXEL_FORMAT_VIDEO_420_YCbCr;
4784 case DRM_FORMAT_NV12:
4785 plane_info->format = SURFACE_PIXEL_FORMAT_VIDEO_420_YCrCb;
4787 case DRM_FORMAT_P010:
4788 plane_info->format = SURFACE_PIXEL_FORMAT_VIDEO_420_10bpc_YCrCb;
4790 case DRM_FORMAT_XRGB16161616F:
4791 case DRM_FORMAT_ARGB16161616F:
4792 plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ARGB16161616F;
4794 case DRM_FORMAT_XBGR16161616F:
4795 case DRM_FORMAT_ABGR16161616F:
4796 plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ABGR16161616F;
4800 "Unsupported screen format %p4cc\n",
4801 &fb->format->format);
4805 switch (plane_state->rotation & DRM_MODE_ROTATE_MASK) {
4806 case DRM_MODE_ROTATE_0:
4807 plane_info->rotation = ROTATION_ANGLE_0;
4809 case DRM_MODE_ROTATE_90:
4810 plane_info->rotation = ROTATION_ANGLE_90;
4812 case DRM_MODE_ROTATE_180:
4813 plane_info->rotation = ROTATION_ANGLE_180;
4815 case DRM_MODE_ROTATE_270:
4816 plane_info->rotation = ROTATION_ANGLE_270;
4819 plane_info->rotation = ROTATION_ANGLE_0;
4823 plane_info->visible = true;
4824 plane_info->stereo_format = PLANE_STEREO_FORMAT_NONE;
4826 plane_info->layer_index = 0;
4828 ret = fill_plane_color_attributes(plane_state, plane_info->format,
4829 &plane_info->color_space);
4833 ret = fill_plane_buffer_attributes(adev, afb, plane_info->format,
4834 plane_info->rotation, tiling_flags,
4835 &plane_info->tiling_info,
4836 &plane_info->plane_size,
4837 &plane_info->dcc, address, tmz_surface,
4842 fill_blending_from_plane_state(
4843 plane_state, &plane_info->per_pixel_alpha,
4844 &plane_info->global_alpha, &plane_info->global_alpha_value);
4849 static int fill_dc_plane_attributes(struct amdgpu_device *adev,
4850 struct dc_plane_state *dc_plane_state,
4851 struct drm_plane_state *plane_state,
4852 struct drm_crtc_state *crtc_state)
4854 struct dm_crtc_state *dm_crtc_state = to_dm_crtc_state(crtc_state);
4855 struct amdgpu_framebuffer *afb = (struct amdgpu_framebuffer *)plane_state->fb;
4856 struct dc_scaling_info scaling_info;
4857 struct dc_plane_info plane_info;
4859 bool force_disable_dcc = false;
4861 ret = fill_dc_scaling_info(plane_state, &scaling_info);
4865 dc_plane_state->src_rect = scaling_info.src_rect;
4866 dc_plane_state->dst_rect = scaling_info.dst_rect;
4867 dc_plane_state->clip_rect = scaling_info.clip_rect;
4868 dc_plane_state->scaling_quality = scaling_info.scaling_quality;
4870 force_disable_dcc = adev->asic_type == CHIP_RAVEN && adev->in_suspend;
4871 ret = fill_dc_plane_info_and_addr(adev, plane_state,
4874 &dc_plane_state->address,
4880 dc_plane_state->format = plane_info.format;
4881 dc_plane_state->color_space = plane_info.color_space;
4883 dc_plane_state->plane_size = plane_info.plane_size;
4884 dc_plane_state->rotation = plane_info.rotation;
4885 dc_plane_state->horizontal_mirror = plane_info.horizontal_mirror;
4886 dc_plane_state->stereo_format = plane_info.stereo_format;
4887 dc_plane_state->tiling_info = plane_info.tiling_info;
4888 dc_plane_state->visible = plane_info.visible;
4889 dc_plane_state->per_pixel_alpha = plane_info.per_pixel_alpha;
4890 dc_plane_state->global_alpha = plane_info.global_alpha;
4891 dc_plane_state->global_alpha_value = plane_info.global_alpha_value;
4892 dc_plane_state->dcc = plane_info.dcc;
4893 dc_plane_state->layer_index = plane_info.layer_index; // Always returns 0
4894 dc_plane_state->flip_int_enabled = true;
4897 * Always set the input transfer function, since the plane state is refreshed every time.
4900 ret = amdgpu_dm_update_plane_color_mgmt(dm_crtc_state, dc_plane_state);
4907 static void update_stream_scaling_settings(const struct drm_display_mode *mode,
4908 const struct dm_connector_state *dm_state,
4909 struct dc_stream_state *stream)
4911 enum amdgpu_rmx_type rmx_type;
4913 struct rect src = { 0 }; /* viewport in composition space */
4914 struct rect dst = { 0 }; /* stream addressable area */
4916 /* no mode. nothing to be done */
4920 /* Full screen scaling by default */
4921 src.width = mode->hdisplay;
4922 src.height = mode->vdisplay;
4923 dst.width = stream->timing.h_addressable;
4924 dst.height = stream->timing.v_addressable;
4927 rmx_type = dm_state->scaling;
4928 if (rmx_type == RMX_ASPECT || rmx_type == RMX_OFF) {
4929 if (src.width * dst.height <
4930 src.height * dst.width) {
4931 /* height needs less upscaling/more downscaling */
4932 dst.width = src.width *
4933 dst.height / src.height;
4935 /* width needs less upscaling/more downscaling */
4936 dst.height = src.height *
4937 dst.width / src.width;
4939 } else if (rmx_type == RMX_CENTER) {
4943 dst.x = (stream->timing.h_addressable - dst.width) / 2;
4944 dst.y = (stream->timing.v_addressable - dst.height) / 2;
4946 if (dm_state->underscan_enable) {
4947 dst.x += dm_state->underscan_hborder / 2;
4948 dst.y += dm_state->underscan_vborder / 2;
4949 dst.width -= dm_state->underscan_hborder;
4950 dst.height -= dm_state->underscan_vborder;
4957 DRM_DEBUG_KMS("Destination Rectangle x:%d y:%d width:%d height:%d\n",
4958 dst.x, dst.y, dst.width, dst.height);
4962 static enum dc_color_depth
4963 convert_color_depth_from_display_info(const struct drm_connector *connector,
4964 bool is_y420, int requested_bpc)
4971 /* Cap display bpc based on HDMI 2.0 HF-VSDB */
4972 if (connector->display_info.hdmi.y420_dc_modes & DRM_EDID_YCBCR420_DC_48)
4974 else if (connector->display_info.hdmi.y420_dc_modes & DRM_EDID_YCBCR420_DC_36)
4976 else if (connector->display_info.hdmi.y420_dc_modes & DRM_EDID_YCBCR420_DC_30)
4979 bpc = (uint8_t)connector->display_info.bpc;
4980 /* Assume 8 bpc by default if no bpc is specified. */
4981 bpc = bpc ? bpc : 8;
4984 if (requested_bpc > 0) {
4986 * Cap display bpc based on the user requested value.
4988 * The value for state->max_bpc may not be correctly updated
4989 * depending on when the connector gets added to the state
4990 * or if this was called outside of atomic check, so it
4991 * can't be used directly.
4993 bpc = min_t(u8, bpc, requested_bpc);
4995 /* Round down to the nearest even number. */
4996 bpc = bpc - (bpc & 1);
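/*
 * For example: a panel reporting 10 bpc with a requested max of 8 ends up
 * as min(10, 8) = 8, and an odd value such as 7 is rounded down to 6 here.
 */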
5002 * Temporary workaround: DRM doesn't parse color depth for
5003 * EDID revisions before 1.4
5004 * TODO: Fix edid parsing
5006 return COLOR_DEPTH_888;
5008 return COLOR_DEPTH_666;
5010 return COLOR_DEPTH_888;
5012 return COLOR_DEPTH_101010;
5014 return COLOR_DEPTH_121212;
5016 return COLOR_DEPTH_141414;
5018 return COLOR_DEPTH_161616;
5020 return COLOR_DEPTH_UNDEFINED;
5024 static enum dc_aspect_ratio
5025 get_aspect_ratio(const struct drm_display_mode *mode_in)
5027 /* 1-1 mapping, since both enums follow the HDMI spec. */
5028 return (enum dc_aspect_ratio) mode_in->picture_aspect_ratio;
5031 static enum dc_color_space
5032 get_output_color_space(const struct dc_crtc_timing *dc_crtc_timing)
5034 enum dc_color_space color_space = COLOR_SPACE_SRGB;
5036 switch (dc_crtc_timing->pixel_encoding) {
5037 case PIXEL_ENCODING_YCBCR422:
5038 case PIXEL_ENCODING_YCBCR444:
5039 case PIXEL_ENCODING_YCBCR420:
5042 * 27030 kHz is the separation point between HDTV and SDTV;
5043 * per the HDMI spec we use YCbCr709 and YCbCr601 respectively
5046 if (dc_crtc_timing->pix_clk_100hz > 270300) {
5047 if (dc_crtc_timing->flags.Y_ONLY)
5049 COLOR_SPACE_YCBCR709_LIMITED;
5051 color_space = COLOR_SPACE_YCBCR709;
5053 if (dc_crtc_timing->flags.Y_ONLY)
5055 COLOR_SPACE_YCBCR601_LIMITED;
5057 color_space = COLOR_SPACE_YCBCR601;
5062 case PIXEL_ENCODING_RGB:
5063 color_space = COLOR_SPACE_SRGB;
5074 static bool adjust_colour_depth_from_display_info(
5075 struct dc_crtc_timing *timing_out,
5076 const struct drm_display_info *info)
5078 enum dc_color_depth depth = timing_out->display_color_depth;
5081 normalized_clk = timing_out->pix_clk_100hz / 10;
5082 /* YCbCr 4:2:0 requires additional adjustment of 1/2 */
5083 if (timing_out->pixel_encoding == PIXEL_ENCODING_YCBCR420)
5084 normalized_clk /= 2;
5085 /* Adjust the pixel clock according to the HDMI spec, based on colour depth */
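/*
 * For example, a 600000 kHz pixel clock at 12 bpc becomes
 * 600000 * 36 / 24 = 900000 kHz, which must still fit under
 * info->max_tmds_clock for that depth to be kept.
 */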
5087 case COLOR_DEPTH_888:
5089 case COLOR_DEPTH_101010:
5090 normalized_clk = (normalized_clk * 30) / 24;
5092 case COLOR_DEPTH_121212:
5093 normalized_clk = (normalized_clk * 36) / 24;
5095 case COLOR_DEPTH_161616:
5096 normalized_clk = (normalized_clk * 48) / 24;
5099 /* The above depths are the only ones valid for HDMI. */
5102 if (normalized_clk <= info->max_tmds_clock) {
5103 timing_out->display_color_depth = depth;
5106 } while (--depth > COLOR_DEPTH_666);
5110 static void fill_stream_properties_from_drm_display_mode(
5111 struct dc_stream_state *stream,
5112 const struct drm_display_mode *mode_in,
5113 const struct drm_connector *connector,
5114 const struct drm_connector_state *connector_state,
5115 const struct dc_stream_state *old_stream,
5118 struct dc_crtc_timing *timing_out = &stream->timing;
5119 const struct drm_display_info *info = &connector->display_info;
5120 struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
5121 struct hdmi_vendor_infoframe hv_frame;
5122 struct hdmi_avi_infoframe avi_frame;
5124 memset(&hv_frame, 0, sizeof(hv_frame));
5125 memset(&avi_frame, 0, sizeof(avi_frame));
5127 timing_out->h_border_left = 0;
5128 timing_out->h_border_right = 0;
5129 timing_out->v_border_top = 0;
5130 timing_out->v_border_bottom = 0;
5131 /* TODO: un-hardcode */
5132 if (drm_mode_is_420_only(info, mode_in)
5133 && stream->signal == SIGNAL_TYPE_HDMI_TYPE_A)
5134 timing_out->pixel_encoding = PIXEL_ENCODING_YCBCR420;
5135 else if (drm_mode_is_420_also(info, mode_in)
5136 && aconnector->force_yuv420_output)
5137 timing_out->pixel_encoding = PIXEL_ENCODING_YCBCR420;
5138 else if ((connector->display_info.color_formats & DRM_COLOR_FORMAT_YCRCB444)
5139 && stream->signal == SIGNAL_TYPE_HDMI_TYPE_A)
5140 timing_out->pixel_encoding = PIXEL_ENCODING_YCBCR444;
5142 timing_out->pixel_encoding = PIXEL_ENCODING_RGB;
5144 timing_out->timing_3d_format = TIMING_3D_FORMAT_NONE;
5145 timing_out->display_color_depth = convert_color_depth_from_display_info(
5147 (timing_out->pixel_encoding == PIXEL_ENCODING_YCBCR420),
5149 timing_out->scan_type = SCANNING_TYPE_NODATA;
5150 timing_out->hdmi_vic = 0;
5153 timing_out->vic = old_stream->timing.vic;
5154 timing_out->flags.HSYNC_POSITIVE_POLARITY = old_stream->timing.flags.HSYNC_POSITIVE_POLARITY;
5155 timing_out->flags.VSYNC_POSITIVE_POLARITY = old_stream->timing.flags.VSYNC_POSITIVE_POLARITY;
5157 timing_out->vic = drm_match_cea_mode(mode_in);
5158 if (mode_in->flags & DRM_MODE_FLAG_PHSYNC)
5159 timing_out->flags.HSYNC_POSITIVE_POLARITY = 1;
5160 if (mode_in->flags & DRM_MODE_FLAG_PVSYNC)
5161 timing_out->flags.VSYNC_POSITIVE_POLARITY = 1;
5164 if (stream->signal == SIGNAL_TYPE_HDMI_TYPE_A) {
5165 drm_hdmi_avi_infoframe_from_display_mode(&avi_frame, (struct drm_connector *)connector, mode_in);
5166 timing_out->vic = avi_frame.video_code;
5167 drm_hdmi_vendor_infoframe_from_display_mode(&hv_frame, (struct drm_connector *)connector, mode_in);
5168 timing_out->hdmi_vic = hv_frame.vic;
5171 if (is_freesync_video_mode(mode_in, aconnector)) {
5172 timing_out->h_addressable = mode_in->hdisplay;
5173 timing_out->h_total = mode_in->htotal;
5174 timing_out->h_sync_width = mode_in->hsync_end - mode_in->hsync_start;
5175 timing_out->h_front_porch = mode_in->hsync_start - mode_in->hdisplay;
5176 timing_out->v_total = mode_in->vtotal;
5177 timing_out->v_addressable = mode_in->vdisplay;
5178 timing_out->v_front_porch = mode_in->vsync_start - mode_in->vdisplay;
5179 timing_out->v_sync_width = mode_in->vsync_end - mode_in->vsync_start;
5180 timing_out->pix_clk_100hz = mode_in->clock * 10;
5182 timing_out->h_addressable = mode_in->crtc_hdisplay;
5183 timing_out->h_total = mode_in->crtc_htotal;
5184 timing_out->h_sync_width = mode_in->crtc_hsync_end - mode_in->crtc_hsync_start;
5185 timing_out->h_front_porch = mode_in->crtc_hsync_start - mode_in->crtc_hdisplay;
5186 timing_out->v_total = mode_in->crtc_vtotal;
5187 timing_out->v_addressable = mode_in->crtc_vdisplay;
5188 timing_out->v_front_porch = mode_in->crtc_vsync_start - mode_in->crtc_vdisplay;
5189 timing_out->v_sync_width = mode_in->crtc_vsync_end - mode_in->crtc_vsync_start;
5190 timing_out->pix_clk_100hz = mode_in->crtc_clock * 10;
5193 timing_out->aspect_ratio = get_aspect_ratio(mode_in);
5195 stream->output_color_space = get_output_color_space(timing_out);
5197 stream->out_transfer_func->type = TF_TYPE_PREDEFINED;
5198 stream->out_transfer_func->tf = TRANSFER_FUNCTION_SRGB;
5199 if (stream->signal == SIGNAL_TYPE_HDMI_TYPE_A) {
5200 if (!adjust_colour_depth_from_display_info(timing_out, info) &&
5201 drm_mode_is_420_also(info, mode_in) &&
5202 timing_out->pixel_encoding != PIXEL_ENCODING_YCBCR420) {
5203 timing_out->pixel_encoding = PIXEL_ENCODING_YCBCR420;
5204 adjust_colour_depth_from_display_info(timing_out, info);
5209 static void fill_audio_info(struct audio_info *audio_info,
5210 const struct drm_connector *drm_connector,
5211 const struct dc_sink *dc_sink)
5214 int cea_revision = 0;
5215 const struct dc_edid_caps *edid_caps = &dc_sink->edid_caps;
5217 audio_info->manufacture_id = edid_caps->manufacturer_id;
5218 audio_info->product_id = edid_caps->product_id;
5220 cea_revision = drm_connector->display_info.cea_rev;
5222 strscpy(audio_info->display_name,
5223 edid_caps->display_name,
5224 AUDIO_INFO_DISPLAY_NAME_SIZE_IN_CHARS);
5226 if (cea_revision >= 3) {
5227 audio_info->mode_count = edid_caps->audio_mode_count;
5229 for (i = 0; i < audio_info->mode_count; ++i) {
5230 audio_info->modes[i].format_code =
5231 (enum audio_format_code)
5232 (edid_caps->audio_modes[i].format_code);
5233 audio_info->modes[i].channel_count =
5234 edid_caps->audio_modes[i].channel_count;
5235 audio_info->modes[i].sample_rates.all =
5236 edid_caps->audio_modes[i].sample_rate;
5237 audio_info->modes[i].sample_size =
5238 edid_caps->audio_modes[i].sample_size;
5242 audio_info->flags.all = edid_caps->speaker_flags;
5244 /* TODO: We only check for the progressive mode, check for interlace mode too */
5245 if (drm_connector->latency_present[0]) {
5246 audio_info->video_latency = drm_connector->video_latency[0];
5247 audio_info->audio_latency = drm_connector->audio_latency[0];
5250 /* TODO: For DP, video and audio latency should be calculated from DPCD caps */
5255 copy_crtc_timing_for_drm_display_mode(const struct drm_display_mode *src_mode,
5256 struct drm_display_mode *dst_mode)
5258 dst_mode->crtc_hdisplay = src_mode->crtc_hdisplay;
5259 dst_mode->crtc_vdisplay = src_mode->crtc_vdisplay;
5260 dst_mode->crtc_clock = src_mode->crtc_clock;
5261 dst_mode->crtc_hblank_start = src_mode->crtc_hblank_start;
5262 dst_mode->crtc_hblank_end = src_mode->crtc_hblank_end;
5263 dst_mode->crtc_hsync_start = src_mode->crtc_hsync_start;
5264 dst_mode->crtc_hsync_end = src_mode->crtc_hsync_end;
5265 dst_mode->crtc_htotal = src_mode->crtc_htotal;
5266 dst_mode->crtc_hskew = src_mode->crtc_hskew;
5267 dst_mode->crtc_vblank_start = src_mode->crtc_vblank_start;
5268 dst_mode->crtc_vblank_end = src_mode->crtc_vblank_end;
5269 dst_mode->crtc_vsync_start = src_mode->crtc_vsync_start;
5270 dst_mode->crtc_vsync_end = src_mode->crtc_vsync_end;
5271 dst_mode->crtc_vtotal = src_mode->crtc_vtotal;
5275 decide_crtc_timing_for_drm_display_mode(struct drm_display_mode *drm_mode,
5276 const struct drm_display_mode *native_mode,
5279 if (scale_enabled) {
5280 copy_crtc_timing_for_drm_display_mode(native_mode, drm_mode);
5281 } else if (native_mode->clock == drm_mode->clock &&
5282 native_mode->htotal == drm_mode->htotal &&
5283 native_mode->vtotal == drm_mode->vtotal) {
5284 copy_crtc_timing_for_drm_display_mode(native_mode, drm_mode);
5286 /* neither scaling nor an amdgpu-inserted mode, no need to patch */
5290 static struct dc_sink *
5291 create_fake_sink(struct amdgpu_dm_connector *aconnector)
5293 struct dc_sink_init_data sink_init_data = { 0 };
5294 struct dc_sink *sink = NULL;
5295 sink_init_data.link = aconnector->dc_link;
5296 sink_init_data.sink_signal = aconnector->dc_link->connector_signal;
5298 sink = dc_sink_create(&sink_init_data);
5300 DRM_ERROR("Failed to create sink!\n");
5303 sink->sink_signal = SIGNAL_TYPE_VIRTUAL;
5308 static void set_multisync_trigger_params(
5309 struct dc_stream_state *stream)
5311 struct dc_stream_state *master = NULL;
5313 if (stream->triggered_crtc_reset.enabled) {
5314 master = stream->triggered_crtc_reset.event_source;
5315 stream->triggered_crtc_reset.event =
5316 master->timing.flags.VSYNC_POSITIVE_POLARITY ?
5317 CRTC_EVENT_VSYNC_RISING : CRTC_EVENT_VSYNC_FALLING;
5318 stream->triggered_crtc_reset.delay = TRIGGER_DELAY_NEXT_PIXEL;
5322 static void set_master_stream(struct dc_stream_state *stream_set[],
5325 int j, highest_rfr = 0, master_stream = 0;
5327 for (j = 0; j < stream_count; j++) {
5328 if (stream_set[j] && stream_set[j]->triggered_crtc_reset.enabled) {
5329 int refresh_rate = 0;
5331 refresh_rate = (stream_set[j]->timing.pix_clk_100hz*100)/
5332 (stream_set[j]->timing.h_total*stream_set[j]->timing.v_total);
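/*
 * E.g. a 1080p60 stream: pix_clk_100hz = 1485000, h_total = 2200 and
 * v_total = 1125 give (1485000 * 100) / (2200 * 1125) = 60.
 */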
5333 if (refresh_rate > highest_rfr) {
5334 highest_rfr = refresh_rate;
5339 for (j = 0; j < stream_count; j++) {
5341 stream_set[j]->triggered_crtc_reset.event_source = stream_set[master_stream];
5345 static void dm_enable_per_frame_crtc_master_sync(struct dc_state *context)
5348 struct dc_stream_state *stream;
5350 if (context->stream_count < 2)
5352 for (i = 0; i < context->stream_count ; i++) {
5353 if (!context->streams[i])
5356 * TODO: add a function to read AMD VSDB bits and set
5357 * crtc_sync_master.multi_sync_enabled flag
5358 * For now it's set to false
5362 set_master_stream(context->streams, context->stream_count);
5364 for (i = 0; i < context->stream_count ; i++) {
5365 stream = context->streams[i];
5370 set_multisync_trigger_params(stream);
5374 static struct drm_display_mode *
5375 get_highest_refresh_rate_mode(struct amdgpu_dm_connector *aconnector,
5376 bool use_probed_modes)
5378 struct drm_display_mode *m, *m_pref = NULL;
5379 u16 current_refresh, highest_refresh;
5380 struct list_head *list_head = use_probed_modes ?
5381 &aconnector->base.probed_modes :
5382 &aconnector->base.modes;
5384 if (aconnector->freesync_vid_base.clock != 0)
5385 return &aconnector->freesync_vid_base;
5387 /* Find the preferred mode */
5388 list_for_each_entry (m, list_head, head) {
5389 if (m->type & DRM_MODE_TYPE_PREFERRED) {
5396 /* Probably an EDID with no preferred mode. Fall back to the first entry */
5397 m_pref = list_first_entry_or_null(
5398 &aconnector->base.modes, struct drm_display_mode, head);
5400 DRM_DEBUG_DRIVER("No preferred mode found in EDID\n");
5405 highest_refresh = drm_mode_vrefresh(m_pref);
5408 * Find the mode with the highest refresh rate at the same resolution.
5409 * For some monitors, the preferred mode is not the mode with the
5410 * highest supported refresh rate.
5412 list_for_each_entry (m, list_head, head) {
5413 current_refresh = drm_mode_vrefresh(m);
5415 if (m->hdisplay == m_pref->hdisplay &&
5416 m->vdisplay == m_pref->vdisplay &&
5417 highest_refresh < current_refresh) {
5418 highest_refresh = current_refresh;
5423 aconnector->freesync_vid_base = *m_pref;
5427 static bool is_freesync_video_mode(const struct drm_display_mode *mode,
5428 struct amdgpu_dm_connector *aconnector)
5430 struct drm_display_mode *high_mode;
5433 high_mode = get_highest_refresh_rate_mode(aconnector, false);
5434 if (!high_mode || !mode)
5437 timing_diff = high_mode->vtotal - mode->vtotal;
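/*
 * A freesync video mode should differ from the base mode only in how far
 * the vertical blanking is stretched, so every other timing field must
 * match and both vsync offsets must shift by exactly timing_diff.
 */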
5439 if (high_mode->clock == 0 || high_mode->clock != mode->clock ||
5440 high_mode->hdisplay != mode->hdisplay ||
5441 high_mode->vdisplay != mode->vdisplay ||
5442 high_mode->hsync_start != mode->hsync_start ||
5443 high_mode->hsync_end != mode->hsync_end ||
5444 high_mode->htotal != mode->htotal ||
5445 high_mode->hskew != mode->hskew ||
5446 high_mode->vscan != mode->vscan ||
5447 high_mode->vsync_start - mode->vsync_start != timing_diff ||
5448 high_mode->vsync_end - mode->vsync_end != timing_diff)
5454 static struct dc_stream_state *
5455 create_stream_for_sink(struct amdgpu_dm_connector *aconnector,
5456 const struct drm_display_mode *drm_mode,
5457 const struct dm_connector_state *dm_state,
5458 const struct dc_stream_state *old_stream,
5461 struct drm_display_mode *preferred_mode = NULL;
5462 struct drm_connector *drm_connector;
5463 const struct drm_connector_state *con_state =
5464 dm_state ? &dm_state->base : NULL;
5465 struct dc_stream_state *stream = NULL;
5466 struct drm_display_mode mode = *drm_mode;
5467 struct drm_display_mode saved_mode;
5468 struct drm_display_mode *freesync_mode = NULL;
5469 bool native_mode_found = false;
5470 bool recalculate_timing = dm_state ? (dm_state->scaling != RMX_OFF) : false;
5472 int preferred_refresh = 0;
5473 #if defined(CONFIG_DRM_AMD_DC_DCN)
5474 struct dsc_dec_dpcd_caps dsc_caps;
5475 uint32_t link_bandwidth_kbps;
5477 struct dc_sink *sink = NULL;
5479 memset(&saved_mode, 0, sizeof(saved_mode));
5481 if (aconnector == NULL) {
5482 DRM_ERROR("aconnector is NULL!\n");
5486 drm_connector = &aconnector->base;
5488 if (!aconnector->dc_sink) {
5489 sink = create_fake_sink(aconnector);
5493 sink = aconnector->dc_sink;
5494 dc_sink_retain(sink);
5497 stream = dc_create_stream_for_sink(sink);
5499 if (stream == NULL) {
5500 DRM_ERROR("Failed to create stream for sink!\n");
5504 stream->dm_stream_context = aconnector;
5506 stream->timing.flags.LTE_340MCSC_SCRAMBLE =
5507 drm_connector->display_info.hdmi.scdc.scrambling.low_rates;
5509 list_for_each_entry(preferred_mode, &aconnector->base.modes, head) {
5510 /* Search for preferred mode */
5511 if (preferred_mode->type & DRM_MODE_TYPE_PREFERRED) {
5512 native_mode_found = true;
5516 if (!native_mode_found)
5517 preferred_mode = list_first_entry_or_null(
5518 &aconnector->base.modes,
5519 struct drm_display_mode,
5522 mode_refresh = drm_mode_vrefresh(&mode);
5524 if (preferred_mode == NULL) {
5526 * This may not be an error: the use case is when we have no
5527 * usermode calls to reset and set the mode upon hotplug. In that
5528 * case, we call set mode ourselves to restore the previous mode,
5529 * and the mode list may not be filled in in time.
5531 DRM_DEBUG_DRIVER("No preferred mode found\n");
5533 recalculate_timing |= amdgpu_freesync_vid_mode &&
5534 is_freesync_video_mode(&mode, aconnector);
5535 if (recalculate_timing) {
5536 freesync_mode = get_highest_refresh_rate_mode(aconnector, false);
5538 mode = *freesync_mode;
5540 decide_crtc_timing_for_drm_display_mode(
5541 &mode, preferred_mode,
5542 dm_state ? (dm_state->scaling != RMX_OFF) : false);
5545 preferred_refresh = drm_mode_vrefresh(preferred_mode);
5548 if (recalculate_timing)
5549 drm_mode_set_crtcinfo(&saved_mode, 0);
5551 drm_mode_set_crtcinfo(&mode, 0);
5554 * If scaling is enabled and refresh rate didn't change
5555 * we copy the vic and polarities of the old timings
5557 if (!recalculate_timing || mode_refresh != preferred_refresh)
5558 fill_stream_properties_from_drm_display_mode(
5559 stream, &mode, &aconnector->base, con_state, NULL,
5562 fill_stream_properties_from_drm_display_mode(
5563 stream, &mode, &aconnector->base, con_state, old_stream,
5566 stream->timing.flags.DSC = 0;
5568 if (aconnector->dc_link && sink->sink_signal == SIGNAL_TYPE_DISPLAY_PORT) {
5569 #if defined(CONFIG_DRM_AMD_DC_DCN)
5570 dc_dsc_parse_dsc_dpcd(aconnector->dc_link->ctx->dc,
5571 aconnector->dc_link->dpcd_caps.dsc_caps.dsc_basic_caps.raw,
5572 aconnector->dc_link->dpcd_caps.dsc_caps.dsc_branch_decoder_caps.raw,
5574 link_bandwidth_kbps = dc_link_bandwidth_kbps(aconnector->dc_link,
5575 dc_link_get_link_cap(aconnector->dc_link));
5577 if (aconnector->dsc_settings.dsc_force_enable != DSC_CLK_FORCE_DISABLE && dsc_caps.is_dsc_supported) {
5578 /* Set DSC policy according to dsc_clock_en */
5579 dc_dsc_policy_set_enable_dsc_when_not_needed(
5580 aconnector->dsc_settings.dsc_force_enable == DSC_CLK_FORCE_ENABLE);
5582 if (dc_dsc_compute_config(aconnector->dc_link->ctx->dc->res_pool->dscs[0],
5584 aconnector->dc_link->ctx->dc->debug.dsc_min_slice_height_override,
5586 link_bandwidth_kbps,
5588 &stream->timing.dsc_cfg))
5589 stream->timing.flags.DSC = 1;
5590 /* Overwrite the stream flag if DSC is enabled through debugfs */
5591 if (aconnector->dsc_settings.dsc_force_enable == DSC_CLK_FORCE_ENABLE)
5592 stream->timing.flags.DSC = 1;
5594 if (stream->timing.flags.DSC && aconnector->dsc_settings.dsc_num_slices_h)
5595 stream->timing.dsc_cfg.num_slices_h = aconnector->dsc_settings.dsc_num_slices_h;
5597 if (stream->timing.flags.DSC && aconnector->dsc_settings.dsc_num_slices_v)
5598 stream->timing.dsc_cfg.num_slices_v = aconnector->dsc_settings.dsc_num_slices_v;
5600 if (stream->timing.flags.DSC && aconnector->dsc_settings.dsc_bits_per_pixel)
5601 stream->timing.dsc_cfg.bits_per_pixel = aconnector->dsc_settings.dsc_bits_per_pixel;
5606 update_stream_scaling_settings(&mode, dm_state, stream);
5609 &stream->audio_info,
5613 update_stream_signal(stream, sink);
5615 if (stream->signal == SIGNAL_TYPE_HDMI_TYPE_A)
5616 mod_build_hf_vsif_infopacket(stream, &stream->vsp_infopacket);
5618 if (stream->link->psr_settings.psr_feature_enabled) {
5620 // should decide whether the stream supports VSC SDP colorimetry
5621 // before building the VSC info packet
5623 stream->use_vsc_sdp_for_colorimetry = false;
5624 if (aconnector->dc_sink->sink_signal == SIGNAL_TYPE_DISPLAY_PORT_MST) {
5625 stream->use_vsc_sdp_for_colorimetry =
5626 aconnector->dc_sink->is_vsc_sdp_colorimetry_supported;
5628 if (stream->link->dpcd_caps.dprx_feature.bits.VSC_SDP_COLORIMETRY_SUPPORTED)
5629 stream->use_vsc_sdp_for_colorimetry = true;
5631 mod_build_vsc_infopacket(stream, &stream->vsc_infopacket);
5634 dc_sink_release(sink);
5639 static void amdgpu_dm_crtc_destroy(struct drm_crtc *crtc)
5641 drm_crtc_cleanup(crtc);
5645 static void dm_crtc_destroy_state(struct drm_crtc *crtc,
5646 struct drm_crtc_state *state)
5648 struct dm_crtc_state *cur = to_dm_crtc_state(state);
5650 /* TODO: Destroy dc_stream objects once the stream object is flattened */
5652 dc_stream_release(cur->stream);
5655 __drm_atomic_helper_crtc_destroy_state(state);
5661 static void dm_crtc_reset_state(struct drm_crtc *crtc)
5663 struct dm_crtc_state *state;
5666 dm_crtc_destroy_state(crtc, crtc->state);
5668 state = kzalloc(sizeof(*state), GFP_KERNEL);
5669 if (WARN_ON(!state))
5672 __drm_atomic_helper_crtc_reset(crtc, &state->base);
5675 static struct drm_crtc_state *
5676 dm_crtc_duplicate_state(struct drm_crtc *crtc)
5678 struct dm_crtc_state *state, *cur;
5680 cur = to_dm_crtc_state(crtc->state);
5682 if (WARN_ON(!crtc->state))
5685 state = kzalloc(sizeof(*state), GFP_KERNEL);
5689 __drm_atomic_helper_crtc_duplicate_state(crtc, &state->base);
5692 state->stream = cur->stream;
5693 dc_stream_retain(state->stream);
5696 state->active_planes = cur->active_planes;
5697 state->vrr_infopacket = cur->vrr_infopacket;
5698 state->abm_level = cur->abm_level;
5699 state->vrr_supported = cur->vrr_supported;
5700 state->freesync_config = cur->freesync_config;
5701 state->cm_has_degamma = cur->cm_has_degamma;
5702 state->cm_is_degamma_srgb = cur->cm_is_degamma_srgb;
5703 /* TODO: Duplicate dc_stream once the stream object is flattened */
5705 return &state->base;
5708 #ifdef CONFIG_DRM_AMD_SECURE_DISPLAY
5709 static int amdgpu_dm_crtc_late_register(struct drm_crtc *crtc)
5711 crtc_debugfs_init(crtc);
5717 static inline int dm_set_vupdate_irq(struct drm_crtc *crtc, bool enable)
5719 enum dc_irq_source irq_source;
5720 struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
5721 struct amdgpu_device *adev = drm_to_adev(crtc->dev);
5724 irq_source = IRQ_TYPE_VUPDATE + acrtc->otg_inst;
5726 rc = dc_interrupt_set(adev->dm.dc, irq_source, enable) ? 0 : -EBUSY;
5728 DRM_DEBUG_VBL("crtc %d - vupdate irq %sabling: r=%d\n",
5729 acrtc->crtc_id, enable ? "en" : "dis", rc);
5733 static inline int dm_set_vblank(struct drm_crtc *crtc, bool enable)
5735 enum dc_irq_source irq_source;
5736 struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
5737 struct amdgpu_device *adev = drm_to_adev(crtc->dev);
5738 struct dm_crtc_state *acrtc_state = to_dm_crtc_state(crtc->state);
5739 #if defined(CONFIG_DRM_AMD_DC_DCN)
5740 struct amdgpu_display_manager *dm = &adev->dm;
5741 unsigned long flags;
5746 /* vblank irq on -> Only need vupdate irq in vrr mode */
5747 if (amdgpu_dm_vrr_active(acrtc_state))
5748 rc = dm_set_vupdate_irq(crtc, true);
5750 /* vblank irq off -> vupdate irq off */
5751 rc = dm_set_vupdate_irq(crtc, false);
5757 irq_source = IRQ_TYPE_VBLANK + acrtc->otg_inst;
5759 if (!dc_interrupt_set(adev->dm.dc, irq_source, enable))
5762 if (amdgpu_in_reset(adev))
5765 #if defined(CONFIG_DRM_AMD_DC_DCN)
5766 spin_lock_irqsave(&dm->vblank_lock, flags);
5767 dm->vblank_workqueue->dm = dm;
5768 dm->vblank_workqueue->otg_inst = acrtc->otg_inst;
5769 dm->vblank_workqueue->enable = enable;
5770 spin_unlock_irqrestore(&dm->vblank_lock, flags);
5771 schedule_work(&dm->vblank_workqueue->mall_work);
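/*
 * The scheduled worker picks up the otg_inst/enable values recorded above
 * under dm->vblank_lock; the mall_work name suggests it also manages MALL
 * state on DCN, but the actual handling lives in the worker itself.
 */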
5777 static int dm_enable_vblank(struct drm_crtc *crtc)
5779 return dm_set_vblank(crtc, true);
5782 static void dm_disable_vblank(struct drm_crtc *crtc)
5784 dm_set_vblank(crtc, false);
5787 /* Implement only the options currently available for the driver */
5788 static const struct drm_crtc_funcs amdgpu_dm_crtc_funcs = {
5789 .reset = dm_crtc_reset_state,
5790 .destroy = amdgpu_dm_crtc_destroy,
5791 .set_config = drm_atomic_helper_set_config,
5792 .page_flip = drm_atomic_helper_page_flip,
5793 .atomic_duplicate_state = dm_crtc_duplicate_state,
5794 .atomic_destroy_state = dm_crtc_destroy_state,
5795 .set_crc_source = amdgpu_dm_crtc_set_crc_source,
5796 .verify_crc_source = amdgpu_dm_crtc_verify_crc_source,
5797 .get_crc_sources = amdgpu_dm_crtc_get_crc_sources,
5798 .get_vblank_counter = amdgpu_get_vblank_counter_kms,
5799 .enable_vblank = dm_enable_vblank,
5800 .disable_vblank = dm_disable_vblank,
5801 .get_vblank_timestamp = drm_crtc_vblank_helper_get_vblank_timestamp,
5802 #if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
5803 .late_register = amdgpu_dm_crtc_late_register,
5807 static enum drm_connector_status
5808 amdgpu_dm_connector_detect(struct drm_connector *connector, bool force)
5811 struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
5815 * 1. This interface is NOT called in context of HPD irq.
5816 * 2. This interface *is* called in the context of a user-mode ioctl,
5817 * which makes it a bad place for *any* MST-related activity.
5820 if (aconnector->base.force == DRM_FORCE_UNSPECIFIED &&
5821 !aconnector->fake_enable)
5822 connected = (aconnector->dc_sink != NULL);
5824 connected = (aconnector->base.force == DRM_FORCE_ON);
5826 update_subconnector_property(aconnector);
5828 return (connected ? connector_status_connected :
5829 connector_status_disconnected);
5832 int amdgpu_dm_connector_atomic_set_property(struct drm_connector *connector,
5833 struct drm_connector_state *connector_state,
5834 struct drm_property *property,
5837 struct drm_device *dev = connector->dev;
5838 struct amdgpu_device *adev = drm_to_adev(dev);
5839 struct dm_connector_state *dm_old_state =
5840 to_dm_connector_state(connector->state);
5841 struct dm_connector_state *dm_new_state =
5842 to_dm_connector_state(connector_state);
5846 if (property == dev->mode_config.scaling_mode_property) {
5847 enum amdgpu_rmx_type rmx_type;
5850 case DRM_MODE_SCALE_CENTER:
5851 rmx_type = RMX_CENTER;
5853 case DRM_MODE_SCALE_ASPECT:
5854 rmx_type = RMX_ASPECT;
5856 case DRM_MODE_SCALE_FULLSCREEN:
5857 rmx_type = RMX_FULL;
5859 case DRM_MODE_SCALE_NONE:
5865 if (dm_old_state->scaling == rmx_type)
5868 dm_new_state->scaling = rmx_type;
5870 } else if (property == adev->mode_info.underscan_hborder_property) {
5871 dm_new_state->underscan_hborder = val;
5873 } else if (property == adev->mode_info.underscan_vborder_property) {
5874 dm_new_state->underscan_vborder = val;
5876 } else if (property == adev->mode_info.underscan_property) {
5877 dm_new_state->underscan_enable = val;
5879 } else if (property == adev->mode_info.abm_level_property) {
5880 dm_new_state->abm_level = val;
5887 int amdgpu_dm_connector_atomic_get_property(struct drm_connector *connector,
5888 const struct drm_connector_state *state,
5889 struct drm_property *property,
5892 struct drm_device *dev = connector->dev;
5893 struct amdgpu_device *adev = drm_to_adev(dev);
5894 struct dm_connector_state *dm_state =
5895 to_dm_connector_state(state);
5898 if (property == dev->mode_config.scaling_mode_property) {
5899 switch (dm_state->scaling) {
5901 *val = DRM_MODE_SCALE_CENTER;
5904 *val = DRM_MODE_SCALE_ASPECT;
5907 *val = DRM_MODE_SCALE_FULLSCREEN;
5911 *val = DRM_MODE_SCALE_NONE;
5915 } else if (property == adev->mode_info.underscan_hborder_property) {
5916 *val = dm_state->underscan_hborder;
5918 } else if (property == adev->mode_info.underscan_vborder_property) {
5919 *val = dm_state->underscan_vborder;
5921 } else if (property == adev->mode_info.underscan_property) {
5922 *val = dm_state->underscan_enable;
5924 } else if (property == adev->mode_info.abm_level_property) {
5925 *val = dm_state->abm_level;
5932 static void amdgpu_dm_connector_unregister(struct drm_connector *connector)
5934 struct amdgpu_dm_connector *amdgpu_dm_connector = to_amdgpu_dm_connector(connector);
5936 drm_dp_aux_unregister(&amdgpu_dm_connector->dm_dp_aux.aux);
5939 static void amdgpu_dm_connector_destroy(struct drm_connector *connector)
5941 struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
5942 const struct dc_link *link = aconnector->dc_link;
5943 struct amdgpu_device *adev = drm_to_adev(connector->dev);
5944 struct amdgpu_display_manager *dm = &adev->dm;
5947 * Call only if mst_mgr was initialized before, since it's not done
5948 * for all connector types.
5950 if (aconnector->mst_mgr.dev)
5951 drm_dp_mst_topology_mgr_destroy(&aconnector->mst_mgr);
5953 #if defined(CONFIG_BACKLIGHT_CLASS_DEVICE) ||\
5954 defined(CONFIG_BACKLIGHT_CLASS_DEVICE_MODULE)
5956 if ((link->connector_signal & (SIGNAL_TYPE_EDP | SIGNAL_TYPE_LVDS)) &&
5957 link->type != dc_connection_none &&
5958 dm->backlight_dev) {
5959 backlight_device_unregister(dm->backlight_dev);
5960 dm->backlight_dev = NULL;
5964 if (aconnector->dc_em_sink)
5965 dc_sink_release(aconnector->dc_em_sink);
5966 aconnector->dc_em_sink = NULL;
5967 if (aconnector->dc_sink)
5968 dc_sink_release(aconnector->dc_sink);
5969 aconnector->dc_sink = NULL;
5971 drm_dp_cec_unregister_connector(&aconnector->dm_dp_aux.aux);
5972 drm_connector_unregister(connector);
5973 drm_connector_cleanup(connector);
5974 if (aconnector->i2c) {
5975 i2c_del_adapter(&aconnector->i2c->base);
5976 kfree(aconnector->i2c);
5978 kfree(aconnector->dm_dp_aux.aux.name);
5983 void amdgpu_dm_connector_funcs_reset(struct drm_connector *connector)
5985 struct dm_connector_state *state =
5986 to_dm_connector_state(connector->state);
5988 if (connector->state)
5989 __drm_atomic_helper_connector_destroy_state(connector->state);
5993 state = kzalloc(sizeof(*state), GFP_KERNEL);
5996 state->scaling = RMX_OFF;
5997 state->underscan_enable = false;
5998 state->underscan_hborder = 0;
5999 state->underscan_vborder = 0;
6000 state->base.max_requested_bpc = 8;
6001 state->vcpi_slots = 0;
6003 if (connector->connector_type == DRM_MODE_CONNECTOR_eDP)
6004 state->abm_level = amdgpu_dm_abm_level;
6006 __drm_atomic_helper_connector_reset(connector, &state->base);
6010 struct drm_connector_state *
6011 amdgpu_dm_connector_atomic_duplicate_state(struct drm_connector *connector)
6013 struct dm_connector_state *state =
6014 to_dm_connector_state(connector->state);
6016 struct dm_connector_state *new_state =
6017 kmemdup(state, sizeof(*state), GFP_KERNEL);
6022 __drm_atomic_helper_connector_duplicate_state(connector, &new_state->base);
6024 new_state->freesync_capable = state->freesync_capable;
6025 new_state->abm_level = state->abm_level;
6026 new_state->scaling = state->scaling;
6027 new_state->underscan_enable = state->underscan_enable;
6028 new_state->underscan_hborder = state->underscan_hborder;
6029 new_state->underscan_vborder = state->underscan_vborder;
6030 new_state->vcpi_slots = state->vcpi_slots;
6031 new_state->pbn = state->pbn;
6032 return &new_state->base;
6036 amdgpu_dm_connector_late_register(struct drm_connector *connector)
6038 struct amdgpu_dm_connector *amdgpu_dm_connector =
6039 to_amdgpu_dm_connector(connector);
6042 if ((connector->connector_type == DRM_MODE_CONNECTOR_DisplayPort) ||
6043 (connector->connector_type == DRM_MODE_CONNECTOR_eDP)) {
6044 amdgpu_dm_connector->dm_dp_aux.aux.dev = connector->kdev;
6045 r = drm_dp_aux_register(&amdgpu_dm_connector->dm_dp_aux.aux);
6050 #if defined(CONFIG_DEBUG_FS)
6051 connector_debugfs_init(amdgpu_dm_connector);
6057 static const struct drm_connector_funcs amdgpu_dm_connector_funcs = {
6058 .reset = amdgpu_dm_connector_funcs_reset,
6059 .detect = amdgpu_dm_connector_detect,
6060 .fill_modes = drm_helper_probe_single_connector_modes,
6061 .destroy = amdgpu_dm_connector_destroy,
6062 .atomic_duplicate_state = amdgpu_dm_connector_atomic_duplicate_state,
6063 .atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
6064 .atomic_set_property = amdgpu_dm_connector_atomic_set_property,
6065 .atomic_get_property = amdgpu_dm_connector_atomic_get_property,
6066 .late_register = amdgpu_dm_connector_late_register,
6067 .early_unregister = amdgpu_dm_connector_unregister
6070 static int get_modes(struct drm_connector *connector)
6072 return amdgpu_dm_connector_get_modes(connector);
6075 static void create_eml_sink(struct amdgpu_dm_connector *aconnector)
6077 struct dc_sink_init_data init_params = {
6078 .link = aconnector->dc_link,
6079 .sink_signal = SIGNAL_TYPE_VIRTUAL
6083 if (!aconnector->base.edid_blob_ptr) {
6084 DRM_ERROR("No EDID firmware found on connector: %s, forcing to OFF!\n",
6085 aconnector->base.name);
6087 aconnector->base.force = DRM_FORCE_OFF;
6088 aconnector->base.override_edid = false;
6092 edid = (struct edid *) aconnector->base.edid_blob_ptr->data;
6094 aconnector->edid = edid;
6096 aconnector->dc_em_sink = dc_link_add_remote_sink(
6097 aconnector->dc_link,
6099 (edid->extensions + 1) * EDID_LENGTH,
6102 if (aconnector->base.force == DRM_FORCE_ON) {
6103 aconnector->dc_sink = aconnector->dc_link->local_sink ?
6104 aconnector->dc_link->local_sink :
6105 aconnector->dc_em_sink;
6106 dc_sink_retain(aconnector->dc_sink);
6110 static void handle_edid_mgmt(struct amdgpu_dm_connector *aconnector)
6112 struct dc_link *link = (struct dc_link *)aconnector->dc_link;
6115 * In case of a headless boot with force on for a DP managed connector,
6116 * those settings have to be != 0 to get an initial modeset
6118 if (link->connector_signal == SIGNAL_TYPE_DISPLAY_PORT) {
6119 link->verified_link_cap.lane_count = LANE_COUNT_FOUR;
6120 link->verified_link_cap.link_rate = LINK_RATE_HIGH2;
6124 aconnector->base.override_edid = true;
6125 create_eml_sink(aconnector);
6128 static struct dc_stream_state *
6129 create_validate_stream_for_sink(struct amdgpu_dm_connector *aconnector,
6130 const struct drm_display_mode *drm_mode,
6131 const struct dm_connector_state *dm_state,
6132 const struct dc_stream_state *old_stream)
6134 struct drm_connector *connector = &aconnector->base;
6135 struct amdgpu_device *adev = drm_to_adev(connector->dev);
6136 struct dc_stream_state *stream;
6137 const struct drm_connector_state *drm_state = dm_state ? &dm_state->base : NULL;
6138 int requested_bpc = drm_state ? drm_state->max_requested_bpc : 8;
6139 enum dc_status dc_result = DC_OK;
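/*
 * Start at the depth requested via the connector state (8 bpc if there
 * is no state) and, if DC rejects the stream, retry in steps of 2 bpc
 * down to a floor of 6 bpc before giving up.
 */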
6142 stream = create_stream_for_sink(aconnector, drm_mode,
6143 dm_state, old_stream,
6145 if (stream == NULL) {
6146 DRM_ERROR("Failed to create stream for sink!\n");
6150 dc_result = dc_validate_stream(adev->dm.dc, stream);
6152 if (dc_result != DC_OK) {
6153 DRM_DEBUG_KMS("Mode %dx%d (clk %d) failed DC validation with error %d (%s)\n",
6158 dc_status_to_str(dc_result));
6160 dc_stream_release(stream);
6162 requested_bpc -= 2; /* lower bpc to retry validation */
6165 } while (stream == NULL && requested_bpc >= 6);
6167 if (dc_result == DC_FAIL_ENC_VALIDATE && !aconnector->force_yuv420_output) {
6168 DRM_DEBUG_KMS("Retry forcing YCbCr420 encoding\n");
6170 aconnector->force_yuv420_output = true;
6171 stream = create_validate_stream_for_sink(aconnector, drm_mode,
6172 dm_state, old_stream);
6173 aconnector->force_yuv420_output = false;
6179 enum drm_mode_status amdgpu_dm_connector_mode_valid(struct drm_connector *connector,
6180 struct drm_display_mode *mode)
6182 int result = MODE_ERROR;
6183 struct dc_sink *dc_sink;
6184 /* TODO: Unhardcode stream count */
6185 struct dc_stream_state *stream;
6186 struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
6188 if ((mode->flags & DRM_MODE_FLAG_INTERLACE) ||
6189 (mode->flags & DRM_MODE_FLAG_DBLSCAN))
6193 * Only run this the first time mode_valid is called to initialize
6196 if (aconnector->base.force != DRM_FORCE_UNSPECIFIED &&
6197 !aconnector->dc_em_sink)
6198 handle_edid_mgmt(aconnector);
6200 dc_sink = to_amdgpu_dm_connector(connector)->dc_sink;
6202 if (dc_sink == NULL && aconnector->base.force != DRM_FORCE_ON_DIGITAL &&
6203 aconnector->base.force != DRM_FORCE_ON) {
6204 DRM_ERROR("dc_sink is NULL!\n");
6208 stream = create_validate_stream_for_sink(aconnector, mode, NULL, NULL);
6210 dc_stream_release(stream);
6215 /* TODO: error handling */
6219 static int fill_hdr_info_packet(const struct drm_connector_state *state,
6220 struct dc_info_packet *out)
6222 struct hdmi_drm_infoframe frame;
6223 unsigned char buf[30]; /* 26 + 4 */
6227 memset(out, 0, sizeof(*out));
6229 if (!state->hdr_output_metadata)
6232 ret = drm_hdmi_infoframe_set_hdr_metadata(&frame, state);
6236 len = hdmi_drm_infoframe_pack_only(&frame, buf, sizeof(buf));
6240 /* Static metadata is a fixed 26 bytes + 4 byte header. */
6244 /* Prepare the infopacket for DC. */
6245 switch (state->connector->connector_type) {
6246 case DRM_MODE_CONNECTOR_HDMIA:
6247 out->hb0 = 0x87; /* type */
6248 out->hb1 = 0x01; /* version */
6249 out->hb2 = 0x1A; /* length */
6250 out->sb[0] = buf[3]; /* checksum */
6254 case DRM_MODE_CONNECTOR_DisplayPort:
6255 case DRM_MODE_CONNECTOR_eDP:
6256 out->hb0 = 0x00; /* sdp id, zero */
6257 out->hb1 = 0x87; /* type */
6258 out->hb2 = 0x1D; /* payload len - 1 */
6259 out->hb3 = (0x13 << 2); /* sdp version */
6260 out->sb[0] = 0x01; /* version */
6261 out->sb[1] = 0x1A; /* length */
6269 memcpy(&out->sb[i], &buf[4], 26);
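/*
 * buf[] as packed by hdmi_drm_infoframe_pack_only() is a 4-byte header
 * (type, version, length, checksum) followed by the 26-byte static
 * metadata payload; only the payload is copied here, after the
 * connector-specific leading bytes set up above.
 */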
6272 print_hex_dump(KERN_DEBUG, "HDR SB:", DUMP_PREFIX_NONE, 16, 1, out->sb,
6273 sizeof(out->sb), false);
6279 is_hdr_metadata_different(const struct drm_connector_state *old_state,
6280 const struct drm_connector_state *new_state)
6282 struct drm_property_blob *old_blob = old_state->hdr_output_metadata;
6283 struct drm_property_blob *new_blob = new_state->hdr_output_metadata;
6285 if (old_blob != new_blob) {
6286 if (old_blob && new_blob &&
6287 old_blob->length == new_blob->length)
6288 return memcmp(old_blob->data, new_blob->data,
6298 amdgpu_dm_connector_atomic_check(struct drm_connector *conn,
6299 struct drm_atomic_state *state)
6301 struct drm_connector_state *new_con_state =
6302 drm_atomic_get_new_connector_state(state, conn);
6303 struct drm_connector_state *old_con_state =
6304 drm_atomic_get_old_connector_state(state, conn);
6305 struct drm_crtc *crtc = new_con_state->crtc;
6306 struct drm_crtc_state *new_crtc_state;
6309 trace_amdgpu_dm_connector_atomic_check(new_con_state);
6314 if (is_hdr_metadata_different(old_con_state, new_con_state)) {
6315 struct dc_info_packet hdr_infopacket;
6317 ret = fill_hdr_info_packet(new_con_state, &hdr_infopacket);
6321 new_crtc_state = drm_atomic_get_crtc_state(state, crtc);
6322 if (IS_ERR(new_crtc_state))
6323 return PTR_ERR(new_crtc_state);
6326 * DC considers the stream backends changed if the
6327 * static metadata changes. Forcing the modeset also
6328 * gives a simple way for userspace to switch from
6329 * 8bpc to 10bpc when setting the metadata to enter
6332 * Changing the static metadata after it's been
6333 * set is permissible, however. So only force a
6334 * modeset if we're entering or exiting HDR.
6336 new_crtc_state->mode_changed =
6337 !old_con_state->hdr_output_metadata ||
6338 !new_con_state->hdr_output_metadata;
6344 static const struct drm_connector_helper_funcs
6345 amdgpu_dm_connector_helper_funcs = {
6347 * If hotplugging a second, bigger display in FB console mode, bigger resolution
6348 * modes will be filtered by drm_mode_validate_size(), and those modes
6349 * are missing after the user starts lightdm. So we need to renew the modes list
6350 * in the get_modes callback, not just return the modes count.
6352 .get_modes = get_modes,
6353 .mode_valid = amdgpu_dm_connector_mode_valid,
6354 .atomic_check = amdgpu_dm_connector_atomic_check,
6357 static void dm_crtc_helper_disable(struct drm_crtc *crtc)
6361 static int count_crtc_active_planes(struct drm_crtc_state *new_crtc_state)
6363 struct drm_atomic_state *state = new_crtc_state->state;
6364 struct drm_plane *plane;
6367 drm_for_each_plane_mask(plane, state->dev, new_crtc_state->plane_mask) {
6368 struct drm_plane_state *new_plane_state;
6370 /* Cursor planes are "fake". */
6371 if (plane->type == DRM_PLANE_TYPE_CURSOR)
6374 new_plane_state = drm_atomic_get_new_plane_state(state, plane);
6376 if (!new_plane_state) {
6378 * The plane is enabled on the CRTC and hasn't changed
6379 * state. This means that it previously passed
6380 * validation and is therefore enabled.
6386 /* We need a framebuffer to be considered enabled. */
6387 num_active += (new_plane_state->fb != NULL);
6393 static void dm_update_crtc_active_planes(struct drm_crtc *crtc,
6394 struct drm_crtc_state *new_crtc_state)
6396 struct dm_crtc_state *dm_new_crtc_state =
6397 to_dm_crtc_state(new_crtc_state);
6399 dm_new_crtc_state->active_planes = 0;
6401 if (!dm_new_crtc_state->stream)
6404 dm_new_crtc_state->active_planes =
6405 count_crtc_active_planes(new_crtc_state);
6408 static int dm_crtc_helper_atomic_check(struct drm_crtc *crtc,
6409 struct drm_atomic_state *state)
6411 struct drm_crtc_state *crtc_state = drm_atomic_get_new_crtc_state(state,
6413 struct amdgpu_device *adev = drm_to_adev(crtc->dev);
6414 struct dc *dc = adev->dm.dc;
6415 struct dm_crtc_state *dm_crtc_state = to_dm_crtc_state(crtc_state);
6418 trace_amdgpu_dm_crtc_atomic_check(crtc_state);
6420 dm_update_crtc_active_planes(crtc, crtc_state);
6422 if (unlikely(!dm_crtc_state->stream &&
6423 modeset_required(crtc_state, NULL, dm_crtc_state->stream))) {
6429 * We require the primary plane to be enabled whenever the CRTC is, otherwise
6430 * drm_mode_cursor_universal may end up trying to enable the cursor plane while all other
6431 * planes are disabled, which is not supported by the hardware. And there is legacy
6432 * userspace which stops using the HW cursor altogether in response to the resulting EINVAL.
6434 if (crtc_state->enable &&
6435 !(crtc_state->plane_mask & drm_plane_mask(crtc->primary))) {
6436 DRM_DEBUG_ATOMIC("Can't enable a CRTC without enabling the primary plane\n");
6440 /* In some use cases, like reset, no stream is attached */
6441 if (!dm_crtc_state->stream)
6444 if (dc_validate_stream(dc, dm_crtc_state->stream) == DC_OK)
6447 DRM_DEBUG_ATOMIC("Failed DC stream validation\n");
6451 static bool dm_crtc_helper_mode_fixup(struct drm_crtc *crtc,
6452 const struct drm_display_mode *mode,
6453 struct drm_display_mode *adjusted_mode)
6458 static const struct drm_crtc_helper_funcs amdgpu_dm_crtc_helper_funcs = {
6459 .disable = dm_crtc_helper_disable,
6460 .atomic_check = dm_crtc_helper_atomic_check,
6461 .mode_fixup = dm_crtc_helper_mode_fixup,
6462 .get_scanout_position = amdgpu_crtc_get_scanout_position,
6465 static void dm_encoder_helper_disable(struct drm_encoder *encoder)
6470 static int convert_dc_color_depth_into_bpc(enum dc_color_depth display_color_depth)
6472 switch (display_color_depth) {
6473 case COLOR_DEPTH_666:
6475 case COLOR_DEPTH_888:
6477 case COLOR_DEPTH_101010:
6479 case COLOR_DEPTH_121212:
6481 case COLOR_DEPTH_141414:
6483 case COLOR_DEPTH_161616:
6491 static int dm_encoder_helper_atomic_check(struct drm_encoder *encoder,
6492 struct drm_crtc_state *crtc_state,
6493 struct drm_connector_state *conn_state)
6495 struct drm_atomic_state *state = crtc_state->state;
6496 struct drm_connector *connector = conn_state->connector;
6497 struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
6498 struct dm_connector_state *dm_new_connector_state = to_dm_connector_state(conn_state);
6499 const struct drm_display_mode *adjusted_mode = &crtc_state->adjusted_mode;
6500 struct drm_dp_mst_topology_mgr *mst_mgr;
6501 struct drm_dp_mst_port *mst_port;
6502 enum dc_color_depth color_depth;
6504 bool is_y420 = false;
6506 if (!aconnector->port || !aconnector->dc_sink)
6509 mst_port = aconnector->port;
6510 mst_mgr = &aconnector->mst_port->mst_mgr;
6512 if (!crtc_state->connectors_changed && !crtc_state->mode_changed)
6515 if (!state->duplicated) {
6516 int max_bpc = conn_state->max_requested_bpc;
6517 is_y420 = drm_mode_is_420_also(&connector->display_info, adjusted_mode) &&
6518 aconnector->force_yuv420_output;
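/*
 * Derive the stream's bandwidth requirement: convert_dc_color_depth_into_bpc()
 * returns bits per component (x3 components = bpp), and drm_dp_calc_pbn_mode()
 * turns the pixel clock (kHz) and bpp into a Payload Bandwidth Number, the
 * DP MST unit used for the VC time-slot allocation below.
 */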
6519 color_depth = convert_color_depth_from_display_info(connector,
6522 bpp = convert_dc_color_depth_into_bpc(color_depth) * 3;
6523 clock = adjusted_mode->clock;
6524 dm_new_connector_state->pbn = drm_dp_calc_pbn_mode(clock, bpp, false);
6526 dm_new_connector_state->vcpi_slots = drm_dp_atomic_find_vcpi_slots(state,
6529 dm_new_connector_state->pbn,
6530 dm_mst_get_pbn_divider(aconnector->dc_link));
6531 if (dm_new_connector_state->vcpi_slots < 0) {
6532 DRM_DEBUG_ATOMIC("failed finding vcpi slots: %d\n", (int)dm_new_connector_state->vcpi_slots);
6533 return dm_new_connector_state->vcpi_slots;
6538 const struct drm_encoder_helper_funcs amdgpu_dm_encoder_helper_funcs = {
6539 .disable = dm_encoder_helper_disable,
6540 .atomic_check = dm_encoder_helper_atomic_check
6543 #if defined(CONFIG_DRM_AMD_DC_DCN)
6544 static int dm_update_mst_vcpi_slots_for_dsc(struct drm_atomic_state *state,
6545 struct dc_state *dc_state)
6547 struct dc_stream_state *stream = NULL;
6548 struct drm_connector *connector;
6549 struct drm_connector_state *new_con_state, *old_con_state;
6550 struct amdgpu_dm_connector *aconnector;
6551 struct dm_connector_state *dm_conn_state;
6552 int i, j, clock, bpp;
6553 int vcpi, pbn_div, pbn = 0;
6555 for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) {
6557 aconnector = to_amdgpu_dm_connector(connector);
6559 if (!aconnector->port)
6562 if (!new_con_state || !new_con_state->crtc)
6565 dm_conn_state = to_dm_connector_state(new_con_state);
6567 for (j = 0; j < dc_state->stream_count; j++) {
6568 stream = dc_state->streams[j];
6572 if ((struct amdgpu_dm_connector *)stream->dm_stream_context == aconnector)
6581 if (stream->timing.flags.DSC != 1) {
6582 drm_dp_mst_atomic_enable_dsc(state,
6590 pbn_div = dm_mst_get_pbn_divider(stream->link);
6591 bpp = stream->timing.dsc_cfg.bits_per_pixel;
6592 clock = stream->timing.pix_clk_100hz / 10;
6593 pbn = drm_dp_calc_pbn_mode(clock, bpp, true);
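/*
 * The PBN above is recomputed with the DSC variant of the calculation
 * because with DSC the stream's bits_per_pixel is the compressed rate;
 * the new value is then used when (re)allocating the VC payload below.
 */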
6594 vcpi = drm_dp_mst_atomic_enable_dsc(state,
6601 dm_conn_state->pbn = pbn;
6602 dm_conn_state->vcpi_slots = vcpi;
6608 static void dm_drm_plane_reset(struct drm_plane *plane)
6610 struct dm_plane_state *amdgpu_state = NULL;
6613 plane->funcs->atomic_destroy_state(plane, plane->state);
6615 amdgpu_state = kzalloc(sizeof(*amdgpu_state), GFP_KERNEL);
6616 WARN_ON(amdgpu_state == NULL);
6619 __drm_atomic_helper_plane_reset(plane, &amdgpu_state->base);
6622 static struct drm_plane_state *
6623 dm_drm_plane_duplicate_state(struct drm_plane *plane)
6625 struct dm_plane_state *dm_plane_state, *old_dm_plane_state;
6627 old_dm_plane_state = to_dm_plane_state(plane->state);
6628 dm_plane_state = kzalloc(sizeof(*dm_plane_state), GFP_KERNEL);
6629 if (!dm_plane_state)
6632 __drm_atomic_helper_plane_duplicate_state(plane, &dm_plane_state->base);
6634 if (old_dm_plane_state->dc_state) {
6635 dm_plane_state->dc_state = old_dm_plane_state->dc_state;
6636 dc_plane_state_retain(dm_plane_state->dc_state);
6639 return &dm_plane_state->base;
6642 static void dm_drm_plane_destroy_state(struct drm_plane *plane,
6643 struct drm_plane_state *state)
6645 struct dm_plane_state *dm_plane_state = to_dm_plane_state(state);
6647 if (dm_plane_state->dc_state)
6648 dc_plane_state_release(dm_plane_state->dc_state);
6650 drm_atomic_helper_plane_destroy_state(plane, state);
6653 static const struct drm_plane_funcs dm_plane_funcs = {
6654 .update_plane = drm_atomic_helper_update_plane,
6655 .disable_plane = drm_atomic_helper_disable_plane,
6656 .destroy = drm_primary_helper_destroy,
6657 .reset = dm_drm_plane_reset,
6658 .atomic_duplicate_state = dm_drm_plane_duplicate_state,
6659 .atomic_destroy_state = dm_drm_plane_destroy_state,
6660 .format_mod_supported = dm_plane_format_mod_supported,
6663 static int dm_plane_helper_prepare_fb(struct drm_plane *plane,
6664 struct drm_plane_state *new_state)
6666 struct amdgpu_framebuffer *afb;
6667 struct drm_gem_object *obj;
6668 struct amdgpu_device *adev;
6669 struct amdgpu_bo *rbo;
6670 struct dm_plane_state *dm_plane_state_new, *dm_plane_state_old;
6671 struct list_head list;
6672 struct ttm_validate_buffer tv;
6673 struct ww_acquire_ctx ticket;
6677 if (!new_state->fb) {
6678 DRM_DEBUG_KMS("No FB bound\n");
6682 afb = to_amdgpu_framebuffer(new_state->fb);
6683 obj = new_state->fb->obj[0];
6684 rbo = gem_to_amdgpu_bo(obj);
6685 adev = amdgpu_ttm_adev(rbo->tbo.bdev);
6686 INIT_LIST_HEAD(&list);
6690 list_add(&tv.head, &list);
6692 r = ttm_eu_reserve_buffers(&ticket, &list, false, NULL);
6694 dev_err(adev->dev, "failed to reserve bo (%d)\n", r);
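/*
 * Scanout buffers may be pinned in any domain that
 * amdgpu_display_supported_domains() allows for this ASIC, but cursor
 * FBs are always pinned in VRAM.
 */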
6698 if (plane->type != DRM_PLANE_TYPE_CURSOR)
6699 domain = amdgpu_display_supported_domains(adev, rbo->flags);
6701 domain = AMDGPU_GEM_DOMAIN_VRAM;
6703 r = amdgpu_bo_pin(rbo, domain);
6704 if (unlikely(r != 0)) {
6705 if (r != -ERESTARTSYS)
6706 DRM_ERROR("Failed to pin framebuffer with error %d\n", r);
6707 ttm_eu_backoff_reservation(&ticket, &list);
6711 r = amdgpu_ttm_alloc_gart(&rbo->tbo);
6712 if (unlikely(r != 0)) {
6713 amdgpu_bo_unpin(rbo);
6714 ttm_eu_backoff_reservation(&ticket, &list);
6715 DRM_ERROR("%p bind failed\n", rbo);
6719 ttm_eu_backoff_reservation(&ticket, &list);
6721 afb->address = amdgpu_bo_gpu_offset(rbo);
6726 * We don't do surface updates on planes that have been newly created,
6727 * but we also don't have the afb->address during atomic check.
6729 * Fill in buffer attributes depending on the address here, but only on
6730 * newly created planes since they're not being used by DC yet and this
6731 * won't modify global state.
6733 dm_plane_state_old = to_dm_plane_state(plane->state);
6734 dm_plane_state_new = to_dm_plane_state(new_state);
6736 if (dm_plane_state_new->dc_state &&
6737 dm_plane_state_old->dc_state != dm_plane_state_new->dc_state) {
6738 struct dc_plane_state *plane_state =
6739 dm_plane_state_new->dc_state;
6740 bool force_disable_dcc = !plane_state->dcc.enable;
6742 fill_plane_buffer_attributes(
6743 adev, afb, plane_state->format, plane_state->rotation,
6745 &plane_state->tiling_info, &plane_state->plane_size,
6746 &plane_state->dcc, &plane_state->address,
6747 afb->tmz_surface, force_disable_dcc);
6753 static void dm_plane_helper_cleanup_fb(struct drm_plane *plane,
6754 struct drm_plane_state *old_state)
6756 struct amdgpu_bo *rbo;
6762 rbo = gem_to_amdgpu_bo(old_state->fb->obj[0]);
6763 r = amdgpu_bo_reserve(rbo, false);
6765 DRM_ERROR("failed to reserve rbo before unpin\n");
6769 amdgpu_bo_unpin(rbo);
6770 amdgpu_bo_unreserve(rbo);
6771 amdgpu_bo_unref(&rbo);
6774 static int dm_plane_helper_check_state(struct drm_plane_state *state,
6775 struct drm_crtc_state *new_crtc_state)
6777 struct drm_framebuffer *fb = state->fb;
6778 int min_downscale, max_upscale;
6780 int max_scale = INT_MAX;
6782 /* Plane enabled? Validate viewport and get scaling factors from plane caps. */
6783 if (fb && state->crtc) {
6784 /* Validate viewport to cover the case when only the position changes */
6785 if (state->plane->type != DRM_PLANE_TYPE_CURSOR) {
6786 int viewport_width = state->crtc_w;
6787 int viewport_height = state->crtc_h;
6789 if (state->crtc_x < 0)
6790 viewport_width += state->crtc_x;
6791 else if (state->crtc_x + state->crtc_w > new_crtc_state->mode.crtc_hdisplay)
6792 viewport_width = new_crtc_state->mode.crtc_hdisplay - state->crtc_x;
6794 if (state->crtc_y < 0)
6795 viewport_height += state->crtc_y;
6796 else if (state->crtc_y + state->crtc_h > new_crtc_state->mode.crtc_vdisplay)
6797 viewport_height = new_crtc_state->mode.crtc_vdisplay - state->crtc_y;
6799 if (viewport_width < 0 || viewport_height < 0) {
6800 DRM_DEBUG_ATOMIC("Plane completely outside of screen\n");
6802 } else if (viewport_width < MIN_VIEWPORT_SIZE*2) { /* x2 for width is because of pipe-split. */
6803 DRM_DEBUG_ATOMIC("Viewport width %d smaller than %d\n", viewport_width, MIN_VIEWPORT_SIZE*2);
6805 } else if (viewport_height < MIN_VIEWPORT_SIZE) {
6806 DRM_DEBUG_ATOMIC("Viewport height %d smaller than %d\n", viewport_height, MIN_VIEWPORT_SIZE);
6812 /* Get min/max allowed scaling factors from plane caps. */
6813 get_min_max_dc_plane_scaling(state->crtc->dev, fb,
6814 &min_downscale, &max_upscale);
6816 * Convert to drm convention: 16.16 fixed point, instead of dc's
6817 * 1.0 == 1000. Also drm scaling is src/dst instead of dc's
6818 * dst/src, so min_scale = 1.0 / max_upscale, etc.
6820 min_scale = (1000 << 16) / max_upscale;
6821 max_scale = (1000 << 16) / min_downscale;
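/*
 * Worked example with hypothetical caps: max_upscale = 16000 (16x in
 * DC's 1.0 == 1000 convention) gives min_scale = (1000 << 16) / 16000
 * = 4096, i.e. 1/16 in DRM's 16.16 fixed point.
 */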
6824 return drm_atomic_helper_check_plane_state(
6825 state, new_crtc_state, min_scale, max_scale, true, true);
6828 static int dm_plane_atomic_check(struct drm_plane *plane,
6829 struct drm_atomic_state *state)
6831 struct drm_plane_state *new_plane_state = drm_atomic_get_new_plane_state(state,
6833 struct amdgpu_device *adev = drm_to_adev(plane->dev);
6834 struct dc *dc = adev->dm.dc;
6835 struct dm_plane_state *dm_plane_state;
6836 struct dc_scaling_info scaling_info;
6837 struct drm_crtc_state *new_crtc_state;
6840 trace_amdgpu_dm_plane_atomic_check(new_plane_state);
6842 dm_plane_state = to_dm_plane_state(new_plane_state);
6844 if (!dm_plane_state->dc_state)
6848 drm_atomic_get_new_crtc_state(state,
6849 new_plane_state->crtc);
6850 if (!new_crtc_state)
6853 ret = dm_plane_helper_check_state(new_plane_state, new_crtc_state);
6857 ret = fill_dc_scaling_info(new_plane_state, &scaling_info);
6861 if (dc_validate_plane(dc, dm_plane_state->dc_state) == DC_OK)
6867 static int dm_plane_atomic_async_check(struct drm_plane *plane,
6868 struct drm_atomic_state *state)
6870 /* Only support async updates on cursor planes. */
6871 if (plane->type != DRM_PLANE_TYPE_CURSOR)
6877 static void dm_plane_atomic_async_update(struct drm_plane *plane,
6878 struct drm_atomic_state *state)
6880 struct drm_plane_state *new_state = drm_atomic_get_new_plane_state(state,
6882 struct drm_plane_state *old_state =
6883 drm_atomic_get_old_plane_state(state, plane);
6885 trace_amdgpu_dm_atomic_update_cursor(new_state);
6887 swap(plane->state->fb, new_state->fb);
6889 plane->state->src_x = new_state->src_x;
6890 plane->state->src_y = new_state->src_y;
6891 plane->state->src_w = new_state->src_w;
6892 plane->state->src_h = new_state->src_h;
6893 plane->state->crtc_x = new_state->crtc_x;
6894 plane->state->crtc_y = new_state->crtc_y;
6895 plane->state->crtc_w = new_state->crtc_w;
6896 plane->state->crtc_h = new_state->crtc_h;
6898 handle_cursor_update(plane, old_state);
6901 static const struct drm_plane_helper_funcs dm_plane_helper_funcs = {
6902 .prepare_fb = dm_plane_helper_prepare_fb,
6903 .cleanup_fb = dm_plane_helper_cleanup_fb,
6904 .atomic_check = dm_plane_atomic_check,
6905 .atomic_async_check = dm_plane_atomic_async_check,
6906 .atomic_async_update = dm_plane_atomic_async_update
6910 * TODO: these are currently initialized to rgb formats only.
6911 * For future use cases we should either initialize them dynamically based on
6912 * plane capabilities, or initialize this array to all formats, so internal drm
6913 * check will succeed, and let DC implement proper check
6915 static const uint32_t rgb_formats[] = {
6916 DRM_FORMAT_XRGB8888,
6917 DRM_FORMAT_ARGB8888,
6918 DRM_FORMAT_RGBA8888,
6919 DRM_FORMAT_XRGB2101010,
6920 DRM_FORMAT_XBGR2101010,
6921 DRM_FORMAT_ARGB2101010,
6922 DRM_FORMAT_ABGR2101010,
6923 DRM_FORMAT_XBGR8888,
6924 DRM_FORMAT_ABGR8888,
6928 static const uint32_t overlay_formats[] = {
6929 DRM_FORMAT_XRGB8888,
6930 DRM_FORMAT_ARGB8888,
6931 DRM_FORMAT_RGBA8888,
6932 DRM_FORMAT_XBGR8888,
6933 DRM_FORMAT_ABGR8888,
6937 static const u32 cursor_formats[] = {
6941 static int get_plane_formats(const struct drm_plane *plane,
6942 const struct dc_plane_cap *plane_cap,
6943 uint32_t *formats, int max_formats)
6945 int i, num_formats = 0;
6948 * TODO: Query support for each group of formats directly from
6949 * DC plane caps. This will require adding more formats to the
6953 switch (plane->type) {
6954 case DRM_PLANE_TYPE_PRIMARY:
6955 for (i = 0; i < ARRAY_SIZE(rgb_formats); ++i) {
6956 if (num_formats >= max_formats)
6959 formats[num_formats++] = rgb_formats[i];
6962 if (plane_cap && plane_cap->pixel_format_support.nv12)
6963 formats[num_formats++] = DRM_FORMAT_NV12;
6964 if (plane_cap && plane_cap->pixel_format_support.p010)
6965 formats[num_formats++] = DRM_FORMAT_P010;
6966 if (plane_cap && plane_cap->pixel_format_support.fp16) {
6967 formats[num_formats++] = DRM_FORMAT_XRGB16161616F;
6968 formats[num_formats++] = DRM_FORMAT_ARGB16161616F;
6969 formats[num_formats++] = DRM_FORMAT_XBGR16161616F;
6970 formats[num_formats++] = DRM_FORMAT_ABGR16161616F;
6974 case DRM_PLANE_TYPE_OVERLAY:
6975 for (i = 0; i < ARRAY_SIZE(overlay_formats); ++i) {
6976 if (num_formats >= max_formats)
6979 formats[num_formats++] = overlay_formats[i];
6983 case DRM_PLANE_TYPE_CURSOR:
6984 for (i = 0; i < ARRAY_SIZE(cursor_formats); ++i) {
6985 if (num_formats >= max_formats)
6988 formats[num_formats++] = cursor_formats[i];
6996 static int amdgpu_dm_plane_init(struct amdgpu_display_manager *dm,
6997 struct drm_plane *plane,
6998 unsigned long possible_crtcs,
6999 const struct dc_plane_cap *plane_cap)
7001 uint32_t formats[32];
7004 unsigned int supported_rotations;
7005 uint64_t *modifiers = NULL;
7007 num_formats = get_plane_formats(plane, plane_cap, formats,
7008 ARRAY_SIZE(formats));
7010 res = get_plane_modifiers(dm->adev, plane->type, &modifiers);
7014 res = drm_universal_plane_init(adev_to_drm(dm->adev), plane, possible_crtcs,
7015 &dm_plane_funcs, formats, num_formats,
7016 modifiers, plane->type, NULL);
7021 if (plane->type == DRM_PLANE_TYPE_OVERLAY &&
7022 plane_cap && plane_cap->per_pixel_alpha) {
7023 unsigned int blend_caps = BIT(DRM_MODE_BLEND_PIXEL_NONE) |
7024 BIT(DRM_MODE_BLEND_PREMULTI);
7026 drm_plane_create_alpha_property(plane);
7027 drm_plane_create_blend_mode_property(plane, blend_caps);
7030 if (plane->type == DRM_PLANE_TYPE_PRIMARY &&
7032 (plane_cap->pixel_format_support.nv12 ||
7033 plane_cap->pixel_format_support.p010)) {
7034 /* This only affects YUV formats. */
7035 drm_plane_create_color_properties(
7037 BIT(DRM_COLOR_YCBCR_BT601) |
7038 BIT(DRM_COLOR_YCBCR_BT709) |
7039 BIT(DRM_COLOR_YCBCR_BT2020),
7040 BIT(DRM_COLOR_YCBCR_LIMITED_RANGE) |
7041 BIT(DRM_COLOR_YCBCR_FULL_RANGE),
7042 DRM_COLOR_YCBCR_BT709, DRM_COLOR_YCBCR_LIMITED_RANGE);
7045 supported_rotations =
7046 DRM_MODE_ROTATE_0 | DRM_MODE_ROTATE_90 |
7047 DRM_MODE_ROTATE_180 | DRM_MODE_ROTATE_270;
7049 if (dm->adev->asic_type >= CHIP_BONAIRE &&
7050 plane->type != DRM_PLANE_TYPE_CURSOR)
7051 drm_plane_create_rotation_property(plane, DRM_MODE_ROTATE_0,
7052 supported_rotations);
7054 drm_plane_helper_add(plane, &dm_plane_helper_funcs);
7056 /* Create (reset) the plane state */
7057 if (plane->funcs->reset)
7058 plane->funcs->reset(plane);
7063 static int amdgpu_dm_crtc_init(struct amdgpu_display_manager *dm,
7064 struct drm_plane *plane,
7065 uint32_t crtc_index)
7067 struct amdgpu_crtc *acrtc = NULL;
7068 struct drm_plane *cursor_plane;
7072 cursor_plane = kzalloc(sizeof(*cursor_plane), GFP_KERNEL);
7076 cursor_plane->type = DRM_PLANE_TYPE_CURSOR;
7077 res = amdgpu_dm_plane_init(dm, cursor_plane, 0, NULL);
7079 acrtc = kzalloc(sizeof(struct amdgpu_crtc), GFP_KERNEL);
7083 res = drm_crtc_init_with_planes(
7088 &amdgpu_dm_crtc_funcs, NULL);
7093 drm_crtc_helper_add(&acrtc->base, &amdgpu_dm_crtc_helper_funcs);
7095 /* Create (reset) the plane state */
7096 if (acrtc->base.funcs->reset)
7097 acrtc->base.funcs->reset(&acrtc->base);
7099 acrtc->max_cursor_width = dm->adev->dm.dc->caps.max_cursor_size;
7100 acrtc->max_cursor_height = dm->adev->dm.dc->caps.max_cursor_size;
7102 acrtc->crtc_id = crtc_index;
7103 acrtc->base.enabled = false;
7104 acrtc->otg_inst = -1;
7106 dm->adev->mode_info.crtcs[crtc_index] = acrtc;
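/*
 * Expose non-legacy degamma/CTM/gamma properties sized to
 * MAX_COLOR_LUT_ENTRIES, plus a legacy gamma ramp of
 * MAX_COLOR_LEGACY_LUT_ENTRIES entries.
 */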
7107 drm_crtc_enable_color_mgmt(&acrtc->base, MAX_COLOR_LUT_ENTRIES,
7108 true, MAX_COLOR_LUT_ENTRIES);
7109 drm_mode_crtc_set_gamma_size(&acrtc->base, MAX_COLOR_LEGACY_LUT_ENTRIES);
7115 kfree(cursor_plane);
7120 static int to_drm_connector_type(enum signal_type st)
7123 case SIGNAL_TYPE_HDMI_TYPE_A:
7124 return DRM_MODE_CONNECTOR_HDMIA;
7125 case SIGNAL_TYPE_EDP:
7126 return DRM_MODE_CONNECTOR_eDP;
7127 case SIGNAL_TYPE_LVDS:
7128 return DRM_MODE_CONNECTOR_LVDS;
7129 case SIGNAL_TYPE_RGB:
7130 return DRM_MODE_CONNECTOR_VGA;
7131 case SIGNAL_TYPE_DISPLAY_PORT:
7132 case SIGNAL_TYPE_DISPLAY_PORT_MST:
7133 return DRM_MODE_CONNECTOR_DisplayPort;
7134 case SIGNAL_TYPE_DVI_DUAL_LINK:
7135 case SIGNAL_TYPE_DVI_SINGLE_LINK:
7136 return DRM_MODE_CONNECTOR_DVID;
7137 case SIGNAL_TYPE_VIRTUAL:
7138 return DRM_MODE_CONNECTOR_VIRTUAL;
7141 return DRM_MODE_CONNECTOR_Unknown;
7145 static struct drm_encoder *amdgpu_dm_connector_to_encoder(struct drm_connector *connector)
7147 struct drm_encoder *encoder;
7149 /* There is only one encoder per connector */
7150 drm_connector_for_each_possible_encoder(connector, encoder)
7156 static void amdgpu_dm_get_native_mode(struct drm_connector *connector)
7158 struct drm_encoder *encoder;
7159 struct amdgpu_encoder *amdgpu_encoder;
7161 encoder = amdgpu_dm_connector_to_encoder(connector);
7163 if (encoder == NULL)
7166 amdgpu_encoder = to_amdgpu_encoder(encoder);
7168 amdgpu_encoder->native_mode.clock = 0;
7170 if (!list_empty(&connector->probed_modes)) {
7171 struct drm_display_mode *preferred_mode = NULL;
7173 list_for_each_entry(preferred_mode,
7174 &connector->probed_modes,
7176 if (preferred_mode->type & DRM_MODE_TYPE_PREFERRED)
7177 amdgpu_encoder->native_mode = *preferred_mode;
7185 static struct drm_display_mode *
7186 amdgpu_dm_create_common_mode(struct drm_encoder *encoder,
7188 int hdisplay, int vdisplay)
7190 struct drm_device *dev = encoder->dev;
7191 struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
7192 struct drm_display_mode *mode = NULL;
7193 struct drm_display_mode *native_mode = &amdgpu_encoder->native_mode;
7195 mode = drm_mode_duplicate(dev, native_mode);
7200 mode->hdisplay = hdisplay;
7201 mode->vdisplay = vdisplay;
7202 mode->type &= ~DRM_MODE_TYPE_PREFERRED;
7203 strscpy(mode->name, name, DRM_DISPLAY_MODE_LEN);
7209 static void amdgpu_dm_connector_add_common_modes(struct drm_encoder *encoder,
7210 struct drm_connector *connector)
7212 struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
7213 struct drm_display_mode *mode = NULL;
7214 struct drm_display_mode *native_mode = &amdgpu_encoder->native_mode;
7215 struct amdgpu_dm_connector *amdgpu_dm_connector =
7216 to_amdgpu_dm_connector(connector);
7220 char name[DRM_DISPLAY_MODE_LEN];
7223 } common_modes[] = {
7224 { "640x480", 640, 480},
7225 { "800x600", 800, 600},
7226 { "1024x768", 1024, 768},
7227 { "1280x720", 1280, 720},
7228 { "1280x800", 1280, 800},
7229 {"1280x1024", 1280, 1024},
7230 { "1440x900", 1440, 900},
7231 {"1680x1050", 1680, 1050},
7232 {"1600x1200", 1600, 1200},
7233 {"1920x1080", 1920, 1080},
7234 {"1920x1200", 1920, 1200}
7237 n = ARRAY_SIZE(common_modes);
7239 for (i = 0; i < n; i++) {
7240 struct drm_display_mode *curmode = NULL;
7241 bool mode_existed = false;
7243 if (common_modes[i].w > native_mode->hdisplay ||
7244 common_modes[i].h > native_mode->vdisplay ||
7245 (common_modes[i].w == native_mode->hdisplay &&
7246 common_modes[i].h == native_mode->vdisplay))
7249 list_for_each_entry(curmode, &connector->probed_modes, head) {
7250 if (common_modes[i].w == curmode->hdisplay &&
7251 common_modes[i].h == curmode->vdisplay) {
7252 mode_existed = true;
7260 mode = amdgpu_dm_create_common_mode(encoder,
7261 common_modes[i].name, common_modes[i].w,
7263 drm_mode_probed_add(connector, mode);
7264 amdgpu_dm_connector->num_modes++;
7268 static void amdgpu_dm_connector_ddc_get_modes(struct drm_connector *connector,
7271 struct amdgpu_dm_connector *amdgpu_dm_connector =
7272 to_amdgpu_dm_connector(connector);
7275 /* empty probed_modes */
7276 INIT_LIST_HEAD(&connector->probed_modes);
7277 amdgpu_dm_connector->num_modes =
7278 drm_add_edid_modes(connector, edid);
7280 /* Sort the probed modes before calling
7281 * amdgpu_dm_get_native_mode(), since the EDID can have
7282 * more than one preferred mode. Modes that appear
7283 * later in the probed mode list could be of a higher,
7284 * preferred resolution. For example, a 3840x2160
7285 * resolution in the base EDID preferred timing and a 4096x2160
7286 * preferred resolution in a DID extension block later.
7288 drm_mode_sort(&connector->probed_modes);
7289 amdgpu_dm_get_native_mode(connector);
7291 /* Freesync capabilities are reset by calling
7292 * drm_add_edid_modes() and need to be
7295 amdgpu_dm_update_freesync_caps(connector, edid);
7297 amdgpu_dm_connector->num_modes = 0;
7301 static bool is_duplicate_mode(struct amdgpu_dm_connector *aconnector,
7302 struct drm_display_mode *mode)
7304 struct drm_display_mode *m;
7306 list_for_each_entry (m, &aconnector->base.probed_modes, head) {
7307 if (drm_mode_equal(m, mode))
7314 static uint add_fs_modes(struct amdgpu_dm_connector *aconnector)
7316 const struct drm_display_mode *m;
7317 struct drm_display_mode *new_mode;
7319 uint32_t new_modes_count = 0;
7321 /* Standard FPS values
7330 * 60 - Commonly used
7331 * 48,72,96 - Multiples of 24
7333 const uint32_t common_rates[] = { 23976, 24000, 25000, 29970, 30000,
7334 48000, 50000, 60000, 72000, 96000 };
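/*
 * Rates are in millihertz (refresh * 1000), matching the
 * drm_mode_vrefresh() * 1000 and min/max_vfreq * 1000 comparisons below.
 */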
7337 * Find mode with highest refresh rate with the same resolution
7338 * as the preferred mode. Some monitors report a preferred mode
7339 * with lower resolution than the highest refresh rate supported.
7342 m = get_highest_refresh_rate_mode(aconnector, true);
7346 for (i = 0; i < ARRAY_SIZE(common_rates); i++) {
7347 uint64_t target_vtotal, target_vtotal_diff;
7350 if (drm_mode_vrefresh(m) * 1000 < common_rates[i])
7353 if (common_rates[i] < aconnector->min_vfreq * 1000 ||
7354 common_rates[i] > aconnector->max_vfreq * 1000)
7357 num = (unsigned long long)m->clock * 1000 * 1000;
7358 den = common_rates[i] * (unsigned long long)m->htotal;
7359 target_vtotal = div_u64(num, den);
7360 target_vtotal_diff = target_vtotal - m->vtotal;
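/*
 * Example, assuming a typical 1920x1080@60 mode (clock 148500 kHz,
 * htotal 2200, vtotal 1125) retargeted to 48000 mHz:
 * target_vtotal = 148500 * 1000 * 1000 / (48000 * 2200) = 1406, so
 * vtotal is stretched by 281 lines to reach ~48 Hz at the same pixel
 * clock.
 */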
7362 /* Check for illegal modes */
7363 if (m->vsync_start + target_vtotal_diff < m->vdisplay ||
7364 m->vsync_end + target_vtotal_diff < m->vsync_start ||
7365 m->vtotal + target_vtotal_diff < m->vsync_end)
7368 new_mode = drm_mode_duplicate(aconnector->base.dev, m);
7372 new_mode->vtotal += (u16)target_vtotal_diff;
7373 new_mode->vsync_start += (u16)target_vtotal_diff;
7374 new_mode->vsync_end += (u16)target_vtotal_diff;
7375 new_mode->type &= ~DRM_MODE_TYPE_PREFERRED;
7376 new_mode->type |= DRM_MODE_TYPE_DRIVER;
7378 if (!is_duplicate_mode(aconnector, new_mode)) {
7379 drm_mode_probed_add(&aconnector->base, new_mode);
7380 new_modes_count += 1;
7382 drm_mode_destroy(aconnector->base.dev, new_mode);
7385 return new_modes_count;
7388 static void amdgpu_dm_connector_add_freesync_modes(struct drm_connector *connector,
7391 struct amdgpu_dm_connector *amdgpu_dm_connector =
7392 to_amdgpu_dm_connector(connector);
7394 if (!(amdgpu_freesync_vid_mode && edid))
7397 if (amdgpu_dm_connector->max_vfreq - amdgpu_dm_connector->min_vfreq > 10)
7398 amdgpu_dm_connector->num_modes +=
7399 add_fs_modes(amdgpu_dm_connector);
7402 static int amdgpu_dm_connector_get_modes(struct drm_connector *connector)
7404 struct amdgpu_dm_connector *amdgpu_dm_connector =
7405 to_amdgpu_dm_connector(connector);
7406 struct drm_encoder *encoder;
7407 struct edid *edid = amdgpu_dm_connector->edid;
7409 encoder = amdgpu_dm_connector_to_encoder(connector);
7411 if (!drm_edid_is_valid(edid)) {
7412 amdgpu_dm_connector->num_modes =
7413 drm_add_modes_noedid(connector, 640, 480);
7415 amdgpu_dm_connector_ddc_get_modes(connector, edid);
7416 amdgpu_dm_connector_add_common_modes(encoder, connector);
7417 amdgpu_dm_connector_add_freesync_modes(connector, edid);
7419 amdgpu_dm_fbc_init(connector);
7421 return amdgpu_dm_connector->num_modes;
7424 void amdgpu_dm_connector_init_helper(struct amdgpu_display_manager *dm,
7425 struct amdgpu_dm_connector *aconnector,
7427 struct dc_link *link,
7430 struct amdgpu_device *adev = drm_to_adev(dm->ddev);
7433 * Some of the properties below require access to state, like bpc.
7434 * Allocate some default initial connector state with our reset helper.
7436 if (aconnector->base.funcs->reset)
7437 aconnector->base.funcs->reset(&aconnector->base);
7439 aconnector->connector_id = link_index;
7440 aconnector->dc_link = link;
7441 aconnector->base.interlace_allowed = false;
7442 aconnector->base.doublescan_allowed = false;
7443 aconnector->base.stereo_allowed = false;
7444 aconnector->base.dpms = DRM_MODE_DPMS_OFF;
7445 aconnector->hpd.hpd = AMDGPU_HPD_NONE; /* not used */
7446 aconnector->audio_inst = -1;
7447 mutex_init(&aconnector->hpd_lock);
7450 * Configure HPD hot plug support: the connector->polled default value is 0,
7451 * which means HPD hot plug is not supported.
7453 switch (connector_type) {
7454 case DRM_MODE_CONNECTOR_HDMIA:
7455 aconnector->base.polled = DRM_CONNECTOR_POLL_HPD;
7456 aconnector->base.ycbcr_420_allowed =
7457 link->link_enc->features.hdmi_ycbcr420_supported ? true : false;
7459 case DRM_MODE_CONNECTOR_DisplayPort:
7460 aconnector->base.polled = DRM_CONNECTOR_POLL_HPD;
7461 aconnector->base.ycbcr_420_allowed =
7462 link->link_enc->features.dp_ycbcr420_supported ? true : false;
7464 case DRM_MODE_CONNECTOR_DVID:
7465 aconnector->base.polled = DRM_CONNECTOR_POLL_HPD;
7471 drm_object_attach_property(&aconnector->base.base,
7472 dm->ddev->mode_config.scaling_mode_property,
7473 DRM_MODE_SCALE_NONE);
7475 drm_object_attach_property(&aconnector->base.base,
7476 adev->mode_info.underscan_property,
7478 drm_object_attach_property(&aconnector->base.base,
7479 adev->mode_info.underscan_hborder_property,
7481 drm_object_attach_property(&aconnector->base.base,
7482 adev->mode_info.underscan_vborder_property,
7485 if (!aconnector->mst_port)
7486 drm_connector_attach_max_bpc_property(&aconnector->base, 8, 16);
7488 /* This defaults to the max in the range, but we want 8bpc for non-edp. */
7489 aconnector->base.state->max_bpc = (connector_type == DRM_MODE_CONNECTOR_eDP) ? 16 : 8;
7490 aconnector->base.state->max_requested_bpc = aconnector->base.state->max_bpc;
7492 if (connector_type == DRM_MODE_CONNECTOR_eDP &&
7493 (dc_is_dmcu_initialized(adev->dm.dc) || adev->dm.dc->ctx->dmub_srv)) {
7494 drm_object_attach_property(&aconnector->base.base,
7495 adev->mode_info.abm_level_property, 0);
7498 if (connector_type == DRM_MODE_CONNECTOR_HDMIA ||
7499 connector_type == DRM_MODE_CONNECTOR_DisplayPort ||
7500 connector_type == DRM_MODE_CONNECTOR_eDP) {
7501 drm_connector_attach_hdr_output_metadata_property(&aconnector->base);
7503 if (!aconnector->mst_port)
7504 drm_connector_attach_vrr_capable_property(&aconnector->base);
7506 #ifdef CONFIG_DRM_AMD_DC_HDCP
7507 if (adev->dm.hdcp_workqueue)
7508 drm_connector_attach_content_protection_property(&aconnector->base, true);
7513 static int amdgpu_dm_i2c_xfer(struct i2c_adapter *i2c_adap,
7514 struct i2c_msg *msgs, int num)
7516 struct amdgpu_i2c_adapter *i2c = i2c_get_adapdata(i2c_adap);
7517 struct ddc_service *ddc_service = i2c->ddc_service;
7518 struct i2c_command cmd;
7522 cmd.payloads = kcalloc(num, sizeof(struct i2c_payload), GFP_KERNEL);
7527 cmd.number_of_payloads = num;
7528 cmd.engine = I2C_COMMAND_ENGINE_DEFAULT;
7531 for (i = 0; i < num; i++) {
7532 cmd.payloads[i].write = !(msgs[i].flags & I2C_M_RD);
7533 cmd.payloads[i].address = msgs[i].addr;
7534 cmd.payloads[i].length = msgs[i].len;
7535 cmd.payloads[i].data = msgs[i].buf;
7539 ddc_service->ctx->dc,
7540 ddc_service->ddc_pin->hw_info.ddc_channel,
7544 kfree(cmd.payloads);
7548 static u32 amdgpu_dm_i2c_func(struct i2c_adapter *adap)
7550 return I2C_FUNC_I2C | I2C_FUNC_SMBUS_EMUL;
7553 static const struct i2c_algorithm amdgpu_dm_i2c_algo = {
7554 .master_xfer = amdgpu_dm_i2c_xfer,
7555 .functionality = amdgpu_dm_i2c_func,
7558 static struct amdgpu_i2c_adapter *
7559 create_i2c(struct ddc_service *ddc_service,
7563 struct amdgpu_device *adev = ddc_service->ctx->driver_context;
7564 struct amdgpu_i2c_adapter *i2c;
7566 i2c = kzalloc(sizeof(struct amdgpu_i2c_adapter), GFP_KERNEL);
7569 i2c->base.owner = THIS_MODULE;
7570 i2c->base.class = I2C_CLASS_DDC;
7571 i2c->base.dev.parent = &adev->pdev->dev;
7572 i2c->base.algo = &amdgpu_dm_i2c_algo;
7573 snprintf(i2c->base.name, sizeof(i2c->base.name), "AMDGPU DM i2c hw bus %d", link_index);
7574 i2c_set_adapdata(&i2c->base, i2c);
7575 i2c->ddc_service = ddc_service;
7576 i2c->ddc_service->ddc_pin->hw_info.ddc_channel = link_index;
7583 * Note: this function assumes that dc_link_detect() was called for the
7584 * dc_link which will be represented by this aconnector.
7586 static int amdgpu_dm_connector_init(struct amdgpu_display_manager *dm,
7587 struct amdgpu_dm_connector *aconnector,
7588 uint32_t link_index,
7589 struct amdgpu_encoder *aencoder)
7593 struct dc *dc = dm->dc;
7594 struct dc_link *link = dc_get_link_at_index(dc, link_index);
7595 struct amdgpu_i2c_adapter *i2c;
7597 link->priv = aconnector;
7599 DRM_DEBUG_DRIVER("%s()\n", __func__);
7601 i2c = create_i2c(link->ddc, link->link_index, &res);
7603 DRM_ERROR("Failed to create i2c adapter data\n");
7607 aconnector->i2c = i2c;
7608 res = i2c_add_adapter(&i2c->base);
7611 DRM_ERROR("Failed to register hw i2c %d\n", link->link_index);
7615 connector_type = to_drm_connector_type(link->connector_signal);
7617 res = drm_connector_init_with_ddc(
7620 &amdgpu_dm_connector_funcs,
7625 DRM_ERROR("connector_init failed\n");
7626 aconnector->connector_id = -1;
7630 drm_connector_helper_add(
7632 &amdgpu_dm_connector_helper_funcs);
7634 amdgpu_dm_connector_init_helper(
7641 drm_connector_attach_encoder(
7642 &aconnector->base, &aencoder->base);
7644 if (connector_type == DRM_MODE_CONNECTOR_DisplayPort
7645 || connector_type == DRM_MODE_CONNECTOR_eDP)
7646 amdgpu_dm_initialize_dp_connector(dm, aconnector, link->link_index);
7651 aconnector->i2c = NULL;
7656 int amdgpu_dm_get_encoder_crtc_mask(struct amdgpu_device *adev)
7658 switch (adev->mode_info.num_crtc) {
7675 static int amdgpu_dm_encoder_init(struct drm_device *dev,
7676 struct amdgpu_encoder *aencoder,
7677 uint32_t link_index)
7679 struct amdgpu_device *adev = drm_to_adev(dev);
7681 int res = drm_encoder_init(dev,
7683 &amdgpu_dm_encoder_funcs,
7684 DRM_MODE_ENCODER_TMDS,
7687 aencoder->base.possible_crtcs = amdgpu_dm_get_encoder_crtc_mask(adev);
7690 aencoder->encoder_id = link_index;
7692 aencoder->encoder_id = -1;
7694 drm_encoder_helper_add(&aencoder->base, &amdgpu_dm_encoder_helper_funcs);
7699 static void manage_dm_interrupts(struct amdgpu_device *adev,
7700 struct amdgpu_crtc *acrtc,
7704 * We have no guarantee that the frontend index maps to the same
7705 * backend index - some even map to more than one.
7707 * TODO: Use a different interrupt or check DC itself for the mapping.
7710 amdgpu_display_crtc_idx_to_irq_type(
7715 drm_crtc_vblank_on(&acrtc->base);
7718 &adev->pageflip_irq,
7720 #if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
7727 #if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
7735 &adev->pageflip_irq,
7737 drm_crtc_vblank_off(&acrtc->base);
7741 static void dm_update_pflip_irq_state(struct amdgpu_device *adev,
7742 struct amdgpu_crtc *acrtc)
7745 amdgpu_display_crtc_idx_to_irq_type(adev, acrtc->crtc_id);
7748 * This reads the current state for the IRQ and force reapplies
7749 * the setting to hardware.
7751 amdgpu_irq_update(adev, &adev->pageflip_irq, irq_type);
7755 is_scaling_state_different(const struct dm_connector_state *dm_state,
7756 const struct dm_connector_state *old_dm_state)
7758 if (dm_state->scaling != old_dm_state->scaling)
7760 if (!dm_state->underscan_enable && old_dm_state->underscan_enable) {
7761 if (old_dm_state->underscan_hborder != 0 && old_dm_state->underscan_vborder != 0)
7763 } else if (dm_state->underscan_enable && !old_dm_state->underscan_enable) {
7764 if (dm_state->underscan_hborder != 0 && dm_state->underscan_vborder != 0)
7766 } else if (dm_state->underscan_hborder != old_dm_state->underscan_hborder ||
7767 dm_state->underscan_vborder != old_dm_state->underscan_vborder)
7772 #ifdef CONFIG_DRM_AMD_DC_HDCP
7773 static bool is_content_protection_different(struct drm_connector_state *state,
7774 const struct drm_connector_state *old_state,
7775 const struct drm_connector *connector, struct hdcp_workqueue *hdcp_w)
7777 struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
7778 struct dm_connector_state *dm_con_state = to_dm_connector_state(connector->state);
7780 /* Handle: Type0/1 change */
7781 if (old_state->hdcp_content_type != state->hdcp_content_type &&
7782 state->content_protection != DRM_MODE_CONTENT_PROTECTION_UNDESIRED) {
7783 state->content_protection = DRM_MODE_CONTENT_PROTECTION_DESIRED;
7787 /* CP is being re-enabled, ignore this
7789 * Handles: ENABLED -> DESIRED
7791 if (old_state->content_protection == DRM_MODE_CONTENT_PROTECTION_ENABLED &&
7792 state->content_protection == DRM_MODE_CONTENT_PROTECTION_DESIRED) {
7793 state->content_protection = DRM_MODE_CONTENT_PROTECTION_ENABLED;
7797 /* S3 resume case, since old state will always be 0 (UNDESIRED) and the restored state will be ENABLED
7799 * Handles: UNDESIRED -> ENABLED
7801 if (old_state->content_protection == DRM_MODE_CONTENT_PROTECTION_UNDESIRED &&
7802 state->content_protection == DRM_MODE_CONTENT_PROTECTION_ENABLED)
7803 state->content_protection = DRM_MODE_CONTENT_PROTECTION_DESIRED;
7805 /* Check if something is connected/enabled; otherwise we start HDCP but nothing is connected/enabled:
7806 * hot-plug, headless S3, dpms
7808 * Handles: DESIRED -> DESIRED (Special case)
7810 if (dm_con_state->update_hdcp && state->content_protection == DRM_MODE_CONTENT_PROTECTION_DESIRED &&
7811 connector->dpms == DRM_MODE_DPMS_ON && aconnector->dc_sink != NULL) {
7812 dm_con_state->update_hdcp = false;
7817 * Handles: UNDESIRED -> UNDESIRED
7818 * DESIRED -> DESIRED
7819 * ENABLED -> ENABLED
7821 if (old_state->content_protection == state->content_protection)
7825 * Handles: UNDESIRED -> DESIRED
7826 * DESIRED -> UNDESIRED
7827 * ENABLED -> UNDESIRED
7829 if (state->content_protection != DRM_MODE_CONTENT_PROTECTION_ENABLED)
7833 * Handles: DESIRED -> ENABLED
7839 static void remove_stream(struct amdgpu_device *adev,
7840 struct amdgpu_crtc *acrtc,
7841 struct dc_stream_state *stream)
7843 /* this is the update mode case */
7845 acrtc->otg_inst = -1;
7846 acrtc->enabled = false;
7849 static int get_cursor_position(struct drm_plane *plane, struct drm_crtc *crtc,
7850 struct dc_cursor_position *position)
7852 struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
7854 int xorigin = 0, yorigin = 0;
7856 if (!crtc || !plane->state->fb)
7859 if ((plane->state->crtc_w > amdgpu_crtc->max_cursor_width) ||
7860 (plane->state->crtc_h > amdgpu_crtc->max_cursor_height)) {
7861 DRM_ERROR("%s: bad cursor width or height %d x %d\n",
7863 plane->state->crtc_w,
7864 plane->state->crtc_h);
7868 x = plane->state->crtc_x;
7869 y = plane->state->crtc_y;
7871 if (x <= -amdgpu_crtc->max_cursor_width ||
7872 y <= -amdgpu_crtc->max_cursor_height)
7876 xorigin = min(-x, amdgpu_crtc->max_cursor_width - 1);
7880 yorigin = min(-y, amdgpu_crtc->max_cursor_height - 1);
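/*
 * If the cursor hangs off the left/top edge of the screen, the hotspot
 * is shifted by the clipped amount so the on-screen portion of the
 * cursor image is still drawn correctly.
 */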
7883 position->enable = true;
7884 position->translate_by_source = true;
7887 position->x_hotspot = xorigin;
7888 position->y_hotspot = yorigin;
7893 static void handle_cursor_update(struct drm_plane *plane,
7894 struct drm_plane_state *old_plane_state)
7896 struct amdgpu_device *adev = drm_to_adev(plane->dev);
7897 struct amdgpu_framebuffer *afb = to_amdgpu_framebuffer(plane->state->fb);
7898 struct drm_crtc *crtc = afb ? plane->state->crtc : old_plane_state->crtc;
7899 struct dm_crtc_state *crtc_state = crtc ? to_dm_crtc_state(crtc->state) : NULL;
7900 struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
7901 uint64_t address = afb ? afb->address : 0;
7902 struct dc_cursor_position position = {0};
7903 struct dc_cursor_attributes attributes;
7906 if (!plane->state->fb && !old_plane_state->fb)
7909 DC_LOG_CURSOR("%s: crtc_id=%d with size %d to %d\n",
7911 amdgpu_crtc->crtc_id,
7912 plane->state->crtc_w,
7913 plane->state->crtc_h);
7915 ret = get_cursor_position(plane, crtc, &position);
7919 if (!position.enable) {
7920 /* turn off cursor */
7921 if (crtc_state && crtc_state->stream) {
7922 mutex_lock(&adev->dm.dc_lock);
7923 dc_stream_set_cursor_position(crtc_state->stream,
7925 mutex_unlock(&adev->dm.dc_lock);
7930 amdgpu_crtc->cursor_width = plane->state->crtc_w;
7931 amdgpu_crtc->cursor_height = plane->state->crtc_h;
7933 memset(&attributes, 0, sizeof(attributes));
7934 attributes.address.high_part = upper_32_bits(address);
7935 attributes.address.low_part = lower_32_bits(address);
7936 attributes.width = plane->state->crtc_w;
7937 attributes.height = plane->state->crtc_h;
7938 attributes.color_format = CURSOR_MODE_COLOR_PRE_MULTIPLIED_ALPHA;
7939 attributes.rotation_angle = 0;
7940 attributes.attribute_flags.value = 0;
7942 attributes.pitch = afb->base.pitches[0] / afb->base.format->cpp[0];
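/* Convert the framebuffer's byte pitch to a pitch in pixels for DC. */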
7944 if (crtc_state->stream) {
7945 mutex_lock(&adev->dm.dc_lock);
7946 if (!dc_stream_set_cursor_attributes(crtc_state->stream,
7948 DRM_ERROR("DC failed to set cursor attributes\n");
7950 if (!dc_stream_set_cursor_position(crtc_state->stream,
7952 DRM_ERROR("DC failed to set cursor position\n");
7953 mutex_unlock(&adev->dm.dc_lock);
7957 static void prepare_flip_isr(struct amdgpu_crtc *acrtc)
7960 assert_spin_locked(&acrtc->base.dev->event_lock);
7961 WARN_ON(acrtc->event);
7963 acrtc->event = acrtc->base.state->event;
7965 /* Set the flip status */
7966 acrtc->pflip_status = AMDGPU_FLIP_SUBMITTED;
7968 /* Mark this event as consumed */
7969 acrtc->base.state->event = NULL;
7971 DC_LOG_PFLIP("crtc:%d, pflip_stat:AMDGPU_FLIP_SUBMITTED\n",
7975 static void update_freesync_state_on_stream(
7976 struct amdgpu_display_manager *dm,
7977 struct dm_crtc_state *new_crtc_state,
7978 struct dc_stream_state *new_stream,
7979 struct dc_plane_state *surface,
7980 u32 flip_timestamp_in_us)
7982 struct mod_vrr_params vrr_params;
7983 struct dc_info_packet vrr_infopacket = {0};
7984 struct amdgpu_device *adev = dm->adev;
7985 struct amdgpu_crtc *acrtc = to_amdgpu_crtc(new_crtc_state->base.crtc);
7986 unsigned long flags;
7987 bool pack_sdp_v1_3 = false;
7993 * TODO: Determine why min/max totals and vrefresh can be 0 here.
7994 * For now it's sufficient to just guard against these conditions.
7997 if (!new_stream->timing.h_total || !new_stream->timing.v_total)
8000 spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);
8001 vrr_params = acrtc->dm_irq_params.vrr_params;
8004 mod_freesync_handle_preflip(
8005 dm->freesync_module,
8008 flip_timestamp_in_us,
8011 if (adev->family < AMDGPU_FAMILY_AI &&
8012 amdgpu_dm_vrr_active(new_crtc_state)) {
8013 mod_freesync_handle_v_update(dm->freesync_module,
8014 new_stream, &vrr_params);
8016 /* Need to call this before the frame ends. */
8017 dc_stream_adjust_vmin_vmax(dm->dc,
8018 new_crtc_state->stream,
8019 &vrr_params.adjust);
8023 mod_freesync_build_vrr_infopacket(
8024 dm->freesync_module,
8028 TRANSFER_FUNC_UNKNOWN,
8032 new_crtc_state->freesync_timing_changed |=
8033 (memcmp(&acrtc->dm_irq_params.vrr_params.adjust,
8035 sizeof(vrr_params.adjust)) != 0);
8037 new_crtc_state->freesync_vrr_info_changed |=
8038 (memcmp(&new_crtc_state->vrr_infopacket,
8040 sizeof(vrr_infopacket)) != 0);
8042 acrtc->dm_irq_params.vrr_params = vrr_params;
8043 new_crtc_state->vrr_infopacket = vrr_infopacket;
8045 new_stream->adjust = acrtc->dm_irq_params.vrr_params.adjust;
8046 new_stream->vrr_infopacket = vrr_infopacket;
8048 if (new_crtc_state->freesync_vrr_info_changed)
8049 DRM_DEBUG_KMS("VRR packet update: crtc=%u enabled=%d state=%d",
8050 new_crtc_state->base.crtc->base.id,
8051 (int)new_crtc_state->base.vrr_enabled,
8052 (int)vrr_params.state);
8054 spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
8057 static void update_stream_irq_parameters(
8058 struct amdgpu_display_manager *dm,
8059 struct dm_crtc_state *new_crtc_state)
8061 struct dc_stream_state *new_stream = new_crtc_state->stream;
8062 struct mod_vrr_params vrr_params;
8063 struct mod_freesync_config config = new_crtc_state->freesync_config;
8064 struct amdgpu_device *adev = dm->adev;
8065 struct amdgpu_crtc *acrtc = to_amdgpu_crtc(new_crtc_state->base.crtc);
8066 unsigned long flags;
8072 * TODO: Determine why min/max totals and vrefresh can be 0 here.
8073 * For now it's sufficient to just guard against these conditions.
8075 if (!new_stream->timing.h_total || !new_stream->timing.v_total)
8078 spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);
8079 vrr_params = acrtc->dm_irq_params.vrr_params;
8081 if (new_crtc_state->vrr_supported &&
8082 config.min_refresh_in_uhz &&
8083 config.max_refresh_in_uhz) {
8085 * if freesync compatible mode was set, config.state will be set
8088 if (config.state == VRR_STATE_ACTIVE_FIXED && config.fixed_refresh_in_uhz &&
8089 (!drm_atomic_crtc_needs_modeset(&new_crtc_state->base) ||
8090 new_crtc_state->freesync_config.state == VRR_STATE_ACTIVE_FIXED)) {
8091 vrr_params.max_refresh_in_uhz = config.max_refresh_in_uhz;
8092 vrr_params.min_refresh_in_uhz = config.min_refresh_in_uhz;
8093 vrr_params.fixed_refresh_in_uhz = config.fixed_refresh_in_uhz;
8094 vrr_params.state = VRR_STATE_ACTIVE_FIXED;
8096 config.state = new_crtc_state->base.vrr_enabled ?
8097 VRR_STATE_ACTIVE_VARIABLE :
8101 config.state = VRR_STATE_UNSUPPORTED;
8104 mod_freesync_build_vrr_params(dm->freesync_module,
8106 &config, &vrr_params);
8108 new_crtc_state->freesync_timing_changed |=
8109 (memcmp(&acrtc->dm_irq_params.vrr_params.adjust,
8110 &vrr_params.adjust, sizeof(vrr_params.adjust)) != 0);
8112 new_crtc_state->freesync_config = config;
8113 /* Copy state for access from DM IRQ handler */
8114 acrtc->dm_irq_params.freesync_config = config;
8115 acrtc->dm_irq_params.active_planes = new_crtc_state->active_planes;
8116 acrtc->dm_irq_params.vrr_params = vrr_params;
8117 spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
8120 static void amdgpu_dm_handle_vrr_transition(struct dm_crtc_state *old_state,
8121 struct dm_crtc_state *new_state)
8123 bool old_vrr_active = amdgpu_dm_vrr_active(old_state);
8124 bool new_vrr_active = amdgpu_dm_vrr_active(new_state);
8126 if (!old_vrr_active && new_vrr_active) {
8127 /* Transition VRR inactive -> active:
8128 * While VRR is active, we must not disable vblank irq, as a
8129 * reenable after disable would compute bogus vblank/pflip
8130 * timestamps if it likely happened inside display front-porch.
8132 * We also need vupdate irq for the actual core vblank handling
8135 dm_set_vupdate_irq(new_state->base.crtc, true);
8136 drm_crtc_vblank_get(new_state->base.crtc);
8137 DRM_DEBUG_DRIVER("%s: crtc=%u VRR off->on: Get vblank ref\n",
8138 __func__, new_state->base.crtc->base.id);
8139 } else if (old_vrr_active && !new_vrr_active) {
8140 /* Transition VRR active -> inactive:
8141 * Allow vblank irq disable again for fixed refresh rate.
8143 dm_set_vupdate_irq(new_state->base.crtc, false);
8144 drm_crtc_vblank_put(new_state->base.crtc);
8145 DRM_DEBUG_DRIVER("%s: crtc=%u VRR on->off: Drop vblank ref\n",
8146 __func__, new_state->base.crtc->base.id);
8150 static void amdgpu_dm_commit_cursors(struct drm_atomic_state *state)
8152 struct drm_plane *plane;
8153 struct drm_plane_state *old_plane_state, *new_plane_state;
8157 * TODO: Make this per-stream so we don't issue redundant updates for
8158 * commits with multiple streams.
8160 for_each_oldnew_plane_in_state(state, plane, old_plane_state,
8162 if (plane->type == DRM_PLANE_TYPE_CURSOR)
8163 handle_cursor_update(plane, old_plane_state);
8166 static void amdgpu_dm_commit_planes(struct drm_atomic_state *state,
8167 struct dc_state *dc_state,
8168 struct drm_device *dev,
8169 struct amdgpu_display_manager *dm,
8170 struct drm_crtc *pcrtc,
8171 bool wait_for_vblank)
8174 uint64_t timestamp_ns;
8175 struct drm_plane *plane;
8176 struct drm_plane_state *old_plane_state, *new_plane_state;
8177 struct amdgpu_crtc *acrtc_attach = to_amdgpu_crtc(pcrtc);
8178 struct drm_crtc_state *new_pcrtc_state =
8179 drm_atomic_get_new_crtc_state(state, pcrtc);
8180 struct dm_crtc_state *acrtc_state = to_dm_crtc_state(new_pcrtc_state);
8181 struct dm_crtc_state *dm_old_crtc_state =
8182 to_dm_crtc_state(drm_atomic_get_old_crtc_state(state, pcrtc));
8183 int planes_count = 0, vpos, hpos;
8185 unsigned long flags;
8186 struct amdgpu_bo *abo;
8187 uint32_t target_vblank, last_flip_vblank;
8188 bool vrr_active = amdgpu_dm_vrr_active(acrtc_state);
8189 bool pflip_present = false;
8191 struct dc_surface_update surface_updates[MAX_SURFACES];
8192 struct dc_plane_info plane_infos[MAX_SURFACES];
8193 struct dc_scaling_info scaling_infos[MAX_SURFACES];
8194 struct dc_flip_addrs flip_addrs[MAX_SURFACES];
8195 struct dc_stream_update stream_update;
8198 bundle = kzalloc(sizeof(*bundle), GFP_KERNEL);
8201 dm_error("Failed to allocate update bundle\n");
8206 * Disable the cursor first if we're disabling all the planes.
8207 * It'll remain on the screen after the planes are re-enabled
8210 if (acrtc_state->active_planes == 0)
8211 amdgpu_dm_commit_cursors(state);
8213 /* update planes when needed */
8214 for_each_oldnew_plane_in_state(state, plane, old_plane_state, new_plane_state, i) {
8215 struct drm_crtc *crtc = new_plane_state->crtc;
8216 struct drm_crtc_state *new_crtc_state;
8217 struct drm_framebuffer *fb = new_plane_state->fb;
8218 struct amdgpu_framebuffer *afb = (struct amdgpu_framebuffer *)fb;
8219 bool plane_needs_flip;
8220 struct dc_plane_state *dc_plane;
8221 struct dm_plane_state *dm_new_plane_state = to_dm_plane_state(new_plane_state);
8223 /* Cursor plane is handled after stream updates */
8224 if (plane->type == DRM_PLANE_TYPE_CURSOR)
8227 if (!fb || !crtc || pcrtc != crtc)
8230 new_crtc_state = drm_atomic_get_new_crtc_state(state, crtc);
8231 if (!new_crtc_state->active)
8234 dc_plane = dm_new_plane_state->dc_state;
8236 bundle->surface_updates[planes_count].surface = dc_plane;
8237 if (new_pcrtc_state->color_mgmt_changed) {
8238 bundle->surface_updates[planes_count].gamma = dc_plane->gamma_correction;
8239 bundle->surface_updates[planes_count].in_transfer_func = dc_plane->in_transfer_func;
8240 bundle->surface_updates[planes_count].gamut_remap_matrix = &dc_plane->gamut_remap_matrix;
8243 fill_dc_scaling_info(new_plane_state,
8244 &bundle->scaling_infos[planes_count]);
8246 bundle->surface_updates[planes_count].scaling_info =
8247 &bundle->scaling_infos[planes_count];
8249 plane_needs_flip = old_plane_state->fb && new_plane_state->fb;
8251 pflip_present = pflip_present || plane_needs_flip;
8253 if (!plane_needs_flip) {
8258 abo = gem_to_amdgpu_bo(fb->obj[0]);
8261 * Wait for all fences on this FB. Do limited wait to avoid
8262 * deadlock during GPU reset when this fence will not signal
8263 * but we hold reservation lock for the BO.
8265 r = dma_resv_wait_timeout_rcu(abo->tbo.base.resv, true,
8267 msecs_to_jiffies(5000));
8268 if (unlikely(r <= 0))
8269 DRM_ERROR("Waiting for fences timed out!");
8271 fill_dc_plane_info_and_addr(
8272 dm->adev, new_plane_state,
8274 &bundle->plane_infos[planes_count],
8275 &bundle->flip_addrs[planes_count].address,
8276 afb->tmz_surface, false);
8278 DRM_DEBUG_ATOMIC("plane: id=%d dcc_en=%d\n",
8279 new_plane_state->plane->index,
8280 bundle->plane_infos[planes_count].dcc.enable);
8282 bundle->surface_updates[planes_count].plane_info =
8283 &bundle->plane_infos[planes_count];
8286 * Only allow immediate flips for fast updates that don't
8287 * change FB pitch, DCC state, rotation or mirroring.
8289 bundle->flip_addrs[planes_count].flip_immediate =
8290 crtc->state->async_flip &&
8291 acrtc_state->update_type == UPDATE_TYPE_FAST;
8293 timestamp_ns = ktime_get_ns();
8294 bundle->flip_addrs[planes_count].flip_timestamp_in_us = div_u64(timestamp_ns, 1000);
8295 bundle->surface_updates[planes_count].flip_addr = &bundle->flip_addrs[planes_count];
8296 bundle->surface_updates[planes_count].surface = dc_plane;
8298 if (!bundle->surface_updates[planes_count].surface) {
8299 DRM_ERROR("No surface for CRTC: id=%d\n",
8300 acrtc_attach->crtc_id);
8304 if (plane == pcrtc->primary)
8305 update_freesync_state_on_stream(
8308 acrtc_state->stream,
8310 bundle->flip_addrs[planes_count].flip_timestamp_in_us);
8312 DRM_DEBUG_ATOMIC("%s Flipping to hi: 0x%x, low: 0x%x\n",
8314 bundle->flip_addrs[planes_count].address.grph.addr.high_part,
8315 bundle->flip_addrs[planes_count].address.grph.addr.low_part);
8321 if (pflip_present) {
8323 /* Use old throttling in non-vrr fixed refresh rate mode
8324 * to keep flip scheduling based on target vblank counts
8325 * working in a backwards compatible way, e.g., for
8326 * clients using the GLX_OML_sync_control extension or
8327 * DRI3/Present extension with defined target_msc.
8329 last_flip_vblank = amdgpu_get_vblank_counter_kms(pcrtc);
8332 /* For variable refresh rate mode only:
8333 * Get vblank of last completed flip to avoid > 1 vrr
8334 * flips per video frame by use of throttling, but allow
8335 * flip programming anywhere in the possibly large
8336 * variable vrr vblank interval for fine-grained flip
8337 * timing control and more opportunity to avoid stutter
8338 * on late submission of flips.
8340 spin_lock_irqsave(&pcrtc->dev->event_lock, flags);
8341 last_flip_vblank = acrtc_attach->dm_irq_params.last_flip_vblank;
8342 spin_unlock_irqrestore(&pcrtc->dev->event_lock, flags);
8345 target_vblank = last_flip_vblank + wait_for_vblank;
8348 * Wait until we're out of the vertical blank period before the one
8349 * targeted by the flip
8351 while ((acrtc_attach->enabled &&
8352 (amdgpu_display_get_crtc_scanoutpos(dm->ddev, acrtc_attach->crtc_id,
8353 0, &vpos, &hpos, NULL,
8354 NULL, &pcrtc->hwmode)
8355 & (DRM_SCANOUTPOS_VALID | DRM_SCANOUTPOS_IN_VBLANK)) ==
8356 (DRM_SCANOUTPOS_VALID | DRM_SCANOUTPOS_IN_VBLANK) &&
8357 (int)(target_vblank -
8358 amdgpu_get_vblank_counter_kms(pcrtc)) > 0)) {
8359 usleep_range(1000, 1100);
8363 * Prepare the flip event for the pageflip interrupt to handle.
8365 * This only works in the case where we've already turned on the
8366 * appropriate hardware blocks (e.g. HUBP) so in the transition case
8367 * from 0 -> n planes we have to skip a hardware generated event
8368 * and rely on sending it from software.
8370 if (acrtc_attach->base.state->event &&
8371 acrtc_state->active_planes > 0) {
8372 drm_crtc_vblank_get(pcrtc);
8374 spin_lock_irqsave(&pcrtc->dev->event_lock, flags);
8376 WARN_ON(acrtc_attach->pflip_status != AMDGPU_FLIP_NONE);
8377 prepare_flip_isr(acrtc_attach);
8379 spin_unlock_irqrestore(&pcrtc->dev->event_lock, flags);
8382 if (acrtc_state->stream) {
8383 if (acrtc_state->freesync_vrr_info_changed)
8384 bundle->stream_update.vrr_infopacket =
8385 &acrtc_state->stream->vrr_infopacket;
8389 /* Update the planes if changed or disable if we don't have any. */
8390 if ((planes_count || acrtc_state->active_planes == 0) &&
8391 acrtc_state->stream) {
8392 bundle->stream_update.stream = acrtc_state->stream;
8393 if (new_pcrtc_state->mode_changed) {
8394 bundle->stream_update.src = acrtc_state->stream->src;
8395 bundle->stream_update.dst = acrtc_state->stream->dst;
8398 if (new_pcrtc_state->color_mgmt_changed) {
8400 * TODO: This isn't fully correct since we've actually
8401 * already modified the stream in place.
8403 bundle->stream_update.gamut_remap =
8404 &acrtc_state->stream->gamut_remap_matrix;
8405 bundle->stream_update.output_csc_transform =
8406 &acrtc_state->stream->csc_color_matrix;
8407 bundle->stream_update.out_transfer_func =
8408 acrtc_state->stream->out_transfer_func;
8411 acrtc_state->stream->abm_level = acrtc_state->abm_level;
8412 if (acrtc_state->abm_level != dm_old_crtc_state->abm_level)
8413 bundle->stream_update.abm_level = &acrtc_state->abm_level;
8416 * If FreeSync state on the stream has changed then we need to
8417 * re-adjust the min/max bounds now that DC doesn't handle this
8418 * as part of commit.
8420 if (is_dc_timing_adjust_needed(dm_old_crtc_state, acrtc_state)) {
8421 spin_lock_irqsave(&pcrtc->dev->event_lock, flags);
8422 dc_stream_adjust_vmin_vmax(
8423 dm->dc, acrtc_state->stream,
8424 &acrtc_attach->dm_irq_params.vrr_params.adjust);
8425 spin_unlock_irqrestore(&pcrtc->dev->event_lock, flags);
8427 mutex_lock(&dm->dc_lock);
8428 if ((acrtc_state->update_type > UPDATE_TYPE_FAST) &&
8429 acrtc_state->stream->link->psr_settings.psr_allow_active)
8430 amdgpu_dm_psr_disable(acrtc_state->stream);
8432 dc_commit_updates_for_stream(dm->dc,
8433 bundle->surface_updates,
8435 acrtc_state->stream,
8436 &bundle->stream_update,
8440 * Enable or disable the interrupts on the backend.
8442 * Most pipes are put into power gating when unused.
8444 * When power gating is enabled on a pipe we lose the
8445 * interrupt enablement state when power gating is disabled.
8447 * So we need to update the IRQ control state in hardware
8448 * whenever the pipe turns on (since it could be previously
8449 * power gated) or off (since some pipes can't be power gated on some ASICs).
8452 if (dm_old_crtc_state->active_planes != acrtc_state->active_planes)
8453 dm_update_pflip_irq_state(drm_to_adev(dev),
8456 if ((acrtc_state->update_type > UPDATE_TYPE_FAST) &&
8457 acrtc_state->stream->link->psr_settings.psr_version != DC_PSR_VERSION_UNSUPPORTED &&
8458 !acrtc_state->stream->link->psr_settings.psr_feature_enabled)
8459 amdgpu_dm_link_setup_psr(acrtc_state->stream);
8460 else if ((acrtc_state->update_type == UPDATE_TYPE_FAST) &&
8461 acrtc_state->stream->link->psr_settings.psr_feature_enabled &&
8462 !acrtc_state->stream->link->psr_settings.psr_allow_active) {
8463 amdgpu_dm_psr_enable(acrtc_state->stream);
8466 mutex_unlock(&dm->dc_lock);
8470 * Update cursor state *after* programming all the planes.
8471 * This avoids redundant programming in the case where we're going
8472 * to be disabling a single plane - those pipes are being disabled.
8474 if (acrtc_state->active_planes)
8475 amdgpu_dm_commit_cursors(state);
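/*
 * Notify the DRM audio component about connector changes in this commit:
 * connectors whose CRTC assignment went away have their audio instance
 * cleared (device removal), while connectors on CRTCs that went through a
 * modeset pick up the audio instance from the committed stream status
 * (device addition), so ELD data can be re-read.
 */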
8481 static void amdgpu_dm_commit_audio(struct drm_device *dev,
8482 struct drm_atomic_state *state)
8484 struct amdgpu_device *adev = drm_to_adev(dev);
8485 struct amdgpu_dm_connector *aconnector;
8486 struct drm_connector *connector;
8487 struct drm_connector_state *old_con_state, *new_con_state;
8488 struct drm_crtc_state *new_crtc_state;
8489 struct dm_crtc_state *new_dm_crtc_state;
8490 const struct dc_stream_status *status;
8493 /* Notify audio device removals. */
8494 for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) {
8495 if (old_con_state->crtc != new_con_state->crtc) {
8496 /* CRTC changes require notification. */
8500 if (!new_con_state->crtc)
8503 new_crtc_state = drm_atomic_get_new_crtc_state(
8504 state, new_con_state->crtc);
8506 if (!new_crtc_state)
8509 if (!drm_atomic_crtc_needs_modeset(new_crtc_state))
8513 aconnector = to_amdgpu_dm_connector(connector);
8515 mutex_lock(&adev->dm.audio_lock);
8516 inst = aconnector->audio_inst;
8517 aconnector->audio_inst = -1;
8518 mutex_unlock(&adev->dm.audio_lock);
8520 amdgpu_dm_audio_eld_notify(adev, inst);
8523 /* Notify audio device additions. */
8524 for_each_new_connector_in_state(state, connector, new_con_state, i) {
8525 if (!new_con_state->crtc)
8528 new_crtc_state = drm_atomic_get_new_crtc_state(
8529 state, new_con_state->crtc);
8531 if (!new_crtc_state)
8534 if (!drm_atomic_crtc_needs_modeset(new_crtc_state))
8537 new_dm_crtc_state = to_dm_crtc_state(new_crtc_state);
8538 if (!new_dm_crtc_state->stream)
8541 status = dc_stream_get_status(new_dm_crtc_state->stream);
8545 aconnector = to_amdgpu_dm_connector(connector);
8547 mutex_lock(&adev->dm.audio_lock);
8548 inst = status->audio_inst;
8549 aconnector->audio_inst = inst;
8550 mutex_unlock(&adev->dm.audio_lock);
8552 amdgpu_dm_audio_eld_notify(adev, inst);
8557 * amdgpu_dm_crtc_copy_transient_flags - copy mirrored flags from DRM to DC
8558 * @crtc_state: the DRM CRTC state
8559 * @stream_state: the DC stream state.
8561 * Copy the mirrored transient state flags from DRM to DC. It is used to bring
8562 * a dc_stream_state's flags in sync with a drm_crtc_state's flags.
8564 static void amdgpu_dm_crtc_copy_transient_flags(struct drm_crtc_state *crtc_state,
8565 struct dc_stream_state *stream_state)
8567 stream_state->mode_changed = drm_atomic_crtc_needs_modeset(crtc_state);
8571 * amdgpu_dm_atomic_commit_tail() - AMDgpu DM's commit tail implementation.
8572 * @state: The atomic state to commit
8574 * This will tell DC to commit the constructed DC state from atomic_check,
8575 * programming the hardware. Any failure here implies a hardware failure, since
8576 * atomic check should have filtered anything non-kosher.
8578 static void amdgpu_dm_atomic_commit_tail(struct drm_atomic_state *state)
8580 struct drm_device *dev = state->dev;
8581 struct amdgpu_device *adev = drm_to_adev(dev);
8582 struct amdgpu_display_manager *dm = &adev->dm;
8583 struct dm_atomic_state *dm_state;
8584 struct dc_state *dc_state = NULL, *dc_state_temp = NULL;
8586 struct drm_crtc *crtc;
8587 struct drm_crtc_state *old_crtc_state, *new_crtc_state;
8588 unsigned long flags;
8589 bool wait_for_vblank = true;
8590 struct drm_connector *connector;
8591 struct drm_connector_state *old_con_state, *new_con_state;
8592 struct dm_crtc_state *dm_old_crtc_state, *dm_new_crtc_state;
8593 int crtc_disable_count = 0;
8594 bool mode_set_reset_required = false;
8596 trace_amdgpu_dm_atomic_commit_tail_begin(state);
8598 drm_atomic_helper_update_legacy_modeset_state(dev, state);
8600 dm_state = dm_atomic_get_new_state(state);
8601 if (dm_state && dm_state->context) {
8602 dc_state = dm_state->context;
8604 /* No state changes, retain current state. */
8605 dc_state_temp = dc_create_state(dm->dc);
8606 ASSERT(dc_state_temp);
8607 dc_state = dc_state_temp;
8608 dc_resource_state_copy_construct_current(dm->dc, dc_state);
8611 for_each_oldnew_crtc_in_state (state, crtc, old_crtc_state,
8612 new_crtc_state, i) {
8613 struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
8615 dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
8617 if (old_crtc_state->active &&
8618 (!new_crtc_state->active ||
8619 drm_atomic_crtc_needs_modeset(new_crtc_state))) {
8620 manage_dm_interrupts(adev, acrtc, false);
8621 dc_stream_release(dm_old_crtc_state->stream);
8625 drm_atomic_helper_calc_timestamping_constants(state);
8627 /* update changed items */
8628 for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
8629 struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
8631 dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
8632 dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
8635 "amdgpu_crtc id:%d crtc_state_flags: enable:%d, active:%d, "
8636 "planes_changed:%d, mode_changed:%d,active_changed:%d,"
8637 "connectors_changed:%d\n",
8639 new_crtc_state->enable,
8640 new_crtc_state->active,
8641 new_crtc_state->planes_changed,
8642 new_crtc_state->mode_changed,
8643 new_crtc_state->active_changed,
8644 new_crtc_state->connectors_changed);
8646 /* Disable cursor if disabling crtc */
8647 if (old_crtc_state->active && !new_crtc_state->active) {
8648 struct dc_cursor_position position;
8650 memset(&position, 0, sizeof(position));
8651 mutex_lock(&dm->dc_lock);
8652 dc_stream_set_cursor_position(dm_old_crtc_state->stream, &position);
8653 mutex_unlock(&dm->dc_lock);
8656 /* Copy all transient state flags into dc state */
8657 if (dm_new_crtc_state->stream) {
8658 amdgpu_dm_crtc_copy_transient_flags(&dm_new_crtc_state->base,
8659 dm_new_crtc_state->stream);
8662 /* handles headless hotplug case, updating new_state and
8663 * aconnector as needed
8666 if (modeset_required(new_crtc_state, dm_new_crtc_state->stream, dm_old_crtc_state->stream)) {
8668 DRM_DEBUG_ATOMIC("Atomic commit: SET crtc id %d: [%p]\n", acrtc->crtc_id, acrtc);
8670 if (!dm_new_crtc_state->stream) {
8672 * This could happen because of issues with
8673 * the delivery of userspace notifications.
8674 * In this case userspace tries to set a mode on
8675 * a display that is in fact disconnected, so
8676 * dc_sink is NULL on the aconnector.
8677 * We expect a mode reset to follow shortly.
8679 * This can also happen when an unplug occurs
8680 * while the resume sequence is still completing.
8682 * In either case, we want to pretend we still
8683 * have a sink to keep the pipe running so that
8684 * hw state stays consistent with the sw state
8686 DRM_DEBUG_DRIVER("%s: Failed to create new stream for crtc %d\n",
8687 __func__, acrtc->base.base.id);
8691 if (dm_old_crtc_state->stream)
8692 remove_stream(adev, acrtc, dm_old_crtc_state->stream);
8694 pm_runtime_get_noresume(dev->dev);
8696 acrtc->enabled = true;
8697 acrtc->hw_mode = new_crtc_state->mode;
8698 crtc->hwmode = new_crtc_state->mode;
8699 mode_set_reset_required = true;
8700 } else if (modereset_required(new_crtc_state)) {
8701 DRM_DEBUG_ATOMIC("Atomic commit: RESET. crtc id %d:[%p]\n", acrtc->crtc_id, acrtc);
8702 /* i.e. reset mode */
8703 if (dm_old_crtc_state->stream)
8704 remove_stream(adev, acrtc, dm_old_crtc_state->stream);
8706 mode_set_reset_required = true;
8708 } /* for_each_crtc_in_state() */
8711 /* if there mode set or reset, disable eDP PSR */
8712 if (mode_set_reset_required)
8713 amdgpu_dm_psr_disable_all(dm);
8715 dm_enable_per_frame_crtc_master_sync(dc_state);
8716 mutex_lock(&dm->dc_lock);
8717 WARN_ON(!dc_commit_state(dm->dc, dc_state));
8718 #if defined(CONFIG_DRM_AMD_DC_DCN)
8719 /* Allow idle optimization when vblank count is 0 for display off */
8720 if (dm->active_vblank_irq_count == 0)
8721 dc_allow_idle_optimizations(dm->dc, true);
8723 mutex_unlock(&dm->dc_lock);
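/* Record which OTG each enabled CRTC's stream was assigned to by the committed state. */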
8726 for_each_new_crtc_in_state(state, crtc, new_crtc_state, i) {
8727 struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
8729 dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
8731 if (dm_new_crtc_state->stream != NULL) {
8732 const struct dc_stream_status *status =
8733 dc_stream_get_status(dm_new_crtc_state->stream);
8736 status = dc_stream_get_status_from_state(dc_state,
8737 dm_new_crtc_state->stream);
8739 DC_ERR("got no status for stream %p on acrtc%p\n", dm_new_crtc_state->stream, acrtc);
8741 acrtc->otg_inst = status->primary_otg_inst;
8744 #ifdef CONFIG_DRM_AMD_DC_HDCP
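/*
 * Re-evaluate HDCP for each connector: if the stream backing an enabled
 * connector is gone, reset the display's HDCP state and fall back to
 * DESIRED; otherwise, if the desired content protection differs from the
 * old state, ask the HDCP workqueue to update the display.
 */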
8745 for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) {
8746 struct dm_connector_state *dm_new_con_state = to_dm_connector_state(new_con_state);
8747 struct amdgpu_crtc *acrtc = to_amdgpu_crtc(dm_new_con_state->base.crtc);
8748 struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
8750 new_crtc_state = NULL;
8753 new_crtc_state = drm_atomic_get_new_crtc_state(state, &acrtc->base);
8755 dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
8757 if (dm_new_crtc_state && dm_new_crtc_state->stream == NULL &&
8758 connector->state->content_protection == DRM_MODE_CONTENT_PROTECTION_ENABLED) {
8759 hdcp_reset_display(adev->dm.hdcp_workqueue, aconnector->dc_link->link_index);
8760 new_con_state->content_protection = DRM_MODE_CONTENT_PROTECTION_DESIRED;
8761 dm_new_con_state->update_hdcp = true;
8765 if (is_content_protection_different(new_con_state, old_con_state, connector, adev->dm.hdcp_workqueue))
8766 hdcp_update_display(
8767 adev->dm.hdcp_workqueue, aconnector->dc_link->link_index, aconnector,
8768 new_con_state->hdcp_content_type,
8769 new_con_state->content_protection == DRM_MODE_CONTENT_PROTECTION_DESIRED);
8773 /* Handle connector state changes */
8774 for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) {
8775 struct dm_connector_state *dm_new_con_state = to_dm_connector_state(new_con_state);
8776 struct dm_connector_state *dm_old_con_state = to_dm_connector_state(old_con_state);
8777 struct amdgpu_crtc *acrtc = to_amdgpu_crtc(dm_new_con_state->base.crtc);
8778 struct dc_surface_update dummy_updates[MAX_SURFACES];
8779 struct dc_stream_update stream_update;
8780 struct dc_info_packet hdr_packet;
8781 struct dc_stream_status *status = NULL;
8782 bool abm_changed, hdr_changed, scaling_changed;
8784 memset(&dummy_updates, 0, sizeof(dummy_updates));
8785 memset(&stream_update, 0, sizeof(stream_update));
8788 new_crtc_state = drm_atomic_get_new_crtc_state(state, &acrtc->base);
8789 old_crtc_state = drm_atomic_get_old_crtc_state(state, &acrtc->base);
8792 /* Skip any modesets/resets */
8793 if (!acrtc || drm_atomic_crtc_needs_modeset(new_crtc_state))
8796 dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
8797 dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
8799 scaling_changed = is_scaling_state_different(dm_new_con_state,
8802 abm_changed = dm_new_crtc_state->abm_level !=
8803 dm_old_crtc_state->abm_level;
8806 is_hdr_metadata_different(old_con_state, new_con_state);
8808 if (!scaling_changed && !abm_changed && !hdr_changed)
8811 stream_update.stream = dm_new_crtc_state->stream;
8812 if (scaling_changed) {
8813 update_stream_scaling_settings(&dm_new_con_state->base.crtc->mode,
8814 dm_new_con_state, dm_new_crtc_state->stream);
8816 stream_update.src = dm_new_crtc_state->stream->src;
8817 stream_update.dst = dm_new_crtc_state->stream->dst;
8821 dm_new_crtc_state->stream->abm_level = dm_new_crtc_state->abm_level;
8823 stream_update.abm_level = &dm_new_crtc_state->abm_level;
8827 fill_hdr_info_packet(new_con_state, &hdr_packet);
8828 stream_update.hdr_static_metadata = &hdr_packet;
8831 status = dc_stream_get_status(dm_new_crtc_state->stream);
8833 WARN_ON(!status->plane_count);
8836 * TODO: DC refuses to perform stream updates without a dc_surface_update.
8837 * Here we create an empty update on each plane.
8838 * To fix this, DC should permit updating only stream properties.
8840 for (j = 0; j < status->plane_count; j++)
8841 dummy_updates[j].surface = status->plane_states[0];
8844 mutex_lock(&dm->dc_lock);
8845 dc_commit_updates_for_stream(dm->dc,
8847 status->plane_count,
8848 dm_new_crtc_state->stream,
8851 mutex_unlock(&dm->dc_lock);
8854 /* Count number of newly disabled CRTCs for dropping PM refs later. */
8855 for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state,
8856 new_crtc_state, i) {
8857 if (old_crtc_state->active && !new_crtc_state->active)
8858 crtc_disable_count++;
8860 dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
8861 dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
8863 /* For freesync config update on crtc state and params for irq */
8864 update_stream_irq_parameters(dm, dm_new_crtc_state);
8866 /* Handle vrr on->off / off->on transitions */
8867 amdgpu_dm_handle_vrr_transition(dm_old_crtc_state,
8872 * Enable interrupts for CRTCs that are newly enabled or went through
8873 * a modeset. This was intentionally deferred until after the front end
8874 * state was modified so that the OTG is on by the time the IRQ
8875 * handlers run and they don't access stale or invalid state.
8877 for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
8878 struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
8879 #ifdef CONFIG_DEBUG_FS
8880 bool configure_crc = false;
8881 enum amdgpu_dm_pipe_crc_source cur_crc_src;
8883 dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
8885 if (new_crtc_state->active &&
8886 (!old_crtc_state->active ||
8887 drm_atomic_crtc_needs_modeset(new_crtc_state))) {
8888 dc_stream_retain(dm_new_crtc_state->stream);
8889 acrtc->dm_irq_params.stream = dm_new_crtc_state->stream;
8890 manage_dm_interrupts(adev, acrtc, true);
8892 #ifdef CONFIG_DEBUG_FS
8894 * Frontend may have changed so reapply the CRC capture
8895 * settings for the stream.
8897 dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
8898 spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);
8899 cur_crc_src = acrtc->dm_irq_params.crc_src;
8900 spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
8902 if (amdgpu_dm_is_valid_crc_source(cur_crc_src)) {
8903 configure_crc = true;
8904 #if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
8905 if (amdgpu_dm_crc_window_is_activated(crtc))
8906 configure_crc = false;
8911 amdgpu_dm_crtc_configure_crc_source(
8912 crtc, dm_new_crtc_state, cur_crc_src);
8917 for_each_new_crtc_in_state(state, crtc, new_crtc_state, j)
8918 if (new_crtc_state->async_flip)
8919 wait_for_vblank = false;
8921 /* update planes when needed per crtc*/
8922 for_each_new_crtc_in_state(state, crtc, new_crtc_state, j) {
8923 dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
8925 if (dm_new_crtc_state->stream)
8926 amdgpu_dm_commit_planes(state, dc_state, dev,
8927 dm, crtc, wait_for_vblank);
8930 /* Update audio instances for each connector. */
8931 amdgpu_dm_commit_audio(dev, state);
8934 * Send a vblank event for all events not handled in the flip path and
8935 * mark each event as consumed for drm_atomic_helper_commit_hw_done().
8937 spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);
8938 for_each_new_crtc_in_state(state, crtc, new_crtc_state, i) {
8940 if (new_crtc_state->event)
8941 drm_send_event_locked(dev, &new_crtc_state->event->base);
8943 new_crtc_state->event = NULL;
8945 spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
8947 /* Signal HW programming completion */
8948 drm_atomic_helper_commit_hw_done(state);
8950 if (wait_for_vblank)
8951 drm_atomic_helper_wait_for_flip_done(dev, state);
8953 drm_atomic_helper_cleanup_planes(dev, state);
8955 /* return the stolen VGA memory to VRAM */
8956 if (!adev->mman.keep_stolen_vga_memory)
8957 amdgpu_bo_free_kernel(&adev->mman.stolen_vga_memory, NULL, NULL);
8958 amdgpu_bo_free_kernel(&adev->mman.stolen_extended_memory, NULL, NULL);
8961 * Finally, drop a runtime PM reference for each newly disabled CRTC,
8962 * so we can put the GPU into runtime suspend if we're not driving any displays anymore.
8965 for (i = 0; i < crtc_disable_count; i++)
8966 pm_runtime_put_autosuspend(dev->dev);
8967 pm_runtime_mark_last_busy(dev->dev);
8970 dc_release_state(dc_state_temp);
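/*
 * Force an atomic commit that restores the previous display configuration
 * on a single connector: build a minimal atomic state containing the
 * connector, its CRTC and the CRTC's primary plane, mark the CRTC as
 * mode_changed and commit it.
 */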
8974 static int dm_force_atomic_commit(struct drm_connector *connector)
8977 struct drm_device *ddev = connector->dev;
8978 struct drm_atomic_state *state = drm_atomic_state_alloc(ddev);
8979 struct amdgpu_crtc *disconnected_acrtc = to_amdgpu_crtc(connector->encoder->crtc);
8980 struct drm_plane *plane = disconnected_acrtc->base.primary;
8981 struct drm_connector_state *conn_state;
8982 struct drm_crtc_state *crtc_state;
8983 struct drm_plane_state *plane_state;
8988 state->acquire_ctx = ddev->mode_config.acquire_ctx;
8990 /* Construct an atomic state to restore previous display setting */
8993 * Attach connectors to drm_atomic_state
8995 conn_state = drm_atomic_get_connector_state(state, connector);
8997 ret = PTR_ERR_OR_ZERO(conn_state);
9001 /* Attach crtc to drm_atomic_state*/
9002 crtc_state = drm_atomic_get_crtc_state(state, &disconnected_acrtc->base);
9004 ret = PTR_ERR_OR_ZERO(crtc_state);
9008 /* force a restore */
9009 crtc_state->mode_changed = true;
9011 /* Attach plane to drm_atomic_state */
9012 plane_state = drm_atomic_get_plane_state(state, plane);
9014 ret = PTR_ERR_OR_ZERO(plane_state);
9018 /* Call commit internally with the state we just constructed */
9019 ret = drm_atomic_commit(state);
9022 drm_atomic_state_put(state);
9024 DRM_ERROR("Restoring old state failed with %i\n", ret);
9030 * This function handles all cases when a set mode does not come upon hotplug.
9031 * This includes when a display is unplugged and then plugged back into the
9032 * same port, and when running without usermode desktop manager support.
9034 void dm_restore_drm_connector_state(struct drm_device *dev,
9035 struct drm_connector *connector)
9037 struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
9038 struct amdgpu_crtc *disconnected_acrtc;
9039 struct dm_crtc_state *acrtc_state;
9041 if (!aconnector->dc_sink || !connector->state || !connector->encoder)
9044 disconnected_acrtc = to_amdgpu_crtc(connector->encoder->crtc);
9045 if (!disconnected_acrtc)
9048 acrtc_state = to_dm_crtc_state(disconnected_acrtc->base.state);
9049 if (!acrtc_state->stream)
9053 * If the previous sink has not been released and differs from the current one,
9054 * we deduce we are in a state where we cannot rely on a usermode call
9055 * to turn on the display, so we do it here
9057 if (acrtc_state->stream->sink != aconnector->dc_sink)
9058 dm_force_atomic_commit(&aconnector->base);
9062 * Grabs all modesetting locks to serialize against any blocking commits,
9063 * and waits for completion of all non-blocking commits.
9065 static int do_aquire_global_lock(struct drm_device *dev,
9066 struct drm_atomic_state *state)
9068 struct drm_crtc *crtc;
9069 struct drm_crtc_commit *commit;
9073 * Adding all modeset locks to acquire_ctx will
9074 * ensure that when the framework releases it, the
9075 * extra locks we take here will also be released.
9077 ret = drm_modeset_lock_all_ctx(dev, state->acquire_ctx);
9081 list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
9082 spin_lock(&crtc->commit_lock);
9083 commit = list_first_entry_or_null(&crtc->commit_list,
9084 struct drm_crtc_commit, commit_entry);
9086 drm_crtc_commit_get(commit);
9087 spin_unlock(&crtc->commit_lock);
9094 * Make sure all pending HW programming has completed and all page flips are done.
9096 ret = wait_for_completion_interruptible_timeout(&commit->hw_done, 10*HZ);
9099 ret = wait_for_completion_interruptible_timeout(
9100 &commit->flip_done, 10*HZ);
9103 DRM_ERROR("[CRTC:%d:%s] hw_done or flip_done "
9104 "timed out\n", crtc->base.id, crtc->name);
9106 drm_crtc_commit_put(commit);
9109 return ret < 0 ? ret : 0;
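/*
 * Build the FreeSync/VRR configuration for a CRTC from the connector's
 * reported refresh-rate range: VRR is supported when the mode's refresh
 * rate falls within [min_vfreq, max_vfreq], and the resulting state is
 * fixed-rate, variable, or inactive depending on whether a FreeSync video
 * mode or generic VRR was requested.
 */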
9112 static void get_freesync_config_for_crtc(
9113 struct dm_crtc_state *new_crtc_state,
9114 struct dm_connector_state *new_con_state)
9116 struct mod_freesync_config config = {0};
9117 struct amdgpu_dm_connector *aconnector =
9118 to_amdgpu_dm_connector(new_con_state->base.connector);
9119 struct drm_display_mode *mode = &new_crtc_state->base.mode;
9120 int vrefresh = drm_mode_vrefresh(mode);
9121 bool fs_vid_mode = false;
9123 new_crtc_state->vrr_supported = new_con_state->freesync_capable &&
9124 vrefresh >= aconnector->min_vfreq &&
9125 vrefresh <= aconnector->max_vfreq;
9127 if (new_crtc_state->vrr_supported) {
9128 new_crtc_state->stream->ignore_msa_timing_param = true;
9129 fs_vid_mode = new_crtc_state->freesync_config.state == VRR_STATE_ACTIVE_FIXED;
9131 config.min_refresh_in_uhz = aconnector->min_vfreq * 1000000;
9132 config.max_refresh_in_uhz = aconnector->max_vfreq * 1000000;
9133 config.vsif_supported = true;
9137 config.state = VRR_STATE_ACTIVE_FIXED;
9138 config.fixed_refresh_in_uhz = new_crtc_state->freesync_config.fixed_refresh_in_uhz;
9140 } else if (new_crtc_state->base.vrr_enabled) {
9141 config.state = VRR_STATE_ACTIVE_VARIABLE;
9143 config.state = VRR_STATE_INACTIVE;
9147 new_crtc_state->freesync_config = config;
9150 static void reset_freesync_config_for_crtc(
9151 struct dm_crtc_state *new_crtc_state)
9153 new_crtc_state->vrr_supported = false;
9155 memset(&new_crtc_state->vrr_infopacket, 0,
9156 sizeof(new_crtc_state->vrr_infopacket));
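/*
 * Returns true when the old and new modes differ only in their vertical
 * blanking (vtotal and vsync position) while the pixel clock, active size
 * and horizontal timing are identical, i.e. a FreeSync video-mode switch
 * that only adjusts the front porch and does not require a full modeset.
 */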
9160 is_timing_unchanged_for_freesync(struct drm_crtc_state *old_crtc_state,
9161 struct drm_crtc_state *new_crtc_state)
9163 struct drm_display_mode old_mode, new_mode;
9165 if (!old_crtc_state || !new_crtc_state)
9168 old_mode = old_crtc_state->mode;
9169 new_mode = new_crtc_state->mode;
9171 if (old_mode.clock == new_mode.clock &&
9172 old_mode.hdisplay == new_mode.hdisplay &&
9173 old_mode.vdisplay == new_mode.vdisplay &&
9174 old_mode.htotal == new_mode.htotal &&
9175 old_mode.vtotal != new_mode.vtotal &&
9176 old_mode.hsync_start == new_mode.hsync_start &&
9177 old_mode.vsync_start != new_mode.vsync_start &&
9178 old_mode.hsync_end == new_mode.hsync_end &&
9179 old_mode.vsync_end != new_mode.vsync_end &&
9180 old_mode.hskew == new_mode.hskew &&
9181 old_mode.vscan == new_mode.vscan &&
9182 (old_mode.vsync_end - old_mode.vsync_start) ==
9183 (new_mode.vsync_end - new_mode.vsync_start))
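/*
 * Switch the CRTC to a fixed-rate FreeSync configuration and derive the
 * fixed refresh rate in uHz from the current mode:
 * refresh_uhz = (clock_khz * 1000 * 1000000) / (htotal * vtotal).
 */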
9189 static void set_freesync_fixed_config(struct dm_crtc_state *dm_new_crtc_state)
{
9190 uint64_t num, den, res;
9191 struct drm_crtc_state *new_crtc_state = &dm_new_crtc_state->base;
9193 dm_new_crtc_state->freesync_config.state = VRR_STATE_ACTIVE_FIXED;
9195 num = (unsigned long long)new_crtc_state->mode.clock * 1000 * 1000000;
9196 den = (unsigned long long)new_crtc_state->mode.htotal *
9197 (unsigned long long)new_crtc_state->mode.vtotal;
9199 res = div_u64(num, den);
9200 dm_new_crtc_state->freesync_config.fixed_refresh_in_uhz = res;
9203 static int dm_update_crtc_state(struct amdgpu_display_manager *dm,
9204 struct drm_atomic_state *state,
9205 struct drm_crtc *crtc,
9206 struct drm_crtc_state *old_crtc_state,
9207 struct drm_crtc_state *new_crtc_state,
9209 bool *lock_and_validation_needed)
9211 struct dm_atomic_state *dm_state = NULL;
9212 struct dm_crtc_state *dm_old_crtc_state, *dm_new_crtc_state;
9213 struct dc_stream_state *new_stream;
9217 * TODO Move this code into dm_crtc_atomic_check once we get rid of dc_validation_set
9218 * update changed items
9220 struct amdgpu_crtc *acrtc = NULL;
9221 struct amdgpu_dm_connector *aconnector = NULL;
9222 struct drm_connector_state *drm_new_conn_state = NULL, *drm_old_conn_state = NULL;
9223 struct dm_connector_state *dm_new_conn_state = NULL, *dm_old_conn_state = NULL;
9227 dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
9228 dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
9229 acrtc = to_amdgpu_crtc(crtc);
9230 aconnector = amdgpu_dm_find_first_crtc_matching_connector(state, crtc);
9232 /* TODO This hack should go away */
9233 if (aconnector && enable) {
9234 /* Make sure fake sink is created in plug-in scenario */
9235 drm_new_conn_state = drm_atomic_get_new_connector_state(state,
9237 drm_old_conn_state = drm_atomic_get_old_connector_state(state,
9240 if (IS_ERR(drm_new_conn_state)) {
9241 ret = PTR_ERR_OR_ZERO(drm_new_conn_state);
9245 dm_new_conn_state = to_dm_connector_state(drm_new_conn_state);
9246 dm_old_conn_state = to_dm_connector_state(drm_old_conn_state);
9248 if (!drm_atomic_crtc_needs_modeset(new_crtc_state))
9251 new_stream = create_validate_stream_for_sink(aconnector,
9252 &new_crtc_state->mode,
9254 dm_old_crtc_state->stream);
9257 * We can have no stream on ACTION_SET if a display
9258 * was disconnected during S3; in this case it is not an
9259 * error, since the OS will be updated after detection and
9260 * will do the right thing on the next atomic commit
9264 DRM_DEBUG_DRIVER("%s: Failed to create new stream for crtc %d\n",
9265 __func__, acrtc->base.base.id);
9271 * TODO: Check VSDB bits to decide whether this should
9272 * be enabled or not.
9274 new_stream->triggered_crtc_reset.enabled =
9275 dm->force_timing_sync;
9277 dm_new_crtc_state->abm_level = dm_new_conn_state->abm_level;
9279 ret = fill_hdr_info_packet(drm_new_conn_state,
9280 &new_stream->hdr_static_metadata);
9285 * If we already removed the old stream from the context
9286 * (and set the new stream to NULL) then we can't reuse
9287 * the old stream even if the stream and scaling are unchanged.
9288 * We'll hit the BUG_ON and black screen.
9290 * TODO: Refactor this function to allow this check to work
9291 * in all conditions.
9293 if (amdgpu_freesync_vid_mode &&
9294 dm_new_crtc_state->stream &&
9295 is_timing_unchanged_for_freesync(new_crtc_state, old_crtc_state))
9298 if (dm_new_crtc_state->stream &&
9299 dc_is_stream_unchanged(new_stream, dm_old_crtc_state->stream) &&
9300 dc_is_stream_scaling_unchanged(new_stream, dm_old_crtc_state->stream)) {
9301 new_crtc_state->mode_changed = false;
9302 DRM_DEBUG_DRIVER("Mode change not required, setting mode_changed to %d",
9303 new_crtc_state->mode_changed);
9307 /* mode_changed flag may get updated above, need to check again */
9308 if (!drm_atomic_crtc_needs_modeset(new_crtc_state))
9312 "amdgpu_crtc id:%d crtc_state_flags: enable:%d, active:%d, "
9313 "planes_changed:%d, mode_changed:%d,active_changed:%d,"
9314 "connectors_changed:%d\n",
9316 new_crtc_state->enable,
9317 new_crtc_state->active,
9318 new_crtc_state->planes_changed,
9319 new_crtc_state->mode_changed,
9320 new_crtc_state->active_changed,
9321 new_crtc_state->connectors_changed);
9323 /* Remove stream for any changed/disabled CRTC */
9326 if (!dm_old_crtc_state->stream)
9329 if (amdgpu_freesync_vid_mode && dm_new_crtc_state->stream &&
9330 is_timing_unchanged_for_freesync(new_crtc_state,
9332 new_crtc_state->mode_changed = false;
9334 "Mode change not required for front porch change, "
9335 "setting mode_changed to %d",
9336 new_crtc_state->mode_changed);
9338 set_freesync_fixed_config(dm_new_crtc_state);
9341 } else if (amdgpu_freesync_vid_mode && aconnector &&
9342 is_freesync_video_mode(&new_crtc_state->mode,
9344 set_freesync_fixed_config(dm_new_crtc_state);
9347 ret = dm_atomic_get_state(state, &dm_state);
9351 DRM_DEBUG_DRIVER("Disabling DRM crtc: %d\n",
9354 /* i.e. reset mode */
9355 if (dc_remove_stream_from_ctx(
9358 dm_old_crtc_state->stream) != DC_OK) {
9363 dc_stream_release(dm_old_crtc_state->stream);
9364 dm_new_crtc_state->stream = NULL;
9366 reset_freesync_config_for_crtc(dm_new_crtc_state);
9368 *lock_and_validation_needed = true;
9370 } else {/* Add stream for any updated/enabled CRTC */
9372 * Quick fix to prevent a NULL pointer dereference on new_stream when newly
9373 * added MST connectors are not found in the existing crtc_state in chained mode.
9374 * TODO: dig out the root cause of this.
9376 if (!aconnector || (!aconnector->dc_sink && aconnector->mst_port))
9379 if (modereset_required(new_crtc_state))
9382 if (modeset_required(new_crtc_state, new_stream,
9383 dm_old_crtc_state->stream)) {
9385 WARN_ON(dm_new_crtc_state->stream);
9387 ret = dm_atomic_get_state(state, &dm_state);
9391 dm_new_crtc_state->stream = new_stream;
9393 dc_stream_retain(new_stream);
9395 DRM_DEBUG_ATOMIC("Enabling DRM crtc: %d\n",
9398 if (dc_add_stream_to_ctx(
9401 dm_new_crtc_state->stream) != DC_OK) {
9406 *lock_and_validation_needed = true;
9411 /* Release extra reference */
9413 dc_stream_release(new_stream);
9416 * We want to do dc stream updates that do not require a
9417 * full modeset below.
9419 if (!(enable && aconnector && new_crtc_state->active))
9422 * Given above conditions, the dc state cannot be NULL because:
9423 * 1. We're in the process of enabling CRTCs (just been added
9424 * to the dc context, or already is on the context)
9425 * 2. Has a valid connector attached, and
9426 * 3. Is currently active and enabled.
9427 * => The dc stream state currently exists.
9429 BUG_ON(dm_new_crtc_state->stream == NULL);
9431 /* Scaling or underscan settings */
9432 if (is_scaling_state_different(dm_old_conn_state, dm_new_conn_state))
9433 update_stream_scaling_settings(
9434 &new_crtc_state->mode, dm_new_conn_state, dm_new_crtc_state->stream);
9437 dm_new_crtc_state->abm_level = dm_new_conn_state->abm_level;
9440 * Color management settings. We also update color properties
9441 * when a modeset is needed, to ensure it gets reprogrammed.
9443 if (dm_new_crtc_state->base.color_mgmt_changed ||
9444 drm_atomic_crtc_needs_modeset(new_crtc_state)) {
9445 ret = amdgpu_dm_update_crtc_color_mgmt(dm_new_crtc_state);
9450 /* Update Freesync settings. */
9451 get_freesync_config_for_crtc(dm_new_crtc_state,
9458 dc_stream_release(new_stream);
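/*
 * Decide whether a plane's DC state needs to be recreated for this commit.
 * Any change that can affect pipe allocation or bandwidth (CRTC or color
 * management changes, modesets, and scaling, rotation, blending, alpha,
 * colorspace, pixel format, tiling or DCC changes on planes sharing the
 * CRTC) forces a reset of the plane.
 */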
9462 static bool should_reset_plane(struct drm_atomic_state *state,
9463 struct drm_plane *plane,
9464 struct drm_plane_state *old_plane_state,
9465 struct drm_plane_state *new_plane_state)
9467 struct drm_plane *other;
9468 struct drm_plane_state *old_other_state, *new_other_state;
9469 struct drm_crtc_state *new_crtc_state;
9473 * TODO: Remove this hack once the checks below are sufficient
9474 * to determine when we need to reset all the planes on
9477 if (state->allow_modeset)
9480 /* Exit early if we know that we're adding or removing the plane. */
9481 if (old_plane_state->crtc != new_plane_state->crtc)
9484 /* old crtc == new_crtc == NULL, plane not in context. */
9485 if (!new_plane_state->crtc)
9489 drm_atomic_get_new_crtc_state(state, new_plane_state->crtc);
9491 if (!new_crtc_state)
9494 /* CRTC Degamma changes currently require us to recreate planes. */
9495 if (new_crtc_state->color_mgmt_changed)
9498 if (drm_atomic_crtc_needs_modeset(new_crtc_state))
9502 * If there are any new primary or overlay planes being added or
9503 * removed then the z-order can potentially change. To ensure
9504 * correct z-order and pipe acquisition the current DC architecture
9505 * requires us to remove and recreate all existing planes.
9507 * TODO: Come up with a more elegant solution for this.
9509 for_each_oldnew_plane_in_state(state, other, old_other_state, new_other_state, i) {
9510 struct amdgpu_framebuffer *old_afb, *new_afb;
9511 if (other->type == DRM_PLANE_TYPE_CURSOR)
9514 if (old_other_state->crtc != new_plane_state->crtc &&
9515 new_other_state->crtc != new_plane_state->crtc)
9518 if (old_other_state->crtc != new_other_state->crtc)
9521 /* Src/dst size and scaling updates. */
9522 if (old_other_state->src_w != new_other_state->src_w ||
9523 old_other_state->src_h != new_other_state->src_h ||
9524 old_other_state->crtc_w != new_other_state->crtc_w ||
9525 old_other_state->crtc_h != new_other_state->crtc_h)
9528 /* Rotation / mirroring updates. */
9529 if (old_other_state->rotation != new_other_state->rotation)
9532 /* Blending updates. */
9533 if (old_other_state->pixel_blend_mode !=
9534 new_other_state->pixel_blend_mode)
9537 /* Alpha updates. */
9538 if (old_other_state->alpha != new_other_state->alpha)
9541 /* Colorspace changes. */
9542 if (old_other_state->color_range != new_other_state->color_range ||
9543 old_other_state->color_encoding != new_other_state->color_encoding)
9546 /* Framebuffer checks fall at the end. */
9547 if (!old_other_state->fb || !new_other_state->fb)
9550 /* Pixel format changes can require bandwidth updates. */
9551 if (old_other_state->fb->format != new_other_state->fb->format)
9554 old_afb = (struct amdgpu_framebuffer *)old_other_state->fb;
9555 new_afb = (struct amdgpu_framebuffer *)new_other_state->fb;
9557 /* Tiling and DCC changes also require bandwidth updates. */
9558 if (old_afb->tiling_flags != new_afb->tiling_flags ||
9559 old_afb->base.modifier != new_afb->base.modifier)
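/*
 * Validate a framebuffer attached to the cursor plane: it must fit within
 * the hardware cursor limits, must not be cropped or scaled, its pitch
 * must match its width, and, when no format modifier is supplied, the
 * tiling flags must describe a linear surface.
 */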
9566 static int dm_check_cursor_fb(struct amdgpu_crtc *new_acrtc,
9567 struct drm_plane_state *new_plane_state,
9568 struct drm_framebuffer *fb)
9570 struct amdgpu_device *adev = drm_to_adev(new_acrtc->base.dev);
9571 struct amdgpu_framebuffer *afb = to_amdgpu_framebuffer(fb);
9575 if (fb->width > new_acrtc->max_cursor_width ||
9576 fb->height > new_acrtc->max_cursor_height) {
9577 DRM_DEBUG_ATOMIC("Bad cursor FB size %dx%d\n",
9578 new_plane_state->fb->width,
9579 new_plane_state->fb->height);
9582 if (new_plane_state->src_w != fb->width << 16 ||
9583 new_plane_state->src_h != fb->height << 16) {
9584 DRM_DEBUG_ATOMIC("Cropping not supported for cursor plane\n");
9588 /* Pitch in pixels */
9589 pitch = fb->pitches[0] / fb->format->cpp[0];
9591 if (fb->width != pitch) {
9592 DRM_DEBUG_ATOMIC("Cursor FB width %d doesn't match pitch %d",
9601 /* FB pitch is supported by cursor plane */
9604 DRM_DEBUG_ATOMIC("Bad cursor FB pitch %d px\n", pitch);
9608 /* Core DRM takes care of checking FB modifiers, so we only need to
9609 * check tiling flags when the FB doesn't have a modifier. */
9610 if (!(fb->flags & DRM_MODE_FB_MODIFIERS)) {
9611 if (adev->family < AMDGPU_FAMILY_AI) {
9612 linear = AMDGPU_TILING_GET(afb->tiling_flags, ARRAY_MODE) != DC_ARRAY_2D_TILED_THIN1 &&
9613 AMDGPU_TILING_GET(afb->tiling_flags, ARRAY_MODE) != DC_ARRAY_1D_TILED_THIN1 &&
9614 AMDGPU_TILING_GET(afb->tiling_flags, MICRO_TILE_MODE) == 0;
9616 linear = AMDGPU_TILING_GET(afb->tiling_flags, SWIZZLE_MODE) == 0;
9619 DRM_DEBUG_ATOMIC("Cursor FB not linear");
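/*
 * Add, remove or validate the DC plane state for a single DRM plane:
 * disabled or reset planes have their dc_plane_state removed from the DC
 * context, newly enabled planes get a dc_plane_state created, filled and
 * added to the context, and cursor planes only get their framebuffer
 * checked. Sets *lock_and_validation_needed when global validation is
 * required.
 */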
9627 static int dm_update_plane_state(struct dc *dc,
9628 struct drm_atomic_state *state,
9629 struct drm_plane *plane,
9630 struct drm_plane_state *old_plane_state,
9631 struct drm_plane_state *new_plane_state,
9633 bool *lock_and_validation_needed)
9636 struct dm_atomic_state *dm_state = NULL;
9637 struct drm_crtc *new_plane_crtc, *old_plane_crtc;
9638 struct drm_crtc_state *old_crtc_state, *new_crtc_state;
9639 struct dm_crtc_state *dm_new_crtc_state, *dm_old_crtc_state;
9640 struct dm_plane_state *dm_new_plane_state, *dm_old_plane_state;
9641 struct amdgpu_crtc *new_acrtc;
9646 new_plane_crtc = new_plane_state->crtc;
9647 old_plane_crtc = old_plane_state->crtc;
9648 dm_new_plane_state = to_dm_plane_state(new_plane_state);
9649 dm_old_plane_state = to_dm_plane_state(old_plane_state);
9651 if (plane->type == DRM_PLANE_TYPE_CURSOR) {
9652 if (!enable || !new_plane_crtc ||
9653 drm_atomic_plane_disabling(plane->state, new_plane_state))
9656 new_acrtc = to_amdgpu_crtc(new_plane_crtc);
9658 if (new_plane_state->src_x != 0 || new_plane_state->src_y != 0) {
9659 DRM_DEBUG_ATOMIC("Cropping not supported for cursor plane\n");
9663 if (new_plane_state->fb) {
9664 ret = dm_check_cursor_fb(new_acrtc, new_plane_state,
9665 new_plane_state->fb);
9673 needs_reset = should_reset_plane(state, plane, old_plane_state,
9676 /* Remove any changed/removed planes */
9681 if (!old_plane_crtc)
9684 old_crtc_state = drm_atomic_get_old_crtc_state(
9685 state, old_plane_crtc);
9686 dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
9688 if (!dm_old_crtc_state->stream)
9691 DRM_DEBUG_ATOMIC("Disabling DRM plane: %d on DRM crtc %d\n",
9692 plane->base.id, old_plane_crtc->base.id);
9694 ret = dm_atomic_get_state(state, &dm_state);
9698 if (!dc_remove_plane_from_context(
9700 dm_old_crtc_state->stream,
9701 dm_old_plane_state->dc_state,
9702 dm_state->context)) {
9708 dc_plane_state_release(dm_old_plane_state->dc_state);
9709 dm_new_plane_state->dc_state = NULL;
9711 *lock_and_validation_needed = true;
9713 } else { /* Add new planes */
9714 struct dc_plane_state *dc_new_plane_state;
9716 if (drm_atomic_plane_disabling(plane->state, new_plane_state))
9719 if (!new_plane_crtc)
9722 new_crtc_state = drm_atomic_get_new_crtc_state(state, new_plane_crtc);
9723 dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
9725 if (!dm_new_crtc_state->stream)
9731 ret = dm_plane_helper_check_state(new_plane_state, new_crtc_state);
9735 WARN_ON(dm_new_plane_state->dc_state);
9737 dc_new_plane_state = dc_create_plane_state(dc);
9738 if (!dc_new_plane_state)
9741 DRM_DEBUG_ATOMIC("Enabling DRM plane: %d on DRM crtc %d\n",
9742 plane->base.id, new_plane_crtc->base.id);
9744 ret = fill_dc_plane_attributes(
9745 drm_to_adev(new_plane_crtc->dev),
9750 dc_plane_state_release(dc_new_plane_state);
9754 ret = dm_atomic_get_state(state, &dm_state);
9756 dc_plane_state_release(dc_new_plane_state);
9761 * Any atomic check errors that occur after this will
9762 * not need a release. The plane state will be attached
9763 * to the stream, and therefore part of the atomic
9764 * state. It'll be released when the atomic state is
9767 if (!dc_add_plane_to_context(
9769 dm_new_crtc_state->stream,
9771 dm_state->context)) {
9773 dc_plane_state_release(dc_new_plane_state);
9777 dm_new_plane_state->dc_state = dc_new_plane_state;
9779 /* Tell DC to do a full surface update every time there
9780 * is a plane change. Inefficient, but works for now.
9782 dm_new_plane_state->dc_state->update_flags.bits.full_update = 1;
9784 *lock_and_validation_needed = true;
9791 static int dm_check_crtc_cursor(struct drm_atomic_state *state,
9792 struct drm_crtc *crtc,
9793 struct drm_crtc_state *new_crtc_state)
9795 struct drm_plane_state *new_cursor_state, *new_primary_state;
9796 int cursor_scale_w, cursor_scale_h, primary_scale_w, primary_scale_h;
9798 /* On DCE and DCN there is no dedicated hardware cursor plane. We get a
9799 * cursor per pipe but it's going to inherit the scaling and
9800 * positioning from the underlying pipe. Check that the cursor plane's
9801 * scaling matches the primary plane's. */
9803 new_cursor_state = drm_atomic_get_new_plane_state(state, crtc->cursor);
9804 new_primary_state = drm_atomic_get_new_plane_state(state, crtc->primary);
9805 if (!new_cursor_state || !new_primary_state ||
9806 !new_cursor_state->fb || !new_primary_state->fb) {
9810 cursor_scale_w = new_cursor_state->crtc_w * 1000 /
9811 (new_cursor_state->src_w >> 16);
9812 cursor_scale_h = new_cursor_state->crtc_h * 1000 /
9813 (new_cursor_state->src_h >> 16);
9815 primary_scale_w = new_primary_state->crtc_w * 1000 /
9816 (new_primary_state->src_w >> 16);
9817 primary_scale_h = new_primary_state->crtc_h * 1000 /
9818 (new_primary_state->src_h >> 16);
9820 if (cursor_scale_w != primary_scale_w ||
9821 cursor_scale_h != primary_scale_h) {
9822 DRM_DEBUG_ATOMIC("Cursor plane scaling doesn't match primary plane\n");
9829 #if defined(CONFIG_DRM_AMD_DC_DCN)
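/*
 * When a CRTC driving an MST connector goes through a modeset, pull the
 * other CRTCs that share the same MST topology into the atomic state so
 * their DSC configuration can be recomputed.
 */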
9830 static int add_affected_mst_dsc_crtcs(struct drm_atomic_state *state, struct drm_crtc *crtc)
9832 struct drm_connector *connector;
9833 struct drm_connector_state *conn_state;
9834 struct amdgpu_dm_connector *aconnector = NULL;
9836 for_each_new_connector_in_state(state, connector, conn_state, i) {
9837 if (conn_state->crtc != crtc)
9840 aconnector = to_amdgpu_dm_connector(connector);
9841 if (!aconnector->port || !aconnector->mst_port)
9850 return drm_dp_mst_add_affected_dsc_crtcs(state, &aconnector->mst_port->mst_mgr);
9855 * amdgpu_dm_atomic_check() - Atomic check implementation for AMDgpu DM.
9856 * @dev: The DRM device
9857 * @state: The atomic state to commit
9859 * Validate that the given atomic state is programmable by DC into hardware.
9860 * This involves constructing a &struct dc_state reflecting the new hardware
9861 * state we wish to commit, then querying DC to see if it is programmable. It's
9862 * important not to modify the existing DC state. Otherwise, atomic_check
9863 * may unexpectedly commit hardware changes.
9865 * When validating the DC state, it's important that the right locks are
9866 * acquired. For full updates case which removes/adds/updates streams on one
9867 * CRTC while flipping on another CRTC, acquiring global lock will guarantee
9868 * that any such full update commit will wait for completion of any outstanding
9869 * flip using DRMs synchronization events.
9871 * Note that DM adds the affected connectors for all CRTCs in state, when that
9872 * might not seem necessary. This is because DC stream creation requires the
9873 * DC sink, which is tied to the DRM connector state. Cleaning this up should
9874 * be possible but non-trivial - a possible TODO item.
9876 * Return: -Error code if validation failed.
9878 static int amdgpu_dm_atomic_check(struct drm_device *dev,
9879 struct drm_atomic_state *state)
9881 struct amdgpu_device *adev = drm_to_adev(dev);
9882 struct dm_atomic_state *dm_state = NULL;
9883 struct dc *dc = adev->dm.dc;
9884 struct drm_connector *connector;
9885 struct drm_connector_state *old_con_state, *new_con_state;
9886 struct drm_crtc *crtc;
9887 struct drm_crtc_state *old_crtc_state, *new_crtc_state;
9888 struct drm_plane *plane;
9889 struct drm_plane_state *old_plane_state, *new_plane_state;
9890 enum dc_status status;
9892 bool lock_and_validation_needed = false;
9893 struct dm_crtc_state *dm_old_crtc_state;
9895 trace_amdgpu_dm_atomic_check_begin(state);
9897 ret = drm_atomic_helper_check_modeset(dev, state);
9901 /* Check connector changes */
9902 for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) {
9903 struct dm_connector_state *dm_old_con_state = to_dm_connector_state(old_con_state);
9904 struct dm_connector_state *dm_new_con_state = to_dm_connector_state(new_con_state);
9906 /* Skip connectors that are disabled or part of modeset already. */
9907 if (!old_con_state->crtc && !new_con_state->crtc)
9910 if (!new_con_state->crtc)
9913 new_crtc_state = drm_atomic_get_crtc_state(state, new_con_state->crtc);
9914 if (IS_ERR(new_crtc_state)) {
9915 ret = PTR_ERR(new_crtc_state);
9919 if (dm_old_con_state->abm_level !=
9920 dm_new_con_state->abm_level)
9921 new_crtc_state->connectors_changed = true;
9924 #if defined(CONFIG_DRM_AMD_DC_DCN)
9925 if (dc_resource_is_dsc_encoding_supported(dc)) {
9926 for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
9927 if (drm_atomic_crtc_needs_modeset(new_crtc_state)) {
9928 ret = add_affected_mst_dsc_crtcs(state, crtc);
9935 for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
9936 dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
9938 if (!drm_atomic_crtc_needs_modeset(new_crtc_state) &&
9939 !new_crtc_state->color_mgmt_changed &&
9940 old_crtc_state->vrr_enabled == new_crtc_state->vrr_enabled &&
9941 dm_old_crtc_state->dsc_force_changed == false)
9944 if (!new_crtc_state->enable)
9947 ret = drm_atomic_add_affected_connectors(state, crtc);
9951 ret = drm_atomic_add_affected_planes(state, crtc);
9955 if (dm_old_crtc_state->dsc_force_changed)
9956 new_crtc_state->mode_changed = true;
9960 * Add all primary and overlay planes on the CRTC to the state
9961 * whenever a plane is enabled to maintain correct z-ordering
9962 * and to enable fast surface updates.
9964 drm_for_each_crtc(crtc, dev) {
9965 bool modified = false;
9967 for_each_oldnew_plane_in_state(state, plane, old_plane_state, new_plane_state, i) {
9968 if (plane->type == DRM_PLANE_TYPE_CURSOR)
9971 if (new_plane_state->crtc == crtc ||
9972 old_plane_state->crtc == crtc) {
9981 drm_for_each_plane_mask(plane, state->dev, crtc->state->plane_mask) {
9982 if (plane->type == DRM_PLANE_TYPE_CURSOR)
9986 drm_atomic_get_plane_state(state, plane);
9988 if (IS_ERR(new_plane_state)) {
9989 ret = PTR_ERR(new_plane_state);
9995 /* Remove existing planes if they are modified */
9996 for_each_oldnew_plane_in_state_reverse(state, plane, old_plane_state, new_plane_state, i) {
9997 ret = dm_update_plane_state(dc, state, plane,
10001 &lock_and_validation_needed);
10006 /* Disable all crtcs which require disable */
10007 for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
10008 ret = dm_update_crtc_state(&adev->dm, state, crtc,
10012 &lock_and_validation_needed);
10017 /* Enable all crtcs which require enable */
10018 for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
10019 ret = dm_update_crtc_state(&adev->dm, state, crtc,
10023 &lock_and_validation_needed);
10028 /* Add new/modified planes */
10029 for_each_oldnew_plane_in_state_reverse(state, plane, old_plane_state, new_plane_state, i) {
10030 ret = dm_update_plane_state(dc, state, plane,
10034 &lock_and_validation_needed);
10039 /* Run this here since we want to validate the streams we created */
10040 ret = drm_atomic_helper_check_planes(dev, state);
10044 /* Check cursor planes scaling */
10045 for_each_new_crtc_in_state(state, crtc, new_crtc_state, i) {
10046 ret = dm_check_crtc_cursor(state, crtc, new_crtc_state);
10051 if (state->legacy_cursor_update) {
10053 * This is a fast cursor update coming from the plane update
10054 * helper, check if it can be done asynchronously for better
10057 state->async_update =
10058 !drm_atomic_helper_async_check(dev, state);
10061 * Skip the remaining global validation if this is an async
10062 * update. Cursor updates can be done without affecting
10063 * state or bandwidth calcs and this avoids the performance
10064 * penalty of locking the private state object and
10065 * allocating a new dc_state.
10067 if (state->async_update)
10071 /* Check scaling and underscan changes */
10072 /* TODO Removed scaling changes validation due to inability to commit
10073 * new stream into context w/o causing full reset. Need to
10074 * decide how to handle.
10076 for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) {
10077 struct dm_connector_state *dm_old_con_state = to_dm_connector_state(old_con_state);
10078 struct dm_connector_state *dm_new_con_state = to_dm_connector_state(new_con_state);
10079 struct amdgpu_crtc *acrtc = to_amdgpu_crtc(dm_new_con_state->base.crtc);
10081 /* Skip any modesets/resets */
10082 if (!acrtc || drm_atomic_crtc_needs_modeset(
10083 drm_atomic_get_new_crtc_state(state, &acrtc->base)))
10086 /* Skip anything that is not a scaling or underscan change */
10087 if (!is_scaling_state_different(dm_new_con_state, dm_old_con_state))
10090 lock_and_validation_needed = true;
10094 * Streams and planes are reset when there are changes that affect
10095 * bandwidth. Anything that affects bandwidth needs to go through
10096 * DC global validation to ensure that the configuration can be applied
10099 * We have to currently stall out here in atomic_check for outstanding
10100 * commits to finish in this case because our IRQ handlers reference
10101 * DRM state directly - we can end up disabling interrupts too early
10104 * TODO: Remove this stall and drop DM state private objects.
10106 if (lock_and_validation_needed) {
10107 ret = dm_atomic_get_state(state, &dm_state);
10111 ret = do_aquire_global_lock(dev, state);
10115 #if defined(CONFIG_DRM_AMD_DC_DCN)
10116 if (!compute_mst_dsc_configs_for_state(state, dm_state->context))
10119 ret = dm_update_mst_vcpi_slots_for_dsc(state, dm_state->context);
10125 * Perform validation of MST topology in the state:
10126 * We need to perform the MST atomic check before calling
10127 * dc_validate_global_state(), or there is a chance
10128 * of getting stuck in an infinite loop and eventually hanging.
10130 ret = drm_dp_mst_atomic_check(state);
10133 status = dc_validate_global_state(dc, dm_state->context, false);
10134 if (status != DC_OK) {
10135 DC_LOG_WARNING("DC global validation failure: %s (%d)",
10136 dc_status_to_str(status), status);
10142 * The commit is a fast update. Fast updates shouldn't change
10143 * the DC context or affect global validation, and their commit
10144 * work can be done in parallel with other commits not touching
10145 * the same resource. If we have a new DC context as part of
10146 * the DM atomic state from validation we need to free it and
10147 * retain the existing one instead.
10149 * Furthermore, since the DM atomic state only contains the DC
10150 * context and can safely be annulled, we can free the state
10151 * and clear the associated private object now to free
10152 * some memory and avoid a possible use-after-free later.
10155 for (i = 0; i < state->num_private_objs; i++) {
10156 struct drm_private_obj *obj = state->private_objs[i].ptr;
10158 if (obj->funcs == adev->dm.atomic_obj.funcs) {
10159 int j = state->num_private_objs-1;
10161 dm_atomic_destroy_state(obj,
10162 state->private_objs[i].state);
10164 /* If i is not at the end of the array then the
10165 * last element needs to be moved to where i was
10166 * before the array can safely be truncated.
10169 state->private_objs[i] =
10170 state->private_objs[j];
10172 state->private_objs[j].ptr = NULL;
10173 state->private_objs[j].state = NULL;
10174 state->private_objs[j].old_state = NULL;
10175 state->private_objs[j].new_state = NULL;
10177 state->num_private_objs = j;
10183 /* Store the overall update type for use later in atomic check. */
10184 for_each_new_crtc_in_state (state, crtc, new_crtc_state, i) {
10185 struct dm_crtc_state *dm_new_crtc_state =
10186 to_dm_crtc_state(new_crtc_state);
10188 dm_new_crtc_state->update_type = lock_and_validation_needed ?
10193 /* Must be success */
10196 trace_amdgpu_dm_atomic_check_finish(state, ret);
10201 if (ret == -EDEADLK)
10202 DRM_DEBUG_DRIVER("Atomic check stopped to avoid deadlock.\n");
10203 else if (ret == -EINTR || ret == -EAGAIN || ret == -ERESTARTSYS)
10204 DRM_DEBUG_DRIVER("Atomic check stopped due to signal.\n");
10206 DRM_DEBUG_DRIVER("Atomic check failed with err: %d\n", ret);
10208 trace_amdgpu_dm_atomic_check_finish(state, ret);
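/*
 * Read DP_DOWN_STREAM_PORT_COUNT from the sink's DPCD and report whether
 * the DP_MSA_TIMING_PAR_IGNORED bit is set, i.e. whether the sink can
 * ignore MSA timing parameters (a prerequisite for DP Adaptive-Sync).
 */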
10213 static bool is_dp_capable_without_timing_msa(struct dc *dc,
10214 struct amdgpu_dm_connector *amdgpu_dm_connector)
10217 bool capable = false;
10219 if (amdgpu_dm_connector->dc_link &&
10220 dm_helpers_dp_read_dpcd(
10222 amdgpu_dm_connector->dc_link,
10223 DP_DOWN_STREAM_PORT_COUNT,
10225 sizeof(dpcd_data))) {
10226 capable = (dpcd_data & DP_MSA_TIMING_PAR_IGNORED) ? true:false;
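/*
 * Stream a CEA extension block to the DC EDID parser eight bytes at a
 * time and collect the result: if an AMD VSDB is found, the reported
 * FreeSync version and min/max refresh rates are copied into vsdb_info.
 */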
static bool parse_edid_cea(struct amdgpu_dm_connector *aconnector,
		uint8_t *edid_ext, int len,
		struct amdgpu_hdmi_vsdb_info *vsdb_info)
{
	int i;
	struct amdgpu_device *adev = drm_to_adev(aconnector->base.dev);
	struct dc *dc = adev->dm.dc;

	/* send extension block to DMCU for parsing */
	for (i = 0; i < len; i += 8) {
		bool res;
		int offset;

		/* send 8 bytes at a time */
		if (!dc_edid_parser_send_cea(dc, i, len, &edid_ext[i], 8))
			return false;

		if (i + 8 == len) {
			/* EDID block sent completely, expect result */
			int version, min_rate, max_rate;

			res = dc_edid_parser_recv_amd_vsdb(dc, &version, &min_rate, &max_rate);
			if (res) {
				/* amd vsdb found */
				vsdb_info->freesync_supported = 1;
				vsdb_info->amd_vsdb_version = version;
				vsdb_info->min_refresh_rate_hz = min_rate;
				vsdb_info->max_refresh_rate_hz = max_rate;
				return true;
			}
			/* not amd vsdb */
			return false;
		}

		/* check for ack */
		res = dc_edid_parser_recv_cea_ack(dc, &offset);
		if (!res)
			return false;
	}

	return false;
}
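
/*
 * parse_hdmi_amd_vsdb() - look for an AMD VSDB in the EDID's CEA extension.
 *
 * EDID extension blocks are EDID_LENGTH (128) bytes each and follow the base
 * block, so extension i starts at byte offset EDID_LENGTH * (i + 1) from the
 * start of the EDID (with a single extension, the CEA block occupies bytes
 * 128-255). Returns the index of the CEA extension containing a valid AMD
 * VSDB, or -ENODEV.
 */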
static int parse_hdmi_amd_vsdb(struct amdgpu_dm_connector *aconnector,
		struct edid *edid, struct amdgpu_hdmi_vsdb_info *vsdb_info)
{
	uint8_t *edid_ext = NULL;
	int i;
	bool valid_vsdb_found = false;

	/*----- drm_find_cea_extension() -----*/
	/* No EDID or EDID extensions */
	if (edid == NULL || edid->extensions == 0)
		return -ENODEV;

	/* Find CEA extension */
	for (i = 0; i < edid->extensions; i++) {
		edid_ext = (uint8_t *)edid + EDID_LENGTH * (i + 1);
		if (edid_ext[0] == CEA_EXT)
			break;
	}

	if (i == edid->extensions)
		return -ENODEV;

	/*----- cea_db_offsets() -----*/
	if (edid_ext[0] != CEA_EXT)
		return -ENODEV;

	valid_vsdb_found = parse_edid_cea(aconnector, edid_ext, EDID_LENGTH, vsdb_info);

	return valid_vsdb_found ? i : -ENODEV;
}
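
/*
 * amdgpu_dm_update_freesync_caps() - refresh the connector's FreeSync/VRR
 * capability from a newly read EDID.
 *
 * For DP and eDP sinks that report they can ignore MSA timing parameters, the
 * refresh-rate range is taken from the EDID monitor range descriptor; for
 * HDMI sinks it comes from the AMD VSDB parsed with firmware assistance (see
 * parse_edid_cea() above). FreeSync is only advertised when the supported
 * range spans more than 10 Hz. Note that the range descriptor stores its max
 * pixel clock in 10 MHz units, hence the "* 10" below.
 */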
void amdgpu_dm_update_freesync_caps(struct drm_connector *connector,
					struct edid *edid)
{
	int i = 0;
	struct detailed_timing *timing;
	struct detailed_non_pixel *data;
	struct detailed_data_monitor_range *range;
	struct amdgpu_dm_connector *amdgpu_dm_connector =
			to_amdgpu_dm_connector(connector);
	struct dm_connector_state *dm_con_state = NULL;

	struct drm_device *dev = connector->dev;
	struct amdgpu_device *adev = drm_to_adev(dev);
	bool freesync_capable = false;
	struct amdgpu_hdmi_vsdb_info vsdb_info = {0};

	if (!connector->state) {
		DRM_ERROR("%s - Connector has no state", __func__);
		goto update;
	}

	if (!edid) {
		dm_con_state = to_dm_connector_state(connector->state);

		amdgpu_dm_connector->min_vfreq = 0;
		amdgpu_dm_connector->max_vfreq = 0;
		amdgpu_dm_connector->pixel_clock_mhz = 0;

		goto update;
	}

	dm_con_state = to_dm_connector_state(connector->state);

	if (!amdgpu_dm_connector->dc_sink) {
		DRM_ERROR("dc_sink NULL, could not add free_sync module.\n");
		goto update;
	}
	if (!adev->dm.freesync_module)
		goto update;
	if (amdgpu_dm_connector->dc_sink->sink_signal == SIGNAL_TYPE_DISPLAY_PORT
		|| amdgpu_dm_connector->dc_sink->sink_signal == SIGNAL_TYPE_EDP) {
		bool edid_check_required = false;

		if (edid) {
			edid_check_required = is_dp_capable_without_timing_msa(
						adev->dm.dc,
						amdgpu_dm_connector);
		}

		if (edid_check_required == true && (edid->version > 1 ||
		    (edid->version == 1 && edid->revision > 1))) {
			for (i = 0; i < 4; i++) {
				timing = &edid->detailed_timings[i];
				data = &timing->data.other_data;
				range = &data->data.range;
				/*
				 * Check if monitor has continuous frequency mode
				 */
				if (data->type != EDID_DETAIL_MONITOR_RANGE)
					continue;
				/*
				 * Check for flag range limits only. If flag == 1 then
				 * no additional timing information provided.
				 * Default GTF, GTF Secondary curve and CVT are not
				 * supported
				 */
				if (range->flags != 1)
					continue;

				amdgpu_dm_connector->min_vfreq = range->min_vfreq;
				amdgpu_dm_connector->max_vfreq = range->max_vfreq;
				amdgpu_dm_connector->pixel_clock_mhz =
					range->pixel_clock_mhz * 10;

				connector->display_info.monitor_range.min_vfreq = range->min_vfreq;
				connector->display_info.monitor_range.max_vfreq = range->max_vfreq;

				break;
			}

			if (amdgpu_dm_connector->max_vfreq -
			    amdgpu_dm_connector->min_vfreq > 10) {
				freesync_capable = true;
			}
		}
	} else if (edid && amdgpu_dm_connector->dc_sink->sink_signal == SIGNAL_TYPE_HDMI_TYPE_A) {
		i = parse_hdmi_amd_vsdb(amdgpu_dm_connector, edid, &vsdb_info);
		if (i >= 0 && vsdb_info.freesync_supported) {
			timing = &edid->detailed_timings[i];
			data = &timing->data.other_data;

			amdgpu_dm_connector->min_vfreq = vsdb_info.min_refresh_rate_hz;
			amdgpu_dm_connector->max_vfreq = vsdb_info.max_refresh_rate_hz;
			if (amdgpu_dm_connector->max_vfreq - amdgpu_dm_connector->min_vfreq > 10)
				freesync_capable = true;

			connector->display_info.monitor_range.min_vfreq = vsdb_info.min_refresh_rate_hz;
			connector->display_info.monitor_range.max_vfreq = vsdb_info.max_refresh_rate_hz;
		}
	}

update:
	if (dm_con_state)
		dm_con_state->freesync_capable = freesync_capable;

	if (connector->vrr_capable_property)
		drm_connector_set_vrr_capable_property(connector,
						       freesync_capable);
}
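
/*
 * amdgpu_dm_set_psr_caps() - cache the eDP sink's PSR capability.
 *
 * Reads the DP_PSR_SUPPORT DPCD field; byte 0 holds the PSR version the sink
 * supports (0 means PSR is not supported). The result is cached in the link
 * so later PSR setup can key off it without touching DPCD again.
 */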
static void amdgpu_dm_set_psr_caps(struct dc_link *link)
{
	uint8_t dpcd_data[EDP_PSR_RECEIVER_CAP_SIZE];

	if (!(link->connector_signal & SIGNAL_TYPE_EDP))
		return;
	if (link->type == dc_connection_none)
		return;
	if (dm_helpers_dp_read_dpcd(NULL, link, DP_PSR_SUPPORT,
					dpcd_data, sizeof(dpcd_data))) {
		link->dpcd_caps.psr_caps.psr_version = dpcd_data[0];

		if (dpcd_data[0] == 0) {
			link->psr_settings.psr_version = DC_PSR_VERSION_UNSUPPORTED;
			link->psr_settings.psr_feature_enabled = false;
		} else {
			link->psr_settings.psr_version = DC_PSR_VERSION_1;
			link->psr_settings.psr_feature_enabled = true;
		}

		DRM_INFO("PSR support:%d\n", link->psr_settings.psr_feature_enabled);
	}
}
/*
 * amdgpu_dm_link_setup_psr() - configure psr link
 * @stream: stream state
 *
 * Return: true if success
 */
static bool amdgpu_dm_link_setup_psr(struct dc_stream_state *stream)
{
	struct dc_link *link = NULL;
	struct psr_config psr_config = {0};
	struct psr_context psr_context = {0};
	bool ret = false;

	if (stream == NULL)
		return false;

	link = stream->link;

	psr_config.psr_version = link->dpcd_caps.psr_caps.psr_version;

	if (psr_config.psr_version > 0) {
		psr_config.psr_exit_link_training_required = 0x1;
		psr_config.psr_frame_capture_indication_req = 0;
		psr_config.psr_rfb_setup_time = 0x37;
		psr_config.psr_sdp_transmit_line_num_deadline = 0x20;
		psr_config.allow_smu_optimizations = 0x0;

		ret = dc_link_setup_psr(link, stream, &psr_config, &psr_context);
	}
	DRM_DEBUG_DRIVER("PSR link: %d\n", link->psr_settings.psr_feature_enabled);

	return ret;
}
/*
 * amdgpu_dm_psr_enable() - enable psr f/w
 * @stream: stream state
 *
 * Return: true if success
 */
bool amdgpu_dm_psr_enable(struct dc_stream_state *stream)
{
	struct dc_link *link = stream->link;
	unsigned int vsync_rate_hz = 0;
	struct dc_static_screen_params params = {0};
	/*
	 * Calculate number of static frames before generating interrupt to
	 * enter PSR.
	 */
	/* Init fail-safe of 2 frames static */
	unsigned int num_frames_static = 2;
	DRM_DEBUG_DRIVER("Enabling psr...\n");

	vsync_rate_hz = div64_u64(div64_u64((
			stream->timing.pix_clk_100hz * 100),
			stream->timing.v_total),
			stream->timing.h_total);
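	/*
	 * For example, pix_clk_100hz = 1485000 (148.5 MHz) with
	 * v_total = 1125 and h_total = 2200 yields vsync_rate_hz = 60.
	 */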
	/*
	 * Calculate number of frames such that at least 30 ms of time has
	 * passed.
	 */
	if (vsync_rate_hz != 0) {
		unsigned int frame_time_microsec = 1000000 / vsync_rate_hz;

		num_frames_static = (30000 / frame_time_microsec) + 1;
	}
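	/*
	 * E.g. at 60 Hz, frame_time_microsec = 16666, so num_frames_static
	 * becomes 30000 / 16666 + 1 = 2 frames before PSR entry.
	 */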
	params.triggers.cursor_update = true;
	params.triggers.overlay_update = true;
	params.triggers.surface_update = true;
	params.num_frames = num_frames_static;

	dc_stream_set_static_screen_params(link->ctx->dc,
					   &stream, 1,
					   &params);

	return dc_link_set_psr_allow_active(link, true, false, false);
}
/*
 * amdgpu_dm_psr_disable() - disable psr f/w
 * @stream: stream state
 *
 * Return: true if success
 */
static bool amdgpu_dm_psr_disable(struct dc_stream_state *stream)
{
	DRM_DEBUG_DRIVER("Disabling psr...\n");

	return dc_link_set_psr_allow_active(stream->link, false, true, false);
}
/*
 * amdgpu_dm_psr_disable_all() - disable psr f/w
 * if psr is enabled on any stream
 *
 * Return: true if success
 */
static bool amdgpu_dm_psr_disable_all(struct amdgpu_display_manager *dm)
{
	DRM_DEBUG_DRIVER("Disabling psr if psr is enabled on any stream\n");
	return dc_set_psr_allow_active(dm->dc, false);
}
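
/*
 * amdgpu_dm_trigger_timing_sync() - re-trigger CRTC timing synchronization.
 *
 * Propagates the force_timing_sync setting (toggled via debugfs) to every
 * stream in the current DC state and asks DC to resynchronize the CRTCs.
 */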
void amdgpu_dm_trigger_timing_sync(struct drm_device *dev)
{
	struct amdgpu_device *adev = drm_to_adev(dev);
	struct dc *dc = adev->dm.dc;
	int i;

	mutex_lock(&adev->dm.dc_lock);
	if (dc->current_state) {
		for (i = 0; i < dc->current_state->stream_count; ++i)
			dc->current_state->streams[i]
					->triggered_crtc_reset.enabled =
					adev->dm.force_timing_sync;

		dm_enable_per_frame_crtc_master_sync(dc->current_state);
		dc_trigger_sync(dc, dc->current_state);
	}
	mutex_unlock(&adev->dm.dc_lock);
}
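
/*
 * dm_write_reg_func()/dm_read_reg_func() - register access helpers used by
 * DC. They go through the CGS services and emit amdgpu_dc_wreg/amdgpu_dc_rreg
 * tracepoints so register traffic can be inspected with ftrace. When
 * DM_CHECK_ADDR_0 is defined, accesses to offset 0 (almost always an
 * uninitialized register field) are caught and rejected.
 */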
void dm_write_reg_func(const struct dc_context *ctx, uint32_t address,
		       uint32_t value, const char *func_name)
{
#ifdef DM_CHECK_ADDR_0
	if (address == 0) {
		DC_ERR("invalid register write. address = 0\n");
		return;
	}
#endif
	cgs_write_register(ctx->cgs_device, address, value);
	trace_amdgpu_dc_wreg(&ctx->perf_trace->write_count, address, value);
}
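
/*
 * Reads are refused while the DMUB register helper is gathering a command
 * sequence for offload (and not burst-writing it), since earlier writes in
 * the sequence may not have reached the hardware yet and a read-back here
 * could return stale data.
 */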
uint32_t dm_read_reg_func(const struct dc_context *ctx, uint32_t address,
			  const char *func_name)
{
	uint32_t value;
#ifdef DM_CHECK_ADDR_0
	if (address == 0) {
		DC_ERR("invalid register read; address = 0\n");
		return 0;
	}
#endif

	if (ctx->dmub_srv &&
	    ctx->dmub_srv->reg_helper_offload.gather_in_progress &&
	    !ctx->dmub_srv->reg_helper_offload.should_burst_write) {
		ASSERT(false);
		return 0;
	}

	value = cgs_read_register(ctx->cgs_device, address);

	trace_amdgpu_dc_rreg(&ctx->perf_trace->read_count, address, value);