2 * Copyright 2015 Advanced Micro Devices, Inc.
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
26 /* The caprices of the preprocessor require that this be declared right here */
27 #define CREATE_TRACE_POINTS
29 #include "dm_services_types.h"
31 #include "dc/inc/core_types.h"
32 #include "dal_asic_id.h"
33 #include "dmub/dmub_srv.h"
34 #include "dc/inc/hw/dmcu.h"
35 #include "dc/inc/hw/abm.h"
36 #include "dc/dc_dmub_srv.h"
37 #include "dc/dc_edid_parser.h"
38 #include "amdgpu_dm_trace.h"
42 #include "amdgpu_display.h"
43 #include "amdgpu_ucode.h"
45 #include "amdgpu_dm.h"
46 #ifdef CONFIG_DRM_AMD_DC_HDCP
47 #include "amdgpu_dm_hdcp.h"
48 #include <drm/drm_hdcp.h>
50 #include "amdgpu_pm.h"
52 #include "amd_shared.h"
53 #include "amdgpu_dm_irq.h"
54 #include "dm_helpers.h"
55 #include "amdgpu_dm_mst_types.h"
56 #if defined(CONFIG_DEBUG_FS)
57 #include "amdgpu_dm_debugfs.h"
60 #include "ivsrcid/ivsrcid_vislands30.h"
62 #include <linux/module.h>
63 #include <linux/moduleparam.h>
64 #include <linux/types.h>
65 #include <linux/pm_runtime.h>
66 #include <linux/pci.h>
67 #include <linux/firmware.h>
68 #include <linux/component.h>
70 #include <drm/drm_atomic.h>
71 #include <drm/drm_atomic_uapi.h>
72 #include <drm/drm_atomic_helper.h>
73 #include <drm/drm_dp_mst_helper.h>
74 #include <drm/drm_fb_helper.h>
75 #include <drm/drm_fourcc.h>
76 #include <drm/drm_edid.h>
77 #include <drm/drm_vblank.h>
78 #include <drm/drm_audio_component.h>
80 #if defined(CONFIG_DRM_AMD_DC_DCN)
81 #include "ivsrcid/dcn/irqsrcs_dcn_1_0.h"
83 #include "dcn/dcn_1_0_offset.h"
84 #include "dcn/dcn_1_0_sh_mask.h"
85 #include "soc15_hw_ip.h"
86 #include "vega10_ip_offset.h"
88 #include "soc15_common.h"
91 #include "modules/inc/mod_freesync.h"
92 #include "modules/power/power_helpers.h"
93 #include "modules/inc/mod_info_packet.h"
95 #define FIRMWARE_RENOIR_DMUB "amdgpu/renoir_dmcub.bin"
96 MODULE_FIRMWARE(FIRMWARE_RENOIR_DMUB);
97 #define FIRMWARE_SIENNA_CICHLID_DMUB "amdgpu/sienna_cichlid_dmcub.bin"
98 MODULE_FIRMWARE(FIRMWARE_SIENNA_CICHLID_DMUB);
99 #define FIRMWARE_NAVY_FLOUNDER_DMUB "amdgpu/navy_flounder_dmcub.bin"
100 MODULE_FIRMWARE(FIRMWARE_NAVY_FLOUNDER_DMUB);
101 #define FIRMWARE_GREEN_SARDINE_DMUB "amdgpu/green_sardine_dmcub.bin"
102 MODULE_FIRMWARE(FIRMWARE_GREEN_SARDINE_DMUB);
103 #define FIRMWARE_VANGOGH_DMUB "amdgpu/vangogh_dmcub.bin"
104 MODULE_FIRMWARE(FIRMWARE_VANGOGH_DMUB);
105 #define FIRMWARE_DIMGREY_CAVEFISH_DMUB "amdgpu/dimgrey_cavefish_dmcub.bin"
106 MODULE_FIRMWARE(FIRMWARE_DIMGREY_CAVEFISH_DMUB);
108 #define FIRMWARE_RAVEN_DMCU "amdgpu/raven_dmcu.bin"
109 MODULE_FIRMWARE(FIRMWARE_RAVEN_DMCU);
111 #define FIRMWARE_NAVI12_DMCU "amdgpu/navi12_dmcu.bin"
112 MODULE_FIRMWARE(FIRMWARE_NAVI12_DMCU);
114 /* Number of bytes in PSP header for firmware. */
115 #define PSP_HEADER_BYTES 0x100
117 /* Number of bytes in PSP footer for firmware. */
118 #define PSP_FOOTER_BYTES 0x100
123 * The AMDgpu display manager, **amdgpu_dm** (or even simpler,
124 * **dm**) sits between DRM and DC. It acts as a liaison, converting DRM
125 * requests into DC requests, and DC responses into DRM responses.
127 * The root control structure is &struct amdgpu_display_manager.
130 /* basic init/fini API */
131 static int amdgpu_dm_init(struct amdgpu_device *adev);
132 static void amdgpu_dm_fini(struct amdgpu_device *adev);
133 static bool is_freesync_video_mode(const struct drm_display_mode *mode, struct amdgpu_dm_connector *aconnector);
135 static enum drm_mode_subconnector get_subconnector_type(struct dc_link *link)
137 switch (link->dpcd_caps.dongle_type) {
138 case DISPLAY_DONGLE_NONE:
139 return DRM_MODE_SUBCONNECTOR_Native;
140 case DISPLAY_DONGLE_DP_VGA_CONVERTER:
141 return DRM_MODE_SUBCONNECTOR_VGA;
142 case DISPLAY_DONGLE_DP_DVI_CONVERTER:
143 case DISPLAY_DONGLE_DP_DVI_DONGLE:
144 return DRM_MODE_SUBCONNECTOR_DVID;
145 case DISPLAY_DONGLE_DP_HDMI_CONVERTER:
146 case DISPLAY_DONGLE_DP_HDMI_DONGLE:
147 return DRM_MODE_SUBCONNECTOR_HDMIA;
148 case DISPLAY_DONGLE_DP_HDMI_MISMATCHED_DONGLE:
150 return DRM_MODE_SUBCONNECTOR_Unknown;
154 static void update_subconnector_property(struct amdgpu_dm_connector *aconnector)
156 struct dc_link *link = aconnector->dc_link;
157 struct drm_connector *connector = &aconnector->base;
158 enum drm_mode_subconnector subconnector = DRM_MODE_SUBCONNECTOR_Unknown;
160 if (connector->connector_type != DRM_MODE_CONNECTOR_DisplayPort)
163 if (aconnector->dc_sink)
164 subconnector = get_subconnector_type(link);
166 drm_object_property_set_value(&connector->base,
167 connector->dev->mode_config.dp_subconnector_property,
172 * initializes drm_device display related structures, based on the information
173 * provided by DAL. The drm structures are: drm_crtc, drm_connector,
174 * drm_encoder, drm_mode_config
176 * Returns 0 on success
178 static int amdgpu_dm_initialize_drm_device(struct amdgpu_device *adev);
179 /* removes and deallocates the drm structures, created by the above function */
180 static void amdgpu_dm_destroy_drm_device(struct amdgpu_display_manager *dm);
182 static int amdgpu_dm_plane_init(struct amdgpu_display_manager *dm,
183 struct drm_plane *plane,
184 unsigned long possible_crtcs,
185 const struct dc_plane_cap *plane_cap);
186 static int amdgpu_dm_crtc_init(struct amdgpu_display_manager *dm,
187 struct drm_plane *plane,
188 uint32_t link_index);
189 static int amdgpu_dm_connector_init(struct amdgpu_display_manager *dm,
190 struct amdgpu_dm_connector *amdgpu_dm_connector,
192 struct amdgpu_encoder *amdgpu_encoder);
193 static int amdgpu_dm_encoder_init(struct drm_device *dev,
194 struct amdgpu_encoder *aencoder,
195 uint32_t link_index);
197 static int amdgpu_dm_connector_get_modes(struct drm_connector *connector);
199 static void amdgpu_dm_atomic_commit_tail(struct drm_atomic_state *state);
201 static int amdgpu_dm_atomic_check(struct drm_device *dev,
202 struct drm_atomic_state *state);
204 static void handle_cursor_update(struct drm_plane *plane,
205 struct drm_plane_state *old_plane_state);
207 static void amdgpu_dm_set_psr_caps(struct dc_link *link);
208 static bool amdgpu_dm_psr_enable(struct dc_stream_state *stream);
209 static bool amdgpu_dm_link_setup_psr(struct dc_stream_state *stream);
210 static bool amdgpu_dm_psr_disable(struct dc_stream_state *stream);
211 static bool amdgpu_dm_psr_disable_all(struct amdgpu_display_manager *dm);
213 static const struct drm_format_info *
214 amd_get_format_info(const struct drm_mode_fb_cmd2 *cmd);
217 is_timing_unchanged_for_freesync(struct drm_crtc_state *old_crtc_state,
218 struct drm_crtc_state *new_crtc_state);
220 * dm_vblank_get_counter
223 * Get counter for number of vertical blanks
226 * struct amdgpu_device *adev - [in] desired amdgpu device
227 * int disp_idx - [in] which CRTC to get the counter from
230 * Counter for vertical blanks
232 static u32 dm_vblank_get_counter(struct amdgpu_device *adev, int crtc)
234 if (crtc >= adev->mode_info.num_crtc)
237 struct amdgpu_crtc *acrtc = adev->mode_info.crtcs[crtc];
239 if (acrtc->dm_irq_params.stream == NULL) {
240 DRM_ERROR("dc_stream_state is NULL for crtc '%d'!\n",
245 return dc_stream_get_vblank_counter(acrtc->dm_irq_params.stream);
249 static int dm_crtc_get_scanoutpos(struct amdgpu_device *adev, int crtc,
250 u32 *vbl, u32 *position)
252 uint32_t v_blank_start, v_blank_end, h_position, v_position;
254 if ((crtc < 0) || (crtc >= adev->mode_info.num_crtc))
257 struct amdgpu_crtc *acrtc = adev->mode_info.crtcs[crtc];
259 if (acrtc->dm_irq_params.stream == NULL) {
260 DRM_ERROR("dc_stream_state is NULL for crtc '%d'!\n",
266 * TODO rework base driver to use values directly.
267 * for now parse it back into reg-format
269 dc_stream_get_scanoutpos(acrtc->dm_irq_params.stream,
275 *position = v_position | (h_position << 16);
276 *vbl = v_blank_start | (v_blank_end << 16);
282 static bool dm_is_idle(void *handle)
288 static int dm_wait_for_idle(void *handle)
294 static bool dm_check_soft_reset(void *handle)
299 static int dm_soft_reset(void *handle)
305 static struct amdgpu_crtc *
306 get_crtc_by_otg_inst(struct amdgpu_device *adev,
309 struct drm_device *dev = adev_to_drm(adev);
310 struct drm_crtc *crtc;
311 struct amdgpu_crtc *amdgpu_crtc;
313 if (otg_inst == -1) {
315 return adev->mode_info.crtcs[0];
318 list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
319 amdgpu_crtc = to_amdgpu_crtc(crtc);
321 if (amdgpu_crtc->otg_inst == otg_inst)
328 static inline bool amdgpu_dm_vrr_active_irq(struct amdgpu_crtc *acrtc)
330 return acrtc->dm_irq_params.freesync_config.state ==
331 VRR_STATE_ACTIVE_VARIABLE ||
332 acrtc->dm_irq_params.freesync_config.state ==
333 VRR_STATE_ACTIVE_FIXED;
336 static inline bool amdgpu_dm_vrr_active(struct dm_crtc_state *dm_state)
338 return dm_state->freesync_config.state == VRR_STATE_ACTIVE_VARIABLE ||
339 dm_state->freesync_config.state == VRR_STATE_ACTIVE_FIXED;
342 static inline bool is_dc_timing_adjust_needed(struct dm_crtc_state *old_state,
343 struct dm_crtc_state *new_state)
345 if (new_state->freesync_config.state == VRR_STATE_ACTIVE_FIXED)
347 else if (amdgpu_dm_vrr_active(old_state) != amdgpu_dm_vrr_active(new_state))
354 * dm_pflip_high_irq() - Handle pageflip interrupt
355 * @interrupt_params: ignored
357 * Handles the pageflip interrupt by notifying all interested parties
358 * that the pageflip has been completed.
360 static void dm_pflip_high_irq(void *interrupt_params)
362 struct amdgpu_crtc *amdgpu_crtc;
363 struct common_irq_params *irq_params = interrupt_params;
364 struct amdgpu_device *adev = irq_params->adev;
366 struct drm_pending_vblank_event *e;
367 uint32_t vpos, hpos, v_blank_start, v_blank_end;
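/* Note: the pageflip IRQ sources are assumed to be laid out one per OTG
 * instance, so subtracting the IRQ_TYPE_PFLIP base from irq_src gives the
 * index of the OTG (and thus the CRTC) being serviced.
 */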
370 amdgpu_crtc = get_crtc_by_otg_inst(adev, irq_params->irq_src - IRQ_TYPE_PFLIP);
372 /* IRQ could occur when in initial stage */
373 /* TODO work and BO cleanup */
374 if (amdgpu_crtc == NULL) {
375 DC_LOG_PFLIP("CRTC is null, returning.\n");
379 spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);
381 if (amdgpu_crtc->pflip_status != AMDGPU_FLIP_SUBMITTED) {
382 DC_LOG_PFLIP("amdgpu_crtc->pflip_status = %d != AMDGPU_FLIP_SUBMITTED(%d) on crtc:%d[%p]\n",
383 amdgpu_crtc->pflip_status,
384 AMDGPU_FLIP_SUBMITTED,
385 amdgpu_crtc->crtc_id,
387 spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
391 /* page flip completed. */
392 e = amdgpu_crtc->event;
393 amdgpu_crtc->event = NULL;
398 vrr_active = amdgpu_dm_vrr_active_irq(amdgpu_crtc);
400 /* Fixed refresh rate, or VRR scanout position outside front-porch? */
402 !dc_stream_get_scanoutpos(amdgpu_crtc->dm_irq_params.stream, &v_blank_start,
403 &v_blank_end, &hpos, &vpos) ||
404 (vpos < v_blank_start)) {
405 /* Update to correct count and vblank timestamp if racing with
406 * vblank irq. This also updates to the correct vblank timestamp
407 * even in VRR mode, as scanout is past the front-porch atm.
409 drm_crtc_accurate_vblank_count(&amdgpu_crtc->base);
411 /* Wake up userspace by sending the pageflip event with proper
412 * count and timestamp of vblank of flip completion.
415 drm_crtc_send_vblank_event(&amdgpu_crtc->base, e);
417 /* Event sent, so done with vblank for this flip */
418 drm_crtc_vblank_put(&amdgpu_crtc->base);
421 /* VRR active and inside front-porch: vblank count and
422 * timestamp for pageflip event will only be up to date after
423 * drm_crtc_handle_vblank() has been executed from late vblank
424 * irq handler after start of back-porch (vline 0). We queue the
425 * pageflip event for send-out by drm_crtc_handle_vblank() with
426 * updated timestamp and count, once it runs after us.
428 * We need to open-code this instead of using the helper
429 * drm_crtc_arm_vblank_event(), as that helper would
430 * call drm_crtc_accurate_vblank_count(), which we must
431 * not call in VRR mode while we are in front-porch!
434 /* sequence will be replaced by real count during send-out. */
435 e->sequence = drm_crtc_vblank_count(&amdgpu_crtc->base);
436 e->pipe = amdgpu_crtc->crtc_id;
438 list_add_tail(&e->base.link, &adev_to_drm(adev)->vblank_event_list);
442 /* Keep track of vblank of this flip for flip throttling. We use the
443 * cooked hw counter, as that one is incremented at the start of this vblank
444 * of pageflip completion, so last_flip_vblank is the forbidden count
445 * for queueing new pageflips if vsync + VRR is enabled.
447 amdgpu_crtc->dm_irq_params.last_flip_vblank =
448 amdgpu_get_vblank_counter_kms(&amdgpu_crtc->base);
450 amdgpu_crtc->pflip_status = AMDGPU_FLIP_NONE;
451 spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
453 DC_LOG_PFLIP("crtc:%d[%p], pflip_stat:AMDGPU_FLIP_NONE, vrr[%d]-fp %d\n",
454 amdgpu_crtc->crtc_id, amdgpu_crtc,
455 vrr_active, (int) !e);
458 static void dm_vupdate_high_irq(void *interrupt_params)
460 struct common_irq_params *irq_params = interrupt_params;
461 struct amdgpu_device *adev = irq_params->adev;
462 struct amdgpu_crtc *acrtc;
463 struct drm_device *drm_dev;
464 struct drm_vblank_crtc *vblank;
465 ktime_t frame_duration_ns, previous_timestamp;
469 acrtc = get_crtc_by_otg_inst(adev, irq_params->irq_src - IRQ_TYPE_VUPDATE);
472 vrr_active = amdgpu_dm_vrr_active_irq(acrtc);
473 drm_dev = acrtc->base.dev;
474 vblank = &drm_dev->vblank[acrtc->base.index];
475 previous_timestamp = atomic64_read(&irq_params->previous_timestamp);
476 frame_duration_ns = vblank->time - previous_timestamp;
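/* Trace the effective refresh rate: the delta between the two most recent
 * vblank timestamps is the frame duration, so the rate in Hz is
 * NSEC_PER_SEC / frame_duration_ns.
 */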
478 if (frame_duration_ns > 0) {
479 trace_amdgpu_refresh_rate_track(acrtc->base.index,
481 ktime_divns(NSEC_PER_SEC, frame_duration_ns));
482 atomic64_set(&irq_params->previous_timestamp, vblank->time);
485 DC_LOG_VBLANK("crtc:%d, vupdate-vrr:%d\n",
489 /* Core vblank handling is done here after the end of the front porch in
490 * VRR mode, as vblank timestamping only gives valid results once
491 * scanout is past the front porch. This will also deliver any
492 * page-flip completion events that have been queued to us
493 * if a pageflip happened inside the front porch.
496 drm_crtc_handle_vblank(&acrtc->base);
498 /* BTR processing for pre-DCE12 ASICs */
499 if (acrtc->dm_irq_params.stream &&
500 adev->family < AMDGPU_FAMILY_AI) {
501 spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);
502 mod_freesync_handle_v_update(
503 adev->dm.freesync_module,
504 acrtc->dm_irq_params.stream,
505 &acrtc->dm_irq_params.vrr_params);
507 dc_stream_adjust_vmin_vmax(
509 acrtc->dm_irq_params.stream,
510 &acrtc->dm_irq_params.vrr_params.adjust);
511 spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
518 * dm_crtc_high_irq() - Handles CRTC interrupt
519 * @interrupt_params: used for determining the CRTC instance
521 * Handles the CRTC/VSYNC interrupt by notifying DRM's VBLANK
524 static void dm_crtc_high_irq(void *interrupt_params)
526 struct common_irq_params *irq_params = interrupt_params;
527 struct amdgpu_device *adev = irq_params->adev;
528 struct amdgpu_crtc *acrtc;
532 acrtc = get_crtc_by_otg_inst(adev, irq_params->irq_src - IRQ_TYPE_VBLANK);
536 vrr_active = amdgpu_dm_vrr_active_irq(acrtc);
538 DC_LOG_VBLANK("crtc:%d, vupdate-vrr:%d, planes:%d\n", acrtc->crtc_id,
539 vrr_active, acrtc->dm_irq_params.active_planes);
542 * Core vblank handling at the start of the front porch is only possible
543 * in non-VRR mode, as only then does vblank timestamping give
544 * valid results while still inside the front porch. Otherwise defer it
545 * to dm_vupdate_high_irq after the end of the front porch.
548 drm_crtc_handle_vblank(&acrtc->base);
551 * The following must happen at the start of vblank, for CRC
552 * computation and below-the-range (BTR) support in VRR mode.
554 amdgpu_dm_crtc_handle_crc_irq(&acrtc->base);
556 /* BTR updates need to happen before VUPDATE on Vega and above. */
557 if (adev->family < AMDGPU_FAMILY_AI)
560 spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);
562 if (acrtc->dm_irq_params.stream &&
563 acrtc->dm_irq_params.vrr_params.supported &&
564 acrtc->dm_irq_params.freesync_config.state ==
565 VRR_STATE_ACTIVE_VARIABLE) {
566 mod_freesync_handle_v_update(adev->dm.freesync_module,
567 acrtc->dm_irq_params.stream,
568 &acrtc->dm_irq_params.vrr_params);
570 dc_stream_adjust_vmin_vmax(adev->dm.dc, acrtc->dm_irq_params.stream,
571 &acrtc->dm_irq_params.vrr_params.adjust);
575 * If there aren't any active_planes then DCH HUBP may be clock-gated.
576 * In that case, pageflip completion interrupts won't fire and pageflip
577 * completion events won't get delivered. Prevent this by sending
578 * pending pageflip events from here if a flip is still pending.
580 * If any planes are enabled, use dm_pflip_high_irq() instead, to
581 * avoid race conditions between flip programming and completion,
582 * which could cause too early flip completion events.
584 if (adev->family >= AMDGPU_FAMILY_RV &&
585 acrtc->pflip_status == AMDGPU_FLIP_SUBMITTED &&
586 acrtc->dm_irq_params.active_planes == 0) {
588 drm_crtc_send_vblank_event(&acrtc->base, acrtc->event);
590 drm_crtc_vblank_put(&acrtc->base);
592 acrtc->pflip_status = AMDGPU_FLIP_NONE;
595 spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
598 #if defined(CONFIG_DRM_AMD_DC_DCN)
600 * dm_dcn_vertical_interrupt0_high_irq() - Handles OTG Vertical interrupt0 for
601 * DCN generation ASICs
602 * @interrupt_params: interrupt parameters
604 * Used to set crc window/read out crc value at vertical line 0 position
606 #if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
607 static void dm_dcn_vertical_interrupt0_high_irq(void *interrupt_params)
609 struct common_irq_params *irq_params = interrupt_params;
610 struct amdgpu_device *adev = irq_params->adev;
611 struct amdgpu_crtc *acrtc;
613 acrtc = get_crtc_by_otg_inst(adev, irq_params->irq_src - IRQ_TYPE_VLINE0);
618 amdgpu_dm_crtc_handle_crc_window_irq(&acrtc->base);
623 static int dm_set_clockgating_state(void *handle,
624 enum amd_clockgating_state state)
629 static int dm_set_powergating_state(void *handle,
630 enum amd_powergating_state state)
635 /* Prototypes of private functions */
636 static int dm_early_init(void* handle);
638 /* Allocate memory for FBC compressed data */
639 static void amdgpu_dm_fbc_init(struct drm_connector *connector)
641 struct drm_device *dev = connector->dev;
642 struct amdgpu_device *adev = drm_to_adev(dev);
643 struct dm_compressor_info *compressor = &adev->dm.compressor;
644 struct amdgpu_dm_connector *aconn = to_amdgpu_dm_connector(connector);
645 struct drm_display_mode *mode;
646 unsigned long max_size = 0;
648 if (adev->dm.dc->fbc_compressor == NULL)
651 if (aconn->dc_link->connector_signal != SIGNAL_TYPE_EDP)
654 if (compressor->bo_ptr)
658 list_for_each_entry(mode, &connector->modes, head) {
659 if (max_size < mode->htotal * mode->vtotal)
660 max_size = mode->htotal * mode->vtotal;
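/* Reserve a GTT buffer sized for the largest advertised mode, at an
 * assumed 4 bytes per pixel, to back the FBC compressor for the eDP panel.
 */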
664 int r = amdgpu_bo_create_kernel(adev, max_size * 4, PAGE_SIZE,
665 AMDGPU_GEM_DOMAIN_GTT, &compressor->bo_ptr,
666 &compressor->gpu_addr, &compressor->cpu_addr);
669 DRM_ERROR("DM: Failed to initialize FBC\n");
671 adev->dm.dc->ctx->fbc_gpu_addr = compressor->gpu_addr;
672 DRM_INFO("DM: FBC alloc %lu\n", max_size*4);
679 static int amdgpu_dm_audio_component_get_eld(struct device *kdev, int port,
680 int pipe, bool *enabled,
681 unsigned char *buf, int max_bytes)
683 struct drm_device *dev = dev_get_drvdata(kdev);
684 struct amdgpu_device *adev = drm_to_adev(dev);
685 struct drm_connector *connector;
686 struct drm_connector_list_iter conn_iter;
687 struct amdgpu_dm_connector *aconnector;
692 mutex_lock(&adev->dm.audio_lock);
694 drm_connector_list_iter_begin(dev, &conn_iter);
695 drm_for_each_connector_iter(connector, &conn_iter) {
696 aconnector = to_amdgpu_dm_connector(connector);
697 if (aconnector->audio_inst != port)
701 ret = drm_eld_size(connector->eld);
702 memcpy(buf, connector->eld, min(max_bytes, ret));
706 drm_connector_list_iter_end(&conn_iter);
708 mutex_unlock(&adev->dm.audio_lock);
710 DRM_DEBUG_KMS("Get ELD : idx=%d ret=%d en=%d\n", port, ret, *enabled);
715 static const struct drm_audio_component_ops amdgpu_dm_audio_component_ops = {
716 .get_eld = amdgpu_dm_audio_component_get_eld,
719 static int amdgpu_dm_audio_component_bind(struct device *kdev,
720 struct device *hda_kdev, void *data)
722 struct drm_device *dev = dev_get_drvdata(kdev);
723 struct amdgpu_device *adev = drm_to_adev(dev);
724 struct drm_audio_component *acomp = data;
726 acomp->ops = &amdgpu_dm_audio_component_ops;
728 adev->dm.audio_component = acomp;
733 static void amdgpu_dm_audio_component_unbind(struct device *kdev,
734 struct device *hda_kdev, void *data)
736 struct drm_device *dev = dev_get_drvdata(kdev);
737 struct amdgpu_device *adev = drm_to_adev(dev);
738 struct drm_audio_component *acomp = data;
742 adev->dm.audio_component = NULL;
745 static const struct component_ops amdgpu_dm_audio_component_bind_ops = {
746 .bind = amdgpu_dm_audio_component_bind,
747 .unbind = amdgpu_dm_audio_component_unbind,
750 static int amdgpu_dm_audio_init(struct amdgpu_device *adev)
757 adev->mode_info.audio.enabled = true;
759 adev->mode_info.audio.num_pins = adev->dm.dc->res_pool->audio_count;
761 for (i = 0; i < adev->mode_info.audio.num_pins; i++) {
762 adev->mode_info.audio.pin[i].channels = -1;
763 adev->mode_info.audio.pin[i].rate = -1;
764 adev->mode_info.audio.pin[i].bits_per_sample = -1;
765 adev->mode_info.audio.pin[i].status_bits = 0;
766 adev->mode_info.audio.pin[i].category_code = 0;
767 adev->mode_info.audio.pin[i].connected = false;
768 adev->mode_info.audio.pin[i].id =
769 adev->dm.dc->res_pool->audios[i]->inst;
770 adev->mode_info.audio.pin[i].offset = 0;
773 ret = component_add(adev->dev, &amdgpu_dm_audio_component_bind_ops);
777 adev->dm.audio_registered = true;
782 static void amdgpu_dm_audio_fini(struct amdgpu_device *adev)
787 if (!adev->mode_info.audio.enabled)
790 if (adev->dm.audio_registered) {
791 component_del(adev->dev, &amdgpu_dm_audio_component_bind_ops);
792 adev->dm.audio_registered = false;
795 /* TODO: Disable audio? */
797 adev->mode_info.audio.enabled = false;
800 static void amdgpu_dm_audio_eld_notify(struct amdgpu_device *adev, int pin)
802 struct drm_audio_component *acomp = adev->dm.audio_component;
804 if (acomp && acomp->audio_ops && acomp->audio_ops->pin_eld_notify) {
805 DRM_DEBUG_KMS("Notify ELD: %d\n", pin);
807 acomp->audio_ops->pin_eld_notify(acomp->audio_ops->audio_ptr,
812 static int dm_dmub_hw_init(struct amdgpu_device *adev)
814 const struct dmcub_firmware_header_v1_0 *hdr;
815 struct dmub_srv *dmub_srv = adev->dm.dmub_srv;
816 struct dmub_srv_fb_info *fb_info = adev->dm.dmub_fb_info;
817 const struct firmware *dmub_fw = adev->dm.dmub_fw;
818 struct dmcu *dmcu = adev->dm.dc->res_pool->dmcu;
819 struct abm *abm = adev->dm.dc->res_pool->abm;
820 struct dmub_srv_hw_params hw_params;
821 enum dmub_status status;
822 const unsigned char *fw_inst_const, *fw_bss_data;
823 uint32_t i, fw_inst_const_size, fw_bss_data_size;
827 /* DMUB isn't supported on the ASIC. */
831 DRM_ERROR("No framebuffer info for DMUB service.\n");
836 /* Firmware required for DMUB support. */
837 DRM_ERROR("No firmware provided for DMUB.\n");
841 status = dmub_srv_has_hw_support(dmub_srv, &has_hw_support);
842 if (status != DMUB_STATUS_OK) {
843 DRM_ERROR("Error checking HW support for DMUB: %d\n", status);
847 if (!has_hw_support) {
848 DRM_INFO("DMUB unsupported on ASIC\n");
852 hdr = (const struct dmcub_firmware_header_v1_0 *)dmub_fw->data;
854 fw_inst_const = dmub_fw->data +
855 le32_to_cpu(hdr->header.ucode_array_offset_bytes) +
858 fw_bss_data = dmub_fw->data +
859 le32_to_cpu(hdr->header.ucode_array_offset_bytes) +
860 le32_to_cpu(hdr->inst_const_bytes);
862 /* Copy firmware and bios info into FB memory. */
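/* The PSP header and footer that wrap inst_const in the firmware image are
 * presumably consumed by PSP itself, so they are excluded from the number
 * of bytes copied into the CW0 window below.
 */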
863 fw_inst_const_size = le32_to_cpu(hdr->inst_const_bytes) -
864 PSP_HEADER_BYTES - PSP_FOOTER_BYTES;
866 fw_bss_data_size = le32_to_cpu(hdr->bss_data_bytes);
868 /* if adev->firmware.load_type == AMDGPU_FW_LOAD_PSP,
869 * amdgpu_ucode_init_single_fw will load dmub firmware
870 * fw_inst_const part to cw0; otherwise, the firmware back door load
871 * will be done by dm_dmub_hw_init
873 if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP) {
874 memcpy(fb_info->fb[DMUB_WINDOW_0_INST_CONST].cpu_addr, fw_inst_const,
878 if (fw_bss_data_size)
879 memcpy(fb_info->fb[DMUB_WINDOW_2_BSS_DATA].cpu_addr,
880 fw_bss_data, fw_bss_data_size);
882 /* Copy firmware bios info into FB memory. */
883 memcpy(fb_info->fb[DMUB_WINDOW_3_VBIOS].cpu_addr, adev->bios,
886 /* Reset regions that need to be reset. */
887 memset(fb_info->fb[DMUB_WINDOW_4_MAILBOX].cpu_addr, 0,
888 fb_info->fb[DMUB_WINDOW_4_MAILBOX].size);
890 memset(fb_info->fb[DMUB_WINDOW_5_TRACEBUFF].cpu_addr, 0,
891 fb_info->fb[DMUB_WINDOW_5_TRACEBUFF].size);
893 memset(fb_info->fb[DMUB_WINDOW_6_FW_STATE].cpu_addr, 0,
894 fb_info->fb[DMUB_WINDOW_6_FW_STATE].size);
896 /* Initialize hardware. */
897 memset(&hw_params, 0, sizeof(hw_params));
898 hw_params.fb_base = adev->gmc.fb_start;
899 hw_params.fb_offset = adev->gmc.aper_base;
901 /* backdoor load firmware and trigger dmub running */
902 if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP)
903 hw_params.load_inst_const = true;
906 hw_params.psp_version = dmcu->psp_version;
908 for (i = 0; i < fb_info->num_fb; ++i)
909 hw_params.fb[i] = &fb_info->fb[i];
911 status = dmub_srv_hw_init(dmub_srv, &hw_params);
912 if (status != DMUB_STATUS_OK) {
913 DRM_ERROR("Error initializing DMUB HW: %d\n", status);
917 /* Wait for firmware load to finish. */
918 status = dmub_srv_wait_for_auto_load(dmub_srv, 100000);
919 if (status != DMUB_STATUS_OK)
920 DRM_WARN("Wait for DMUB auto-load failed: %d\n", status);
922 /* Init DMCU and ABM if available. */
924 dmcu->funcs->dmcu_init(dmcu);
925 abm->dmcu_is_running = dmcu->funcs->is_dmcu_initialized(dmcu);
928 adev->dm.dc->ctx->dmub_srv = dc_dmub_srv_create(adev->dm.dc, dmub_srv);
929 if (!adev->dm.dc->ctx->dmub_srv) {
930 DRM_ERROR("Couldn't allocate DC DMUB server!\n");
934 DRM_INFO("DMUB hardware initialized: version=0x%08X\n",
935 adev->dm.dmcub_fw_version);
940 #if defined(CONFIG_DRM_AMD_DC_DCN)
941 #define DMUB_TRACE_MAX_READ 64
942 static void dm_dmub_trace_high_irq(void *interrupt_params)
944 struct common_irq_params *irq_params = interrupt_params;
945 struct amdgpu_device *adev = irq_params->adev;
946 struct amdgpu_display_manager *dm = &adev->dm;
947 struct dmcub_trace_buf_entry entry = { 0 };
951 if (dc_dmub_srv_get_dmub_outbox0_msg(dm->dc, &entry)) {
952 trace_amdgpu_dmub_trace_high_irq(entry.trace_code, entry.tick_count,
953 entry.param0, entry.param1);
955 DRM_DEBUG_DRIVER("trace_code:%u, tick_count:%u, param0:%u, param1:%u\n",
956 entry.trace_code, entry.tick_count, entry.param0, entry.param1);
962 } while (count <= DMUB_TRACE_MAX_READ);
964 ASSERT(count <= DMUB_TRACE_MAX_READ);
967 static void mmhub_read_system_context(struct amdgpu_device *adev, struct dc_phy_addr_space_config *pa_config)
970 uint32_t logical_addr_low;
971 uint32_t logical_addr_high;
972 uint32_t agp_base, agp_bot, agp_top;
973 PHYSICAL_ADDRESS_LOC page_table_start, page_table_end, page_table_base;
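/* The values below are computed at the granularity the hardware appears to
 * use (>> 18 for the system aperture, >> 24 for the AGP window, >> 12 for
 * page-table addresses) and converted back to byte addresses when they are
 * written into pa_config further down.
 */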
975 logical_addr_low = min(adev->gmc.fb_start, adev->gmc.agp_start) >> 18;
976 pt_base = amdgpu_gmc_pd_addr(adev->gart.bo);
978 if (adev->apu_flags & AMD_APU_IS_RAVEN2)
980 * Raven2 has a HW issue that prevents it from using the vram which
981 * lies beyond MC_VM_SYSTEM_APERTURE_HIGH_ADDR. As a workaround,
982 * increase the system aperture high address (add 1)
983 * to get rid of the VM fault and hardware hang.
985 logical_addr_high = max((adev->gmc.fb_end >> 18) + 0x1, adev->gmc.agp_end >> 18);
987 logical_addr_high = max(adev->gmc.fb_end, adev->gmc.agp_end) >> 18;
990 agp_bot = adev->gmc.agp_start >> 24;
991 agp_top = adev->gmc.agp_end >> 24;
994 page_table_start.high_part = (u32)(adev->gmc.gart_start >> 44) & 0xF;
995 page_table_start.low_part = (u32)(adev->gmc.gart_start >> 12);
996 page_table_end.high_part = (u32)(adev->gmc.gart_end >> 44) & 0xF;
997 page_table_end.low_part = (u32)(adev->gmc.gart_end >> 12);
998 page_table_base.high_part = upper_32_bits(pt_base) & 0xF;
999 page_table_base.low_part = lower_32_bits(pt_base);
1001 pa_config->system_aperture.start_addr = (uint64_t)logical_addr_low << 18;
1002 pa_config->system_aperture.end_addr = (uint64_t)logical_addr_high << 18;
1004 pa_config->system_aperture.agp_base = (uint64_t)agp_base << 24;
1005 pa_config->system_aperture.agp_bot = (uint64_t)agp_bot << 24;
1006 pa_config->system_aperture.agp_top = (uint64_t)agp_top << 24;
1008 pa_config->system_aperture.fb_base = adev->gmc.fb_start;
1009 pa_config->system_aperture.fb_offset = adev->gmc.aper_base;
1010 pa_config->system_aperture.fb_top = adev->gmc.fb_end;
1012 pa_config->gart_config.page_table_start_addr = page_table_start.quad_part << 12;
1013 pa_config->gart_config.page_table_end_addr = page_table_end.quad_part << 12;
1014 pa_config->gart_config.page_table_base_addr = page_table_base.quad_part;
1016 pa_config->is_hvm_enabled = 0;
1020 #if defined(CONFIG_DRM_AMD_DC_DCN)
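/* Deferred vblank work: track how many CRTCs currently have vblank
 * interrupts enabled and only allow DC idle (MALL stutter) optimizations
 * when that count drops to zero.
 */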
1021 static void event_mall_stutter(struct work_struct *work)
1024 struct vblank_workqueue *vblank_work = container_of(work, struct vblank_workqueue, mall_work);
1025 struct amdgpu_display_manager *dm = vblank_work->dm;
1027 mutex_lock(&dm->dc_lock);
1029 if (vblank_work->enable)
1030 dm->active_vblank_irq_count++;
1031 else if (dm->active_vblank_irq_count)
1032 dm->active_vblank_irq_count--;
1034 dc_allow_idle_optimizations(dm->dc, dm->active_vblank_irq_count == 0);
1036 DRM_DEBUG_KMS("Allow idle optimizations (MALL): %d\n", dm->active_vblank_irq_count == 0);
1038 mutex_unlock(&dm->dc_lock);
1041 static struct vblank_workqueue *vblank_create_workqueue(struct amdgpu_device *adev, struct dc *dc)
1044 int max_caps = dc->caps.max_links;
1045 struct vblank_workqueue *vblank_work;
1048 vblank_work = kcalloc(max_caps, sizeof(*vblank_work), GFP_KERNEL);
1049 if (ZERO_OR_NULL_PTR(vblank_work)) {
1054 for (i = 0; i < max_caps; i++)
1055 INIT_WORK(&vblank_work[i].mall_work, event_mall_stutter);
1060 static int amdgpu_dm_init(struct amdgpu_device *adev)
1062 struct dc_init_data init_data;
1063 #ifdef CONFIG_DRM_AMD_DC_HDCP
1064 struct dc_callback_init init_params;
1068 adev->dm.ddev = adev_to_drm(adev);
1069 adev->dm.adev = adev;
1071 /* Zero all the fields */
1072 memset(&init_data, 0, sizeof(init_data));
1073 #ifdef CONFIG_DRM_AMD_DC_HDCP
1074 memset(&init_params, 0, sizeof(init_params));
1077 mutex_init(&adev->dm.dc_lock);
1078 mutex_init(&adev->dm.audio_lock);
1079 #if defined(CONFIG_DRM_AMD_DC_DCN)
1080 spin_lock_init(&adev->dm.vblank_lock);
1083 if (amdgpu_dm_irq_init(adev)) {
1084 DRM_ERROR("amdgpu: failed to initialize DM IRQ support.\n");
1088 init_data.asic_id.chip_family = adev->family;
1090 init_data.asic_id.pci_revision_id = adev->pdev->revision;
1091 init_data.asic_id.hw_internal_rev = adev->external_rev_id;
1093 init_data.asic_id.vram_width = adev->gmc.vram_width;
1094 /* TODO: initialize init_data.asic_id.vram_type here!!!! */
1095 init_data.asic_id.atombios_base_address =
1096 adev->mode_info.atom_context->bios;
1098 init_data.driver = adev;
1100 adev->dm.cgs_device = amdgpu_cgs_create_device(adev);
1102 if (!adev->dm.cgs_device) {
1103 DRM_ERROR("amdgpu: failed to create cgs device.\n");
1107 init_data.cgs_device = adev->dm.cgs_device;
1109 init_data.dce_environment = DCE_ENV_PRODUCTION_DRV;
1111 switch (adev->asic_type) {
1116 init_data.flags.gpu_vm_support = true;
1117 if (ASICREV_IS_GREEN_SARDINE(adev->external_rev_id))
1118 init_data.flags.disable_dmcu = true;
1120 #if defined(CONFIG_DRM_AMD_DC_DCN)
1122 init_data.flags.gpu_vm_support = true;
1129 if (amdgpu_dc_feature_mask & DC_FBC_MASK)
1130 init_data.flags.fbc_support = true;
1132 if (amdgpu_dc_feature_mask & DC_MULTI_MON_PP_MCLK_SWITCH_MASK)
1133 init_data.flags.multi_mon_pp_mclk_switch = true;
1135 if (amdgpu_dc_feature_mask & DC_DISABLE_FRACTIONAL_PWM_MASK)
1136 init_data.flags.disable_fractional_pwm = true;
1138 init_data.flags.power_down_display_on_boot = true;
1140 INIT_LIST_HEAD(&adev->dm.da_list);
1141 /* Display Core create. */
1142 adev->dm.dc = dc_create(&init_data);
1145 DRM_INFO("Display Core initialized with v%s!\n", DC_VER);
1147 DRM_INFO("Display Core failed to initialize with v%s!\n", DC_VER);
1151 if (amdgpu_dc_debug_mask & DC_DISABLE_PIPE_SPLIT) {
1152 adev->dm.dc->debug.force_single_disp_pipe_split = false;
1153 adev->dm.dc->debug.pipe_split_policy = MPC_SPLIT_AVOID;
1156 if (adev->asic_type != CHIP_CARRIZO && adev->asic_type != CHIP_STONEY)
1157 adev->dm.dc->debug.disable_stutter = amdgpu_pp_feature_mask & PP_STUTTER_MODE ? false : true;
1159 if (amdgpu_dc_debug_mask & DC_DISABLE_STUTTER)
1160 adev->dm.dc->debug.disable_stutter = true;
1162 if (amdgpu_dc_debug_mask & DC_DISABLE_DSC)
1163 adev->dm.dc->debug.disable_dsc = true;
1165 if (amdgpu_dc_debug_mask & DC_DISABLE_CLOCK_GATING)
1166 adev->dm.dc->debug.disable_clock_gate = true;
1168 r = dm_dmub_hw_init(adev);
1170 DRM_ERROR("DMUB interface failed to initialize: status=%d\n", r);
1174 dc_hardware_init(adev->dm.dc);
1176 #if defined(CONFIG_DRM_AMD_DC_DCN)
1177 if (adev->apu_flags) {
1178 struct dc_phy_addr_space_config pa_config;
1180 mmhub_read_system_context(adev, &pa_config);
1182 // Call the DC init_memory func
1183 dc_setup_system_context(adev->dm.dc, &pa_config);
1187 adev->dm.freesync_module = mod_freesync_create(adev->dm.dc);
1188 if (!adev->dm.freesync_module) {
1190 "amdgpu: failed to initialize freesync_module.\n");
1192 DRM_DEBUG_DRIVER("amdgpu: freesync_module init done %p.\n",
1193 adev->dm.freesync_module);
1195 amdgpu_dm_init_color_mod();
1197 #if defined(CONFIG_DRM_AMD_DC_DCN)
1198 if (adev->dm.dc->caps.max_links > 0) {
1199 adev->dm.vblank_workqueue = vblank_create_workqueue(adev, adev->dm.dc);
1201 if (!adev->dm.vblank_workqueue)
1202 DRM_ERROR("amdgpu: failed to initialize vblank_workqueue.\n");
1204 DRM_DEBUG_DRIVER("amdgpu: vblank_workqueue init done %p.\n", adev->dm.vblank_workqueue);
1208 #ifdef CONFIG_DRM_AMD_DC_HDCP
1209 if (adev->dm.dc->caps.max_links > 0 && adev->asic_type >= CHIP_RAVEN) {
1210 adev->dm.hdcp_workqueue = hdcp_create_workqueue(adev, &init_params.cp_psp, adev->dm.dc);
1212 if (!adev->dm.hdcp_workqueue)
1213 DRM_ERROR("amdgpu: failed to initialize hdcp_workqueue.\n");
1215 DRM_DEBUG_DRIVER("amdgpu: hdcp_workqueue init done %p.\n", adev->dm.hdcp_workqueue);
1217 dc_init_callbacks(adev->dm.dc, &init_params);
1220 #if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
1221 adev->dm.crc_rd_wrk = amdgpu_dm_crtc_secure_display_create_work();
1223 if (amdgpu_dm_initialize_drm_device(adev)) {
1225 "amdgpu: failed to initialize sw for display support.\n");
1229 /* create fake encoders for MST */
1230 dm_dp_create_fake_mst_encoders(adev);
1232 /* TODO: Add_display_info? */
1234 /* TODO use dynamic cursor width */
1235 adev_to_drm(adev)->mode_config.cursor_width = adev->dm.dc->caps.max_cursor_size;
1236 adev_to_drm(adev)->mode_config.cursor_height = adev->dm.dc->caps.max_cursor_size;
1238 if (drm_vblank_init(adev_to_drm(adev), adev->dm.display_indexes_num)) {
1240 "amdgpu: failed to initialize sw for display support.\n");
1245 DRM_DEBUG_DRIVER("KMS initialized.\n");
1249 amdgpu_dm_fini(adev);
1254 static void amdgpu_dm_fini(struct amdgpu_device *adev)
1258 for (i = 0; i < adev->dm.display_indexes_num; i++) {
1259 drm_encoder_cleanup(&adev->dm.mst_encoders[i].base);
1262 amdgpu_dm_audio_fini(adev);
1264 amdgpu_dm_destroy_drm_device(&adev->dm);
1266 #if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
1267 if (adev->dm.crc_rd_wrk) {
1268 flush_work(&adev->dm.crc_rd_wrk->notify_ta_work);
1269 kfree(adev->dm.crc_rd_wrk);
1270 adev->dm.crc_rd_wrk = NULL;
1273 #ifdef CONFIG_DRM_AMD_DC_HDCP
1274 if (adev->dm.hdcp_workqueue) {
1275 hdcp_destroy(&adev->dev->kobj, adev->dm.hdcp_workqueue);
1276 adev->dm.hdcp_workqueue = NULL;
1280 dc_deinit_callbacks(adev->dm.dc);
1283 #if defined(CONFIG_DRM_AMD_DC_DCN)
1284 if (adev->dm.vblank_workqueue) {
1285 adev->dm.vblank_workqueue->dm = NULL;
1286 kfree(adev->dm.vblank_workqueue);
1287 adev->dm.vblank_workqueue = NULL;
1291 if (adev->dm.dc->ctx->dmub_srv) {
1292 dc_dmub_srv_destroy(&adev->dm.dc->ctx->dmub_srv);
1293 adev->dm.dc->ctx->dmub_srv = NULL;
1296 if (adev->dm.dmub_bo)
1297 amdgpu_bo_free_kernel(&adev->dm.dmub_bo,
1298 &adev->dm.dmub_bo_gpu_addr,
1299 &adev->dm.dmub_bo_cpu_addr);
1301 /* DC Destroy TODO: Replace destroy DAL */
1303 dc_destroy(&adev->dm.dc);
1305 * TODO: pageflip, vblank interrupt
1307 * amdgpu_dm_irq_fini(adev);
1310 if (adev->dm.cgs_device) {
1311 amdgpu_cgs_destroy_device(adev->dm.cgs_device);
1312 adev->dm.cgs_device = NULL;
1314 if (adev->dm.freesync_module) {
1315 mod_freesync_destroy(adev->dm.freesync_module);
1316 adev->dm.freesync_module = NULL;
1319 mutex_destroy(&adev->dm.audio_lock);
1320 mutex_destroy(&adev->dm.dc_lock);
1325 static int load_dmcu_fw(struct amdgpu_device *adev)
1327 const char *fw_name_dmcu = NULL;
1329 const struct dmcu_firmware_header_v1_0 *hdr;
1331 switch (adev->asic_type) {
1332 #if defined(CONFIG_DRM_AMD_DC_SI)
1347 case CHIP_POLARIS11:
1348 case CHIP_POLARIS10:
1349 case CHIP_POLARIS12:
1357 case CHIP_SIENNA_CICHLID:
1358 case CHIP_NAVY_FLOUNDER:
1359 case CHIP_DIMGREY_CAVEFISH:
1363 fw_name_dmcu = FIRMWARE_NAVI12_DMCU;
1366 if (ASICREV_IS_PICASSO(adev->external_rev_id))
1367 fw_name_dmcu = FIRMWARE_RAVEN_DMCU;
1368 else if (ASICREV_IS_RAVEN2(adev->external_rev_id))
1369 fw_name_dmcu = FIRMWARE_RAVEN_DMCU;
1374 DRM_ERROR("Unsupported ASIC type: 0x%X\n", adev->asic_type);
1378 if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP) {
1379 DRM_DEBUG_KMS("dm: DMCU firmware not supported on direct or SMU loading\n");
1383 r = request_firmware_direct(&adev->dm.fw_dmcu, fw_name_dmcu, adev->dev);
1385 /* DMCU firmware is not necessary, so don't raise a fuss if it's missing */
1386 DRM_DEBUG_KMS("dm: DMCU firmware not found\n");
1387 adev->dm.fw_dmcu = NULL;
1391 dev_err(adev->dev, "amdgpu_dm: Can't load firmware \"%s\"\n",
1396 r = amdgpu_ucode_validate(adev->dm.fw_dmcu);
1398 dev_err(adev->dev, "amdgpu_dm: Can't validate firmware \"%s\"\n",
1400 release_firmware(adev->dm.fw_dmcu);
1401 adev->dm.fw_dmcu = NULL;
1405 hdr = (const struct dmcu_firmware_header_v1_0 *)adev->dm.fw_dmcu->data;
1406 adev->firmware.ucode[AMDGPU_UCODE_ID_DMCU_ERAM].ucode_id = AMDGPU_UCODE_ID_DMCU_ERAM;
1407 adev->firmware.ucode[AMDGPU_UCODE_ID_DMCU_ERAM].fw = adev->dm.fw_dmcu;
1408 adev->firmware.fw_size +=
1409 ALIGN(le32_to_cpu(hdr->header.ucode_size_bytes) - le32_to_cpu(hdr->intv_size_bytes), PAGE_SIZE);
1411 adev->firmware.ucode[AMDGPU_UCODE_ID_DMCU_INTV].ucode_id = AMDGPU_UCODE_ID_DMCU_INTV;
1412 adev->firmware.ucode[AMDGPU_UCODE_ID_DMCU_INTV].fw = adev->dm.fw_dmcu;
1413 adev->firmware.fw_size +=
1414 ALIGN(le32_to_cpu(hdr->intv_size_bytes), PAGE_SIZE);
1416 adev->dm.dmcu_fw_version = le32_to_cpu(hdr->header.ucode_version);
1418 DRM_DEBUG_KMS("PSP loading DMCU firmware\n");
1423 static uint32_t amdgpu_dm_dmub_reg_read(void *ctx, uint32_t address)
1425 struct amdgpu_device *adev = ctx;
1427 return dm_read_reg(adev->dm.dc->ctx, address);
1430 static void amdgpu_dm_dmub_reg_write(void *ctx, uint32_t address,
1433 struct amdgpu_device *adev = ctx;
1435 return dm_write_reg(adev->dm.dc->ctx, address, value);
1438 static int dm_dmub_sw_init(struct amdgpu_device *adev)
1440 struct dmub_srv_create_params create_params;
1441 struct dmub_srv_region_params region_params;
1442 struct dmub_srv_region_info region_info;
1443 struct dmub_srv_fb_params fb_params;
1444 struct dmub_srv_fb_info *fb_info;
1445 struct dmub_srv *dmub_srv;
1446 const struct dmcub_firmware_header_v1_0 *hdr;
1447 const char *fw_name_dmub;
1448 enum dmub_asic dmub_asic;
1449 enum dmub_status status;
1452 switch (adev->asic_type) {
1454 dmub_asic = DMUB_ASIC_DCN21;
1455 fw_name_dmub = FIRMWARE_RENOIR_DMUB;
1456 if (ASICREV_IS_GREEN_SARDINE(adev->external_rev_id))
1457 fw_name_dmub = FIRMWARE_GREEN_SARDINE_DMUB;
1459 case CHIP_SIENNA_CICHLID:
1460 dmub_asic = DMUB_ASIC_DCN30;
1461 fw_name_dmub = FIRMWARE_SIENNA_CICHLID_DMUB;
1463 case CHIP_NAVY_FLOUNDER:
1464 dmub_asic = DMUB_ASIC_DCN30;
1465 fw_name_dmub = FIRMWARE_NAVY_FLOUNDER_DMUB;
1468 dmub_asic = DMUB_ASIC_DCN301;
1469 fw_name_dmub = FIRMWARE_VANGOGH_DMUB;
1471 case CHIP_DIMGREY_CAVEFISH:
1472 dmub_asic = DMUB_ASIC_DCN302;
1473 fw_name_dmub = FIRMWARE_DIMGREY_CAVEFISH_DMUB;
1477 /* ASIC doesn't support DMUB. */
1481 r = request_firmware_direct(&adev->dm.dmub_fw, fw_name_dmub, adev->dev);
1483 DRM_ERROR("DMUB firmware loading failed: %d\n", r);
1487 r = amdgpu_ucode_validate(adev->dm.dmub_fw);
1489 DRM_ERROR("Couldn't validate DMUB firmware: %d\n", r);
1493 hdr = (const struct dmcub_firmware_header_v1_0 *)adev->dm.dmub_fw->data;
1495 if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) {
1496 adev->firmware.ucode[AMDGPU_UCODE_ID_DMCUB].ucode_id =
1497 AMDGPU_UCODE_ID_DMCUB;
1498 adev->firmware.ucode[AMDGPU_UCODE_ID_DMCUB].fw =
1500 adev->firmware.fw_size +=
1501 ALIGN(le32_to_cpu(hdr->inst_const_bytes), PAGE_SIZE);
1503 DRM_INFO("Loading DMUB firmware via PSP: version=0x%08X\n",
1504 adev->dm.dmcub_fw_version);
1507 adev->dm.dmcub_fw_version = le32_to_cpu(hdr->header.ucode_version);
1509 adev->dm.dmub_srv = kzalloc(sizeof(*adev->dm.dmub_srv), GFP_KERNEL);
1510 dmub_srv = adev->dm.dmub_srv;
1513 DRM_ERROR("Failed to allocate DMUB service!\n");
1517 memset(&create_params, 0, sizeof(create_params));
1518 create_params.user_ctx = adev;
1519 create_params.funcs.reg_read = amdgpu_dm_dmub_reg_read;
1520 create_params.funcs.reg_write = amdgpu_dm_dmub_reg_write;
1521 create_params.asic = dmub_asic;
1523 /* Create the DMUB service. */
1524 status = dmub_srv_create(dmub_srv, &create_params);
1525 if (status != DMUB_STATUS_OK) {
1526 DRM_ERROR("Error creating DMUB service: %d\n", status);
1530 /* Calculate the size of all the regions for the DMUB service. */
1531 memset(&region_params, 0, sizeof(region_params));
1533 region_params.inst_const_size = le32_to_cpu(hdr->inst_const_bytes) -
1534 PSP_HEADER_BYTES - PSP_FOOTER_BYTES;
1535 region_params.bss_data_size = le32_to_cpu(hdr->bss_data_bytes);
1536 region_params.vbios_size = adev->bios_size;
1537 region_params.fw_bss_data = region_params.bss_data_size ?
1538 adev->dm.dmub_fw->data +
1539 le32_to_cpu(hdr->header.ucode_array_offset_bytes) +
1540 le32_to_cpu(hdr->inst_const_bytes) : NULL;
1541 region_params.fw_inst_const =
1542 adev->dm.dmub_fw->data +
1543 le32_to_cpu(hdr->header.ucode_array_offset_bytes) +
1546 status = dmub_srv_calc_region_info(dmub_srv, &region_params,
1549 if (status != DMUB_STATUS_OK) {
1550 DRM_ERROR("Error calculating DMUB region info: %d\n", status);
1555 * Allocate a framebuffer based on the total size of all the regions.
1556 * TODO: Move this into GART.
1558 r = amdgpu_bo_create_kernel(adev, region_info.fb_size, PAGE_SIZE,
1559 AMDGPU_GEM_DOMAIN_VRAM, &adev->dm.dmub_bo,
1560 &adev->dm.dmub_bo_gpu_addr,
1561 &adev->dm.dmub_bo_cpu_addr);
1565 /* Rebase the regions on the framebuffer address. */
1566 memset(&fb_params, 0, sizeof(fb_params));
1567 fb_params.cpu_addr = adev->dm.dmub_bo_cpu_addr;
1568 fb_params.gpu_addr = adev->dm.dmub_bo_gpu_addr;
1569 fb_params.region_info = &region_info;
1571 adev->dm.dmub_fb_info =
1572 kzalloc(sizeof(*adev->dm.dmub_fb_info), GFP_KERNEL);
1573 fb_info = adev->dm.dmub_fb_info;
1577 "Failed to allocate framebuffer info for DMUB service!\n");
1581 status = dmub_srv_calc_fb_info(dmub_srv, &fb_params, fb_info);
1582 if (status != DMUB_STATUS_OK) {
1583 DRM_ERROR("Error calculating DMUB FB info: %d\n", status);
1590 static int dm_sw_init(void *handle)
1592 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1595 r = dm_dmub_sw_init(adev);
1599 return load_dmcu_fw(adev);
1602 static int dm_sw_fini(void *handle)
1604 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1606 kfree(adev->dm.dmub_fb_info);
1607 adev->dm.dmub_fb_info = NULL;
1609 if (adev->dm.dmub_srv) {
1610 dmub_srv_destroy(adev->dm.dmub_srv);
1611 adev->dm.dmub_srv = NULL;
1614 release_firmware(adev->dm.dmub_fw);
1615 adev->dm.dmub_fw = NULL;
1617 release_firmware(adev->dm.fw_dmcu);
1618 adev->dm.fw_dmcu = NULL;
1623 static int detect_mst_link_for_all_connectors(struct drm_device *dev)
1625 struct amdgpu_dm_connector *aconnector;
1626 struct drm_connector *connector;
1627 struct drm_connector_list_iter iter;
1630 drm_connector_list_iter_begin(dev, &iter);
1631 drm_for_each_connector_iter(connector, &iter) {
1632 aconnector = to_amdgpu_dm_connector(connector);
1633 if (aconnector->dc_link->type == dc_connection_mst_branch &&
1634 aconnector->mst_mgr.aux) {
1635 DRM_DEBUG_DRIVER("DM_MST: starting TM on aconnector: %p [id: %d]\n",
1637 aconnector->base.base.id);
1639 ret = drm_dp_mst_topology_mgr_set_mst(&aconnector->mst_mgr, true);
1641 DRM_ERROR("DM_MST: Failed to start MST\n");
1642 aconnector->dc_link->type =
1643 dc_connection_single;
1648 drm_connector_list_iter_end(&iter);
1653 static int dm_late_init(void *handle)
1655 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1657 struct dmcu_iram_parameters params;
1658 unsigned int linear_lut[16];
1660 struct dmcu *dmcu = NULL;
1663 dmcu = adev->dm.dc->res_pool->dmcu;
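/* Build a 16-entry linear (identity) backlight transfer curve spanning
 * 0..0xFFFF; it is passed below as the backlight LUT in the DMCU/DMCUB
 * iRAM parameters used by ABM.
 */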
1665 for (i = 0; i < 16; i++)
1666 linear_lut[i] = 0xFFFF * i / 15;
1669 params.backlight_ramping_start = 0xCCCC;
1670 params.backlight_ramping_reduction = 0xCCCCCCCC;
1671 params.backlight_lut_array_size = 16;
1672 params.backlight_lut_array = linear_lut;
1674 /* Min backlight level after ABM reduction; don't allow below 1%:
1675 * 0xFFFF x 0.01 = 0x28F
1677 params.min_abm_backlight = 0x28F;
1679 /* In the case where abm is implemented on dmcub,
1680 * dmcu object will be null.
1681 * ABM 2.4 and up are implemented on dmcub.
1684 ret = dmcu_load_iram(dmcu, params);
1685 else if (adev->dm.dc->ctx->dmub_srv)
1686 ret = dmub_init_abm_config(adev->dm.dc->res_pool, params);
1691 return detect_mst_link_for_all_connectors(adev_to_drm(adev));
1694 static void s3_handle_mst(struct drm_device *dev, bool suspend)
1696 struct amdgpu_dm_connector *aconnector;
1697 struct drm_connector *connector;
1698 struct drm_connector_list_iter iter;
1699 struct drm_dp_mst_topology_mgr *mgr;
1701 bool need_hotplug = false;
1703 drm_connector_list_iter_begin(dev, &iter);
1704 drm_for_each_connector_iter(connector, &iter) {
1705 aconnector = to_amdgpu_dm_connector(connector);
1706 if (aconnector->dc_link->type != dc_connection_mst_branch ||
1707 aconnector->mst_port)
1710 mgr = &aconnector->mst_mgr;
1713 drm_dp_mst_topology_mgr_suspend(mgr);
1715 ret = drm_dp_mst_topology_mgr_resume(mgr, true);
1717 drm_dp_mst_topology_mgr_set_mst(mgr, false);
1718 need_hotplug = true;
1722 drm_connector_list_iter_end(&iter);
1725 drm_kms_helper_hotplug_event(dev);
1728 static int amdgpu_dm_smu_write_watermarks_table(struct amdgpu_device *adev)
1730 struct smu_context *smu = &adev->smu;
1733 if (!is_support_sw_smu(adev))
1736 /* This interface is for dGPU Navi1x. The Linux dc-pplib interface depends
1737 * on the Windows driver dc implementation.
1738 * For Navi1x, clock settings of dcn watermarks are fixed. The settings
1739 * should be passed to smu during boot up and resume from s3.
1740 * boot up: dc calculate dcn watermark clock settings within dc_create,
1741 * dcn20_resource_construct
1742 * then call pplib functions below to pass the settings to smu:
1743 * smu_set_watermarks_for_clock_ranges
1744 * smu_set_watermarks_table
1745 * navi10_set_watermarks_table
1746 * smu_write_watermarks_table
1748 * For Renoir, clock settings of dcn watermark are also fixed values.
1749 * dc has implemented a different flow for the Windows driver:
1750 * dc_hardware_init / dc_set_power_state
1755 * smu_set_watermarks_for_clock_ranges
1756 * renoir_set_watermarks_table
1757 * smu_write_watermarks_table
1760 * dc_hardware_init -> amdgpu_dm_init
1761 * dc_set_power_state --> dm_resume
1763 * therefore, this function applies to navi10/12/14 but not Renoir
1766 switch (adev->asic_type) {
1775 ret = smu_write_watermarks_table(smu);
1777 DRM_ERROR("Failed to update WMTABLE!\n");
1785 * dm_hw_init() - Initialize DC device
1786 * @handle: The base driver device containing the amdgpu_dm device.
1788 * Initialize the &struct amdgpu_display_manager device. This involves calling
1789 * the initializers of each DM component, then populating the struct with them.
1791 * Although the function implies hardware initialization, both hardware and
1792 * software are initialized here. Splitting them out to their relevant init
1793 * hooks is a future TODO item.
1795 * Some notable things that are initialized here:
1797 * - Display Core, both software and hardware
1798 * - DC modules that we need (freesync and color management)
1799 * - DRM software states
1800 * - Interrupt sources and handlers
1802 * - Debug FS entries, if enabled
1804 static int dm_hw_init(void *handle)
1806 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1807 /* Create DAL display manager */
1808 amdgpu_dm_init(adev);
1809 amdgpu_dm_hpd_init(adev);
1815 * dm_hw_fini() - Teardown DC device
1816 * @handle: The base driver device containing the amdgpu_dm device.
1818 * Teardown components within &struct amdgpu_display_manager that require
1819 * cleanup. This involves cleaning up the DRM device, DC, and any modules that
1820 * were loaded. Also flush IRQ workqueues and disable them.
1822 static int dm_hw_fini(void *handle)
1824 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1826 amdgpu_dm_hpd_fini(adev);
1828 amdgpu_dm_irq_fini(adev);
1829 amdgpu_dm_fini(adev);
1834 static int dm_enable_vblank(struct drm_crtc *crtc);
1835 static void dm_disable_vblank(struct drm_crtc *crtc);
1837 static void dm_gpureset_toggle_interrupts(struct amdgpu_device *adev,
1838 struct dc_state *state, bool enable)
1840 enum dc_irq_source irq_source;
1841 struct amdgpu_crtc *acrtc;
1845 for (i = 0; i < state->stream_count; i++) {
1846 acrtc = get_crtc_by_otg_inst(
1847 adev, state->stream_status[i].primary_otg_inst);
1849 if (acrtc && state->stream_status[i].plane_count != 0) {
1850 irq_source = IRQ_TYPE_PFLIP + acrtc->otg_inst;
1851 rc = dc_interrupt_set(adev->dm.dc, irq_source, enable) ? 0 : -EBUSY;
1852 DRM_DEBUG_VBL("crtc %d - vupdate irq %sabling: r=%d\n",
1853 acrtc->crtc_id, enable ? "en" : "dis", rc);
1855 DRM_WARN("Failed to %s pflip interrupts\n",
1856 enable ? "enable" : "disable");
1859 rc = dm_enable_vblank(&acrtc->base);
1861 DRM_WARN("Failed to enable vblank interrupts\n");
1863 dm_disable_vblank(&acrtc->base);
1871 static enum dc_status amdgpu_dm_commit_zero_streams(struct dc *dc)
1873 struct dc_state *context = NULL;
1874 enum dc_status res = DC_ERROR_UNEXPECTED;
1876 struct dc_stream_state *del_streams[MAX_PIPES];
1877 int del_streams_count = 0;
1879 memset(del_streams, 0, sizeof(del_streams));
1881 context = dc_create_state(dc);
1882 if (context == NULL)
1883 goto context_alloc_fail;
1885 dc_resource_state_copy_construct_current(dc, context);
1887 /* First remove from context all streams */
1888 for (i = 0; i < context->stream_count; i++) {
1889 struct dc_stream_state *stream = context->streams[i];
1891 del_streams[del_streams_count++] = stream;
1894 /* Remove all planes for removed streams and then remove the streams */
1895 for (i = 0; i < del_streams_count; i++) {
1896 if (!dc_rem_all_planes_for_stream(dc, del_streams[i], context)) {
1897 res = DC_FAIL_DETACH_SURFACES;
1901 res = dc_remove_stream_from_ctx(dc, context, del_streams[i]);
1907 res = dc_validate_global_state(dc, context, false);
1910 DRM_ERROR("%s:resource validation failed, dc_status:%d\n", __func__, res);
1914 res = dc_commit_state(dc, context);
1917 dc_release_state(context);
1923 static int dm_suspend(void *handle)
1925 struct amdgpu_device *adev = handle;
1926 struct amdgpu_display_manager *dm = &adev->dm;
1929 if (amdgpu_in_reset(adev)) {
1930 mutex_lock(&dm->dc_lock);
1932 #if defined(CONFIG_DRM_AMD_DC_DCN)
1933 dc_allow_idle_optimizations(adev->dm.dc, false);
1936 dm->cached_dc_state = dc_copy_state(dm->dc->current_state);
1938 dm_gpureset_toggle_interrupts(adev, dm->cached_dc_state, false);
1940 amdgpu_dm_commit_zero_streams(dm->dc);
1942 amdgpu_dm_irq_suspend(adev);
1947 #ifdef CONFIG_DRM_AMD_SECURE_DISPLAY
1948 amdgpu_dm_crtc_secure_display_suspend(adev);
1950 WARN_ON(adev->dm.cached_state);
1951 adev->dm.cached_state = drm_atomic_helper_suspend(adev_to_drm(adev));
1953 s3_handle_mst(adev_to_drm(adev), true);
1955 amdgpu_dm_irq_suspend(adev);
1958 dc_set_power_state(dm->dc, DC_ACPI_CM_POWER_STATE_D3);
1963 static struct amdgpu_dm_connector *
1964 amdgpu_dm_find_first_crtc_matching_connector(struct drm_atomic_state *state,
1965 struct drm_crtc *crtc)
1968 struct drm_connector_state *new_con_state;
1969 struct drm_connector *connector;
1970 struct drm_crtc *crtc_from_state;
1972 for_each_new_connector_in_state(state, connector, new_con_state, i) {
1973 crtc_from_state = new_con_state->crtc;
1975 if (crtc_from_state == crtc)
1976 return to_amdgpu_dm_connector(connector);
1982 static void emulated_link_detect(struct dc_link *link)
1984 struct dc_sink_init_data sink_init_data = { 0 };
1985 struct display_sink_capability sink_caps = { 0 };
1986 enum dc_edid_status edid_status;
1987 struct dc_context *dc_ctx = link->ctx;
1988 struct dc_sink *sink = NULL;
1989 struct dc_sink *prev_sink = NULL;
1991 link->type = dc_connection_none;
1992 prev_sink = link->local_sink;
1995 dc_sink_release(prev_sink);
1997 switch (link->connector_signal) {
1998 case SIGNAL_TYPE_HDMI_TYPE_A: {
1999 sink_caps.transaction_type = DDC_TRANSACTION_TYPE_I2C;
2000 sink_caps.signal = SIGNAL_TYPE_HDMI_TYPE_A;
2004 case SIGNAL_TYPE_DVI_SINGLE_LINK: {
2005 sink_caps.transaction_type = DDC_TRANSACTION_TYPE_I2C;
2006 sink_caps.signal = SIGNAL_TYPE_DVI_SINGLE_LINK;
2010 case SIGNAL_TYPE_DVI_DUAL_LINK: {
2011 sink_caps.transaction_type = DDC_TRANSACTION_TYPE_I2C;
2012 sink_caps.signal = SIGNAL_TYPE_DVI_DUAL_LINK;
2016 case SIGNAL_TYPE_LVDS: {
2017 sink_caps.transaction_type = DDC_TRANSACTION_TYPE_I2C;
2018 sink_caps.signal = SIGNAL_TYPE_LVDS;
2022 case SIGNAL_TYPE_EDP: {
2023 sink_caps.transaction_type =
2024 DDC_TRANSACTION_TYPE_I2C_OVER_AUX;
2025 sink_caps.signal = SIGNAL_TYPE_EDP;
2029 case SIGNAL_TYPE_DISPLAY_PORT: {
2030 sink_caps.transaction_type =
2031 DDC_TRANSACTION_TYPE_I2C_OVER_AUX;
2032 sink_caps.signal = SIGNAL_TYPE_VIRTUAL;
2037 DC_ERROR("Invalid connector type! signal:%d\n",
2038 link->connector_signal);
2042 sink_init_data.link = link;
2043 sink_init_data.sink_signal = sink_caps.signal;
2045 sink = dc_sink_create(&sink_init_data);
2047 DC_ERROR("Failed to create sink!\n");
2051 /* dc_sink_create returns a new reference */
2052 link->local_sink = sink;
2054 edid_status = dm_helpers_read_local_edid(
2059 if (edid_status != EDID_OK)
2060 DC_ERROR("Failed to read EDID");
2064 static void dm_gpureset_commit_state(struct dc_state *dc_state,
2065 struct amdgpu_display_manager *dm)
2068 struct dc_surface_update surface_updates[MAX_SURFACES];
2069 struct dc_plane_info plane_infos[MAX_SURFACES];
2070 struct dc_scaling_info scaling_infos[MAX_SURFACES];
2071 struct dc_flip_addrs flip_addrs[MAX_SURFACES];
2072 struct dc_stream_update stream_update;
2076 bundle = kzalloc(sizeof(*bundle), GFP_KERNEL);
2079 dm_error("Failed to allocate update bundle\n");
2083 for (k = 0; k < dc_state->stream_count; k++) {
2084 bundle->stream_update.stream = dc_state->streams[k];
2086 for (m = 0; m < dc_state->stream_status->plane_count; m++) {
2087 bundle->surface_updates[m].surface =
2088 dc_state->stream_status->plane_states[m];
2089 bundle->surface_updates[m].surface->force_full_update =
2092 dc_commit_updates_for_stream(
2093 dm->dc, bundle->surface_updates,
2094 dc_state->stream_status->plane_count,
2095 dc_state->streams[k], &bundle->stream_update, dc_state);
2104 static void dm_set_dpms_off(struct dc_link *link)
2106 struct dc_stream_state *stream_state;
2107 struct amdgpu_dm_connector *aconnector = link->priv;
2108 struct amdgpu_device *adev = drm_to_adev(aconnector->base.dev);
2109 struct dc_stream_update stream_update;
2110 bool dpms_off = true;
2112 memset(&stream_update, 0, sizeof(stream_update));
2113 stream_update.dpms_off = &dpms_off;
2115 mutex_lock(&adev->dm.dc_lock);
2116 stream_state = dc_stream_find_from_link(link);
2118 if (stream_state == NULL) {
2119 DRM_DEBUG_DRIVER("Error finding stream state associated with link!\n");
2120 mutex_unlock(&adev->dm.dc_lock);
2124 stream_update.stream = stream_state;
2125 dc_commit_updates_for_stream(stream_state->ctx->dc, NULL, 0,
2126 stream_state, &stream_update,
2127 stream_state->ctx->dc->current_state);
2128 mutex_unlock(&adev->dm.dc_lock);
2131 static int dm_resume(void *handle)
2133 struct amdgpu_device *adev = handle;
2134 struct drm_device *ddev = adev_to_drm(adev);
2135 struct amdgpu_display_manager *dm = &adev->dm;
2136 struct amdgpu_dm_connector *aconnector;
2137 struct drm_connector *connector;
2138 struct drm_connector_list_iter iter;
2139 struct drm_crtc *crtc;
2140 struct drm_crtc_state *new_crtc_state;
2141 struct dm_crtc_state *dm_new_crtc_state;
2142 struct drm_plane *plane;
2143 struct drm_plane_state *new_plane_state;
2144 struct dm_plane_state *dm_new_plane_state;
2145 struct dm_atomic_state *dm_state = to_dm_atomic_state(dm->atomic_obj.state);
2146 enum dc_connection_type new_connection_type = dc_connection_none;
2147 struct dc_state *dc_state;
2150 if (amdgpu_in_reset(adev)) {
2151 dc_state = dm->cached_dc_state;
2153 r = dm_dmub_hw_init(adev);
2155 DRM_ERROR("DMUB interface failed to initialize: status=%d\n", r);
2157 dc_set_power_state(dm->dc, DC_ACPI_CM_POWER_STATE_D0);
2160 amdgpu_dm_irq_resume_early(adev);
2162 for (i = 0; i < dc_state->stream_count; i++) {
2163 dc_state->streams[i]->mode_changed = true;
2164 for (j = 0; j < dc_state->stream_status->plane_count; j++) {
2165 dc_state->stream_status->plane_states[j]->update_flags.raw
2170 WARN_ON(!dc_commit_state(dm->dc, dc_state));
2172 dm_gpureset_commit_state(dm->cached_dc_state, dm);
2174 dm_gpureset_toggle_interrupts(adev, dm->cached_dc_state, true);
2176 dc_release_state(dm->cached_dc_state);
2177 dm->cached_dc_state = NULL;
2179 amdgpu_dm_irq_resume_late(adev);
2181 mutex_unlock(&dm->dc_lock);
2185 /* Recreate dc_state - DC invalidates it when setting power state to S3. */
2186 dc_release_state(dm_state->context);
2187 dm_state->context = dc_create_state(dm->dc);
2188 /* TODO: Remove dc_state->dccg, use dc->dccg directly. */
2189 dc_resource_state_construct(dm->dc, dm_state->context);
2191 /* Before powering on DC we need to re-initialize DMUB. */
2192 r = dm_dmub_hw_init(adev);
2194 DRM_ERROR("DMUB interface failed to initialize: status=%d\n", r);
2196 /* power on hardware */
2197 dc_set_power_state(dm->dc, DC_ACPI_CM_POWER_STATE_D0);
2199 /* program HPD filter */
2203 * Early enable HPD Rx IRQ; this should be done before setting the mode,
2204 * as short-pulse interrupts are used for MST.
2206 amdgpu_dm_irq_resume_early(adev);
2208 /* On resume we need to rewrite the MSTM control bits to enable MST */
2209 s3_handle_mst(ddev, false);
2212 drm_connector_list_iter_begin(ddev, &iter);
2213 drm_for_each_connector_iter(connector, &iter) {
2214 aconnector = to_amdgpu_dm_connector(connector);
2217 * This is the case when traversing through already created
2218 * MST connectors; they should be skipped.
2220 if (aconnector->mst_port)
2223 mutex_lock(&aconnector->hpd_lock);
2224 if (!dc_link_detect_sink(aconnector->dc_link, &new_connection_type))
2225 DRM_ERROR("KMS: Failed to detect connector\n");
2227 if (aconnector->base.force && new_connection_type == dc_connection_none)
2228 emulated_link_detect(aconnector->dc_link);
2230 dc_link_detect(aconnector->dc_link, DETECT_REASON_HPD);
2232 if (aconnector->fake_enable && aconnector->dc_link->local_sink)
2233 aconnector->fake_enable = false;
2235 if (aconnector->dc_sink)
2236 dc_sink_release(aconnector->dc_sink);
2237 aconnector->dc_sink = NULL;
2238 amdgpu_dm_update_connector_after_detect(aconnector);
2239 mutex_unlock(&aconnector->hpd_lock);
2241 drm_connector_list_iter_end(&iter);
2243 /* Force mode set in atomic commit */
2244 for_each_new_crtc_in_state(dm->cached_state, crtc, new_crtc_state, i)
2245 new_crtc_state->active_changed = true;
2248 * atomic_check is expected to create the dc states. We need to release
2249 * them here, since they were duplicated as part of the suspend
2252 for_each_new_crtc_in_state(dm->cached_state, crtc, new_crtc_state, i) {
2253 dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
2254 if (dm_new_crtc_state->stream) {
2255 WARN_ON(kref_read(&dm_new_crtc_state->stream->refcount) > 1);
2256 dc_stream_release(dm_new_crtc_state->stream);
2257 dm_new_crtc_state->stream = NULL;
2261 for_each_new_plane_in_state(dm->cached_state, plane, new_plane_state, i) {
2262 dm_new_plane_state = to_dm_plane_state(new_plane_state);
2263 if (dm_new_plane_state->dc_state) {
2264 WARN_ON(kref_read(&dm_new_plane_state->dc_state->refcount) > 1);
2265 dc_plane_state_release(dm_new_plane_state->dc_state);
2266 dm_new_plane_state->dc_state = NULL;
2270 drm_atomic_helper_resume(ddev, dm->cached_state);
2272 dm->cached_state = NULL;
2274 #ifdef CONFIG_DRM_AMD_SECURE_DISPLAY
2275 amdgpu_dm_crtc_secure_display_resume(adev);
2278 amdgpu_dm_irq_resume_late(adev);
2280 amdgpu_dm_smu_write_watermarks_table(adev);
2288 * DM (and consequently DC) is registered in the amdgpu base driver as an IP
2289 * block. When CONFIG_DRM_AMD_DC is enabled, the DM device IP block is added to
2290 * the base driver's device list to be initialized and torn down accordingly.
2292 * The functions to do so are provided as hooks in &struct amd_ip_funcs.
2295 static const struct amd_ip_funcs amdgpu_dm_funcs = {
2297 .early_init = dm_early_init,
2298 .late_init = dm_late_init,
2299 .sw_init = dm_sw_init,
2300 .sw_fini = dm_sw_fini,
2301 .hw_init = dm_hw_init,
2302 .hw_fini = dm_hw_fini,
2303 .suspend = dm_suspend,
2304 .resume = dm_resume,
2305 .is_idle = dm_is_idle,
2306 .wait_for_idle = dm_wait_for_idle,
2307 .check_soft_reset = dm_check_soft_reset,
2308 .soft_reset = dm_soft_reset,
2309 .set_clockgating_state = dm_set_clockgating_state,
2310 .set_powergating_state = dm_set_powergating_state,
2313 const struct amdgpu_ip_block_version dm_ip_block =
2315 .type = AMD_IP_BLOCK_TYPE_DCE,
2319 .funcs = &amdgpu_dm_funcs,
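/*
 * Sketch of how this block is consumed (for orientation only; the actual call
 * site lives in the SoC setup code, not in this file): the base driver
 * typically registers the block with something like
 *
 *	amdgpu_device_ip_block_add(adev, &dm_ip_block);
 *
 * after which the hooks above are invoked through the common IP block
 * init/suspend/resume machinery.
 */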
2329 static const struct drm_mode_config_funcs amdgpu_dm_mode_funcs = {
2330 .fb_create = amdgpu_display_user_framebuffer_create,
2331 .get_format_info = amd_get_format_info,
2332 .output_poll_changed = drm_fb_helper_output_poll_changed,
2333 .atomic_check = amdgpu_dm_atomic_check,
2334 .atomic_commit = drm_atomic_helper_commit,
2337 static struct drm_mode_config_helper_funcs amdgpu_dm_mode_config_helperfuncs = {
2338 .atomic_commit_tail = amdgpu_dm_atomic_commit_tail
2341 static void update_connector_ext_caps(struct amdgpu_dm_connector *aconnector)
2343 u32 max_cll, min_cll, max, min, q, r;
2344 struct amdgpu_dm_backlight_caps *caps;
2345 struct amdgpu_display_manager *dm;
2346 struct drm_connector *conn_base;
2347 struct amdgpu_device *adev;
2348 struct dc_link *link = NULL;
2349 static const u8 pre_computed_values[] = {
2350 50, 51, 52, 53, 55, 56, 57, 58, 59, 61, 62, 63, 65, 66, 68, 69,
2351 71, 72, 74, 75, 77, 79, 81, 82, 84, 86, 88, 90, 92, 94, 96, 98};
2353 if (!aconnector || !aconnector->dc_link)
2356 link = aconnector->dc_link;
2357 if (link->connector_signal != SIGNAL_TYPE_EDP)
2360 conn_base = &aconnector->base;
2361 adev = drm_to_adev(conn_base->dev);
2363 caps = &dm->backlight_caps;
2364 caps->ext_caps = &aconnector->dc_link->dpcd_sink_ext_caps;
2365 caps->aux_support = false;
2366 max_cll = conn_base->hdr_sink_metadata.hdmi_type1.max_cll;
2367 min_cll = conn_base->hdr_sink_metadata.hdmi_type1.min_cll;
2369 if (caps->ext_caps->bits.oled == 1 ||
2370 caps->ext_caps->bits.sdr_aux_backlight_control == 1 ||
2371 caps->ext_caps->bits.hdr_aux_backlight_control == 1)
2372 caps->aux_support = true;
2374 if (amdgpu_backlight == 0)
2375 caps->aux_support = false;
2376 else if (amdgpu_backlight == 1)
2377 caps->aux_support = true;
2379 /* From the specification (CTA-861-G), for calculating the maximum
2380 * luminance we need to use:
2381 * Luminance = 50*2**(CV/32)
2382 * Where CV is a one-byte value.
2383 * Evaluating this expression would require floating-point precision;
2384 * to avoid that complexity, we take advantage of the fact that CV is
2385 * divided by a constant. From Euclid's division algorithm, we know that
2386 * CV can be written as: CV = 32*q + r. Next, we replace CV in the
2387 * Luminance expression and get 50*(2**q)*(2**(r/32)), hence we just
2388 * need to pre-compute the value of r/32. For pre-computing the values
2389 * we just used the following Ruby line:
2390 * (0...32).each {|cv| puts (50*2**(cv/32.0)).round}
2391 * The results of the above expressions can be verified at
2392 * pre_computed_values.
2396 max = (1 << q) * pre_computed_values[r];
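	/*
	 * Worked example (illustrative values): with max_cll = 70 we get
	 * q = 70 / 32 = 2 and r = 70 % 32 = 6, so
	 * max = (1 << 2) * pre_computed_values[6] = 4 * 57 = 228,
	 * which matches 50 * 2**(70/32) ~= 227.8 from the formula above.
	 */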
2398 // min luminance: maxLum * (CV/255)^2 / 100
2399 q = DIV_ROUND_CLOSEST(min_cll, 255);
2400 min = max * DIV_ROUND_CLOSEST((q * q), 100);
2402 caps->aux_max_input_signal = max;
2403 caps->aux_min_input_signal = min;
2406 void amdgpu_dm_update_connector_after_detect(
2407 struct amdgpu_dm_connector *aconnector)
2409 struct drm_connector *connector = &aconnector->base;
2410 struct drm_device *dev = connector->dev;
2411 struct dc_sink *sink;
2413 /* MST handled by drm_mst framework */
2414 if (aconnector->mst_mgr.mst_state == true)
2417 sink = aconnector->dc_link->local_sink;
2419 dc_sink_retain(sink);
2422 * An EDID-managed connector gets its first update only in the mode_valid hook;
2423 * the connector sink is then set to either a fake or a physical sink, depending on the link status.
2424 * Skip this if it was already done during boot.
2426 if (aconnector->base.force != DRM_FORCE_UNSPECIFIED
2427 && aconnector->dc_em_sink) {
2430 * For S3 resume with a headless setup, use the emulated sink (dc_em_sink) to fake a stream,
2431 * because connector->sink is set to NULL on resume.
2433 mutex_lock(&dev->mode_config.mutex);
2436 if (aconnector->dc_sink) {
2437 amdgpu_dm_update_freesync_caps(connector, NULL);
2439 * The retain and release below are used to
2440 * bump up the refcount for the sink, because the link no longer points
2441 * to it after disconnect; otherwise, on the next crtc-to-connector
2442 * reshuffle by the UMD we would run into an unwanted dc_sink release.
2444 dc_sink_release(aconnector->dc_sink);
2446 aconnector->dc_sink = sink;
2447 dc_sink_retain(aconnector->dc_sink);
2448 amdgpu_dm_update_freesync_caps(connector,
2451 amdgpu_dm_update_freesync_caps(connector, NULL);
2452 if (!aconnector->dc_sink) {
2453 aconnector->dc_sink = aconnector->dc_em_sink;
2454 dc_sink_retain(aconnector->dc_sink);
2458 mutex_unlock(&dev->mode_config.mutex);
2461 dc_sink_release(sink);
2466 * TODO: temporary guard while we look for a proper fix:
2467 * if this sink is an MST sink, we should not do anything here.
2469 if (sink && sink->sink_signal == SIGNAL_TYPE_DISPLAY_PORT_MST) {
2470 dc_sink_release(sink);
2474 if (aconnector->dc_sink == sink) {
2476 * We got a DP short pulse (Link Loss, DP CTS, etc...).
2479 DRM_DEBUG_DRIVER("DCHPD: connector_id=%d: dc_sink didn't change.\n",
2480 aconnector->connector_id);
2482 dc_sink_release(sink);
2486 DRM_DEBUG_DRIVER("DCHPD: connector_id=%d: Old sink=%p New sink=%p\n",
2487 aconnector->connector_id, aconnector->dc_sink, sink);
2489 mutex_lock(&dev->mode_config.mutex);
2492 * 1. Update status of the drm connector
2493 * 2. Send an event and let userspace tell us what to do
2497 * TODO: check if we still need the S3 mode update workaround.
2498 * If yes, put it here.
2500 if (aconnector->dc_sink) {
2501 amdgpu_dm_update_freesync_caps(connector, NULL);
2502 dc_sink_release(aconnector->dc_sink);
2505 aconnector->dc_sink = sink;
2506 dc_sink_retain(aconnector->dc_sink);
2507 if (sink->dc_edid.length == 0) {
2508 aconnector->edid = NULL;
2509 if (aconnector->dc_link->aux_mode) {
2510 drm_dp_cec_unset_edid(
2511 &aconnector->dm_dp_aux.aux);
2515 (struct edid *)sink->dc_edid.raw_edid;
2517 drm_connector_update_edid_property(connector,
2519 if (aconnector->dc_link->aux_mode)
2520 drm_dp_cec_set_edid(&aconnector->dm_dp_aux.aux,
2524 amdgpu_dm_update_freesync_caps(connector, aconnector->edid);
2525 update_connector_ext_caps(aconnector);
2527 drm_dp_cec_unset_edid(&aconnector->dm_dp_aux.aux);
2528 amdgpu_dm_update_freesync_caps(connector, NULL);
2529 drm_connector_update_edid_property(connector, NULL);
2530 aconnector->num_modes = 0;
2531 dc_sink_release(aconnector->dc_sink);
2532 aconnector->dc_sink = NULL;
2533 aconnector->edid = NULL;
2534 #ifdef CONFIG_DRM_AMD_DC_HDCP
2535 /* Set CP to DESIRED if it was ENABLED, so we can re-enable it again on hotplug */
2536 if (connector->state->content_protection == DRM_MODE_CONTENT_PROTECTION_ENABLED)
2537 connector->state->content_protection = DRM_MODE_CONTENT_PROTECTION_DESIRED;
2541 mutex_unlock(&dev->mode_config.mutex);
2543 update_subconnector_property(aconnector);
2546 dc_sink_release(sink);
2549 static void handle_hpd_irq(void *param)
2551 struct amdgpu_dm_connector *aconnector = (struct amdgpu_dm_connector *)param;
2552 struct drm_connector *connector = &aconnector->base;
2553 struct drm_device *dev = connector->dev;
2554 enum dc_connection_type new_connection_type = dc_connection_none;
2555 struct amdgpu_device *adev = drm_to_adev(dev);
2556 #ifdef CONFIG_DRM_AMD_DC_HDCP
2557 struct dm_connector_state *dm_con_state = to_dm_connector_state(connector->state);
2560 if (adev->dm.disable_hpd_irq)
2564 * In case of failure or MST there is no need to update the connector status
2565 * or notify the OS, since (in the MST case) MST does this in its own context.
2567 mutex_lock(&aconnector->hpd_lock);
2569 #ifdef CONFIG_DRM_AMD_DC_HDCP
2570 if (adev->dm.hdcp_workqueue) {
2571 hdcp_reset_display(adev->dm.hdcp_workqueue, aconnector->dc_link->link_index);
2572 dm_con_state->update_hdcp = true;
2575 if (aconnector->fake_enable)
2576 aconnector->fake_enable = false;
2578 if (!dc_link_detect_sink(aconnector->dc_link, &new_connection_type))
2579 DRM_ERROR("KMS: Failed to detect connector\n");
2581 if (aconnector->base.force && new_connection_type == dc_connection_none) {
2582 emulated_link_detect(aconnector->dc_link);
2585 drm_modeset_lock_all(dev);
2586 dm_restore_drm_connector_state(dev, connector);
2587 drm_modeset_unlock_all(dev);
2589 if (aconnector->base.force == DRM_FORCE_UNSPECIFIED)
2590 drm_kms_helper_hotplug_event(dev);
2592 } else if (dc_link_detect(aconnector->dc_link, DETECT_REASON_HPD)) {
2593 if (new_connection_type == dc_connection_none &&
2594 aconnector->dc_link->type == dc_connection_none)
2595 dm_set_dpms_off(aconnector->dc_link);
2597 amdgpu_dm_update_connector_after_detect(aconnector);
2599 drm_modeset_lock_all(dev);
2600 dm_restore_drm_connector_state(dev, connector);
2601 drm_modeset_unlock_all(dev);
2603 if (aconnector->base.force == DRM_FORCE_UNSPECIFIED)
2604 drm_kms_helper_hotplug_event(dev);
2606 mutex_unlock(&aconnector->hpd_lock);
2610 static void dm_handle_hpd_rx_irq(struct amdgpu_dm_connector *aconnector)
2612 uint8_t esi[DP_PSR_ERROR_STATUS - DP_SINK_COUNT_ESI] = { 0 };
2614 bool new_irq_handled = false;
2616 int dpcd_bytes_to_read;
2618 const int max_process_count = 30;
2619 int process_count = 0;
2621 const struct dc_link_status *link_status = dc_link_get_status(aconnector->dc_link);
2623 if (link_status->dpcd_caps->dpcd_rev.raw < 0x12) {
2624 dpcd_bytes_to_read = DP_LANE0_1_STATUS - DP_SINK_COUNT;
2625 /* DPCD 0x200 - 0x201 for downstream IRQ */
2626 dpcd_addr = DP_SINK_COUNT;
2628 dpcd_bytes_to_read = DP_PSR_ERROR_STATUS - DP_SINK_COUNT_ESI;
2629 /* DPCD 0x2002 - 0x2005 for downstream IRQ */
2630 dpcd_addr = DP_SINK_COUNT_ESI;
2633 dret = drm_dp_dpcd_read(
2634 &aconnector->dm_dp_aux.aux,
2637 dpcd_bytes_to_read);
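	/*
	 * For reference (offsets from drm_dp_helper.h): on DPCD < 1.2 this reads
	 * 2 bytes starting at DP_SINK_COUNT (0x200); on 1.2+ it reads 4 bytes
	 * starting at DP_SINK_COUNT_ESI (0x2002), matching the ranges noted in
	 * the comments above.
	 */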
2639 while (dret == dpcd_bytes_to_read &&
2640 process_count < max_process_count) {
2646 DRM_DEBUG_DRIVER("ESI %02x %02x %02x\n", esi[0], esi[1], esi[2]);
2647 /* handle HPD short pulse irq */
2648 if (aconnector->mst_mgr.mst_state)
2650 &aconnector->mst_mgr,
2654 if (new_irq_handled) {
2655 /* ACK at DPCD to notify downstream */
2656 const int ack_dpcd_bytes_to_write =
2657 dpcd_bytes_to_read - 1;
2659 for (retry = 0; retry < 3; retry++) {
2662 wret = drm_dp_dpcd_write(
2663 &aconnector->dm_dp_aux.aux,
2666 ack_dpcd_bytes_to_write);
2667 if (wret == ack_dpcd_bytes_to_write)
2671 /* check if there is new irq to be handled */
2672 dret = drm_dp_dpcd_read(
2673 &aconnector->dm_dp_aux.aux,
2676 dpcd_bytes_to_read);
2678 new_irq_handled = false;
2684 if (process_count == max_process_count)
2685 DRM_DEBUG_DRIVER("Loop exceeded max iterations\n");
2688 static void handle_hpd_rx_irq(void *param)
2690 struct amdgpu_dm_connector *aconnector = (struct amdgpu_dm_connector *)param;
2691 struct drm_connector *connector = &aconnector->base;
2692 struct drm_device *dev = connector->dev;
2693 struct dc_link *dc_link = aconnector->dc_link;
2694 bool is_mst_root_connector = aconnector->mst_mgr.mst_state;
2695 bool result = false;
2696 enum dc_connection_type new_connection_type = dc_connection_none;
2697 struct amdgpu_device *adev = drm_to_adev(dev);
2698 union hpd_irq_data hpd_irq_data;
2700 memset(&hpd_irq_data, 0, sizeof(hpd_irq_data));
2702 if (adev->dm.disable_hpd_irq)
2707 * TODO: Temporary mutex to keep the hpd interrupt from hitting a gpio
2708 * conflict; after the i2c helper is implemented, this mutex should be
2711 if (dc_link->type != dc_connection_mst_branch)
2712 mutex_lock(&aconnector->hpd_lock);
2714 read_hpd_rx_irq_data(dc_link, &hpd_irq_data);
2716 if ((dc_link->cur_link_settings.lane_count != LANE_COUNT_UNKNOWN) ||
2717 (dc_link->type == dc_connection_mst_branch)) {
2718 if (hpd_irq_data.bytes.device_service_irq.bits.UP_REQ_MSG_RDY) {
2720 dm_handle_hpd_rx_irq(aconnector);
2722 } else if (hpd_irq_data.bytes.device_service_irq.bits.DOWN_REP_MSG_RDY) {
2724 dm_handle_hpd_rx_irq(aconnector);
2729 if (!amdgpu_in_reset(adev))
2730 mutex_lock(&adev->dm.dc_lock);
2731 #ifdef CONFIG_DRM_AMD_DC_HDCP
2732 result = dc_link_handle_hpd_rx_irq(dc_link, &hpd_irq_data, NULL);
2734 result = dc_link_handle_hpd_rx_irq(dc_link, NULL, NULL);
2736 if (!amdgpu_in_reset(adev))
2737 mutex_unlock(&adev->dm.dc_lock);
2740 if (result && !is_mst_root_connector) {
2741 /* Downstream Port status changed. */
2742 if (!dc_link_detect_sink(dc_link, &new_connection_type))
2743 DRM_ERROR("KMS: Failed to detect connector\n");
2745 if (aconnector->base.force && new_connection_type == dc_connection_none) {
2746 emulated_link_detect(dc_link);
2748 if (aconnector->fake_enable)
2749 aconnector->fake_enable = false;
2751 amdgpu_dm_update_connector_after_detect(aconnector);
2754 drm_modeset_lock_all(dev);
2755 dm_restore_drm_connector_state(dev, connector);
2756 drm_modeset_unlock_all(dev);
2758 drm_kms_helper_hotplug_event(dev);
2759 } else if (dc_link_detect(dc_link, DETECT_REASON_HPDRX)) {
2761 if (aconnector->fake_enable)
2762 aconnector->fake_enable = false;
2764 amdgpu_dm_update_connector_after_detect(aconnector);
2767 drm_modeset_lock_all(dev);
2768 dm_restore_drm_connector_state(dev, connector);
2769 drm_modeset_unlock_all(dev);
2771 drm_kms_helper_hotplug_event(dev);
2774 #ifdef CONFIG_DRM_AMD_DC_HDCP
2775 if (hpd_irq_data.bytes.device_service_irq.bits.CP_IRQ) {
2776 if (adev->dm.hdcp_workqueue)
2777 hdcp_handle_cpirq(adev->dm.hdcp_workqueue, aconnector->base.index);
2781 if (dc_link->type != dc_connection_mst_branch) {
2782 drm_dp_cec_irq(&aconnector->dm_dp_aux.aux);
2783 mutex_unlock(&aconnector->hpd_lock);
2787 static void register_hpd_handlers(struct amdgpu_device *adev)
2789 struct drm_device *dev = adev_to_drm(adev);
2790 struct drm_connector *connector;
2791 struct amdgpu_dm_connector *aconnector;
2792 const struct dc_link *dc_link;
2793 struct dc_interrupt_params int_params = {0};
2795 int_params.requested_polarity = INTERRUPT_POLARITY_DEFAULT;
2796 int_params.current_polarity = INTERRUPT_POLARITY_DEFAULT;
2798 list_for_each_entry(connector,
2799 &dev->mode_config.connector_list, head) {
2801 aconnector = to_amdgpu_dm_connector(connector);
2802 dc_link = aconnector->dc_link;
2804 if (DC_IRQ_SOURCE_INVALID != dc_link->irq_source_hpd) {
2805 int_params.int_context = INTERRUPT_LOW_IRQ_CONTEXT;
2806 int_params.irq_source = dc_link->irq_source_hpd;
2808 amdgpu_dm_irq_register_interrupt(adev, &int_params,
2810 (void *) aconnector);
2813 if (DC_IRQ_SOURCE_INVALID != dc_link->irq_source_hpd_rx) {
2815 /* Also register for DP short pulse (hpd_rx). */
2816 int_params.int_context = INTERRUPT_LOW_IRQ_CONTEXT;
2817 int_params.irq_source = dc_link->irq_source_hpd_rx;
2819 amdgpu_dm_irq_register_interrupt(adev, &int_params,
2821 (void *) aconnector);
2826 #if defined(CONFIG_DRM_AMD_DC_SI)
2827 /* Register IRQ sources and initialize IRQ callbacks */
2828 static int dce60_register_irq_handlers(struct amdgpu_device *adev)
2830 struct dc *dc = adev->dm.dc;
2831 struct common_irq_params *c_irq_params;
2832 struct dc_interrupt_params int_params = {0};
2835 unsigned client_id = AMDGPU_IRQ_CLIENTID_LEGACY;
2837 int_params.requested_polarity = INTERRUPT_POLARITY_DEFAULT;
2838 int_params.current_polarity = INTERRUPT_POLARITY_DEFAULT;
2841 * Actions of amdgpu_irq_add_id():
2842 * 1. Register a set() function with base driver.
2843 * Base driver will call set() function to enable/disable an
2844 * interrupt in DC hardware.
2845 * 2. Register amdgpu_dm_irq_handler().
2846 * Base driver will call amdgpu_dm_irq_handler() for ALL interrupts
2847 * coming from DC hardware.
2848 * amdgpu_dm_irq_handler() will re-direct the interrupt to DC
2849 * for acknowledging and handling. */
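	/*
	 * Illustrative end-to-end flow for one source: amdgpu_irq_add_id(adev,
	 * client_id, src_id, &adev->crtc_irq) below routes that source through
	 * amdgpu_dm_irq_handler(), which in turn dispatches to the handler
	 * registered via amdgpu_dm_irq_register_interrupt() (e.g. dm_crtc_high_irq).
	 */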
2851 /* Use VBLANK interrupt */
2852 for (i = 0; i < adev->mode_info.num_crtc; i++) {
2853 r = amdgpu_irq_add_id(adev, client_id, i + 1, &adev->crtc_irq);
2855 DRM_ERROR("Failed to add crtc irq id!\n");
2859 int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
2860 int_params.irq_source =
2861 dc_interrupt_to_irq_source(dc, i + 1, 0);
2863 c_irq_params = &adev->dm.vblank_params[int_params.irq_source - DC_IRQ_SOURCE_VBLANK1];
2865 c_irq_params->adev = adev;
2866 c_irq_params->irq_src = int_params.irq_source;
2868 amdgpu_dm_irq_register_interrupt(adev, &int_params,
2869 dm_crtc_high_irq, c_irq_params);
2872 /* Use GRPH_PFLIP interrupt */
2873 for (i = VISLANDS30_IV_SRCID_D1_GRPH_PFLIP;
2874 i <= VISLANDS30_IV_SRCID_D6_GRPH_PFLIP; i += 2) {
2875 r = amdgpu_irq_add_id(adev, client_id, i, &adev->pageflip_irq);
2877 DRM_ERROR("Failed to add page flip irq id!\n");
2881 int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
2882 int_params.irq_source =
2883 dc_interrupt_to_irq_source(dc, i, 0);
2885 c_irq_params = &adev->dm.pflip_params[int_params.irq_source - DC_IRQ_SOURCE_PFLIP_FIRST];
2887 c_irq_params->adev = adev;
2888 c_irq_params->irq_src = int_params.irq_source;
2890 amdgpu_dm_irq_register_interrupt(adev, &int_params,
2891 dm_pflip_high_irq, c_irq_params);
2896 r = amdgpu_irq_add_id(adev, client_id,
2897 VISLANDS30_IV_SRCID_HOTPLUG_DETECT_A, &adev->hpd_irq);
2899 DRM_ERROR("Failed to add hpd irq id!\n");
2903 register_hpd_handlers(adev);
2909 /* Register IRQ sources and initialize IRQ callbacks */
2910 static int dce110_register_irq_handlers(struct amdgpu_device *adev)
2912 struct dc *dc = adev->dm.dc;
2913 struct common_irq_params *c_irq_params;
2914 struct dc_interrupt_params int_params = {0};
2917 unsigned client_id = AMDGPU_IRQ_CLIENTID_LEGACY;
2919 if (adev->asic_type >= CHIP_VEGA10)
2920 client_id = SOC15_IH_CLIENTID_DCE;
2922 int_params.requested_polarity = INTERRUPT_POLARITY_DEFAULT;
2923 int_params.current_polarity = INTERRUPT_POLARITY_DEFAULT;
2926 * Actions of amdgpu_irq_add_id():
2927 * 1. Register a set() function with base driver.
2928 * Base driver will call set() function to enable/disable an
2929 * interrupt in DC hardware.
2930 * 2. Register amdgpu_dm_irq_handler().
2931 * Base driver will call amdgpu_dm_irq_handler() for ALL interrupts
2932 * coming from DC hardware.
2933 * amdgpu_dm_irq_handler() will re-direct the interrupt to DC
2934 * for acknowledging and handling. */
2936 /* Use VBLANK interrupt */
2937 for (i = VISLANDS30_IV_SRCID_D1_VERTICAL_INTERRUPT0; i <= VISLANDS30_IV_SRCID_D6_VERTICAL_INTERRUPT0; i++) {
2938 r = amdgpu_irq_add_id(adev, client_id, i, &adev->crtc_irq);
2940 DRM_ERROR("Failed to add crtc irq id!\n");
2944 int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
2945 int_params.irq_source =
2946 dc_interrupt_to_irq_source(dc, i, 0);
2948 c_irq_params = &adev->dm.vblank_params[int_params.irq_source - DC_IRQ_SOURCE_VBLANK1];
2950 c_irq_params->adev = adev;
2951 c_irq_params->irq_src = int_params.irq_source;
2953 amdgpu_dm_irq_register_interrupt(adev, &int_params,
2954 dm_crtc_high_irq, c_irq_params);
2957 /* Use VUPDATE interrupt */
2958 for (i = VISLANDS30_IV_SRCID_D1_V_UPDATE_INT; i <= VISLANDS30_IV_SRCID_D6_V_UPDATE_INT; i += 2) {
2959 r = amdgpu_irq_add_id(adev, client_id, i, &adev->vupdate_irq);
2961 DRM_ERROR("Failed to add vupdate irq id!\n");
2965 int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
2966 int_params.irq_source =
2967 dc_interrupt_to_irq_source(dc, i, 0);
2969 c_irq_params = &adev->dm.vupdate_params[int_params.irq_source - DC_IRQ_SOURCE_VUPDATE1];
2971 c_irq_params->adev = adev;
2972 c_irq_params->irq_src = int_params.irq_source;
2974 amdgpu_dm_irq_register_interrupt(adev, &int_params,
2975 dm_vupdate_high_irq, c_irq_params);
2978 /* Use GRPH_PFLIP interrupt */
2979 for (i = VISLANDS30_IV_SRCID_D1_GRPH_PFLIP;
2980 i <= VISLANDS30_IV_SRCID_D6_GRPH_PFLIP; i += 2) {
2981 r = amdgpu_irq_add_id(adev, client_id, i, &adev->pageflip_irq);
2983 DRM_ERROR("Failed to add page flip irq id!\n");
2987 int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
2988 int_params.irq_source =
2989 dc_interrupt_to_irq_source(dc, i, 0);
2991 c_irq_params = &adev->dm.pflip_params[int_params.irq_source - DC_IRQ_SOURCE_PFLIP_FIRST];
2993 c_irq_params->adev = adev;
2994 c_irq_params->irq_src = int_params.irq_source;
2996 amdgpu_dm_irq_register_interrupt(adev, &int_params,
2997 dm_pflip_high_irq, c_irq_params);
3002 r = amdgpu_irq_add_id(adev, client_id,
3003 VISLANDS30_IV_SRCID_HOTPLUG_DETECT_A, &adev->hpd_irq);
3005 DRM_ERROR("Failed to add hpd irq id!\n");
3009 register_hpd_handlers(adev);
3014 #if defined(CONFIG_DRM_AMD_DC_DCN)
3015 /* Register IRQ sources and initialize IRQ callbacks */
3016 static int dcn10_register_irq_handlers(struct amdgpu_device *adev)
3018 struct dc *dc = adev->dm.dc;
3019 struct common_irq_params *c_irq_params;
3020 struct dc_interrupt_params int_params = {0};
3023 #if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
3024 static const unsigned int vrtl_int_srcid[] = {
3025 DCN_1_0__SRCID__OTG1_VERTICAL_INTERRUPT0_CONTROL,
3026 DCN_1_0__SRCID__OTG2_VERTICAL_INTERRUPT0_CONTROL,
3027 DCN_1_0__SRCID__OTG3_VERTICAL_INTERRUPT0_CONTROL,
3028 DCN_1_0__SRCID__OTG4_VERTICAL_INTERRUPT0_CONTROL,
3029 DCN_1_0__SRCID__OTG5_VERTICAL_INTERRUPT0_CONTROL,
3030 DCN_1_0__SRCID__OTG6_VERTICAL_INTERRUPT0_CONTROL
3034 int_params.requested_polarity = INTERRUPT_POLARITY_DEFAULT;
3035 int_params.current_polarity = INTERRUPT_POLARITY_DEFAULT;
3038 * Actions of amdgpu_irq_add_id():
3039 * 1. Register a set() function with base driver.
3040 * Base driver will call set() function to enable/disable an
3041 * interrupt in DC hardware.
3042 * 2. Register amdgpu_dm_irq_handler().
3043 * Base driver will call amdgpu_dm_irq_handler() for ALL interrupts
3044 * coming from DC hardware.
3045 * amdgpu_dm_irq_handler() will re-direct the interrupt to DC
3046 * for acknowledging and handling.
3049 /* Use VSTARTUP interrupt */
3050 for (i = DCN_1_0__SRCID__DC_D1_OTG_VSTARTUP;
3051 i <= DCN_1_0__SRCID__DC_D1_OTG_VSTARTUP + adev->mode_info.num_crtc - 1;
3053 r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_DCE, i, &adev->crtc_irq);
3056 DRM_ERROR("Failed to add crtc irq id!\n");
3060 int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
3061 int_params.irq_source =
3062 dc_interrupt_to_irq_source(dc, i, 0);
3064 c_irq_params = &adev->dm.vblank_params[int_params.irq_source - DC_IRQ_SOURCE_VBLANK1];
3066 c_irq_params->adev = adev;
3067 c_irq_params->irq_src = int_params.irq_source;
3069 amdgpu_dm_irq_register_interrupt(
3070 adev, &int_params, dm_crtc_high_irq, c_irq_params);
3073 /* Use otg vertical line interrupt */
3074 #if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
3075 for (i = 0; i <= adev->mode_info.num_crtc - 1; i++) {
3076 r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_DCE,
3077 vrtl_int_srcid[i], &adev->vline0_irq);
3080 DRM_ERROR("Failed to add vline0 irq id!\n");
3084 int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
3085 int_params.irq_source =
3086 dc_interrupt_to_irq_source(dc, vrtl_int_srcid[i], 0);
3088 if (int_params.irq_source == DC_IRQ_SOURCE_INVALID) {
3089 DRM_ERROR("Failed to register vline0 irq %d!\n", vrtl_int_srcid[i]);
3093 c_irq_params = &adev->dm.vline0_params[int_params.irq_source
3094 - DC_IRQ_SOURCE_DC1_VLINE0];
3096 c_irq_params->adev = adev;
3097 c_irq_params->irq_src = int_params.irq_source;
3099 amdgpu_dm_irq_register_interrupt(adev, &int_params,
3100 dm_dcn_vertical_interrupt0_high_irq, c_irq_params);
3104 /* Use VUPDATE_NO_LOCK interrupt on DCN, which seems to correspond to
3105 * the regular VUPDATE interrupt on DCE. We want DC_IRQ_SOURCE_VUPDATEx
3106 * to trigger at end of each vblank, regardless of state of the lock,
3107 * matching DCE behaviour.
3109 for (i = DCN_1_0__SRCID__OTG0_IHC_V_UPDATE_NO_LOCK_INTERRUPT;
3110 i <= DCN_1_0__SRCID__OTG0_IHC_V_UPDATE_NO_LOCK_INTERRUPT + adev->mode_info.num_crtc - 1;
3112 r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_DCE, i, &adev->vupdate_irq);
3115 DRM_ERROR("Failed to add vupdate irq id!\n");
3119 int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
3120 int_params.irq_source =
3121 dc_interrupt_to_irq_source(dc, i, 0);
3123 c_irq_params = &adev->dm.vupdate_params[int_params.irq_source - DC_IRQ_SOURCE_VUPDATE1];
3125 c_irq_params->adev = adev;
3126 c_irq_params->irq_src = int_params.irq_source;
3128 amdgpu_dm_irq_register_interrupt(adev, &int_params,
3129 dm_vupdate_high_irq, c_irq_params);
3132 /* Use GRPH_PFLIP interrupt */
3133 for (i = DCN_1_0__SRCID__HUBP0_FLIP_INTERRUPT;
3134 i <= DCN_1_0__SRCID__HUBP0_FLIP_INTERRUPT + adev->mode_info.num_crtc - 1;
3136 r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_DCE, i, &adev->pageflip_irq);
3138 DRM_ERROR("Failed to add page flip irq id!\n");
3142 int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
3143 int_params.irq_source =
3144 dc_interrupt_to_irq_source(dc, i, 0);
3146 c_irq_params = &adev->dm.pflip_params[int_params.irq_source - DC_IRQ_SOURCE_PFLIP_FIRST];
3148 c_irq_params->adev = adev;
3149 c_irq_params->irq_src = int_params.irq_source;
3151 amdgpu_dm_irq_register_interrupt(adev, &int_params,
3152 dm_pflip_high_irq, c_irq_params);
3156 if (dc->ctx->dmub_srv) {
3157 i = DCN_1_0__SRCID__DMCUB_OUTBOX_HIGH_PRIORITY_READY_INT;
3158 r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_DCE, i, &adev->dmub_trace_irq);
3161 DRM_ERROR("Failed to add dmub trace irq id!\n");
3165 int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
3166 int_params.irq_source =
3167 dc_interrupt_to_irq_source(dc, i, 0);
3169 c_irq_params = &adev->dm.dmub_trace_params[0];
3171 c_irq_params->adev = adev;
3172 c_irq_params->irq_src = int_params.irq_source;
3174 amdgpu_dm_irq_register_interrupt(adev, &int_params,
3175 dm_dmub_trace_high_irq, c_irq_params);
3179 r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_DCE, DCN_1_0__SRCID__DC_HPD1_INT,
3182 DRM_ERROR("Failed to add hpd irq id!\n");
3186 register_hpd_handlers(adev);
3193 * Acquires the lock for the atomic state object and returns
3194 * the new atomic state.
3196 * This should only be called during atomic check.
3198 static int dm_atomic_get_state(struct drm_atomic_state *state,
3199 struct dm_atomic_state **dm_state)
3201 struct drm_device *dev = state->dev;
3202 struct amdgpu_device *adev = drm_to_adev(dev);
3203 struct amdgpu_display_manager *dm = &adev->dm;
3204 struct drm_private_state *priv_state;
3209 priv_state = drm_atomic_get_private_obj_state(state, &dm->atomic_obj);
3210 if (IS_ERR(priv_state))
3211 return PTR_ERR(priv_state);
3213 *dm_state = to_dm_atomic_state(priv_state);
3218 static struct dm_atomic_state *
3219 dm_atomic_get_new_state(struct drm_atomic_state *state)
3221 struct drm_device *dev = state->dev;
3222 struct amdgpu_device *adev = drm_to_adev(dev);
3223 struct amdgpu_display_manager *dm = &adev->dm;
3224 struct drm_private_obj *obj;
3225 struct drm_private_state *new_obj_state;
3228 for_each_new_private_obj_in_state(state, obj, new_obj_state, i) {
3229 if (obj->funcs == dm->atomic_obj.funcs)
3230 return to_dm_atomic_state(new_obj_state);
3236 static struct drm_private_state *
3237 dm_atomic_duplicate_state(struct drm_private_obj *obj)
3239 struct dm_atomic_state *old_state, *new_state;
3241 new_state = kzalloc(sizeof(*new_state), GFP_KERNEL);
3245 __drm_atomic_helper_private_obj_duplicate_state(obj, &new_state->base);
3247 old_state = to_dm_atomic_state(obj->state);
3249 if (old_state && old_state->context)
3250 new_state->context = dc_copy_state(old_state->context);
3252 if (!new_state->context) {
3257 return &new_state->base;
3260 static void dm_atomic_destroy_state(struct drm_private_obj *obj,
3261 struct drm_private_state *state)
3263 struct dm_atomic_state *dm_state = to_dm_atomic_state(state);
3265 if (dm_state && dm_state->context)
3266 dc_release_state(dm_state->context);
3271 static struct drm_private_state_funcs dm_atomic_state_funcs = {
3272 .atomic_duplicate_state = dm_atomic_duplicate_state,
3273 .atomic_destroy_state = dm_atomic_destroy_state,
3276 static int amdgpu_dm_mode_config_init(struct amdgpu_device *adev)
3278 struct dm_atomic_state *state;
3281 adev->mode_info.mode_config_initialized = true;
3283 adev_to_drm(adev)->mode_config.funcs = (void *)&amdgpu_dm_mode_funcs;
3284 adev_to_drm(adev)->mode_config.helper_private = &amdgpu_dm_mode_config_helperfuncs;
3286 adev_to_drm(adev)->mode_config.max_width = 16384;
3287 adev_to_drm(adev)->mode_config.max_height = 16384;
3289 adev_to_drm(adev)->mode_config.preferred_depth = 24;
3290 adev_to_drm(adev)->mode_config.prefer_shadow = 1;
3291 /* indicates support for immediate flip */
3292 adev_to_drm(adev)->mode_config.async_page_flip = true;
3294 adev_to_drm(adev)->mode_config.fb_base = adev->gmc.aper_base;
3296 state = kzalloc(sizeof(*state), GFP_KERNEL);
3300 state->context = dc_create_state(adev->dm.dc);
3301 if (!state->context) {
3306 dc_resource_state_copy_construct_current(adev->dm.dc, state->context);
3308 drm_atomic_private_obj_init(adev_to_drm(adev),
3309 &adev->dm.atomic_obj,
3311 &dm_atomic_state_funcs);
3313 r = amdgpu_display_modeset_create_props(adev);
3315 dc_release_state(state->context);
3320 r = amdgpu_dm_audio_init(adev);
3322 dc_release_state(state->context);
3330 #define AMDGPU_DM_DEFAULT_MIN_BACKLIGHT 12
3331 #define AMDGPU_DM_DEFAULT_MAX_BACKLIGHT 255
3332 #define AUX_BL_DEFAULT_TRANSITION_TIME_MS 50
3334 #if defined(CONFIG_BACKLIGHT_CLASS_DEVICE) ||\
3335 defined(CONFIG_BACKLIGHT_CLASS_DEVICE_MODULE)
3337 static void amdgpu_dm_update_backlight_caps(struct amdgpu_display_manager *dm)
3339 #if defined(CONFIG_ACPI)
3340 struct amdgpu_dm_backlight_caps caps;
3342 memset(&caps, 0, sizeof(caps));
3344 if (dm->backlight_caps.caps_valid)
3347 amdgpu_acpi_get_backlight_caps(dm->adev, &caps);
3348 if (caps.caps_valid) {
3349 dm->backlight_caps.caps_valid = true;
3350 if (caps.aux_support)
3352 dm->backlight_caps.min_input_signal = caps.min_input_signal;
3353 dm->backlight_caps.max_input_signal = caps.max_input_signal;
3355 dm->backlight_caps.min_input_signal =
3356 AMDGPU_DM_DEFAULT_MIN_BACKLIGHT;
3357 dm->backlight_caps.max_input_signal =
3358 AMDGPU_DM_DEFAULT_MAX_BACKLIGHT;
3361 if (dm->backlight_caps.aux_support)
3364 dm->backlight_caps.min_input_signal = AMDGPU_DM_DEFAULT_MIN_BACKLIGHT;
3365 dm->backlight_caps.max_input_signal = AMDGPU_DM_DEFAULT_MAX_BACKLIGHT;
3369 static int get_brightness_range(const struct amdgpu_dm_backlight_caps *caps,
3370 unsigned *min, unsigned *max)
3375 if (caps->aux_support) {
3376 // Firmware limits are in nits, DC API wants millinits.
3377 *max = 1000 * caps->aux_max_input_signal;
3378 *min = 1000 * caps->aux_min_input_signal;
3380 // Firmware limits are 8-bit, PWM control is 16-bit.
3381 *max = 0x101 * caps->max_input_signal;
3382 *min = 0x101 * caps->min_input_signal;
3387 static u32 convert_brightness_from_user(const struct amdgpu_dm_backlight_caps *caps,
3388 uint32_t brightness)
3392 if (!get_brightness_range(caps, &min, &max))
3395 // Rescale 0..255 to min..max
3396 return min + DIV_ROUND_CLOSEST((max - min) * brightness,
3397 AMDGPU_MAX_BL_LEVEL);
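	/*
	 * Worked example (PWM path, illustrative numbers): with the default caps
	 * of min_input_signal = 12 and max_input_signal = 255, the range becomes
	 * min = 0x101 * 12 = 3084 and max = 0x101 * 255 = 65535, so a user
	 * brightness of 128 maps to
	 * 3084 + DIV_ROUND_CLOSEST((65535 - 3084) * 128, 255) = 34433;
	 * convert_brightness_to_user() below performs the inverse mapping.
	 */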
3400 static u32 convert_brightness_to_user(const struct amdgpu_dm_backlight_caps *caps,
3401 uint32_t brightness)
3405 if (!get_brightness_range(caps, &min, &max))
3408 if (brightness < min)
3410 // Rescale min..max to 0..255
3411 return DIV_ROUND_CLOSEST(AMDGPU_MAX_BL_LEVEL * (brightness - min),
3415 static int amdgpu_dm_backlight_update_status(struct backlight_device *bd)
3417 struct amdgpu_display_manager *dm = bl_get_data(bd);
3418 struct amdgpu_dm_backlight_caps caps;
3419 struct dc_link *link = NULL;
3423 amdgpu_dm_update_backlight_caps(dm);
3424 caps = dm->backlight_caps;
3426 link = (struct dc_link *)dm->backlight_link;
3428 brightness = convert_brightness_from_user(&caps, bd->props.brightness);
3429 // Change brightness based on AUX property
3430 if (caps.aux_support)
3431 rc = dc_link_set_backlight_level_nits(link, true, brightness,
3432 AUX_BL_DEFAULT_TRANSITION_TIME_MS);
3434 rc = dc_link_set_backlight_level(dm->backlight_link, brightness, 0);
3439 static int amdgpu_dm_backlight_get_brightness(struct backlight_device *bd)
3441 struct amdgpu_display_manager *dm = bl_get_data(bd);
3442 struct amdgpu_dm_backlight_caps caps;
3444 amdgpu_dm_update_backlight_caps(dm);
3445 caps = dm->backlight_caps;
3447 if (caps.aux_support) {
3448 struct dc_link *link = (struct dc_link *)dm->backlight_link;
3452 rc = dc_link_get_backlight_level_nits(link, &avg, &peak);
3454 return bd->props.brightness;
3455 return convert_brightness_to_user(&caps, avg);
3457 int ret = dc_link_get_backlight_level(dm->backlight_link);
3459 if (ret == DC_ERROR_UNEXPECTED)
3460 return bd->props.brightness;
3461 return convert_brightness_to_user(&caps, ret);
3465 static const struct backlight_ops amdgpu_dm_backlight_ops = {
3466 .options = BL_CORE_SUSPENDRESUME,
3467 .get_brightness = amdgpu_dm_backlight_get_brightness,
3468 .update_status = amdgpu_dm_backlight_update_status,
3472 amdgpu_dm_register_backlight_device(struct amdgpu_display_manager *dm)
3475 struct backlight_properties props = { 0 };
3477 amdgpu_dm_update_backlight_caps(dm);
3479 props.max_brightness = AMDGPU_MAX_BL_LEVEL;
3480 props.brightness = AMDGPU_MAX_BL_LEVEL;
3481 props.type = BACKLIGHT_RAW;
3483 snprintf(bl_name, sizeof(bl_name), "amdgpu_bl%d",
3484 adev_to_drm(dm->adev)->primary->index);
3486 dm->backlight_dev = backlight_device_register(bl_name,
3487 adev_to_drm(dm->adev)->dev,
3489 &amdgpu_dm_backlight_ops,
3492 if (IS_ERR(dm->backlight_dev))
3493 DRM_ERROR("DM: Backlight registration failed!\n");
3495 DRM_DEBUG_DRIVER("DM: Registered Backlight device: %s\n", bl_name);
3500 static int initialize_plane(struct amdgpu_display_manager *dm,
3501 struct amdgpu_mode_info *mode_info, int plane_id,
3502 enum drm_plane_type plane_type,
3503 const struct dc_plane_cap *plane_cap)
3505 struct drm_plane *plane;
3506 unsigned long possible_crtcs;
3509 plane = kzalloc(sizeof(struct drm_plane), GFP_KERNEL);
3511 DRM_ERROR("KMS: Failed to allocate plane\n");
3514 plane->type = plane_type;
3517 * HACK: IGT tests expect that the primary plane for a CRTC
3518 * can only have one possible CRTC. Only expose support for
3519 * any CRTC on planes that are not going to be used as a primary
3520 * plane for a CRTC - like overlay or underlay planes.
3522 possible_crtcs = 1 << plane_id;
3523 if (plane_id >= dm->dc->caps.max_streams)
3524 possible_crtcs = 0xff;
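	/*
	 * For example, with max_streams = 4 the primary planes get possible_crtcs
	 * masks of 0x1, 0x2, 0x4 and 0x8 (one CRTC each), while overlay planes
	 * (plane_id >= 4) get 0xff and may be assigned to any CRTC.
	 */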
3526 ret = amdgpu_dm_plane_init(dm, plane, possible_crtcs, plane_cap);
3529 DRM_ERROR("KMS: Failed to initialize plane\n");
3535 mode_info->planes[plane_id] = plane;
3541 static void register_backlight_device(struct amdgpu_display_manager *dm,
3542 struct dc_link *link)
3544 #if defined(CONFIG_BACKLIGHT_CLASS_DEVICE) ||\
3545 defined(CONFIG_BACKLIGHT_CLASS_DEVICE_MODULE)
3547 if ((link->connector_signal & (SIGNAL_TYPE_EDP | SIGNAL_TYPE_LVDS)) &&
3548 link->type != dc_connection_none) {
3550 * Even if registration fails, we should continue with
3551 * DM initialization, because not having backlight control
3552 * is better than a black screen.
3554 amdgpu_dm_register_backlight_device(dm);
3556 if (dm->backlight_dev)
3557 dm->backlight_link = link;
3564 * In this architecture, the association
3565 * connector -> encoder -> crtc
3566 * is not really required. The crtc and connector will hold the
3567 * display_index as an abstraction to use with the DAL component.
3569 * Returns 0 on success
3571 static int amdgpu_dm_initialize_drm_device(struct amdgpu_device *adev)
3573 struct amdgpu_display_manager *dm = &adev->dm;
3575 struct amdgpu_dm_connector *aconnector = NULL;
3576 struct amdgpu_encoder *aencoder = NULL;
3577 struct amdgpu_mode_info *mode_info = &adev->mode_info;
3579 int32_t primary_planes;
3580 enum dc_connection_type new_connection_type = dc_connection_none;
3581 const struct dc_plane_cap *plane;
3583 dm->display_indexes_num = dm->dc->caps.max_streams;
3584 /* Update the actually used number of CRTCs. */
3585 adev->mode_info.num_crtc = adev->dm.display_indexes_num;
3587 link_cnt = dm->dc->caps.max_links;
3588 if (amdgpu_dm_mode_config_init(dm->adev)) {
3589 DRM_ERROR("DM: Failed to initialize mode config\n");
3593 /* There is one primary plane per CRTC */
3594 primary_planes = dm->dc->caps.max_streams;
3595 ASSERT(primary_planes <= AMDGPU_MAX_PLANES);
3598 * Initialize primary planes, implicit planes for legacy IOCTLS.
3599 * Order is reversed to match iteration order in atomic check.
3601 for (i = (primary_planes - 1); i >= 0; i--) {
3602 plane = &dm->dc->caps.planes[i];
3604 if (initialize_plane(dm, mode_info, i,
3605 DRM_PLANE_TYPE_PRIMARY, plane)) {
3606 DRM_ERROR("KMS: Failed to initialize primary plane\n");
3612 * Initialize overlay planes, index starting after primary planes.
3613 * These planes have a higher DRM index than the primary planes since
3614 * they should be considered as having a higher z-order.
3615 * Order is reversed to match iteration order in atomic check.
3617 * Only support DCN for now, and only expose one so we don't encourage
3618 * userspace to use up all the pipes.
3620 for (i = 0; i < dm->dc->caps.max_planes; ++i) {
3621 struct dc_plane_cap *plane = &dm->dc->caps.planes[i];
3623 if (plane->type != DC_PLANE_TYPE_DCN_UNIVERSAL)
3626 if (!plane->blends_with_above || !plane->blends_with_below)
3629 if (!plane->pixel_format_support.argb8888)
3632 if (initialize_plane(dm, NULL, primary_planes + i,
3633 DRM_PLANE_TYPE_OVERLAY, plane)) {
3634 DRM_ERROR("KMS: Failed to initialize overlay plane\n");
3638 /* Only create one overlay plane. */
3642 for (i = 0; i < dm->dc->caps.max_streams; i++)
3643 if (amdgpu_dm_crtc_init(dm, mode_info->planes[i], i)) {
3644 DRM_ERROR("KMS: Failed to initialize crtc\n");
3648 /* Loop over all connectors on the board. */
3649 for (i = 0; i < link_cnt; i++) {
3650 struct dc_link *link = NULL;
3652 if (i > AMDGPU_DM_MAX_DISPLAY_INDEX) {
3654 "KMS: Cannot support more than %d display indexes\n",
3655 AMDGPU_DM_MAX_DISPLAY_INDEX);
3659 aconnector = kzalloc(sizeof(*aconnector), GFP_KERNEL);
3663 aencoder = kzalloc(sizeof(*aencoder), GFP_KERNEL);
3667 if (amdgpu_dm_encoder_init(dm->ddev, aencoder, i)) {
3668 DRM_ERROR("KMS: Failed to initialize encoder\n");
3672 if (amdgpu_dm_connector_init(dm, aconnector, i, aencoder)) {
3673 DRM_ERROR("KMS: Failed to initialize connector\n");
3677 link = dc_get_link_at_index(dm->dc, i);
3679 if (!dc_link_detect_sink(link, &new_connection_type))
3680 DRM_ERROR("KMS: Failed to detect connector\n");
3682 if (aconnector->base.force && new_connection_type == dc_connection_none) {
3683 emulated_link_detect(link);
3684 amdgpu_dm_update_connector_after_detect(aconnector);
3686 } else if (dc_link_detect(link, DETECT_REASON_BOOT)) {
3687 amdgpu_dm_update_connector_after_detect(aconnector);
3688 register_backlight_device(dm, link);
3689 if (amdgpu_dc_feature_mask & DC_PSR_MASK)
3690 amdgpu_dm_set_psr_caps(link);
3696 /* Software is initialized. Now we can register interrupt handlers. */
3697 switch (adev->asic_type) {
3698 #if defined(CONFIG_DRM_AMD_DC_SI)
3703 if (dce60_register_irq_handlers(dm->adev)) {
3704 DRM_ERROR("DM: Failed to initialize IRQ\n");
3718 case CHIP_POLARIS11:
3719 case CHIP_POLARIS10:
3720 case CHIP_POLARIS12:
3725 if (dce110_register_irq_handlers(dm->adev)) {
3726 DRM_ERROR("DM: Failed to initialize IRQ\n");
3730 #if defined(CONFIG_DRM_AMD_DC_DCN)
3736 case CHIP_SIENNA_CICHLID:
3737 case CHIP_NAVY_FLOUNDER:
3738 case CHIP_DIMGREY_CAVEFISH:
3740 if (dcn10_register_irq_handlers(dm->adev)) {
3741 DRM_ERROR("DM: Failed to initialize IRQ\n");
3747 DRM_ERROR("Unsupported ASIC type: 0x%X\n", adev->asic_type);
3759 static void amdgpu_dm_destroy_drm_device(struct amdgpu_display_manager *dm)
3761 drm_mode_config_cleanup(dm->ddev);
3762 drm_atomic_private_obj_fini(&dm->atomic_obj);
3766 /******************************************************************************
3767 * amdgpu_display_funcs functions
3768 *****************************************************************************/
3771 * dm_bandwidth_update - program display watermarks
3773 * @adev: amdgpu_device pointer
3775 * Calculate and program the display watermarks and line buffer allocation.
3777 static void dm_bandwidth_update(struct amdgpu_device *adev)
3779 /* TODO: implement later */
3782 static const struct amdgpu_display_funcs dm_display_funcs = {
3783 .bandwidth_update = dm_bandwidth_update, /* called unconditionally */
3784 .vblank_get_counter = dm_vblank_get_counter,/* called unconditionally */
3785 .backlight_set_level = NULL, /* never called for DC */
3786 .backlight_get_level = NULL, /* never called for DC */
3787 .hpd_sense = NULL,/* called unconditionally */
3788 .hpd_set_polarity = NULL, /* called unconditionally */
3789 .hpd_get_gpio_reg = NULL, /* VBIOS parsing. DAL does it. */
3790 .page_flip_get_scanoutpos =
3791 dm_crtc_get_scanoutpos,/* called unconditionally */
3792 .add_encoder = NULL, /* VBIOS parsing. DAL does it. */
3793 .add_connector = NULL, /* VBIOS parsing. DAL does it. */
3796 #if defined(CONFIG_DEBUG_KERNEL_DC)
3798 static ssize_t s3_debug_store(struct device *device,
3799 struct device_attribute *attr,
3805 struct drm_device *drm_dev = dev_get_drvdata(device);
3806 struct amdgpu_device *adev = drm_to_adev(drm_dev);
3808 ret = kstrtoint(buf, 0, &s3_state);
3813 drm_kms_helper_hotplug_event(adev_to_drm(adev));
3818 return ret == 0 ? count : 0;
3821 DEVICE_ATTR_WO(s3_debug);
3825 static int dm_early_init(void *handle)
3827 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
3829 switch (adev->asic_type) {
3830 #if defined(CONFIG_DRM_AMD_DC_SI)
3834 adev->mode_info.num_crtc = 6;
3835 adev->mode_info.num_hpd = 6;
3836 adev->mode_info.num_dig = 6;
3839 adev->mode_info.num_crtc = 2;
3840 adev->mode_info.num_hpd = 2;
3841 adev->mode_info.num_dig = 2;
3846 adev->mode_info.num_crtc = 6;
3847 adev->mode_info.num_hpd = 6;
3848 adev->mode_info.num_dig = 6;
3851 adev->mode_info.num_crtc = 4;
3852 adev->mode_info.num_hpd = 6;
3853 adev->mode_info.num_dig = 7;
3857 adev->mode_info.num_crtc = 2;
3858 adev->mode_info.num_hpd = 6;
3859 adev->mode_info.num_dig = 6;
3863 adev->mode_info.num_crtc = 6;
3864 adev->mode_info.num_hpd = 6;
3865 adev->mode_info.num_dig = 7;
3868 adev->mode_info.num_crtc = 3;
3869 adev->mode_info.num_hpd = 6;
3870 adev->mode_info.num_dig = 9;
3873 adev->mode_info.num_crtc = 2;
3874 adev->mode_info.num_hpd = 6;
3875 adev->mode_info.num_dig = 9;
3877 case CHIP_POLARIS11:
3878 case CHIP_POLARIS12:
3879 adev->mode_info.num_crtc = 5;
3880 adev->mode_info.num_hpd = 5;
3881 adev->mode_info.num_dig = 5;
3883 case CHIP_POLARIS10:
3885 adev->mode_info.num_crtc = 6;
3886 adev->mode_info.num_hpd = 6;
3887 adev->mode_info.num_dig = 6;
3892 adev->mode_info.num_crtc = 6;
3893 adev->mode_info.num_hpd = 6;
3894 adev->mode_info.num_dig = 6;
3896 #if defined(CONFIG_DRM_AMD_DC_DCN)
3900 adev->mode_info.num_crtc = 4;
3901 adev->mode_info.num_hpd = 4;
3902 adev->mode_info.num_dig = 4;
3906 case CHIP_SIENNA_CICHLID:
3907 case CHIP_NAVY_FLOUNDER:
3908 adev->mode_info.num_crtc = 6;
3909 adev->mode_info.num_hpd = 6;
3910 adev->mode_info.num_dig = 6;
3913 case CHIP_DIMGREY_CAVEFISH:
3914 adev->mode_info.num_crtc = 5;
3915 adev->mode_info.num_hpd = 5;
3916 adev->mode_info.num_dig = 5;
3920 DRM_ERROR("Unsupported ASIC type: 0x%X\n", adev->asic_type);
3924 amdgpu_dm_set_irq_funcs(adev);
3926 if (adev->mode_info.funcs == NULL)
3927 adev->mode_info.funcs = &dm_display_funcs;
3930 * Note: Do NOT change adev->audio_endpt_rreg and
3931 * adev->audio_endpt_wreg because they are initialised in
3932 * amdgpu_device_init()
3934 #if defined(CONFIG_DEBUG_KERNEL_DC)
3936 adev_to_drm(adev)->dev,
3937 &dev_attr_s3_debug);
3943 static bool modeset_required(struct drm_crtc_state *crtc_state,
3944 struct dc_stream_state *new_stream,
3945 struct dc_stream_state *old_stream)
3947 return crtc_state->active && drm_atomic_crtc_needs_modeset(crtc_state);
3950 static bool modereset_required(struct drm_crtc_state *crtc_state)
3952 return !crtc_state->active && drm_atomic_crtc_needs_modeset(crtc_state);
3955 static void amdgpu_dm_encoder_destroy(struct drm_encoder *encoder)
3957 drm_encoder_cleanup(encoder);
3961 static const struct drm_encoder_funcs amdgpu_dm_encoder_funcs = {
3962 .destroy = amdgpu_dm_encoder_destroy,
3966 static void get_min_max_dc_plane_scaling(struct drm_device *dev,
3967 struct drm_framebuffer *fb,
3968 int *min_downscale, int *max_upscale)
3970 struct amdgpu_device *adev = drm_to_adev(dev);
3971 struct dc *dc = adev->dm.dc;
3972 /* Caps for all supported planes are the same on DCE and DCN 1 - 3 */
3973 struct dc_plane_cap *plane_cap = &dc->caps.planes[0];
3975 switch (fb->format->format) {
3976 case DRM_FORMAT_P010:
3977 case DRM_FORMAT_NV12:
3978 case DRM_FORMAT_NV21:
3979 *max_upscale = plane_cap->max_upscale_factor.nv12;
3980 *min_downscale = plane_cap->max_downscale_factor.nv12;
3983 case DRM_FORMAT_XRGB16161616F:
3984 case DRM_FORMAT_ARGB16161616F:
3985 case DRM_FORMAT_XBGR16161616F:
3986 case DRM_FORMAT_ABGR16161616F:
3987 *max_upscale = plane_cap->max_upscale_factor.fp16;
3988 *min_downscale = plane_cap->max_downscale_factor.fp16;
3992 *max_upscale = plane_cap->max_upscale_factor.argb8888;
3993 *min_downscale = plane_cap->max_downscale_factor.argb8888;
3998 * A factor of 1 in the plane_cap means scaling is not allowed, i.e. use a
3999 * scaling factor of 1.0 == 1000 units.
4001 if (*max_upscale == 1)
4002 *max_upscale = 1000;
4004 if (*min_downscale == 1)
4005 *min_downscale = 1000;
4009 static int fill_dc_scaling_info(const struct drm_plane_state *state,
4010 struct dc_scaling_info *scaling_info)
4012 int scale_w, scale_h, min_downscale, max_upscale;
4014 memset(scaling_info, 0, sizeof(*scaling_info));
4016 /* Source is fixed 16.16 but we ignore mantissa for now... */
4017 scaling_info->src_rect.x = state->src_x >> 16;
4018 scaling_info->src_rect.y = state->src_y >> 16;
4021 * For reasons we don't (yet) fully understand, a non-zero
4022 * src_y coordinate into an NV12 buffer can cause a
4023 * system hang. To avoid hangs (and maybe be overly cautious)
4024 * let's reject both non-zero src_x and src_y.
4026 * We currently know of only one use-case to reproduce a
4027 * scenario with non-zero src_x and src_y for NV12, which
4028 * is to gesture the YouTube Android app into full screen
4032 state->fb->format->format == DRM_FORMAT_NV12 &&
4033 (scaling_info->src_rect.x != 0 ||
4034 scaling_info->src_rect.y != 0))
4037 scaling_info->src_rect.width = state->src_w >> 16;
4038 if (scaling_info->src_rect.width == 0)
4041 scaling_info->src_rect.height = state->src_h >> 16;
4042 if (scaling_info->src_rect.height == 0)
4045 scaling_info->dst_rect.x = state->crtc_x;
4046 scaling_info->dst_rect.y = state->crtc_y;
4048 if (state->crtc_w == 0)
4051 scaling_info->dst_rect.width = state->crtc_w;
4053 if (state->crtc_h == 0)
4056 scaling_info->dst_rect.height = state->crtc_h;
4058 /* DRM doesn't specify clipping on destination output. */
4059 scaling_info->clip_rect = scaling_info->dst_rect;
4061 /* Validate scaling per-format with DC plane caps */
4062 if (state->plane && state->plane->dev && state->fb) {
4063 get_min_max_dc_plane_scaling(state->plane->dev, state->fb,
4064 &min_downscale, &max_upscale);
4066 min_downscale = 250;
4067 max_upscale = 16000;
4070 scale_w = scaling_info->dst_rect.width * 1000 /
4071 scaling_info->src_rect.width;
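	/*
	 * Scale factors are expressed in units of 1/1000: e.g. a 1920-wide source
	 * scanned out at 960 pixels gives scale_w = 960 * 1000 / 1920 = 500, which
	 * passes the default limits of 250 (1/4 downscale) and 16000 (16x upscale).
	 */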
4073 if (scale_w < min_downscale || scale_w > max_upscale)
4076 scale_h = scaling_info->dst_rect.height * 1000 /
4077 scaling_info->src_rect.height;
4079 if (scale_h < min_downscale || scale_h > max_upscale)
4083 * The "scaling_quality" can be ignored for now, quality = 0 has DC
4084 * assume reasonable defaults based on the format.
4091 fill_gfx8_tiling_info_from_flags(union dc_tiling_info *tiling_info,
4092 uint64_t tiling_flags)
4094 /* Fill GFX8 params */
4095 if (AMDGPU_TILING_GET(tiling_flags, ARRAY_MODE) == DC_ARRAY_2D_TILED_THIN1) {
4096 unsigned int bankw, bankh, mtaspect, tile_split, num_banks;
4098 bankw = AMDGPU_TILING_GET(tiling_flags, BANK_WIDTH);
4099 bankh = AMDGPU_TILING_GET(tiling_flags, BANK_HEIGHT);
4100 mtaspect = AMDGPU_TILING_GET(tiling_flags, MACRO_TILE_ASPECT);
4101 tile_split = AMDGPU_TILING_GET(tiling_flags, TILE_SPLIT);
4102 num_banks = AMDGPU_TILING_GET(tiling_flags, NUM_BANKS);
4104 /* XXX fix me for VI */
4105 tiling_info->gfx8.num_banks = num_banks;
4106 tiling_info->gfx8.array_mode =
4107 DC_ARRAY_2D_TILED_THIN1;
4108 tiling_info->gfx8.tile_split = tile_split;
4109 tiling_info->gfx8.bank_width = bankw;
4110 tiling_info->gfx8.bank_height = bankh;
4111 tiling_info->gfx8.tile_aspect = mtaspect;
4112 tiling_info->gfx8.tile_mode =
4113 DC_ADDR_SURF_MICRO_TILING_DISPLAY;
4114 } else if (AMDGPU_TILING_GET(tiling_flags, ARRAY_MODE)
4115 == DC_ARRAY_1D_TILED_THIN1) {
4116 tiling_info->gfx8.array_mode = DC_ARRAY_1D_TILED_THIN1;
4119 tiling_info->gfx8.pipe_config =
4120 AMDGPU_TILING_GET(tiling_flags, PIPE_CONFIG);
4124 fill_gfx9_tiling_info_from_device(const struct amdgpu_device *adev,
4125 union dc_tiling_info *tiling_info)
4127 tiling_info->gfx9.num_pipes =
4128 adev->gfx.config.gb_addr_config_fields.num_pipes;
4129 tiling_info->gfx9.num_banks =
4130 adev->gfx.config.gb_addr_config_fields.num_banks;
4131 tiling_info->gfx9.pipe_interleave =
4132 adev->gfx.config.gb_addr_config_fields.pipe_interleave_size;
4133 tiling_info->gfx9.num_shader_engines =
4134 adev->gfx.config.gb_addr_config_fields.num_se;
4135 tiling_info->gfx9.max_compressed_frags =
4136 adev->gfx.config.gb_addr_config_fields.max_compress_frags;
4137 tiling_info->gfx9.num_rb_per_se =
4138 adev->gfx.config.gb_addr_config_fields.num_rb_per_se;
4139 tiling_info->gfx9.shaderEnable = 1;
4140 if (adev->asic_type == CHIP_SIENNA_CICHLID ||
4141 adev->asic_type == CHIP_NAVY_FLOUNDER ||
4142 adev->asic_type == CHIP_DIMGREY_CAVEFISH ||
4143 adev->asic_type == CHIP_VANGOGH)
4144 tiling_info->gfx9.num_pkrs = adev->gfx.config.gb_addr_config_fields.num_pkrs;
4148 validate_dcc(struct amdgpu_device *adev,
4149 const enum surface_pixel_format format,
4150 const enum dc_rotation_angle rotation,
4151 const union dc_tiling_info *tiling_info,
4152 const struct dc_plane_dcc_param *dcc,
4153 const struct dc_plane_address *address,
4154 const struct plane_size *plane_size)
4156 struct dc *dc = adev->dm.dc;
4157 struct dc_dcc_surface_param input;
4158 struct dc_surface_dcc_cap output;
4160 memset(&input, 0, sizeof(input));
4161 memset(&output, 0, sizeof(output));
4166 if (format >= SURFACE_PIXEL_FORMAT_VIDEO_BEGIN ||
4167 !dc->cap_funcs.get_dcc_compression_cap)
4170 input.format = format;
4171 input.surface_size.width = plane_size->surface_size.width;
4172 input.surface_size.height = plane_size->surface_size.height;
4173 input.swizzle_mode = tiling_info->gfx9.swizzle;
4175 if (rotation == ROTATION_ANGLE_0 || rotation == ROTATION_ANGLE_180)
4176 input.scan = SCAN_DIRECTION_HORIZONTAL;
4177 else if (rotation == ROTATION_ANGLE_90 || rotation == ROTATION_ANGLE_270)
4178 input.scan = SCAN_DIRECTION_VERTICAL;
4180 if (!dc->cap_funcs.get_dcc_compression_cap(dc, &input, &output))
4183 if (!output.capable)
4186 if (dcc->independent_64b_blks == 0 &&
4187 output.grph.rgb.independent_64b_blks != 0)
4194 modifier_has_dcc(uint64_t modifier)
4196 return IS_AMD_FMT_MOD(modifier) && AMD_FMT_MOD_GET(DCC, modifier);
4200 modifier_gfx9_swizzle_mode(uint64_t modifier)
4202 if (modifier == DRM_FORMAT_MOD_LINEAR)
4205 return AMD_FMT_MOD_GET(TILE, modifier);
4208 static const struct drm_format_info *
4209 amd_get_format_info(const struct drm_mode_fb_cmd2 *cmd)
4211 return amdgpu_lookup_format_info(cmd->pixel_format, cmd->modifier[0]);
4215 fill_gfx9_tiling_info_from_modifier(const struct amdgpu_device *adev,
4216 union dc_tiling_info *tiling_info,
4219 unsigned int mod_bank_xor_bits = AMD_FMT_MOD_GET(BANK_XOR_BITS, modifier);
4220 unsigned int mod_pipe_xor_bits = AMD_FMT_MOD_GET(PIPE_XOR_BITS, modifier);
4221 unsigned int pkrs_log2 = AMD_FMT_MOD_GET(PACKERS, modifier);
4222 unsigned int pipes_log2 = min(4u, mod_pipe_xor_bits);
4224 fill_gfx9_tiling_info_from_device(adev, tiling_info);
4226 if (!IS_AMD_FMT_MOD(modifier))
4229 tiling_info->gfx9.num_pipes = 1u << pipes_log2;
4230 tiling_info->gfx9.num_shader_engines = 1u << (mod_pipe_xor_bits - pipes_log2);
4232 if (adev->family >= AMDGPU_FAMILY_NV) {
4233 tiling_info->gfx9.num_pkrs = 1u << pkrs_log2;
4235 tiling_info->gfx9.num_banks = 1u << mod_bank_xor_bits;
4237 /* for DCC we know it isn't rb aligned, so rb_per_se doesn't matter. */
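/*
 * Illustrative sketch (hypothetical helper, not called by the driver): how
 * the XOR-bit fields packed in an AMD format modifier map to the counts
 * filled in above.  As in the code above, pipes_log2 is clamped to 4 and any
 * remaining pipe XOR bits are folded into num_shader_engines.
 */
static inline void example_decode_mod_xor_bits(uint64_t modifier,
						unsigned int *num_pipes,
						unsigned int *num_banks,
						unsigned int *num_pkrs)
{
	unsigned int pipe_xor_bits = AMD_FMT_MOD_GET(PIPE_XOR_BITS, modifier);
	unsigned int bank_xor_bits = AMD_FMT_MOD_GET(BANK_XOR_BITS, modifier);
	unsigned int pkrs_log2 = AMD_FMT_MOD_GET(PACKERS, modifier);
	unsigned int pipes_log2 = min(4u, pipe_xor_bits);

	*num_pipes = 1u << pipes_log2;	  /* e.g. 3 XOR bits -> 8 pipes */
	*num_banks = 1u << bank_xor_bits; /* only used on gfx9 */
	*num_pkrs = 1u << pkrs_log2;	  /* only used on gfx10.3+ */
}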
4241 enum dm_micro_swizzle {
4242 MICRO_SWIZZLE_Z = 0,
4243 MICRO_SWIZZLE_S = 1,
4244 MICRO_SWIZZLE_D = 2,
4248 static bool dm_plane_format_mod_supported(struct drm_plane *plane,
4252 struct amdgpu_device *adev = drm_to_adev(plane->dev);
4253 const struct drm_format_info *info = drm_format_info(format);
4256 enum dm_micro_swizzle microtile = modifier_gfx9_swizzle_mode(modifier) & 3;
4262 * We always have to allow these modifiers:
4263 * 1. Core DRM checks for LINEAR support if userspace does not provide modifiers.
4264 * 2. Not passing any modifiers is the same as explicitly passing INVALID.
4266 if (modifier == DRM_FORMAT_MOD_LINEAR ||
4267 modifier == DRM_FORMAT_MOD_INVALID) {
4271 /* Check that the modifier is on the list of the plane's supported modifiers. */
4272 for (i = 0; i < plane->modifier_count; i++) {
4273 if (modifier == plane->modifiers[i])
4276 if (i == plane->modifier_count)
4280 * For D swizzle the canonical modifier depends on the bpp, so check
4283 if (AMD_FMT_MOD_GET(TILE_VERSION, modifier) == AMD_FMT_MOD_TILE_VER_GFX9 &&
4284 adev->family >= AMDGPU_FAMILY_NV) {
4285 if (microtile == MICRO_SWIZZLE_D && info->cpp[0] == 4)
4289 if (adev->family >= AMDGPU_FAMILY_RV && microtile == MICRO_SWIZZLE_D &&
4293 if (modifier_has_dcc(modifier)) {
4294 /* Per radeonsi comments 16/64 bpp are more complicated. */
4295 if (info->cpp[0] != 4)
4297 /* We support multi-planar formats, but not when combined with
4298 * additional DCC metadata planes. */
4299 if (info->num_planes > 1)
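/*
 * Illustrative sketch (hypothetical helper, not part of the plane hooks):
 * the DCC filter above only admits 32bpp, single-plane formats.  For example
 * DRM_FORMAT_XRGB8888 (cpp[0] == 4, one plane) passes, while
 * DRM_FORMAT_RGB565 (cpp[0] == 2) and DRM_FORMAT_NV12 (two planes) are
 * rejected whenever the modifier carries DCC.
 */
static inline bool example_dcc_format_ok(uint32_t format)
{
	const struct drm_format_info *info = drm_format_info(format);

	return info && info->cpp[0] == 4 && info->num_planes == 1;
}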
4307 add_modifier(uint64_t **mods, uint64_t *size, uint64_t *cap, uint64_t mod)
4312 if (*cap - *size < 1) {
4313 uint64_t new_cap = *cap * 2;
4314 uint64_t *new_mods = kmalloc(new_cap * sizeof(uint64_t), GFP_KERNEL);
4322 memcpy(new_mods, *mods, sizeof(uint64_t) * *size);
4328 (*mods)[*size] = mod;
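/*
 * Usage sketch for add_modifier() (hypothetical helper, mirroring what
 * get_plane_modifiers() does below): the helper doubles the capacity when
 * the array is full, so callers only allocate once, append, and terminate
 * the list.
 */
static int example_build_linear_only_list(uint64_t **mods)
{
	uint64_t size = 0, capacity = 2;

	*mods = kmalloc(capacity * sizeof(uint64_t), GFP_KERNEL);
	if (!*mods)
		return -ENOMEM;

	add_modifier(mods, &size, &capacity, DRM_FORMAT_MOD_LINEAR);
	/* DRM_FORMAT_MOD_INVALID terminates the list. */
	add_modifier(mods, &size, &capacity, DRM_FORMAT_MOD_INVALID);

	return 0;
}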
4333 add_gfx9_modifiers(const struct amdgpu_device *adev,
4334 uint64_t **mods, uint64_t *size, uint64_t *capacity)
4336 int pipes = ilog2(adev->gfx.config.gb_addr_config_fields.num_pipes);
4337 int pipe_xor_bits = min(8, pipes +
4338 ilog2(adev->gfx.config.gb_addr_config_fields.num_se));
4339 int bank_xor_bits = min(8 - pipe_xor_bits,
4340 ilog2(adev->gfx.config.gb_addr_config_fields.num_banks));
4341 int rb = ilog2(adev->gfx.config.gb_addr_config_fields.num_se) +
4342 ilog2(adev->gfx.config.gb_addr_config_fields.num_rb_per_se);
4345 if (adev->family == AMDGPU_FAMILY_RV) {
4346 /* Raven2 and later */
4347 bool has_constant_encode = adev->asic_type > CHIP_RAVEN || adev->external_rev_id >= 0x81;
4350 * No _D DCC swizzles yet because we only allow 32bpp, which
4351 * doesn't support _D on DCN
4354 if (has_constant_encode) {
4355 add_modifier(mods, size, capacity, AMD_FMT_MOD |
4356 AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S_X) |
4357 AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9) |
4358 AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
4359 AMD_FMT_MOD_SET(BANK_XOR_BITS, bank_xor_bits) |
4360 AMD_FMT_MOD_SET(DCC, 1) |
4361 AMD_FMT_MOD_SET(DCC_INDEPENDENT_64B, 1) |
4362 AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_64B) |
4363 AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 1));
4366 add_modifier(mods, size, capacity, AMD_FMT_MOD |
4367 AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S_X) |
4368 AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9) |
4369 AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
4370 AMD_FMT_MOD_SET(BANK_XOR_BITS, bank_xor_bits) |
4371 AMD_FMT_MOD_SET(DCC, 1) |
4372 AMD_FMT_MOD_SET(DCC_INDEPENDENT_64B, 1) |
4373 AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_64B) |
4374 AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 0));
4376 if (has_constant_encode) {
4377 add_modifier(mods, size, capacity, AMD_FMT_MOD |
4378 AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S_X) |
4379 AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9) |
4380 AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
4381 AMD_FMT_MOD_SET(BANK_XOR_BITS, bank_xor_bits) |
4382 AMD_FMT_MOD_SET(DCC, 1) |
4383 AMD_FMT_MOD_SET(DCC_RETILE, 1) |
4384 AMD_FMT_MOD_SET(DCC_INDEPENDENT_64B, 1) |
4385 AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_64B) |
4387 AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 1) |
4388 AMD_FMT_MOD_SET(RB, rb) |
4389 AMD_FMT_MOD_SET(PIPE, pipes));
4392 add_modifier(mods, size, capacity, AMD_FMT_MOD |
4393 AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S_X) |
4394 AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9) |
4395 AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
4396 AMD_FMT_MOD_SET(BANK_XOR_BITS, bank_xor_bits) |
4397 AMD_FMT_MOD_SET(DCC, 1) |
4398 AMD_FMT_MOD_SET(DCC_RETILE, 1) |
4399 AMD_FMT_MOD_SET(DCC_INDEPENDENT_64B, 1) |
4400 AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_64B) |
4401 AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 0) |
4402 AMD_FMT_MOD_SET(RB, rb) |
4403 AMD_FMT_MOD_SET(PIPE, pipes));
4407 * Only supported for 64bpp on Raven, will be filtered on format in
4408 * dm_plane_format_mod_supported.
4410 add_modifier(mods, size, capacity, AMD_FMT_MOD |
4411 AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_D_X) |
4412 AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9) |
4413 AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
4414 AMD_FMT_MOD_SET(BANK_XOR_BITS, bank_xor_bits));
4416 if (adev->family == AMDGPU_FAMILY_RV) {
4417 add_modifier(mods, size, capacity, AMD_FMT_MOD |
4418 AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S_X) |
4419 AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9) |
4420 AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
4421 AMD_FMT_MOD_SET(BANK_XOR_BITS, bank_xor_bits));
4425 * Only supported for 64bpp on Raven, will be filtered on format in
4426 * dm_plane_format_mod_supported.
4428 add_modifier(mods, size, capacity, AMD_FMT_MOD |
4429 AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_D) |
4430 AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9));
4432 if (adev->family == AMDGPU_FAMILY_RV) {
4433 add_modifier(mods, size, capacity, AMD_FMT_MOD |
4434 AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S) |
4435 AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9));
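/*
 * Worked example (hypothetical gfx9 configuration, values for illustration
 * only): with num_pipes = 4, num_se = 1, num_banks = 8 and num_rb_per_se = 2
 * the fields computed above become
 *
 *   pipes         = ilog2(4)             = 2
 *   pipe_xor_bits = min(8, 2 + ilog2(1)) = 2
 *   bank_xor_bits = min(8 - 2, ilog2(8)) = 3
 *   rb            = ilog2(1) + ilog2(2)  = 1
 *
 * so every S_X/D_X modifier added above carries PIPE_XOR_BITS=2 and
 * BANK_XOR_BITS=3, and the DCC_RETILE variants additionally carry RB=1 and
 * PIPE=2.
 */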
4440 add_gfx10_1_modifiers(const struct amdgpu_device *adev,
4441 uint64_t **mods, uint64_t *size, uint64_t *capacity)
4443 int pipe_xor_bits = ilog2(adev->gfx.config.gb_addr_config_fields.num_pipes);
4445 add_modifier(mods, size, capacity, AMD_FMT_MOD |
4446 AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_R_X) |
4447 AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10) |
4448 AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
4449 AMD_FMT_MOD_SET(DCC, 1) |
4450 AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 1) |
4451 AMD_FMT_MOD_SET(DCC_INDEPENDENT_64B, 1) |
4452 AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_64B));
4454 add_modifier(mods, size, capacity, AMD_FMT_MOD |
4455 AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_R_X) |
4456 AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10) |
4457 AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
4458 AMD_FMT_MOD_SET(DCC, 1) |
4459 AMD_FMT_MOD_SET(DCC_RETILE, 1) |
4460 AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 1) |
4461 AMD_FMT_MOD_SET(DCC_INDEPENDENT_64B, 1) |
4462 AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_64B));
4464 add_modifier(mods, size, capacity, AMD_FMT_MOD |
4465 AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_R_X) |
4466 AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10) |
4467 AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits));
4469 add_modifier(mods, size, capacity, AMD_FMT_MOD |
4470 AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S_X) |
4471 AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10) |
4472 AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits));
4475 /* Only supported for 64bpp, will be filtered in dm_plane_format_mod_supported */
4476 add_modifier(mods, size, capacity, AMD_FMT_MOD |
4477 AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_D) |
4478 AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9));
4480 add_modifier(mods, size, capacity, AMD_FMT_MOD |
4481 AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S) |
4482 AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9));
4486 add_gfx10_3_modifiers(const struct amdgpu_device *adev,
4487 uint64_t **mods, uint64_t *size, uint64_t *capacity)
4489 int pipe_xor_bits = ilog2(adev->gfx.config.gb_addr_config_fields.num_pipes);
4490 int pkrs = ilog2(adev->gfx.config.gb_addr_config_fields.num_pkrs);
4492 add_modifier(mods, size, capacity, AMD_FMT_MOD |
4493 AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_R_X) |
4494 AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10_RBPLUS) |
4495 AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
4496 AMD_FMT_MOD_SET(PACKERS, pkrs) |
4497 AMD_FMT_MOD_SET(DCC, 1) |
4498 AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 1) |
4499 AMD_FMT_MOD_SET(DCC_INDEPENDENT_64B, 1) |
4500 AMD_FMT_MOD_SET(DCC_INDEPENDENT_128B, 1) |
4501 AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_64B));
4503 add_modifier(mods, size, capacity, AMD_FMT_MOD |
4504 AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_R_X) |
4505 AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10_RBPLUS) |
4506 AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
4507 AMD_FMT_MOD_SET(PACKERS, pkrs) |
4508 AMD_FMT_MOD_SET(DCC, 1) |
4509 AMD_FMT_MOD_SET(DCC_RETILE, 1) |
4510 AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 1) |
4511 AMD_FMT_MOD_SET(DCC_INDEPENDENT_64B, 1) |
4512 AMD_FMT_MOD_SET(DCC_INDEPENDENT_128B, 1) |
4513 AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_64B));
4515 add_modifier(mods, size, capacity, AMD_FMT_MOD |
4516 AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_R_X) |
4517 AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10_RBPLUS) |
4518 AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
4519 AMD_FMT_MOD_SET(PACKERS, pkrs));
4521 add_modifier(mods, size, capacity, AMD_FMT_MOD |
4522 AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S_X) |
4523 AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10_RBPLUS) |
4524 AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
4525 AMD_FMT_MOD_SET(PACKERS, pkrs));
4527 /* Only supported for 64bpp, will be filtered in dm_plane_format_mod_supported */
4528 add_modifier(mods, size, capacity, AMD_FMT_MOD |
4529 AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_D) |
4530 AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9));
4532 add_modifier(mods, size, capacity, AMD_FMT_MOD |
4533 AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S) |
4534 AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9));
4538 get_plane_modifiers(const struct amdgpu_device *adev, unsigned int plane_type, uint64_t **mods)
4540 uint64_t size = 0, capacity = 128;
4543 /* We have not hooked up any pre-GFX9 modifiers. */
4544 if (adev->family < AMDGPU_FAMILY_AI)
4547 *mods = kmalloc(capacity * sizeof(uint64_t), GFP_KERNEL);
4549 if (plane_type == DRM_PLANE_TYPE_CURSOR) {
4550 add_modifier(mods, &size, &capacity, DRM_FORMAT_MOD_LINEAR);
4551 add_modifier(mods, &size, &capacity, DRM_FORMAT_MOD_INVALID);
4552 return *mods ? 0 : -ENOMEM;
4555 switch (adev->family) {
4556 case AMDGPU_FAMILY_AI:
4557 case AMDGPU_FAMILY_RV:
4558 add_gfx9_modifiers(adev, mods, &size, &capacity);
4560 case AMDGPU_FAMILY_NV:
4561 case AMDGPU_FAMILY_VGH:
4562 if (adev->asic_type >= CHIP_SIENNA_CICHLID)
4563 add_gfx10_3_modifiers(adev, mods, &size, &capacity);
4565 add_gfx10_1_modifiers(adev, mods, &size, &capacity);
4569 add_modifier(mods, &size, &capacity, DRM_FORMAT_MOD_LINEAR);
4571 /* INVALID marks the end of the list. */
4572 add_modifier(mods, &size, &capacity, DRM_FORMAT_MOD_INVALID);
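/*
 * Consumer-side sketch (hypothetical helper): the array built above carries
 * no explicit length; DRM_FORMAT_MOD_INVALID is the terminator, which is the
 * convention drm_universal_plane_init() expects for its format_modifiers
 * argument.
 */
static unsigned int example_count_modifiers(const uint64_t *mods)
{
	unsigned int n = 0;

	while (mods && mods[n] != DRM_FORMAT_MOD_INVALID)
		n++;

	return n;
}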
4581 fill_gfx9_plane_attributes_from_modifiers(struct amdgpu_device *adev,
4582 const struct amdgpu_framebuffer *afb,
4583 const enum surface_pixel_format format,
4584 const enum dc_rotation_angle rotation,
4585 const struct plane_size *plane_size,
4586 union dc_tiling_info *tiling_info,
4587 struct dc_plane_dcc_param *dcc,
4588 struct dc_plane_address *address,
4589 const bool force_disable_dcc)
4591 const uint64_t modifier = afb->base.modifier;
4594 fill_gfx9_tiling_info_from_modifier(adev, tiling_info, modifier);
4595 tiling_info->gfx9.swizzle = modifier_gfx9_swizzle_mode(modifier);
4597 if (modifier_has_dcc(modifier) && !force_disable_dcc) {
4598 uint64_t dcc_address = afb->address + afb->base.offsets[1];
4601 dcc->meta_pitch = afb->base.pitches[1];
4602 dcc->independent_64b_blks = AMD_FMT_MOD_GET(DCC_INDEPENDENT_64B, modifier);
4604 address->grph.meta_addr.low_part = lower_32_bits(dcc_address);
4605 address->grph.meta_addr.high_part = upper_32_bits(dcc_address);
4608 ret = validate_dcc(adev, format, rotation, tiling_info, dcc, address, plane_size);
4616 fill_plane_buffer_attributes(struct amdgpu_device *adev,
4617 const struct amdgpu_framebuffer *afb,
4618 const enum surface_pixel_format format,
4619 const enum dc_rotation_angle rotation,
4620 const uint64_t tiling_flags,
4621 union dc_tiling_info *tiling_info,
4622 struct plane_size *plane_size,
4623 struct dc_plane_dcc_param *dcc,
4624 struct dc_plane_address *address,
4626 bool force_disable_dcc)
4628 const struct drm_framebuffer *fb = &afb->base;
4631 memset(tiling_info, 0, sizeof(*tiling_info));
4632 memset(plane_size, 0, sizeof(*plane_size));
4633 memset(dcc, 0, sizeof(*dcc));
4634 memset(address, 0, sizeof(*address));
4636 address->tmz_surface = tmz_surface;
4638 if (format < SURFACE_PIXEL_FORMAT_VIDEO_BEGIN) {
4639 uint64_t addr = afb->address + fb->offsets[0];
4641 plane_size->surface_size.x = 0;
4642 plane_size->surface_size.y = 0;
4643 plane_size->surface_size.width = fb->width;
4644 plane_size->surface_size.height = fb->height;
4645 plane_size->surface_pitch =
4646 fb->pitches[0] / fb->format->cpp[0];
4648 address->type = PLN_ADDR_TYPE_GRAPHICS;
4649 address->grph.addr.low_part = lower_32_bits(addr);
4650 address->grph.addr.high_part = upper_32_bits(addr);
4651 } else if (format < SURFACE_PIXEL_FORMAT_INVALID) {
4652 uint64_t luma_addr = afb->address + fb->offsets[0];
4653 uint64_t chroma_addr = afb->address + fb->offsets[1];
4655 plane_size->surface_size.x = 0;
4656 plane_size->surface_size.y = 0;
4657 plane_size->surface_size.width = fb->width;
4658 plane_size->surface_size.height = fb->height;
4659 plane_size->surface_pitch =
4660 fb->pitches[0] / fb->format->cpp[0];
4662 plane_size->chroma_size.x = 0;
4663 plane_size->chroma_size.y = 0;
4664 /* TODO: set these based on surface format */
4665 plane_size->chroma_size.width = fb->width / 2;
4666 plane_size->chroma_size.height = fb->height / 2;
4668 plane_size->chroma_pitch =
4669 fb->pitches[1] / fb->format->cpp[1];
4671 address->type = PLN_ADDR_TYPE_VIDEO_PROGRESSIVE;
4672 address->video_progressive.luma_addr.low_part =
4673 lower_32_bits(luma_addr);
4674 address->video_progressive.luma_addr.high_part =
4675 upper_32_bits(luma_addr);
4676 address->video_progressive.chroma_addr.low_part =
4677 lower_32_bits(chroma_addr);
4678 address->video_progressive.chroma_addr.high_part =
4679 upper_32_bits(chroma_addr);
4682 if (adev->family >= AMDGPU_FAMILY_AI) {
4683 ret = fill_gfx9_plane_attributes_from_modifiers(adev, afb, format,
4684 rotation, plane_size,
4691 fill_gfx8_tiling_info_from_flags(tiling_info, tiling_flags);
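/*
 * Worked example for the 4:2:0 sizing above (hypothetical 1920x1080 NV12
 * framebuffer): the luma plane uses offsets[0]/pitches[0] and the
 * interleaved CbCr plane uses offsets[1]/pitches[1], with the chroma surface
 * subsampled to half size in both dimensions:
 *
 *   luma:   1920 x 1080, surface_pitch = pitches[0] / cpp[0] = 1920 / 1 = 1920
 *   chroma:  960 x  540, chroma_pitch  = pitches[1] / cpp[1] = 1920 / 2 =  960
 */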
4698 fill_blending_from_plane_state(const struct drm_plane_state *plane_state,
4699 bool *per_pixel_alpha, bool *global_alpha,
4700 int *global_alpha_value)
4702 *per_pixel_alpha = false;
4703 *global_alpha = false;
4704 *global_alpha_value = 0xff;
4706 if (plane_state->plane->type != DRM_PLANE_TYPE_OVERLAY)
4709 if (plane_state->pixel_blend_mode == DRM_MODE_BLEND_PREMULTI) {
4710 static const uint32_t alpha_formats[] = {
4711 DRM_FORMAT_ARGB8888,
4712 DRM_FORMAT_RGBA8888,
4713 DRM_FORMAT_ABGR8888,
4715 uint32_t format = plane_state->fb->format->format;
4718 for (i = 0; i < ARRAY_SIZE(alpha_formats); ++i) {
4719 if (format == alpha_formats[i]) {
4720 *per_pixel_alpha = true;
4726 if (plane_state->alpha < 0xffff) {
4727 *global_alpha = true;
4728 *global_alpha_value = plane_state->alpha >> 8;
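/*
 * Worked example (illustrative values): plane_state->alpha is a 16-bit
 * property, so a userspace value of 0x8000 (~50%) becomes 0x8000 >> 8 = 0x80
 * in DC's 8-bit global_alpha_value, while the default 0xffff never enables
 * global alpha because the "< 0xffff" test above does not trigger.
 */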
4733 fill_plane_color_attributes(const struct drm_plane_state *plane_state,
4734 const enum surface_pixel_format format,
4735 enum dc_color_space *color_space)
4739 *color_space = COLOR_SPACE_SRGB;
4741 /* DRM color properties only affect non-RGB formats. */
4742 if (format < SURFACE_PIXEL_FORMAT_VIDEO_BEGIN)
4745 full_range = (plane_state->color_range == DRM_COLOR_YCBCR_FULL_RANGE);
4747 switch (plane_state->color_encoding) {
4748 case DRM_COLOR_YCBCR_BT601:
4750 *color_space = COLOR_SPACE_YCBCR601;
4752 *color_space = COLOR_SPACE_YCBCR601_LIMITED;
4755 case DRM_COLOR_YCBCR_BT709:
4757 *color_space = COLOR_SPACE_YCBCR709;
4759 *color_space = COLOR_SPACE_YCBCR709_LIMITED;
4762 case DRM_COLOR_YCBCR_BT2020:
4764 *color_space = COLOR_SPACE_2020_YCBCR;
4777 fill_dc_plane_info_and_addr(struct amdgpu_device *adev,
4778 const struct drm_plane_state *plane_state,
4779 const uint64_t tiling_flags,
4780 struct dc_plane_info *plane_info,
4781 struct dc_plane_address *address,
4783 bool force_disable_dcc)
4785 const struct drm_framebuffer *fb = plane_state->fb;
4786 const struct amdgpu_framebuffer *afb =
4787 to_amdgpu_framebuffer(plane_state->fb);
4790 memset(plane_info, 0, sizeof(*plane_info));
4792 switch (fb->format->format) {
4794 plane_info->format =
4795 SURFACE_PIXEL_FORMAT_GRPH_PALETA_256_COLORS;
4797 case DRM_FORMAT_RGB565:
4798 plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_RGB565;
4800 case DRM_FORMAT_XRGB8888:
4801 case DRM_FORMAT_ARGB8888:
4802 plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ARGB8888;
4804 case DRM_FORMAT_XRGB2101010:
4805 case DRM_FORMAT_ARGB2101010:
4806 plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ARGB2101010;
4808 case DRM_FORMAT_XBGR2101010:
4809 case DRM_FORMAT_ABGR2101010:
4810 plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ABGR2101010;
4812 case DRM_FORMAT_XBGR8888:
4813 case DRM_FORMAT_ABGR8888:
4814 plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ABGR8888;
4816 case DRM_FORMAT_NV21:
4817 plane_info->format = SURFACE_PIXEL_FORMAT_VIDEO_420_YCbCr;
4819 case DRM_FORMAT_NV12:
4820 plane_info->format = SURFACE_PIXEL_FORMAT_VIDEO_420_YCrCb;
4822 case DRM_FORMAT_P010:
4823 plane_info->format = SURFACE_PIXEL_FORMAT_VIDEO_420_10bpc_YCrCb;
4825 case DRM_FORMAT_XRGB16161616F:
4826 case DRM_FORMAT_ARGB16161616F:
4827 plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ARGB16161616F;
4829 case DRM_FORMAT_XBGR16161616F:
4830 case DRM_FORMAT_ABGR16161616F:
4831 plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ABGR16161616F;
4835 "Unsupported screen format %p4cc\n",
4836 &fb->format->format);
4840 switch (plane_state->rotation & DRM_MODE_ROTATE_MASK) {
4841 case DRM_MODE_ROTATE_0:
4842 plane_info->rotation = ROTATION_ANGLE_0;
4844 case DRM_MODE_ROTATE_90:
4845 plane_info->rotation = ROTATION_ANGLE_90;
4847 case DRM_MODE_ROTATE_180:
4848 plane_info->rotation = ROTATION_ANGLE_180;
4850 case DRM_MODE_ROTATE_270:
4851 plane_info->rotation = ROTATION_ANGLE_270;
4854 plane_info->rotation = ROTATION_ANGLE_0;
4858 plane_info->visible = true;
4859 plane_info->stereo_format = PLANE_STEREO_FORMAT_NONE;
4861 plane_info->layer_index = 0;
4863 ret = fill_plane_color_attributes(plane_state, plane_info->format,
4864 &plane_info->color_space);
4868 ret = fill_plane_buffer_attributes(adev, afb, plane_info->format,
4869 plane_info->rotation, tiling_flags,
4870 &plane_info->tiling_info,
4871 &plane_info->plane_size,
4872 &plane_info->dcc, address, tmz_surface,
4877 fill_blending_from_plane_state(
4878 plane_state, &plane_info->per_pixel_alpha,
4879 &plane_info->global_alpha, &plane_info->global_alpha_value);
4884 static int fill_dc_plane_attributes(struct amdgpu_device *adev,
4885 struct dc_plane_state *dc_plane_state,
4886 struct drm_plane_state *plane_state,
4887 struct drm_crtc_state *crtc_state)
4889 struct dm_crtc_state *dm_crtc_state = to_dm_crtc_state(crtc_state);
4890 struct amdgpu_framebuffer *afb = (struct amdgpu_framebuffer *)plane_state->fb;
4891 struct dc_scaling_info scaling_info;
4892 struct dc_plane_info plane_info;
4894 bool force_disable_dcc = false;
4896 ret = fill_dc_scaling_info(plane_state, &scaling_info);
4900 dc_plane_state->src_rect = scaling_info.src_rect;
4901 dc_plane_state->dst_rect = scaling_info.dst_rect;
4902 dc_plane_state->clip_rect = scaling_info.clip_rect;
4903 dc_plane_state->scaling_quality = scaling_info.scaling_quality;
4905 force_disable_dcc = adev->asic_type == CHIP_RAVEN && adev->in_suspend;
4906 ret = fill_dc_plane_info_and_addr(adev, plane_state,
4909 &dc_plane_state->address,
4915 dc_plane_state->format = plane_info.format;
4916 dc_plane_state->color_space = plane_info.color_space;
4918 dc_plane_state->plane_size = plane_info.plane_size;
4919 dc_plane_state->rotation = plane_info.rotation;
4920 dc_plane_state->horizontal_mirror = plane_info.horizontal_mirror;
4921 dc_plane_state->stereo_format = plane_info.stereo_format;
4922 dc_plane_state->tiling_info = plane_info.tiling_info;
4923 dc_plane_state->visible = plane_info.visible;
4924 dc_plane_state->per_pixel_alpha = plane_info.per_pixel_alpha;
4925 dc_plane_state->global_alpha = plane_info.global_alpha;
4926 dc_plane_state->global_alpha_value = plane_info.global_alpha_value;
4927 dc_plane_state->dcc = plane_info.dcc;
4928 dc_plane_state->layer_index = plane_info.layer_index; // Always returns 0
4929 dc_plane_state->flip_int_enabled = true;
4932 * Always set input transfer function, since plane state is refreshed
4935 ret = amdgpu_dm_update_plane_color_mgmt(dm_crtc_state, dc_plane_state);
4942 static void update_stream_scaling_settings(const struct drm_display_mode *mode,
4943 const struct dm_connector_state *dm_state,
4944 struct dc_stream_state *stream)
4946 enum amdgpu_rmx_type rmx_type;
4948 struct rect src = { 0 }; /* viewport in composition space*/
4949 struct rect dst = { 0 }; /* stream addressable area */
4951 /* no mode. nothing to be done */
4955 /* Full screen scaling by default */
4956 src.width = mode->hdisplay;
4957 src.height = mode->vdisplay;
4958 dst.width = stream->timing.h_addressable;
4959 dst.height = stream->timing.v_addressable;
4962 rmx_type = dm_state->scaling;
4963 if (rmx_type == RMX_ASPECT || rmx_type == RMX_OFF) {
4964 if (src.width * dst.height <
4965 src.height * dst.width) {
4966 /* height needs less upscaling/more downscaling */
4967 dst.width = src.width *
4968 dst.height / src.height;
4970 /* width needs less upscaling/more downscaling */
4971 dst.height = src.height *
4972 dst.width / src.width;
4974 } else if (rmx_type == RMX_CENTER) {
4978 dst.x = (stream->timing.h_addressable - dst.width) / 2;
4979 dst.y = (stream->timing.v_addressable - dst.height) / 2;
4981 if (dm_state->underscan_enable) {
4982 dst.x += dm_state->underscan_hborder / 2;
4983 dst.y += dm_state->underscan_vborder / 2;
4984 dst.width -= dm_state->underscan_hborder;
4985 dst.height -= dm_state->underscan_vborder;
4992 DRM_DEBUG_KMS("Destination Rectangle x:%d y:%d width:%d height:%d\n",
4993 dst.x, dst.y, dst.width, dst.height);
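/*
 * Worked example for RMX_ASPECT (hypothetical numbers): scaling a 1280x1024
 * source onto a 1920x1080 addressable timing.  Since 1280 * 1080 is smaller
 * than 1024 * 1920, the height needs less upscaling, so
 *
 *   dst.width  = 1280 * 1080 / 1024 = 1350
 *   dst.height = 1080
 *   dst.x      = (1920 - 1350) / 2  = 285   (pillarboxed)
 *   dst.y      = (1080 - 1080) / 2  = 0
 */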
4997 static enum dc_color_depth
4998 convert_color_depth_from_display_info(const struct drm_connector *connector,
4999 bool is_y420, int requested_bpc)
5006 /* Cap display bpc based on HDMI 2.0 HF-VSDB */
5007 if (connector->display_info.hdmi.y420_dc_modes & DRM_EDID_YCBCR420_DC_48)
5009 else if (connector->display_info.hdmi.y420_dc_modes & DRM_EDID_YCBCR420_DC_36)
5011 else if (connector->display_info.hdmi.y420_dc_modes & DRM_EDID_YCBCR420_DC_30)
5014 bpc = (uint8_t)connector->display_info.bpc;
5015 /* Assume 8 bpc by default if no bpc is specified. */
5016 bpc = bpc ? bpc : 8;
5019 if (requested_bpc > 0) {
5021 * Cap display bpc based on the user requested value.
5023 * The value for state->max_bpc may not be correctly updated,
5024 * depending on when the connector gets added to the state
5025 * or if this was called outside of atomic check, so it
5026 * can't be used directly.
5028 bpc = min_t(u8, bpc, requested_bpc);
5030 /* Round down to the nearest even number. */
5031 bpc = bpc - (bpc & 1);
5037 * Temporary workaround: DRM doesn't parse color depth for
5038 * EDID revisions before 1.4
5039 * TODO: Fix edid parsing
5041 return COLOR_DEPTH_888;
5043 return COLOR_DEPTH_666;
5045 return COLOR_DEPTH_888;
5047 return COLOR_DEPTH_101010;
5049 return COLOR_DEPTH_121212;
5051 return COLOR_DEPTH_141414;
5053 return COLOR_DEPTH_161616;
5055 return COLOR_DEPTH_UNDEFINED;
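/*
 * Minimal sketch (hypothetical helper) of the clamping above: the EDID bpc
 * is capped by the connector's requested max bpc and then rounded down to an
 * even value, e.g. an EDID reporting 10 bpc with max_requested_bpc = 8
 * yields 8, and a (theoretical) 7 bpc panel is treated as 6 bpc.
 */
static inline uint8_t example_clamp_bpc(uint8_t edid_bpc, int requested_bpc)
{
	uint8_t bpc = edid_bpc ? edid_bpc : 8;	/* assume 8 bpc if unspecified */

	if (requested_bpc > 0)
		bpc = min_t(u8, bpc, requested_bpc);

	return bpc - (bpc & 1);			/* round down to even */
}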
5059 static enum dc_aspect_ratio
5060 get_aspect_ratio(const struct drm_display_mode *mode_in)
5062 /* 1-1 mapping, since both enums follow the HDMI spec. */
5063 return (enum dc_aspect_ratio) mode_in->picture_aspect_ratio;
5066 static enum dc_color_space
5067 get_output_color_space(const struct dc_crtc_timing *dc_crtc_timing)
5069 enum dc_color_space color_space = COLOR_SPACE_SRGB;
5071 switch (dc_crtc_timing->pixel_encoding) {
5072 case PIXEL_ENCODING_YCBCR422:
5073 case PIXEL_ENCODING_YCBCR444:
5074 case PIXEL_ENCODING_YCBCR420:
5077 * 27030 kHz is the separation point between HDTV and SDTV
5078 * according to the HDMI spec; we use YCbCr709 and YCbCr601
5081 if (dc_crtc_timing->pix_clk_100hz > 270300) {
5082 if (dc_crtc_timing->flags.Y_ONLY)
5084 COLOR_SPACE_YCBCR709_LIMITED;
5086 color_space = COLOR_SPACE_YCBCR709;
5088 if (dc_crtc_timing->flags.Y_ONLY)
5090 COLOR_SPACE_YCBCR601_LIMITED;
5092 color_space = COLOR_SPACE_YCBCR601;
5097 case PIXEL_ENCODING_RGB:
5098 color_space = COLOR_SPACE_SRGB;
5109 static bool adjust_colour_depth_from_display_info(
5110 struct dc_crtc_timing *timing_out,
5111 const struct drm_display_info *info)
5113 enum dc_color_depth depth = timing_out->display_color_depth;
5116 normalized_clk = timing_out->pix_clk_100hz / 10;
5117 /* YCbCr 4:2:0 requires additional adjustment of 1/2 */
5118 if (timing_out->pixel_encoding == PIXEL_ENCODING_YCBCR420)
5119 normalized_clk /= 2;
5120 /* Adjust the pixel clock per the HDMI spec based on colour depth */
5122 case COLOR_DEPTH_888:
5124 case COLOR_DEPTH_101010:
5125 normalized_clk = (normalized_clk * 30) / 24;
5127 case COLOR_DEPTH_121212:
5128 normalized_clk = (normalized_clk * 36) / 24;
5130 case COLOR_DEPTH_161616:
5131 normalized_clk = (normalized_clk * 48) / 24;
5134 /* The above depths are the only ones valid for HDMI. */
5137 if (normalized_clk <= info->max_tmds_clock) {
5138 timing_out->display_color_depth = depth;
5141 } while (--depth > COLOR_DEPTH_666);
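/*
 * Worked example (hypothetical sink): a 3840x2160@60 YCbCr 4:4:4 timing has
 * pix_clk_100hz = 5940000, so normalized_clk = 594000 kHz.  At 12 bpc the
 * scaled clock is 594000 * 36 / 24 = 891000 kHz, which exceeds a sink's
 * max_tmds_clock of 600000 kHz; 10 bpc (594000 * 30 / 24 = 742500 kHz) still
 * fails, and the loop finally settles on 8 bpc at 594000 kHz.
 */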
5145 static void fill_stream_properties_from_drm_display_mode(
5146 struct dc_stream_state *stream,
5147 const struct drm_display_mode *mode_in,
5148 const struct drm_connector *connector,
5149 const struct drm_connector_state *connector_state,
5150 const struct dc_stream_state *old_stream,
5153 struct dc_crtc_timing *timing_out = &stream->timing;
5154 const struct drm_display_info *info = &connector->display_info;
5155 struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
5156 struct hdmi_vendor_infoframe hv_frame;
5157 struct hdmi_avi_infoframe avi_frame;
5159 memset(&hv_frame, 0, sizeof(hv_frame));
5160 memset(&avi_frame, 0, sizeof(avi_frame));
5162 timing_out->h_border_left = 0;
5163 timing_out->h_border_right = 0;
5164 timing_out->v_border_top = 0;
5165 timing_out->v_border_bottom = 0;
5166 /* TODO: un-hardcode */
5167 if (drm_mode_is_420_only(info, mode_in)
5168 && stream->signal == SIGNAL_TYPE_HDMI_TYPE_A)
5169 timing_out->pixel_encoding = PIXEL_ENCODING_YCBCR420;
5170 else if (drm_mode_is_420_also(info, mode_in)
5171 && aconnector->force_yuv420_output)
5172 timing_out->pixel_encoding = PIXEL_ENCODING_YCBCR420;
5173 else if ((connector->display_info.color_formats & DRM_COLOR_FORMAT_YCRCB444)
5174 && stream->signal == SIGNAL_TYPE_HDMI_TYPE_A)
5175 timing_out->pixel_encoding = PIXEL_ENCODING_YCBCR444;
5177 timing_out->pixel_encoding = PIXEL_ENCODING_RGB;
5179 timing_out->timing_3d_format = TIMING_3D_FORMAT_NONE;
5180 timing_out->display_color_depth = convert_color_depth_from_display_info(
5182 (timing_out->pixel_encoding == PIXEL_ENCODING_YCBCR420),
5184 timing_out->scan_type = SCANNING_TYPE_NODATA;
5185 timing_out->hdmi_vic = 0;
5188 timing_out->vic = old_stream->timing.vic;
5189 timing_out->flags.HSYNC_POSITIVE_POLARITY = old_stream->timing.flags.HSYNC_POSITIVE_POLARITY;
5190 timing_out->flags.VSYNC_POSITIVE_POLARITY = old_stream->timing.flags.VSYNC_POSITIVE_POLARITY;
5192 timing_out->vic = drm_match_cea_mode(mode_in);
5193 if (mode_in->flags & DRM_MODE_FLAG_PHSYNC)
5194 timing_out->flags.HSYNC_POSITIVE_POLARITY = 1;
5195 if (mode_in->flags & DRM_MODE_FLAG_PVSYNC)
5196 timing_out->flags.VSYNC_POSITIVE_POLARITY = 1;
5199 if (stream->signal == SIGNAL_TYPE_HDMI_TYPE_A) {
5200 drm_hdmi_avi_infoframe_from_display_mode(&avi_frame, (struct drm_connector *)connector, mode_in);
5201 timing_out->vic = avi_frame.video_code;
5202 drm_hdmi_vendor_infoframe_from_display_mode(&hv_frame, (struct drm_connector *)connector, mode_in);
5203 timing_out->hdmi_vic = hv_frame.vic;
5206 if (is_freesync_video_mode(mode_in, aconnector)) {
5207 timing_out->h_addressable = mode_in->hdisplay;
5208 timing_out->h_total = mode_in->htotal;
5209 timing_out->h_sync_width = mode_in->hsync_end - mode_in->hsync_start;
5210 timing_out->h_front_porch = mode_in->hsync_start - mode_in->hdisplay;
5211 timing_out->v_total = mode_in->vtotal;
5212 timing_out->v_addressable = mode_in->vdisplay;
5213 timing_out->v_front_porch = mode_in->vsync_start - mode_in->vdisplay;
5214 timing_out->v_sync_width = mode_in->vsync_end - mode_in->vsync_start;
5215 timing_out->pix_clk_100hz = mode_in->clock * 10;
5217 timing_out->h_addressable = mode_in->crtc_hdisplay;
5218 timing_out->h_total = mode_in->crtc_htotal;
5219 timing_out->h_sync_width = mode_in->crtc_hsync_end - mode_in->crtc_hsync_start;
5220 timing_out->h_front_porch = mode_in->crtc_hsync_start - mode_in->crtc_hdisplay;
5221 timing_out->v_total = mode_in->crtc_vtotal;
5222 timing_out->v_addressable = mode_in->crtc_vdisplay;
5223 timing_out->v_front_porch = mode_in->crtc_vsync_start - mode_in->crtc_vdisplay;
5224 timing_out->v_sync_width = mode_in->crtc_vsync_end - mode_in->crtc_vsync_start;
5225 timing_out->pix_clk_100hz = mode_in->crtc_clock * 10;
5228 timing_out->aspect_ratio = get_aspect_ratio(mode_in);
5230 stream->output_color_space = get_output_color_space(timing_out);
5232 stream->out_transfer_func->type = TF_TYPE_PREDEFINED;
5233 stream->out_transfer_func->tf = TRANSFER_FUNCTION_SRGB;
5234 if (stream->signal == SIGNAL_TYPE_HDMI_TYPE_A) {
5235 if (!adjust_colour_depth_from_display_info(timing_out, info) &&
5236 drm_mode_is_420_also(info, mode_in) &&
5237 timing_out->pixel_encoding != PIXEL_ENCODING_YCBCR420) {
5238 timing_out->pixel_encoding = PIXEL_ENCODING_YCBCR420;
5239 adjust_colour_depth_from_display_info(timing_out, info);
5244 static void fill_audio_info(struct audio_info *audio_info,
5245 const struct drm_connector *drm_connector,
5246 const struct dc_sink *dc_sink)
5249 int cea_revision = 0;
5250 const struct dc_edid_caps *edid_caps = &dc_sink->edid_caps;
5252 audio_info->manufacture_id = edid_caps->manufacturer_id;
5253 audio_info->product_id = edid_caps->product_id;
5255 cea_revision = drm_connector->display_info.cea_rev;
5257 strscpy(audio_info->display_name,
5258 edid_caps->display_name,
5259 AUDIO_INFO_DISPLAY_NAME_SIZE_IN_CHARS);
5261 if (cea_revision >= 3) {
5262 audio_info->mode_count = edid_caps->audio_mode_count;
5264 for (i = 0; i < audio_info->mode_count; ++i) {
5265 audio_info->modes[i].format_code =
5266 (enum audio_format_code)
5267 (edid_caps->audio_modes[i].format_code);
5268 audio_info->modes[i].channel_count =
5269 edid_caps->audio_modes[i].channel_count;
5270 audio_info->modes[i].sample_rates.all =
5271 edid_caps->audio_modes[i].sample_rate;
5272 audio_info->modes[i].sample_size =
5273 edid_caps->audio_modes[i].sample_size;
5277 audio_info->flags.all = edid_caps->speaker_flags;
5279 /* TODO: We only check the progressive mode; check the interlaced mode too */
5280 if (drm_connector->latency_present[0]) {
5281 audio_info->video_latency = drm_connector->video_latency[0];
5282 audio_info->audio_latency = drm_connector->audio_latency[0];
5285 /* TODO: For DP, video and audio latency should be calculated from DPCD caps */
5290 copy_crtc_timing_for_drm_display_mode(const struct drm_display_mode *src_mode,
5291 struct drm_display_mode *dst_mode)
5293 dst_mode->crtc_hdisplay = src_mode->crtc_hdisplay;
5294 dst_mode->crtc_vdisplay = src_mode->crtc_vdisplay;
5295 dst_mode->crtc_clock = src_mode->crtc_clock;
5296 dst_mode->crtc_hblank_start = src_mode->crtc_hblank_start;
5297 dst_mode->crtc_hblank_end = src_mode->crtc_hblank_end;
5298 dst_mode->crtc_hsync_start = src_mode->crtc_hsync_start;
5299 dst_mode->crtc_hsync_end = src_mode->crtc_hsync_end;
5300 dst_mode->crtc_htotal = src_mode->crtc_htotal;
5301 dst_mode->crtc_hskew = src_mode->crtc_hskew;
5302 dst_mode->crtc_vblank_start = src_mode->crtc_vblank_start;
5303 dst_mode->crtc_vblank_end = src_mode->crtc_vblank_end;
5304 dst_mode->crtc_vsync_start = src_mode->crtc_vsync_start;
5305 dst_mode->crtc_vsync_end = src_mode->crtc_vsync_end;
5306 dst_mode->crtc_vtotal = src_mode->crtc_vtotal;
5310 decide_crtc_timing_for_drm_display_mode(struct drm_display_mode *drm_mode,
5311 const struct drm_display_mode *native_mode,
5314 if (scale_enabled) {
5315 copy_crtc_timing_for_drm_display_mode(native_mode, drm_mode);
5316 } else if (native_mode->clock == drm_mode->clock &&
5317 native_mode->htotal == drm_mode->htotal &&
5318 native_mode->vtotal == drm_mode->vtotal) {
5319 copy_crtc_timing_for_drm_display_mode(native_mode, drm_mode);
5321 /* no scaling and no amdgpu-inserted mode, no need to patch */
5325 static struct dc_sink *
5326 create_fake_sink(struct amdgpu_dm_connector *aconnector)
5328 struct dc_sink_init_data sink_init_data = { 0 };
5329 struct dc_sink *sink = NULL;
5330 sink_init_data.link = aconnector->dc_link;
5331 sink_init_data.sink_signal = aconnector->dc_link->connector_signal;
5333 sink = dc_sink_create(&sink_init_data);
5335 DRM_ERROR("Failed to create sink!\n");
5338 sink->sink_signal = SIGNAL_TYPE_VIRTUAL;
5343 static void set_multisync_trigger_params(
5344 struct dc_stream_state *stream)
5346 struct dc_stream_state *master = NULL;
5348 if (stream->triggered_crtc_reset.enabled) {
5349 master = stream->triggered_crtc_reset.event_source;
5350 stream->triggered_crtc_reset.event =
5351 master->timing.flags.VSYNC_POSITIVE_POLARITY ?
5352 CRTC_EVENT_VSYNC_RISING : CRTC_EVENT_VSYNC_FALLING;
5353 stream->triggered_crtc_reset.delay = TRIGGER_DELAY_NEXT_PIXEL;
5357 static void set_master_stream(struct dc_stream_state *stream_set[],
5360 int j, highest_rfr = 0, master_stream = 0;
5362 for (j = 0; j < stream_count; j++) {
5363 if (stream_set[j] && stream_set[j]->triggered_crtc_reset.enabled) {
5364 int refresh_rate = 0;
5366 refresh_rate = (stream_set[j]->timing.pix_clk_100hz*100)/
5367 (stream_set[j]->timing.h_total*stream_set[j]->timing.v_total);
5368 if (refresh_rate > highest_rfr) {
5369 highest_rfr = refresh_rate;
5374 for (j = 0; j < stream_count; j++) {
5376 stream_set[j]->triggered_crtc_reset.event_source = stream_set[master_stream];
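/*
 * Worked example (illustrative): a 1920x1080@60 stream with
 * pix_clk_100hz = 1485000, h_total = 2200 and v_total = 1125 yields
 *
 *   refresh_rate = (1485000 * 100) / (2200 * 1125) = 60
 *
 * so it wins the master election against any slower display in the set.
 */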
5380 static void dm_enable_per_frame_crtc_master_sync(struct dc_state *context)
5383 struct dc_stream_state *stream;
5385 if (context->stream_count < 2)
5387 for (i = 0; i < context->stream_count ; i++) {
5388 if (!context->streams[i])
5391 * TODO: add a function to read AMD VSDB bits and set
5392 * crtc_sync_master.multi_sync_enabled flag
5393 * For now it's set to false
5397 set_master_stream(context->streams, context->stream_count);
5399 for (i = 0; i < context->stream_count ; i++) {
5400 stream = context->streams[i];
5405 set_multisync_trigger_params(stream);
5409 static struct drm_display_mode *
5410 get_highest_refresh_rate_mode(struct amdgpu_dm_connector *aconnector,
5411 bool use_probed_modes)
5413 struct drm_display_mode *m, *m_pref = NULL;
5414 u16 current_refresh, highest_refresh;
5415 struct list_head *list_head = use_probed_modes ?
5416 &aconnector->base.probed_modes :
5417 &aconnector->base.modes;
5419 if (aconnector->freesync_vid_base.clock != 0)
5420 return &aconnector->freesync_vid_base;
5422 /* Find the preferred mode */
5423 list_for_each_entry (m, list_head, head) {
5424 if (m->type & DRM_MODE_TYPE_PREFERRED) {
5431 /* Probably an EDID with no preferred mode. Fall back to the first entry */
5432 m_pref = list_first_entry_or_null(
5433 &aconnector->base.modes, struct drm_display_mode, head);
5435 DRM_DEBUG_DRIVER("No preferred mode found in EDID\n");
5440 highest_refresh = drm_mode_vrefresh(m_pref);
5443 * Find the mode with highest refresh rate with same resolution.
5444 * For some monitors, preferred mode is not the mode with highest
5445 * supported refresh rate.
5447 list_for_each_entry (m, list_head, head) {
5448 current_refresh = drm_mode_vrefresh(m);
5450 if (m->hdisplay == m_pref->hdisplay &&
5451 m->vdisplay == m_pref->vdisplay &&
5452 highest_refresh < current_refresh) {
5453 highest_refresh = current_refresh;
5458 aconnector->freesync_vid_base = *m_pref;
5462 static bool is_freesync_video_mode(const struct drm_display_mode *mode,
5463 struct amdgpu_dm_connector *aconnector)
5465 struct drm_display_mode *high_mode;
5468 high_mode = get_highest_refresh_rate_mode(aconnector, false);
5469 if (!high_mode || !mode)
5472 timing_diff = high_mode->vtotal - mode->vtotal;
5474 if (high_mode->clock == 0 || high_mode->clock != mode->clock ||
5475 high_mode->hdisplay != mode->hdisplay ||
5476 high_mode->vdisplay != mode->vdisplay ||
5477 high_mode->hsync_start != mode->hsync_start ||
5478 high_mode->hsync_end != mode->hsync_end ||
5479 high_mode->htotal != mode->htotal ||
5480 high_mode->hskew != mode->hskew ||
5481 high_mode->vscan != mode->vscan ||
5482 high_mode->vsync_start - mode->vsync_start != timing_diff ||
5483 high_mode->vsync_end - mode->vsync_end != timing_diff)
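/*
 * Illustrative example (hypothetical timings): a candidate qualifies as a
 * freesync video mode when it differs from the stored base mode only by a
 * longer vertical blank.  E.g. with a 1920x1080 base of vtotal = 1125, a
 * candidate with vtotal = 1350 matches only if its vsync_start and
 * vsync_end are exactly 225 lines later than the base's (the extra front
 * porch) and every horizontal parameter, vscan and the pixel clock are
 * identical.
 */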
5489 static struct dc_stream_state *
5490 create_stream_for_sink(struct amdgpu_dm_connector *aconnector,
5491 const struct drm_display_mode *drm_mode,
5492 const struct dm_connector_state *dm_state,
5493 const struct dc_stream_state *old_stream,
5496 struct drm_display_mode *preferred_mode = NULL;
5497 struct drm_connector *drm_connector;
5498 const struct drm_connector_state *con_state =
5499 dm_state ? &dm_state->base : NULL;
5500 struct dc_stream_state *stream = NULL;
5501 struct drm_display_mode mode = *drm_mode;
5502 struct drm_display_mode saved_mode;
5503 struct drm_display_mode *freesync_mode = NULL;
5504 bool native_mode_found = false;
5505 bool recalculate_timing = dm_state ? (dm_state->scaling != RMX_OFF) : false;
5507 int preferred_refresh = 0;
5508 #if defined(CONFIG_DRM_AMD_DC_DCN)
5509 struct dsc_dec_dpcd_caps dsc_caps;
5510 uint32_t link_bandwidth_kbps;
5512 struct dc_sink *sink = NULL;
5514 memset(&saved_mode, 0, sizeof(saved_mode));
5516 if (aconnector == NULL) {
5517 DRM_ERROR("aconnector is NULL!\n");
5521 drm_connector = &aconnector->base;
5523 if (!aconnector->dc_sink) {
5524 sink = create_fake_sink(aconnector);
5528 sink = aconnector->dc_sink;
5529 dc_sink_retain(sink);
5532 stream = dc_create_stream_for_sink(sink);
5534 if (stream == NULL) {
5535 DRM_ERROR("Failed to create stream for sink!\n");
5539 stream->dm_stream_context = aconnector;
5541 stream->timing.flags.LTE_340MCSC_SCRAMBLE =
5542 drm_connector->display_info.hdmi.scdc.scrambling.low_rates;
5544 list_for_each_entry(preferred_mode, &aconnector->base.modes, head) {
5545 /* Search for preferred mode */
5546 if (preferred_mode->type & DRM_MODE_TYPE_PREFERRED) {
5547 native_mode_found = true;
5551 if (!native_mode_found)
5552 preferred_mode = list_first_entry_or_null(
5553 &aconnector->base.modes,
5554 struct drm_display_mode,
5557 mode_refresh = drm_mode_vrefresh(&mode);
5559 if (preferred_mode == NULL) {
5561 * This may not be an error; the use case is when we have no
5562 * usermode calls to reset and set mode upon hotplug. In this
5563 * case, we call set mode ourselves to restore the previous mode,
5564 * and the mode list may not be populated in time.
5566 DRM_DEBUG_DRIVER("No preferred mode found\n");
5568 recalculate_timing |= amdgpu_freesync_vid_mode &&
5569 is_freesync_video_mode(&mode, aconnector);
5570 if (recalculate_timing) {
5571 freesync_mode = get_highest_refresh_rate_mode(aconnector, false);
5573 mode = *freesync_mode;
5575 decide_crtc_timing_for_drm_display_mode(
5576 &mode, preferred_mode,
5577 dm_state ? (dm_state->scaling != RMX_OFF) : false);
5580 preferred_refresh = drm_mode_vrefresh(preferred_mode);
5583 if (recalculate_timing)
5584 drm_mode_set_crtcinfo(&saved_mode, 0);
5586 drm_mode_set_crtcinfo(&mode, 0);
5589 * If scaling is enabled and refresh rate didn't change
5590 * we copy the vic and polarities of the old timings
5592 if (!recalculate_timing || mode_refresh != preferred_refresh)
5593 fill_stream_properties_from_drm_display_mode(
5594 stream, &mode, &aconnector->base, con_state, NULL,
5597 fill_stream_properties_from_drm_display_mode(
5598 stream, &mode, &aconnector->base, con_state, old_stream,
5601 stream->timing.flags.DSC = 0;
5603 if (aconnector->dc_link && sink->sink_signal == SIGNAL_TYPE_DISPLAY_PORT) {
5604 #if defined(CONFIG_DRM_AMD_DC_DCN)
5605 dc_dsc_parse_dsc_dpcd(aconnector->dc_link->ctx->dc,
5606 aconnector->dc_link->dpcd_caps.dsc_caps.dsc_basic_caps.raw,
5607 aconnector->dc_link->dpcd_caps.dsc_caps.dsc_branch_decoder_caps.raw,
5609 link_bandwidth_kbps = dc_link_bandwidth_kbps(aconnector->dc_link,
5610 dc_link_get_link_cap(aconnector->dc_link));
5612 if (aconnector->dsc_settings.dsc_force_enable != DSC_CLK_FORCE_DISABLE && dsc_caps.is_dsc_supported) {
5613 /* Set DSC policy according to dsc_clock_en */
5614 dc_dsc_policy_set_enable_dsc_when_not_needed(
5615 aconnector->dsc_settings.dsc_force_enable == DSC_CLK_FORCE_ENABLE);
5617 if (dc_dsc_compute_config(aconnector->dc_link->ctx->dc->res_pool->dscs[0],
5619 aconnector->dc_link->ctx->dc->debug.dsc_min_slice_height_override,
5621 link_bandwidth_kbps,
5623 &stream->timing.dsc_cfg))
5624 stream->timing.flags.DSC = 1;
5625 /* Overwrite the stream flag if DSC is enabled through debugfs */
5626 if (aconnector->dsc_settings.dsc_force_enable == DSC_CLK_FORCE_ENABLE)
5627 stream->timing.flags.DSC = 1;
5629 if (stream->timing.flags.DSC && aconnector->dsc_settings.dsc_num_slices_h)
5630 stream->timing.dsc_cfg.num_slices_h = aconnector->dsc_settings.dsc_num_slices_h;
5632 if (stream->timing.flags.DSC && aconnector->dsc_settings.dsc_num_slices_v)
5633 stream->timing.dsc_cfg.num_slices_v = aconnector->dsc_settings.dsc_num_slices_v;
5635 if (stream->timing.flags.DSC && aconnector->dsc_settings.dsc_bits_per_pixel)
5636 stream->timing.dsc_cfg.bits_per_pixel = aconnector->dsc_settings.dsc_bits_per_pixel;
5641 update_stream_scaling_settings(&mode, dm_state, stream);
5644 &stream->audio_info,
5648 update_stream_signal(stream, sink);
5650 if (stream->signal == SIGNAL_TYPE_HDMI_TYPE_A)
5651 mod_build_hf_vsif_infopacket(stream, &stream->vsp_infopacket);
5653 if (stream->link->psr_settings.psr_feature_enabled) {
5655 // Decide whether the stream supports VSC SDP colorimetry
5656 // before building the VSC info packet
5658 stream->use_vsc_sdp_for_colorimetry = false;
5659 if (aconnector->dc_sink->sink_signal == SIGNAL_TYPE_DISPLAY_PORT_MST) {
5660 stream->use_vsc_sdp_for_colorimetry =
5661 aconnector->dc_sink->is_vsc_sdp_colorimetry_supported;
5663 if (stream->link->dpcd_caps.dprx_feature.bits.VSC_SDP_COLORIMETRY_SUPPORTED)
5664 stream->use_vsc_sdp_for_colorimetry = true;
5666 mod_build_vsc_infopacket(stream, &stream->vsc_infopacket);
5669 dc_sink_release(sink);
5674 static void amdgpu_dm_crtc_destroy(struct drm_crtc *crtc)
5676 drm_crtc_cleanup(crtc);
5680 static void dm_crtc_destroy_state(struct drm_crtc *crtc,
5681 struct drm_crtc_state *state)
5683 struct dm_crtc_state *cur = to_dm_crtc_state(state);
5685 /* TODO: destroy dc_stream objects once the stream object is flattened */
5687 dc_stream_release(cur->stream);
5690 __drm_atomic_helper_crtc_destroy_state(state);
5696 static void dm_crtc_reset_state(struct drm_crtc *crtc)
5698 struct dm_crtc_state *state;
5701 dm_crtc_destroy_state(crtc, crtc->state);
5703 state = kzalloc(sizeof(*state), GFP_KERNEL);
5704 if (WARN_ON(!state))
5707 __drm_atomic_helper_crtc_reset(crtc, &state->base);
5710 static struct drm_crtc_state *
5711 dm_crtc_duplicate_state(struct drm_crtc *crtc)
5713 struct dm_crtc_state *state, *cur;
5715 cur = to_dm_crtc_state(crtc->state);
5717 if (WARN_ON(!crtc->state))
5720 state = kzalloc(sizeof(*state), GFP_KERNEL);
5724 __drm_atomic_helper_crtc_duplicate_state(crtc, &state->base);
5727 state->stream = cur->stream;
5728 dc_stream_retain(state->stream);
5731 state->active_planes = cur->active_planes;
5732 state->vrr_infopacket = cur->vrr_infopacket;
5733 state->abm_level = cur->abm_level;
5734 state->vrr_supported = cur->vrr_supported;
5735 state->freesync_config = cur->freesync_config;
5736 state->cm_has_degamma = cur->cm_has_degamma;
5737 state->cm_is_degamma_srgb = cur->cm_is_degamma_srgb;
5738 /* TODO: duplicate the dc_stream once the stream object is flattened */
5740 return &state->base;
5743 #ifdef CONFIG_DRM_AMD_SECURE_DISPLAY
5744 static int amdgpu_dm_crtc_late_register(struct drm_crtc *crtc)
5746 crtc_debugfs_init(crtc);
5752 static inline int dm_set_vupdate_irq(struct drm_crtc *crtc, bool enable)
5754 enum dc_irq_source irq_source;
5755 struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
5756 struct amdgpu_device *adev = drm_to_adev(crtc->dev);
5759 irq_source = IRQ_TYPE_VUPDATE + acrtc->otg_inst;
5761 rc = dc_interrupt_set(adev->dm.dc, irq_source, enable) ? 0 : -EBUSY;
5763 DRM_DEBUG_VBL("crtc %d - vupdate irq %sabling: r=%d\n",
5764 acrtc->crtc_id, enable ? "en" : "dis", rc);
5768 static inline int dm_set_vblank(struct drm_crtc *crtc, bool enable)
5770 enum dc_irq_source irq_source;
5771 struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
5772 struct amdgpu_device *adev = drm_to_adev(crtc->dev);
5773 struct dm_crtc_state *acrtc_state = to_dm_crtc_state(crtc->state);
5774 #if defined(CONFIG_DRM_AMD_DC_DCN)
5775 struct amdgpu_display_manager *dm = &adev->dm;
5776 unsigned long flags;
5781 /* vblank irq on -> Only need vupdate irq in vrr mode */
5782 if (amdgpu_dm_vrr_active(acrtc_state))
5783 rc = dm_set_vupdate_irq(crtc, true);
5785 /* vblank irq off -> vupdate irq off */
5786 rc = dm_set_vupdate_irq(crtc, false);
5792 irq_source = IRQ_TYPE_VBLANK + acrtc->otg_inst;
5794 if (!dc_interrupt_set(adev->dm.dc, irq_source, enable))
5797 if (amdgpu_in_reset(adev))
5800 #if defined(CONFIG_DRM_AMD_DC_DCN)
5801 spin_lock_irqsave(&dm->vblank_lock, flags);
5802 dm->vblank_workqueue->dm = dm;
5803 dm->vblank_workqueue->otg_inst = acrtc->otg_inst;
5804 dm->vblank_workqueue->enable = enable;
5805 spin_unlock_irqrestore(&dm->vblank_lock, flags);
5806 schedule_work(&dm->vblank_workqueue->mall_work);
5812 static int dm_enable_vblank(struct drm_crtc *crtc)
5814 return dm_set_vblank(crtc, true);
5817 static void dm_disable_vblank(struct drm_crtc *crtc)
5819 dm_set_vblank(crtc, false);
5822 /* Implemented only the options currently available for the driver */
5823 static const struct drm_crtc_funcs amdgpu_dm_crtc_funcs = {
5824 .reset = dm_crtc_reset_state,
5825 .destroy = amdgpu_dm_crtc_destroy,
5826 .set_config = drm_atomic_helper_set_config,
5827 .page_flip = drm_atomic_helper_page_flip,
5828 .atomic_duplicate_state = dm_crtc_duplicate_state,
5829 .atomic_destroy_state = dm_crtc_destroy_state,
5830 .set_crc_source = amdgpu_dm_crtc_set_crc_source,
5831 .verify_crc_source = amdgpu_dm_crtc_verify_crc_source,
5832 .get_crc_sources = amdgpu_dm_crtc_get_crc_sources,
5833 .get_vblank_counter = amdgpu_get_vblank_counter_kms,
5834 .enable_vblank = dm_enable_vblank,
5835 .disable_vblank = dm_disable_vblank,
5836 .get_vblank_timestamp = drm_crtc_vblank_helper_get_vblank_timestamp,
5837 #if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
5838 .late_register = amdgpu_dm_crtc_late_register,
5842 static enum drm_connector_status
5843 amdgpu_dm_connector_detect(struct drm_connector *connector, bool force)
5846 struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
5850 * 1. This interface is NOT called in context of HPD irq.
5851 * 2. This interface *is called* in context of user-mode ioctl, which
5852 * makes it a bad place for *any* MST-related activity.
5855 if (aconnector->base.force == DRM_FORCE_UNSPECIFIED &&
5856 !aconnector->fake_enable)
5857 connected = (aconnector->dc_sink != NULL);
5859 connected = (aconnector->base.force == DRM_FORCE_ON);
5861 update_subconnector_property(aconnector);
5863 return (connected ? connector_status_connected :
5864 connector_status_disconnected);
5867 int amdgpu_dm_connector_atomic_set_property(struct drm_connector *connector,
5868 struct drm_connector_state *connector_state,
5869 struct drm_property *property,
5872 struct drm_device *dev = connector->dev;
5873 struct amdgpu_device *adev = drm_to_adev(dev);
5874 struct dm_connector_state *dm_old_state =
5875 to_dm_connector_state(connector->state);
5876 struct dm_connector_state *dm_new_state =
5877 to_dm_connector_state(connector_state);
5881 if (property == dev->mode_config.scaling_mode_property) {
5882 enum amdgpu_rmx_type rmx_type;
5885 case DRM_MODE_SCALE_CENTER:
5886 rmx_type = RMX_CENTER;
5888 case DRM_MODE_SCALE_ASPECT:
5889 rmx_type = RMX_ASPECT;
5891 case DRM_MODE_SCALE_FULLSCREEN:
5892 rmx_type = RMX_FULL;
5894 case DRM_MODE_SCALE_NONE:
5900 if (dm_old_state->scaling == rmx_type)
5903 dm_new_state->scaling = rmx_type;
5905 } else if (property == adev->mode_info.underscan_hborder_property) {
5906 dm_new_state->underscan_hborder = val;
5908 } else if (property == adev->mode_info.underscan_vborder_property) {
5909 dm_new_state->underscan_vborder = val;
5911 } else if (property == adev->mode_info.underscan_property) {
5912 dm_new_state->underscan_enable = val;
5914 } else if (property == adev->mode_info.abm_level_property) {
5915 dm_new_state->abm_level = val;
5922 int amdgpu_dm_connector_atomic_get_property(struct drm_connector *connector,
5923 const struct drm_connector_state *state,
5924 struct drm_property *property,
5927 struct drm_device *dev = connector->dev;
5928 struct amdgpu_device *adev = drm_to_adev(dev);
5929 struct dm_connector_state *dm_state =
5930 to_dm_connector_state(state);
5933 if (property == dev->mode_config.scaling_mode_property) {
5934 switch (dm_state->scaling) {
5936 *val = DRM_MODE_SCALE_CENTER;
5939 *val = DRM_MODE_SCALE_ASPECT;
5942 *val = DRM_MODE_SCALE_FULLSCREEN;
5946 *val = DRM_MODE_SCALE_NONE;
5950 } else if (property == adev->mode_info.underscan_hborder_property) {
5951 *val = dm_state->underscan_hborder;
5953 } else if (property == adev->mode_info.underscan_vborder_property) {
5954 *val = dm_state->underscan_vborder;
5956 } else if (property == adev->mode_info.underscan_property) {
5957 *val = dm_state->underscan_enable;
5959 } else if (property == adev->mode_info.abm_level_property) {
5960 *val = dm_state->abm_level;
5967 static void amdgpu_dm_connector_unregister(struct drm_connector *connector)
5969 struct amdgpu_dm_connector *amdgpu_dm_connector = to_amdgpu_dm_connector(connector);
5971 drm_dp_aux_unregister(&amdgpu_dm_connector->dm_dp_aux.aux);
5974 static void amdgpu_dm_connector_destroy(struct drm_connector *connector)
5976 struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
5977 const struct dc_link *link = aconnector->dc_link;
5978 struct amdgpu_device *adev = drm_to_adev(connector->dev);
5979 struct amdgpu_display_manager *dm = &adev->dm;
5982 * Call only if mst_mgr was initialized before, since it's not done
5983 * for all connector types.
5985 if (aconnector->mst_mgr.dev)
5986 drm_dp_mst_topology_mgr_destroy(&aconnector->mst_mgr);
5988 #if defined(CONFIG_BACKLIGHT_CLASS_DEVICE) ||\
5989 defined(CONFIG_BACKLIGHT_CLASS_DEVICE_MODULE)
5991 if ((link->connector_signal & (SIGNAL_TYPE_EDP | SIGNAL_TYPE_LVDS)) &&
5992 link->type != dc_connection_none &&
5993 dm->backlight_dev) {
5994 backlight_device_unregister(dm->backlight_dev);
5995 dm->backlight_dev = NULL;
5999 if (aconnector->dc_em_sink)
6000 dc_sink_release(aconnector->dc_em_sink);
6001 aconnector->dc_em_sink = NULL;
6002 if (aconnector->dc_sink)
6003 dc_sink_release(aconnector->dc_sink);
6004 aconnector->dc_sink = NULL;
6006 drm_dp_cec_unregister_connector(&aconnector->dm_dp_aux.aux);
6007 drm_connector_unregister(connector);
6008 drm_connector_cleanup(connector);
6009 if (aconnector->i2c) {
6010 i2c_del_adapter(&aconnector->i2c->base);
6011 kfree(aconnector->i2c);
6013 kfree(aconnector->dm_dp_aux.aux.name);
6018 void amdgpu_dm_connector_funcs_reset(struct drm_connector *connector)
6020 struct dm_connector_state *state =
6021 to_dm_connector_state(connector->state);
6023 if (connector->state)
6024 __drm_atomic_helper_connector_destroy_state(connector->state);
6028 state = kzalloc(sizeof(*state), GFP_KERNEL);
6031 state->scaling = RMX_OFF;
6032 state->underscan_enable = false;
6033 state->underscan_hborder = 0;
6034 state->underscan_vborder = 0;
6035 state->base.max_requested_bpc = 8;
6036 state->vcpi_slots = 0;
6038 if (connector->connector_type == DRM_MODE_CONNECTOR_eDP)
6039 state->abm_level = amdgpu_dm_abm_level;
6041 __drm_atomic_helper_connector_reset(connector, &state->base);
6045 struct drm_connector_state *
6046 amdgpu_dm_connector_atomic_duplicate_state(struct drm_connector *connector)
6048 struct dm_connector_state *state =
6049 to_dm_connector_state(connector->state);
6051 struct dm_connector_state *new_state =
6052 kmemdup(state, sizeof(*state), GFP_KERNEL);
6057 __drm_atomic_helper_connector_duplicate_state(connector, &new_state->base);
6059 new_state->freesync_capable = state->freesync_capable;
6060 new_state->abm_level = state->abm_level;
6061 new_state->scaling = state->scaling;
6062 new_state->underscan_enable = state->underscan_enable;
6063 new_state->underscan_hborder = state->underscan_hborder;
6064 new_state->underscan_vborder = state->underscan_vborder;
6065 new_state->vcpi_slots = state->vcpi_slots;
6066 new_state->pbn = state->pbn;
6067 return &new_state->base;
6071 amdgpu_dm_connector_late_register(struct drm_connector *connector)
6073 struct amdgpu_dm_connector *amdgpu_dm_connector =
6074 to_amdgpu_dm_connector(connector);
6077 if ((connector->connector_type == DRM_MODE_CONNECTOR_DisplayPort) ||
6078 (connector->connector_type == DRM_MODE_CONNECTOR_eDP)) {
6079 amdgpu_dm_connector->dm_dp_aux.aux.dev = connector->kdev;
6080 r = drm_dp_aux_register(&amdgpu_dm_connector->dm_dp_aux.aux);
6085 #if defined(CONFIG_DEBUG_FS)
6086 connector_debugfs_init(amdgpu_dm_connector);
6092 static const struct drm_connector_funcs amdgpu_dm_connector_funcs = {
6093 .reset = amdgpu_dm_connector_funcs_reset,
6094 .detect = amdgpu_dm_connector_detect,
6095 .fill_modes = drm_helper_probe_single_connector_modes,
6096 .destroy = amdgpu_dm_connector_destroy,
6097 .atomic_duplicate_state = amdgpu_dm_connector_atomic_duplicate_state,
6098 .atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
6099 .atomic_set_property = amdgpu_dm_connector_atomic_set_property,
6100 .atomic_get_property = amdgpu_dm_connector_atomic_get_property,
6101 .late_register = amdgpu_dm_connector_late_register,
6102 .early_unregister = amdgpu_dm_connector_unregister
6105 static int get_modes(struct drm_connector *connector)
6107 return amdgpu_dm_connector_get_modes(connector);
6110 static void create_eml_sink(struct amdgpu_dm_connector *aconnector)
6112 struct dc_sink_init_data init_params = {
6113 .link = aconnector->dc_link,
6114 .sink_signal = SIGNAL_TYPE_VIRTUAL
6118 if (!aconnector->base.edid_blob_ptr) {
6119 DRM_ERROR("No EDID firmware found on connector: %s, forcing to OFF!\n",
6120 aconnector->base.name);
6122 aconnector->base.force = DRM_FORCE_OFF;
6123 aconnector->base.override_edid = false;
6127 edid = (struct edid *) aconnector->base.edid_blob_ptr->data;
6129 aconnector->edid = edid;
6131 aconnector->dc_em_sink = dc_link_add_remote_sink(
6132 aconnector->dc_link,
6134 (edid->extensions + 1) * EDID_LENGTH,
6137 if (aconnector->base.force == DRM_FORCE_ON) {
6138 aconnector->dc_sink = aconnector->dc_link->local_sink ?
6139 aconnector->dc_link->local_sink :
6140 aconnector->dc_em_sink;
6141 dc_sink_retain(aconnector->dc_sink);
6145 static void handle_edid_mgmt(struct amdgpu_dm_connector *aconnector)
6147 struct dc_link *link = (struct dc_link *)aconnector->dc_link;
6150 * In the case of a headless boot with force on for a DP managed connector,
6151 * these settings have to be != 0 to get an initial modeset.
6153 if (link->connector_signal == SIGNAL_TYPE_DISPLAY_PORT) {
6154 link->verified_link_cap.lane_count = LANE_COUNT_FOUR;
6155 link->verified_link_cap.link_rate = LINK_RATE_HIGH2;
6159 aconnector->base.override_edid = true;
6160 create_eml_sink(aconnector);
6163 static struct dc_stream_state *
6164 create_validate_stream_for_sink(struct amdgpu_dm_connector *aconnector,
6165 const struct drm_display_mode *drm_mode,
6166 const struct dm_connector_state *dm_state,
6167 const struct dc_stream_state *old_stream)
6169 struct drm_connector *connector = &aconnector->base;
6170 struct amdgpu_device *adev = drm_to_adev(connector->dev);
6171 struct dc_stream_state *stream;
6172 const struct drm_connector_state *drm_state = dm_state ? &dm_state->base : NULL;
6173 int requested_bpc = drm_state ? drm_state->max_requested_bpc : 8;
6174 enum dc_status dc_result = DC_OK;
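/*
 * Create the stream and validate it with DC. If validation fails,
 * release the stream and retry at a lower bpc (stepping down by 2,
 * not going below 6) before giving up.
 */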
6177 stream = create_stream_for_sink(aconnector, drm_mode,
6178 dm_state, old_stream,
6180 if (stream == NULL) {
6181 DRM_ERROR("Failed to create stream for sink!\n");
6185 dc_result = dc_validate_stream(adev->dm.dc, stream);
6187 if (dc_result != DC_OK) {
6188 DRM_DEBUG_KMS("Mode %dx%d (clk %d) failed DC validation with error %d (%s)\n",
6193 dc_status_to_str(dc_result));
6195 dc_stream_release(stream);
6197 requested_bpc -= 2; /* lower bpc to retry validation */
6200 } while (stream == NULL && requested_bpc >= 6);
6202 if (dc_result == DC_FAIL_ENC_VALIDATE && !aconnector->force_yuv420_output) {
6203 DRM_DEBUG_KMS("Retry forcing YCbCr420 encoding\n");
6205 aconnector->force_yuv420_output = true;
6206 stream = create_validate_stream_for_sink(aconnector, drm_mode,
6207 dm_state, old_stream);
6208 aconnector->force_yuv420_output = false;
6214 enum drm_mode_status amdgpu_dm_connector_mode_valid(struct drm_connector *connector,
6215 struct drm_display_mode *mode)
6217 int result = MODE_ERROR;
6218 struct dc_sink *dc_sink;
6219 /* TODO: Unhardcode stream count */
6220 struct dc_stream_state *stream;
6221 struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
6223 if ((mode->flags & DRM_MODE_FLAG_INTERLACE) ||
6224 (mode->flags & DRM_MODE_FLAG_DBLSCAN))
6228 * Only run this the first time mode_valid is called to initialize
6231 if (aconnector->base.force != DRM_FORCE_UNSPECIFIED &&
6232 !aconnector->dc_em_sink)
6233 handle_edid_mgmt(aconnector);
6235 dc_sink = to_amdgpu_dm_connector(connector)->dc_sink;
6237 if (dc_sink == NULL && aconnector->base.force != DRM_FORCE_ON_DIGITAL &&
6238 aconnector->base.force != DRM_FORCE_ON) {
6239 DRM_ERROR("dc_sink is NULL!\n");
6243 stream = create_validate_stream_for_sink(aconnector, mode, NULL, NULL);
6245 dc_stream_release(stream);
6250 /* TODO: error handling */
6254 static int fill_hdr_info_packet(const struct drm_connector_state *state,
6255 struct dc_info_packet *out)
6257 struct hdmi_drm_infoframe frame;
6258 unsigned char buf[30]; /* 26 + 4 */
6262 memset(out, 0, sizeof(*out));
6264 if (!state->hdr_output_metadata)
6267 ret = drm_hdmi_infoframe_set_hdr_metadata(&frame, state);
6271 len = hdmi_drm_infoframe_pack_only(&frame, buf, sizeof(buf));
6275 /* Static metadata is a fixed 26 bytes + 4 byte header. */
6279 /* Prepare the infopacket for DC. */
6280 switch (state->connector->connector_type) {
6281 case DRM_MODE_CONNECTOR_HDMIA:
6282 out->hb0 = 0x87; /* type */
6283 out->hb1 = 0x01; /* version */
6284 out->hb2 = 0x1A; /* length */
6285 out->sb[0] = buf[3]; /* checksum */
6289 case DRM_MODE_CONNECTOR_DisplayPort:
6290 case DRM_MODE_CONNECTOR_eDP:
6291 out->hb0 = 0x00; /* sdp id, zero */
6292 out->hb1 = 0x87; /* type */
6293 out->hb2 = 0x1D; /* payload len - 1 */
6294 out->hb3 = (0x13 << 2); /* sdp version */
6295 out->sb[0] = 0x01; /* version */
6296 out->sb[1] = 0x1A; /* length */
6304 memcpy(&out->sb[i], &buf[4], 26);
6307 print_hex_dump(KERN_DEBUG, "HDR SB:", DUMP_PREFIX_NONE, 16, 1, out->sb,
6308 sizeof(out->sb), false);
6314 is_hdr_metadata_different(const struct drm_connector_state *old_state,
6315 const struct drm_connector_state *new_state)
6317 struct drm_property_blob *old_blob = old_state->hdr_output_metadata;
6318 struct drm_property_blob *new_blob = new_state->hdr_output_metadata;
6320 if (old_blob != new_blob) {
6321 if (old_blob && new_blob &&
6322 old_blob->length == new_blob->length)
6323 return memcmp(old_blob->data, new_blob->data,
6333 amdgpu_dm_connector_atomic_check(struct drm_connector *conn,
6334 struct drm_atomic_state *state)
6336 struct drm_connector_state *new_con_state =
6337 drm_atomic_get_new_connector_state(state, conn);
6338 struct drm_connector_state *old_con_state =
6339 drm_atomic_get_old_connector_state(state, conn);
6340 struct drm_crtc *crtc = new_con_state->crtc;
6341 struct drm_crtc_state *new_crtc_state;
6344 trace_amdgpu_dm_connector_atomic_check(new_con_state);
6349 if (is_hdr_metadata_different(old_con_state, new_con_state)) {
6350 struct dc_info_packet hdr_infopacket;
6352 ret = fill_hdr_info_packet(new_con_state, &hdr_infopacket);
6356 new_crtc_state = drm_atomic_get_crtc_state(state, crtc);
6357 if (IS_ERR(new_crtc_state))
6358 return PTR_ERR(new_crtc_state);
6361 * DC considers the stream backends changed if the
6362 * static metadata changes. Forcing the modeset also
6363 * gives a simple way for userspace to switch from
6364 * 8bpc to 10bpc when setting the metadata to enter
6367 * Changing the static metadata after it's been
6368 * set is permissible, however. So only force a
6369 * modeset if we're entering or exiting HDR.
6371 new_crtc_state->mode_changed =
6372 !old_con_state->hdr_output_metadata ||
6373 !new_con_state->hdr_output_metadata;
6379 static const struct drm_connector_helper_funcs
6380 amdgpu_dm_connector_helper_funcs = {
6382 * If hotplugging a second, bigger display in FB console mode, the bigger
6383 * resolution modes will be filtered by drm_mode_validate_size(), and those
6384 * modes are missing after the user starts lightdm. So we need to renew the
6385 * modes list in the get_modes callback, not just return the modes count.
6387 .get_modes = get_modes,
6388 .mode_valid = amdgpu_dm_connector_mode_valid,
6389 .atomic_check = amdgpu_dm_connector_atomic_check,
6392 static void dm_crtc_helper_disable(struct drm_crtc *crtc)
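/*
 * Count the enabled, non-cursor planes on the CRTC. A plane counts as
 * enabled only when it has a framebuffer attached; planes without a new
 * state in this commit are assumed to have kept their previous,
 * validated configuration.
 */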
6396 static int count_crtc_active_planes(struct drm_crtc_state *new_crtc_state)
6398 struct drm_atomic_state *state = new_crtc_state->state;
6399 struct drm_plane *plane;
6402 drm_for_each_plane_mask(plane, state->dev, new_crtc_state->plane_mask) {
6403 struct drm_plane_state *new_plane_state;
6405 /* Cursor planes are "fake". */
6406 if (plane->type == DRM_PLANE_TYPE_CURSOR)
6409 new_plane_state = drm_atomic_get_new_plane_state(state, plane);
6411 if (!new_plane_state) {
6413 * The plane is enabled on the CRTC and hasn't changed
6414 * state. This means that it previously passed
6415 * validation and is therefore enabled.
6421 /* We need a framebuffer to be considered enabled. */
6422 num_active += (new_plane_state->fb != NULL);
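/*
 * Recompute and cache the active (non-cursor) plane count in the DM CRTC
 * state; the count stays at zero when the CRTC has no stream.
 */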
6428 static void dm_update_crtc_active_planes(struct drm_crtc *crtc,
6429 struct drm_crtc_state *new_crtc_state)
6431 struct dm_crtc_state *dm_new_crtc_state =
6432 to_dm_crtc_state(new_crtc_state);
6434 dm_new_crtc_state->active_planes = 0;
6436 if (!dm_new_crtc_state->stream)
6439 dm_new_crtc_state->active_planes =
6440 count_crtc_active_planes(new_crtc_state);
6443 static int dm_crtc_helper_atomic_check(struct drm_crtc *crtc,
6444 struct drm_atomic_state *state)
6446 struct drm_crtc_state *crtc_state = drm_atomic_get_new_crtc_state(state,
6448 struct amdgpu_device *adev = drm_to_adev(crtc->dev);
6449 struct dc *dc = adev->dm.dc;
6450 struct dm_crtc_state *dm_crtc_state = to_dm_crtc_state(crtc_state);
6453 trace_amdgpu_dm_crtc_atomic_check(crtc_state);
6455 dm_update_crtc_active_planes(crtc, crtc_state);
6457 if (unlikely(!dm_crtc_state->stream &&
6458 modeset_required(crtc_state, NULL, dm_crtc_state->stream))) {
6464 * We require the primary plane to be enabled whenever the CRTC is, otherwise
6465 * drm_mode_cursor_universal may end up trying to enable the cursor plane while all other
6466 * planes are disabled, which is not supported by the hardware. And there is legacy
6467 * userspace which stops using the HW cursor altogether in response to the resulting EINVAL.
6469 if (crtc_state->enable &&
6470 !(crtc_state->plane_mask & drm_plane_mask(crtc->primary))) {
6471 DRM_DEBUG_ATOMIC("Can't enable a CRTC without enabling the primary plane\n");
6475 /* In some use cases, like reset, no stream is attached */
6476 if (!dm_crtc_state->stream)
6479 if (dc_validate_stream(dc, dm_crtc_state->stream) == DC_OK)
6482 DRM_DEBUG_ATOMIC("Failed DC stream validation\n");
6486 static bool dm_crtc_helper_mode_fixup(struct drm_crtc *crtc,
6487 const struct drm_display_mode *mode,
6488 struct drm_display_mode *adjusted_mode)
6493 static const struct drm_crtc_helper_funcs amdgpu_dm_crtc_helper_funcs = {
6494 .disable = dm_crtc_helper_disable,
6495 .atomic_check = dm_crtc_helper_atomic_check,
6496 .mode_fixup = dm_crtc_helper_mode_fixup,
6497 .get_scanout_position = amdgpu_crtc_get_scanout_position,
6500 static void dm_encoder_helper_disable(struct drm_encoder *encoder)
6505 static int convert_dc_color_depth_into_bpc(enum dc_color_depth display_color_depth)
6507 switch (display_color_depth) {
6508 case COLOR_DEPTH_666:
6510 case COLOR_DEPTH_888:
6512 case COLOR_DEPTH_101010:
6514 case COLOR_DEPTH_121212:
6516 case COLOR_DEPTH_141414:
6518 case COLOR_DEPTH_161616:
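/*
 * For DP MST connectors, derive the stream bandwidth (PBN) from the
 * adjusted mode's pixel clock and colour depth and reserve VCPI slots
 * on the MST topology manager during atomic check.
 */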
6526 static int dm_encoder_helper_atomic_check(struct drm_encoder *encoder,
6527 struct drm_crtc_state *crtc_state,
6528 struct drm_connector_state *conn_state)
6530 struct drm_atomic_state *state = crtc_state->state;
6531 struct drm_connector *connector = conn_state->connector;
6532 struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
6533 struct dm_connector_state *dm_new_connector_state = to_dm_connector_state(conn_state);
6534 const struct drm_display_mode *adjusted_mode = &crtc_state->adjusted_mode;
6535 struct drm_dp_mst_topology_mgr *mst_mgr;
6536 struct drm_dp_mst_port *mst_port;
6537 enum dc_color_depth color_depth;
6539 bool is_y420 = false;
6541 if (!aconnector->port || !aconnector->dc_sink)
6544 mst_port = aconnector->port;
6545 mst_mgr = &aconnector->mst_port->mst_mgr;
6547 if (!crtc_state->connectors_changed && !crtc_state->mode_changed)
6550 if (!state->duplicated) {
6551 int max_bpc = conn_state->max_requested_bpc;
6552 is_y420 = drm_mode_is_420_also(&connector->display_info, adjusted_mode) &&
6553 aconnector->force_yuv420_output;
6554 color_depth = convert_color_depth_from_display_info(connector,
6557 bpp = convert_dc_color_depth_into_bpc(color_depth) * 3;
6558 clock = adjusted_mode->clock;
6559 dm_new_connector_state->pbn = drm_dp_calc_pbn_mode(clock, bpp, false);
6561 dm_new_connector_state->vcpi_slots = drm_dp_atomic_find_vcpi_slots(state,
6564 dm_new_connector_state->pbn,
6565 dm_mst_get_pbn_divider(aconnector->dc_link));
6566 if (dm_new_connector_state->vcpi_slots < 0) {
6567 DRM_DEBUG_ATOMIC("failed finding vcpi slots: %d\n", (int)dm_new_connector_state->vcpi_slots);
6568 return dm_new_connector_state->vcpi_slots;
6573 const struct drm_encoder_helper_funcs amdgpu_dm_encoder_helper_funcs = {
6574 .disable = dm_encoder_helper_disable,
6575 .atomic_check = dm_encoder_helper_atomic_check
6578 #if defined(CONFIG_DRM_AMD_DC_DCN)
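/*
 * For MST streams that have DSC enabled, recompute PBN and VCPI slots from
 * the DSC bits-per-pixel and pixel clock, and store the result back into
 * the DM connector state.
 */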
6579 static int dm_update_mst_vcpi_slots_for_dsc(struct drm_atomic_state *state,
6580 struct dc_state *dc_state)
6582 struct dc_stream_state *stream = NULL;
6583 struct drm_connector *connector;
6584 struct drm_connector_state *new_con_state;
6585 struct amdgpu_dm_connector *aconnector;
6586 struct dm_connector_state *dm_conn_state;
6587 int i, j, clock, bpp;
6588 int vcpi, pbn_div, pbn = 0;
6590 for_each_new_connector_in_state(state, connector, new_con_state, i) {
6592 aconnector = to_amdgpu_dm_connector(connector);
6594 if (!aconnector->port)
6597 if (!new_con_state || !new_con_state->crtc)
6600 dm_conn_state = to_dm_connector_state(new_con_state);
6602 for (j = 0; j < dc_state->stream_count; j++) {
6603 stream = dc_state->streams[j];
6607 if ((struct amdgpu_dm_connector*)stream->dm_stream_context == aconnector)
6616 if (stream->timing.flags.DSC != 1) {
6617 drm_dp_mst_atomic_enable_dsc(state,
6625 pbn_div = dm_mst_get_pbn_divider(stream->link);
6626 bpp = stream->timing.dsc_cfg.bits_per_pixel;
6627 clock = stream->timing.pix_clk_100hz / 10;
6628 pbn = drm_dp_calc_pbn_mode(clock, bpp, true);
6629 vcpi = drm_dp_mst_atomic_enable_dsc(state,
6636 dm_conn_state->pbn = pbn;
6637 dm_conn_state->vcpi_slots = vcpi;
6643 static void dm_drm_plane_reset(struct drm_plane *plane)
6645 struct dm_plane_state *amdgpu_state = NULL;
6648 plane->funcs->atomic_destroy_state(plane, plane->state);
6650 amdgpu_state = kzalloc(sizeof(*amdgpu_state), GFP_KERNEL);
6651 WARN_ON(amdgpu_state == NULL);
6654 __drm_atomic_helper_plane_reset(plane, &amdgpu_state->base);
6657 static struct drm_plane_state *
6658 dm_drm_plane_duplicate_state(struct drm_plane *plane)
6660 struct dm_plane_state *dm_plane_state, *old_dm_plane_state;
6662 old_dm_plane_state = to_dm_plane_state(plane->state);
6663 dm_plane_state = kzalloc(sizeof(*dm_plane_state), GFP_KERNEL);
6664 if (!dm_plane_state)
6667 __drm_atomic_helper_plane_duplicate_state(plane, &dm_plane_state->base);
6669 if (old_dm_plane_state->dc_state) {
6670 dm_plane_state->dc_state = old_dm_plane_state->dc_state;
6671 dc_plane_state_retain(dm_plane_state->dc_state);
6674 return &dm_plane_state->base;
6677 static void dm_drm_plane_destroy_state(struct drm_plane *plane,
6678 struct drm_plane_state *state)
6680 struct dm_plane_state *dm_plane_state = to_dm_plane_state(state);
6682 if (dm_plane_state->dc_state)
6683 dc_plane_state_release(dm_plane_state->dc_state);
6685 drm_atomic_helper_plane_destroy_state(plane, state);
6688 static const struct drm_plane_funcs dm_plane_funcs = {
6689 .update_plane = drm_atomic_helper_update_plane,
6690 .disable_plane = drm_atomic_helper_disable_plane,
6691 .destroy = drm_primary_helper_destroy,
6692 .reset = dm_drm_plane_reset,
6693 .atomic_duplicate_state = dm_drm_plane_duplicate_state,
6694 .atomic_destroy_state = dm_drm_plane_destroy_state,
6695 .format_mod_supported = dm_plane_format_mod_supported,
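/*
 * Pin the framebuffer BO in a domain the display hardware can scan out
 * from, map it into GART, and record the resulting GPU address in the
 * amdgpu_framebuffer. For newly created planes the DC plane state's
 * buffer attributes are filled in here as well.
 */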
6698 static int dm_plane_helper_prepare_fb(struct drm_plane *plane,
6699 struct drm_plane_state *new_state)
6701 struct amdgpu_framebuffer *afb;
6702 struct drm_gem_object *obj;
6703 struct amdgpu_device *adev;
6704 struct amdgpu_bo *rbo;
6705 struct dm_plane_state *dm_plane_state_new, *dm_plane_state_old;
6706 struct list_head list;
6707 struct ttm_validate_buffer tv;
6708 struct ww_acquire_ctx ticket;
6712 if (!new_state->fb) {
6713 DRM_DEBUG_KMS("No FB bound\n");
6717 afb = to_amdgpu_framebuffer(new_state->fb);
6718 obj = new_state->fb->obj[0];
6719 rbo = gem_to_amdgpu_bo(obj);
6720 adev = amdgpu_ttm_adev(rbo->tbo.bdev);
6721 INIT_LIST_HEAD(&list);
6725 list_add(&tv.head, &list);
6727 r = ttm_eu_reserve_buffers(&ticket, &list, false, NULL);
6729 dev_err(adev->dev, "fail to reserve bo (%d)\n", r);
6733 if (plane->type != DRM_PLANE_TYPE_CURSOR)
6734 domain = amdgpu_display_supported_domains(adev, rbo->flags);
6736 domain = AMDGPU_GEM_DOMAIN_VRAM;
6738 r = amdgpu_bo_pin(rbo, domain);
6739 if (unlikely(r != 0)) {
6740 if (r != -ERESTARTSYS)
6741 DRM_ERROR("Failed to pin framebuffer with error %d\n", r);
6742 ttm_eu_backoff_reservation(&ticket, &list);
6746 r = amdgpu_ttm_alloc_gart(&rbo->tbo);
6747 if (unlikely(r != 0)) {
6748 amdgpu_bo_unpin(rbo);
6749 ttm_eu_backoff_reservation(&ticket, &list);
6750 DRM_ERROR("%p bind failed\n", rbo);
6754 ttm_eu_backoff_reservation(&ticket, &list);
6756 afb->address = amdgpu_bo_gpu_offset(rbo);
6761 * We don't do surface updates on planes that have been newly created,
6762 * but we also don't have the afb->address during atomic check.
6764 * Fill in buffer attributes depending on the address here, but only on
6765 * newly created planes since they're not being used by DC yet and this
6766 * won't modify global state.
6768 dm_plane_state_old = to_dm_plane_state(plane->state);
6769 dm_plane_state_new = to_dm_plane_state(new_state);
6771 if (dm_plane_state_new->dc_state &&
6772 dm_plane_state_old->dc_state != dm_plane_state_new->dc_state) {
6773 struct dc_plane_state *plane_state =
6774 dm_plane_state_new->dc_state;
6775 bool force_disable_dcc = !plane_state->dcc.enable;
6777 fill_plane_buffer_attributes(
6778 adev, afb, plane_state->format, plane_state->rotation,
6780 &plane_state->tiling_info, &plane_state->plane_size,
6781 &plane_state->dcc, &plane_state->address,
6782 afb->tmz_surface, force_disable_dcc);
6788 static void dm_plane_helper_cleanup_fb(struct drm_plane *plane,
6789 struct drm_plane_state *old_state)
6791 struct amdgpu_bo *rbo;
6797 rbo = gem_to_amdgpu_bo(old_state->fb->obj[0]);
6798 r = amdgpu_bo_reserve(rbo, false);
6800 DRM_ERROR("failed to reserve rbo before unpin\n");
6804 amdgpu_bo_unpin(rbo);
6805 amdgpu_bo_unreserve(rbo);
6806 amdgpu_bo_unref(&rbo);
6809 static int dm_plane_helper_check_state(struct drm_plane_state *state,
6810 struct drm_crtc_state *new_crtc_state)
6812 struct drm_framebuffer *fb = state->fb;
6813 int min_downscale, max_upscale;
6815 int max_scale = INT_MAX;
6817 /* Plane enabled? Validate viewport and get scaling factors from plane caps. */
6818 if (fb && state->crtc) {
6819 /* Validate viewport to cover the case when only the position changes */
6820 if (state->plane->type != DRM_PLANE_TYPE_CURSOR) {
6821 int viewport_width = state->crtc_w;
6822 int viewport_height = state->crtc_h;
6824 if (state->crtc_x < 0)
6825 viewport_width += state->crtc_x;
6826 else if (state->crtc_x + state->crtc_w > new_crtc_state->mode.crtc_hdisplay)
6827 viewport_width = new_crtc_state->mode.crtc_hdisplay - state->crtc_x;
6829 if (state->crtc_y < 0)
6830 viewport_height += state->crtc_y;
6831 else if (state->crtc_y + state->crtc_h > new_crtc_state->mode.crtc_vdisplay)
6832 viewport_height = new_crtc_state->mode.crtc_vdisplay - state->crtc_y;
6834 if (viewport_width < 0 || viewport_height < 0) {
6835 DRM_DEBUG_ATOMIC("Plane completely outside of screen\n");
6837 } else if (viewport_width < MIN_VIEWPORT_SIZE*2) { /* x2 for width is because of pipe-split. */
6838 DRM_DEBUG_ATOMIC("Viewport width %d smaller than %d\n", viewport_width, MIN_VIEWPORT_SIZE*2);
6840 } else if (viewport_height < MIN_VIEWPORT_SIZE) {
6841 DRM_DEBUG_ATOMIC("Viewport height %d smaller than %d\n", viewport_height, MIN_VIEWPORT_SIZE);
6847 /* Get min/max allowed scaling factors from plane caps. */
6848 get_min_max_dc_plane_scaling(state->crtc->dev, fb,
6849 &min_downscale, &max_upscale);
6851 * Convert to drm convention: 16.16 fixed point, instead of dc's
6852 * 1.0 == 1000. Also drm scaling is src/dst instead of dc's
6853 * dst/src, so min_scale = 1.0 / max_upscale, etc.
6855 min_scale = (1000 << 16) / max_upscale;
6856 max_scale = (1000 << 16) / min_downscale;
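/*
 * For example, with a hypothetical max_upscale of 16000 (16x in DC's
 * 1.0 == 1000 convention), min_scale = (1000 << 16) / 16000 = 4096,
 * which is 1/16 in drm's 16.16 fixed-point convention.
 */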
6859 return drm_atomic_helper_check_plane_state(
6860 state, new_crtc_state, min_scale, max_scale, true, true);
6863 static int dm_plane_atomic_check(struct drm_plane *plane,
6864 struct drm_atomic_state *state)
6866 struct drm_plane_state *new_plane_state = drm_atomic_get_new_plane_state(state,
6868 struct amdgpu_device *adev = drm_to_adev(plane->dev);
6869 struct dc *dc = adev->dm.dc;
6870 struct dm_plane_state *dm_plane_state;
6871 struct dc_scaling_info scaling_info;
6872 struct drm_crtc_state *new_crtc_state;
6875 trace_amdgpu_dm_plane_atomic_check(new_plane_state);
6877 dm_plane_state = to_dm_plane_state(new_plane_state);
6879 if (!dm_plane_state->dc_state)
6883 drm_atomic_get_new_crtc_state(state,
6884 new_plane_state->crtc);
6885 if (!new_crtc_state)
6888 ret = dm_plane_helper_check_state(new_plane_state, new_crtc_state);
6892 ret = fill_dc_scaling_info(new_plane_state, &scaling_info);
6896 if (dc_validate_plane(dc, dm_plane_state->dc_state) == DC_OK)
6902 static int dm_plane_atomic_async_check(struct drm_plane *plane,
6903 struct drm_atomic_state *state)
6905 /* Only support async updates on cursor planes. */
6906 if (plane->type != DRM_PLANE_TYPE_CURSOR)
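/*
 * Async path for cursor moves: copy the new source/destination rectangles
 * and fb into the committed plane state and program the cursor right away
 * instead of going through a full atomic commit.
 */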
6912 static void dm_plane_atomic_async_update(struct drm_plane *plane,
6913 struct drm_atomic_state *state)
6915 struct drm_plane_state *new_state = drm_atomic_get_new_plane_state(state,
6917 struct drm_plane_state *old_state =
6918 drm_atomic_get_old_plane_state(state, plane);
6920 trace_amdgpu_dm_atomic_update_cursor(new_state);
6922 swap(plane->state->fb, new_state->fb);
6924 plane->state->src_x = new_state->src_x;
6925 plane->state->src_y = new_state->src_y;
6926 plane->state->src_w = new_state->src_w;
6927 plane->state->src_h = new_state->src_h;
6928 plane->state->crtc_x = new_state->crtc_x;
6929 plane->state->crtc_y = new_state->crtc_y;
6930 plane->state->crtc_w = new_state->crtc_w;
6931 plane->state->crtc_h = new_state->crtc_h;
6933 handle_cursor_update(plane, old_state);
6936 static const struct drm_plane_helper_funcs dm_plane_helper_funcs = {
6937 .prepare_fb = dm_plane_helper_prepare_fb,
6938 .cleanup_fb = dm_plane_helper_cleanup_fb,
6939 .atomic_check = dm_plane_atomic_check,
6940 .atomic_async_check = dm_plane_atomic_async_check,
6941 .atomic_async_update = dm_plane_atomic_async_update
6945 * TODO: these are currently initialized to RGB formats only.
6946 * For future use cases we should either initialize them dynamically based on
6947 * plane capabilities, or initialize this array to all formats, so the internal
6948 * drm check will succeed, and let DC implement the proper check.
6950 static const uint32_t rgb_formats[] = {
6951 DRM_FORMAT_XRGB8888,
6952 DRM_FORMAT_ARGB8888,
6953 DRM_FORMAT_RGBA8888,
6954 DRM_FORMAT_XRGB2101010,
6955 DRM_FORMAT_XBGR2101010,
6956 DRM_FORMAT_ARGB2101010,
6957 DRM_FORMAT_ABGR2101010,
6958 DRM_FORMAT_XBGR8888,
6959 DRM_FORMAT_ABGR8888,
6963 static const uint32_t overlay_formats[] = {
6964 DRM_FORMAT_XRGB8888,
6965 DRM_FORMAT_ARGB8888,
6966 DRM_FORMAT_RGBA8888,
6967 DRM_FORMAT_XBGR8888,
6968 DRM_FORMAT_ABGR8888,
6972 static const u32 cursor_formats[] = {
6976 static int get_plane_formats(const struct drm_plane *plane,
6977 const struct dc_plane_cap *plane_cap,
6978 uint32_t *formats, int max_formats)
6980 int i, num_formats = 0;
6983 * TODO: Query support for each group of formats directly from
6984 * DC plane caps. This will require adding more formats to the
6988 switch (plane->type) {
6989 case DRM_PLANE_TYPE_PRIMARY:
6990 for (i = 0; i < ARRAY_SIZE(rgb_formats); ++i) {
6991 if (num_formats >= max_formats)
6994 formats[num_formats++] = rgb_formats[i];
6997 if (plane_cap && plane_cap->pixel_format_support.nv12)
6998 formats[num_formats++] = DRM_FORMAT_NV12;
6999 if (plane_cap && plane_cap->pixel_format_support.p010)
7000 formats[num_formats++] = DRM_FORMAT_P010;
7001 if (plane_cap && plane_cap->pixel_format_support.fp16) {
7002 formats[num_formats++] = DRM_FORMAT_XRGB16161616F;
7003 formats[num_formats++] = DRM_FORMAT_ARGB16161616F;
7004 formats[num_formats++] = DRM_FORMAT_XBGR16161616F;
7005 formats[num_formats++] = DRM_FORMAT_ABGR16161616F;
7009 case DRM_PLANE_TYPE_OVERLAY:
7010 for (i = 0; i < ARRAY_SIZE(overlay_formats); ++i) {
7011 if (num_formats >= max_formats)
7014 formats[num_formats++] = overlay_formats[i];
7018 case DRM_PLANE_TYPE_CURSOR:
7019 for (i = 0; i < ARRAY_SIZE(cursor_formats); ++i) {
7020 if (num_formats >= max_formats)
7023 formats[num_formats++] = cursor_formats[i];
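/*
 * Initialize a DRM plane backed by DC: collect the supported formats and
 * modifiers, register the plane, and attach the blending, colour encoding
 * and rotation properties that the plane capabilities allow.
 */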
7031 static int amdgpu_dm_plane_init(struct amdgpu_display_manager *dm,
7032 struct drm_plane *plane,
7033 unsigned long possible_crtcs,
7034 const struct dc_plane_cap *plane_cap)
7036 uint32_t formats[32];
7039 unsigned int supported_rotations;
7040 uint64_t *modifiers = NULL;
7042 num_formats = get_plane_formats(plane, plane_cap, formats,
7043 ARRAY_SIZE(formats));
7045 res = get_plane_modifiers(dm->adev, plane->type, &modifiers);
7049 res = drm_universal_plane_init(adev_to_drm(dm->adev), plane, possible_crtcs,
7050 &dm_plane_funcs, formats, num_formats,
7051 modifiers, plane->type, NULL);
7056 if (plane->type == DRM_PLANE_TYPE_OVERLAY &&
7057 plane_cap && plane_cap->per_pixel_alpha) {
7058 unsigned int blend_caps = BIT(DRM_MODE_BLEND_PIXEL_NONE) |
7059 BIT(DRM_MODE_BLEND_PREMULTI);
7061 drm_plane_create_alpha_property(plane);
7062 drm_plane_create_blend_mode_property(plane, blend_caps);
7065 if (plane->type == DRM_PLANE_TYPE_PRIMARY &&
7067 (plane_cap->pixel_format_support.nv12 ||
7068 plane_cap->pixel_format_support.p010)) {
7069 /* This only affects YUV formats. */
7070 drm_plane_create_color_properties(
7072 BIT(DRM_COLOR_YCBCR_BT601) |
7073 BIT(DRM_COLOR_YCBCR_BT709) |
7074 BIT(DRM_COLOR_YCBCR_BT2020),
7075 BIT(DRM_COLOR_YCBCR_LIMITED_RANGE) |
7076 BIT(DRM_COLOR_YCBCR_FULL_RANGE),
7077 DRM_COLOR_YCBCR_BT709, DRM_COLOR_YCBCR_LIMITED_RANGE);
7080 supported_rotations =
7081 DRM_MODE_ROTATE_0 | DRM_MODE_ROTATE_90 |
7082 DRM_MODE_ROTATE_180 | DRM_MODE_ROTATE_270;
7084 if (dm->adev->asic_type >= CHIP_BONAIRE &&
7085 plane->type != DRM_PLANE_TYPE_CURSOR)
7086 drm_plane_create_rotation_property(plane, DRM_MODE_ROTATE_0,
7087 supported_rotations);
7089 drm_plane_helper_add(plane, &dm_plane_helper_funcs);
7091 /* Create (reset) the plane state */
7092 if (plane->funcs->reset)
7093 plane->funcs->reset(plane);
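/*
 * Create a CRTC together with a dedicated cursor plane, hook up the DM CRTC
 * helpers, and advertise the DC colour management and cursor size limits.
 */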
7098 static int amdgpu_dm_crtc_init(struct amdgpu_display_manager *dm,
7099 struct drm_plane *plane,
7100 uint32_t crtc_index)
7102 struct amdgpu_crtc *acrtc = NULL;
7103 struct drm_plane *cursor_plane;
7107 cursor_plane = kzalloc(sizeof(*cursor_plane), GFP_KERNEL);
7111 cursor_plane->type = DRM_PLANE_TYPE_CURSOR;
7112 res = amdgpu_dm_plane_init(dm, cursor_plane, 0, NULL);
7114 acrtc = kzalloc(sizeof(struct amdgpu_crtc), GFP_KERNEL);
7118 res = drm_crtc_init_with_planes(
7123 &amdgpu_dm_crtc_funcs, NULL);
7128 drm_crtc_helper_add(&acrtc->base, &amdgpu_dm_crtc_helper_funcs);
7130 /* Create (reset) the CRTC state */
7131 if (acrtc->base.funcs->reset)
7132 acrtc->base.funcs->reset(&acrtc->base);
7134 acrtc->max_cursor_width = dm->adev->dm.dc->caps.max_cursor_size;
7135 acrtc->max_cursor_height = dm->adev->dm.dc->caps.max_cursor_size;
7137 acrtc->crtc_id = crtc_index;
7138 acrtc->base.enabled = false;
7139 acrtc->otg_inst = -1;
7141 dm->adev->mode_info.crtcs[crtc_index] = acrtc;
7142 drm_crtc_enable_color_mgmt(&acrtc->base, MAX_COLOR_LUT_ENTRIES,
7143 true, MAX_COLOR_LUT_ENTRIES);
7144 drm_mode_crtc_set_gamma_size(&acrtc->base, MAX_COLOR_LEGACY_LUT_ENTRIES);
7150 kfree(cursor_plane);
7155 static int to_drm_connector_type(enum signal_type st)
7158 case SIGNAL_TYPE_HDMI_TYPE_A:
7159 return DRM_MODE_CONNECTOR_HDMIA;
7160 case SIGNAL_TYPE_EDP:
7161 return DRM_MODE_CONNECTOR_eDP;
7162 case SIGNAL_TYPE_LVDS:
7163 return DRM_MODE_CONNECTOR_LVDS;
7164 case SIGNAL_TYPE_RGB:
7165 return DRM_MODE_CONNECTOR_VGA;
7166 case SIGNAL_TYPE_DISPLAY_PORT:
7167 case SIGNAL_TYPE_DISPLAY_PORT_MST:
7168 return DRM_MODE_CONNECTOR_DisplayPort;
7169 case SIGNAL_TYPE_DVI_DUAL_LINK:
7170 case SIGNAL_TYPE_DVI_SINGLE_LINK:
7171 return DRM_MODE_CONNECTOR_DVID;
7172 case SIGNAL_TYPE_VIRTUAL:
7173 return DRM_MODE_CONNECTOR_VIRTUAL;
7176 return DRM_MODE_CONNECTOR_Unknown;
7180 static struct drm_encoder *amdgpu_dm_connector_to_encoder(struct drm_connector *connector)
7182 struct drm_encoder *encoder;
7184 /* There is only one encoder per connector */
7185 drm_connector_for_each_possible_encoder(connector, encoder)
7191 static void amdgpu_dm_get_native_mode(struct drm_connector *connector)
7193 struct drm_encoder *encoder;
7194 struct amdgpu_encoder *amdgpu_encoder;
7196 encoder = amdgpu_dm_connector_to_encoder(connector);
7198 if (encoder == NULL)
7201 amdgpu_encoder = to_amdgpu_encoder(encoder);
7203 amdgpu_encoder->native_mode.clock = 0;
7205 if (!list_empty(&connector->probed_modes)) {
7206 struct drm_display_mode *preferred_mode = NULL;
7208 list_for_each_entry(preferred_mode,
7209 &connector->probed_modes,
7211 if (preferred_mode->type & DRM_MODE_TYPE_PREFERRED)
7212 amdgpu_encoder->native_mode = *preferred_mode;
7220 static struct drm_display_mode *
7221 amdgpu_dm_create_common_mode(struct drm_encoder *encoder,
7223 int hdisplay, int vdisplay)
7225 struct drm_device *dev = encoder->dev;
7226 struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
7227 struct drm_display_mode *mode = NULL;
7228 struct drm_display_mode *native_mode = &amdgpu_encoder->native_mode;
7230 mode = drm_mode_duplicate(dev, native_mode);
7235 mode->hdisplay = hdisplay;
7236 mode->vdisplay = vdisplay;
7237 mode->type &= ~DRM_MODE_TYPE_PREFERRED;
7238 strscpy(mode->name, name, DRM_DISPLAY_MODE_LEN);
7244 static void amdgpu_dm_connector_add_common_modes(struct drm_encoder *encoder,
7245 struct drm_connector *connector)
7247 struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
7248 struct drm_display_mode *mode = NULL;
7249 struct drm_display_mode *native_mode = &amdgpu_encoder->native_mode;
7250 struct amdgpu_dm_connector *amdgpu_dm_connector =
7251 to_amdgpu_dm_connector(connector);
7255 char name[DRM_DISPLAY_MODE_LEN];
7258 } common_modes[] = {
7259 { "640x480", 640, 480},
7260 { "800x600", 800, 600},
7261 { "1024x768", 1024, 768},
7262 { "1280x720", 1280, 720},
7263 { "1280x800", 1280, 800},
7264 {"1280x1024", 1280, 1024},
7265 { "1440x900", 1440, 900},
7266 {"1680x1050", 1680, 1050},
7267 {"1600x1200", 1600, 1200},
7268 {"1920x1080", 1920, 1080},
7269 {"1920x1200", 1920, 1200}
7272 n = ARRAY_SIZE(common_modes);
7274 for (i = 0; i < n; i++) {
7275 struct drm_display_mode *curmode = NULL;
7276 bool mode_existed = false;
7278 if (common_modes[i].w > native_mode->hdisplay ||
7279 common_modes[i].h > native_mode->vdisplay ||
7280 (common_modes[i].w == native_mode->hdisplay &&
7281 common_modes[i].h == native_mode->vdisplay))
7284 list_for_each_entry(curmode, &connector->probed_modes, head) {
7285 if (common_modes[i].w == curmode->hdisplay &&
7286 common_modes[i].h == curmode->vdisplay) {
7287 mode_existed = true;
7295 mode = amdgpu_dm_create_common_mode(encoder,
7296 common_modes[i].name, common_modes[i].w,
7298 drm_mode_probed_add(connector, mode);
7299 amdgpu_dm_connector->num_modes++;
7303 static void amdgpu_dm_connector_ddc_get_modes(struct drm_connector *connector,
7306 struct amdgpu_dm_connector *amdgpu_dm_connector =
7307 to_amdgpu_dm_connector(connector);
7310 /* empty probed_modes */
7311 INIT_LIST_HEAD(&connector->probed_modes);
7312 amdgpu_dm_connector->num_modes =
7313 drm_add_edid_modes(connector, edid);
7315 /* Sort the probed modes before calling
7316 * amdgpu_dm_get_native_mode(), since the EDID can have
7317 * more than one preferred mode. Modes that appear
7318 * later in the probed mode list could have a higher
7319 * preferred resolution; for example, a 3840x2160
7320 * preferred timing in the base EDID and a 4096x2160
7321 * preferred resolution in a DID extension block later.
7323 drm_mode_sort(&connector->probed_modes);
7324 amdgpu_dm_get_native_mode(connector);
7326 /* Freesync capabilities are reset by calling
7327 * drm_add_edid_modes() and need to be restored here.
7330 amdgpu_dm_update_freesync_caps(connector, edid);
7332 amdgpu_dm_connector->num_modes = 0;
7336 static bool is_duplicate_mode(struct amdgpu_dm_connector *aconnector,
7337 struct drm_display_mode *mode)
7339 struct drm_display_mode *m;
7341 list_for_each_entry (m, &aconnector->base.probed_modes, head) {
7342 if (drm_mode_equal(m, mode))
7349 static uint add_fs_modes(struct amdgpu_dm_connector *aconnector)
7351 const struct drm_display_mode *m;
7352 struct drm_display_mode *new_mode;
7354 uint32_t new_modes_count = 0;
7356 /* Standard FPS values
7365 * 60 - Commonly used
7366 * 48,72,96 - Multiples of 24
7368 const uint32_t common_rates[] = { 23976, 24000, 25000, 29970, 30000,
7369 48000, 50000, 60000, 72000, 96000 };
7372 * Find the mode with the highest refresh rate at the same resolution
7373 * as the preferred mode. Some monitors report a preferred mode with a
7374 * lower refresh rate than the highest one they support.
7377 m = get_highest_refresh_rate_mode(aconnector, true);
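/*
 * For each candidate rate, stretch the preferred mode's vertical total so
 * the refresh rate matches while pixel clock and htotal stay unchanged:
 * vtotal = (clock_kHz * 1000 * 1000) / (rate_mHz * htotal).
 */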
7381 for (i = 0; i < ARRAY_SIZE(common_rates); i++) {
7382 uint64_t target_vtotal, target_vtotal_diff;
7385 if (drm_mode_vrefresh(m) * 1000 < common_rates[i])
7388 if (common_rates[i] < aconnector->min_vfreq * 1000 ||
7389 common_rates[i] > aconnector->max_vfreq * 1000)
7392 num = (unsigned long long)m->clock * 1000 * 1000;
7393 den = common_rates[i] * (unsigned long long)m->htotal;
7394 target_vtotal = div_u64(num, den);
7395 target_vtotal_diff = target_vtotal - m->vtotal;
7397 /* Check for illegal modes */
7398 if (m->vsync_start + target_vtotal_diff < m->vdisplay ||
7399 m->vsync_end + target_vtotal_diff < m->vsync_start ||
7400 m->vtotal + target_vtotal_diff < m->vsync_end)
7403 new_mode = drm_mode_duplicate(aconnector->base.dev, m);
7407 new_mode->vtotal += (u16)target_vtotal_diff;
7408 new_mode->vsync_start += (u16)target_vtotal_diff;
7409 new_mode->vsync_end += (u16)target_vtotal_diff;
7410 new_mode->type &= ~DRM_MODE_TYPE_PREFERRED;
7411 new_mode->type |= DRM_MODE_TYPE_DRIVER;
7413 if (!is_duplicate_mode(aconnector, new_mode)) {
7414 drm_mode_probed_add(&aconnector->base, new_mode);
7415 new_modes_count += 1;
7417 drm_mode_destroy(aconnector->base.dev, new_mode);
7420 return new_modes_count;
7423 static void amdgpu_dm_connector_add_freesync_modes(struct drm_connector *connector,
7426 struct amdgpu_dm_connector *amdgpu_dm_connector =
7427 to_amdgpu_dm_connector(connector);
7429 if (!(amdgpu_freesync_vid_mode && edid))
7432 if (amdgpu_dm_connector->max_vfreq - amdgpu_dm_connector->min_vfreq > 10)
7433 amdgpu_dm_connector->num_modes +=
7434 add_fs_modes(amdgpu_dm_connector);
7437 static int amdgpu_dm_connector_get_modes(struct drm_connector *connector)
7439 struct amdgpu_dm_connector *amdgpu_dm_connector =
7440 to_amdgpu_dm_connector(connector);
7441 struct drm_encoder *encoder;
7442 struct edid *edid = amdgpu_dm_connector->edid;
7444 encoder = amdgpu_dm_connector_to_encoder(connector);
7446 if (!drm_edid_is_valid(edid)) {
7447 amdgpu_dm_connector->num_modes =
7448 drm_add_modes_noedid(connector, 640, 480);
7450 amdgpu_dm_connector_ddc_get_modes(connector, edid);
7451 amdgpu_dm_connector_add_common_modes(encoder, connector);
7452 amdgpu_dm_connector_add_freesync_modes(connector, edid);
7454 amdgpu_dm_fbc_init(connector);
7456 return amdgpu_dm_connector->num_modes;
7459 void amdgpu_dm_connector_init_helper(struct amdgpu_display_manager *dm,
7460 struct amdgpu_dm_connector *aconnector,
7462 struct dc_link *link,
7465 struct amdgpu_device *adev = drm_to_adev(dm->ddev);
7468 * Some of the properties below require access to state, like bpc.
7469 * Allocate some default initial connector state with our reset helper.
7471 if (aconnector->base.funcs->reset)
7472 aconnector->base.funcs->reset(&aconnector->base);
7474 aconnector->connector_id = link_index;
7475 aconnector->dc_link = link;
7476 aconnector->base.interlace_allowed = false;
7477 aconnector->base.doublescan_allowed = false;
7478 aconnector->base.stereo_allowed = false;
7479 aconnector->base.dpms = DRM_MODE_DPMS_OFF;
7480 aconnector->hpd.hpd = AMDGPU_HPD_NONE; /* not used */
7481 aconnector->audio_inst = -1;
7482 mutex_init(&aconnector->hpd_lock);
7485 * Configure HPD hot plug support. connector->polled defaults to 0,
7486 * which means HPD hot plug is not supported.
7488 switch (connector_type) {
7489 case DRM_MODE_CONNECTOR_HDMIA:
7490 aconnector->base.polled = DRM_CONNECTOR_POLL_HPD;
7491 aconnector->base.ycbcr_420_allowed =
7492 link->link_enc->features.hdmi_ycbcr420_supported ? true : false;
7494 case DRM_MODE_CONNECTOR_DisplayPort:
7495 aconnector->base.polled = DRM_CONNECTOR_POLL_HPD;
7496 aconnector->base.ycbcr_420_allowed =
7497 link->link_enc->features.dp_ycbcr420_supported ? true : false;
7499 case DRM_MODE_CONNECTOR_DVID:
7500 aconnector->base.polled = DRM_CONNECTOR_POLL_HPD;
7506 drm_object_attach_property(&aconnector->base.base,
7507 dm->ddev->mode_config.scaling_mode_property,
7508 DRM_MODE_SCALE_NONE);
7510 drm_object_attach_property(&aconnector->base.base,
7511 adev->mode_info.underscan_property,
7513 drm_object_attach_property(&aconnector->base.base,
7514 adev->mode_info.underscan_hborder_property,
7516 drm_object_attach_property(&aconnector->base.base,
7517 adev->mode_info.underscan_vborder_property,
7520 if (!aconnector->mst_port)
7521 drm_connector_attach_max_bpc_property(&aconnector->base, 8, 16);
7523 /* This defaults to the max in the range, but we want 8bpc for non-edp. */
7524 aconnector->base.state->max_bpc = (connector_type == DRM_MODE_CONNECTOR_eDP) ? 16 : 8;
7525 aconnector->base.state->max_requested_bpc = aconnector->base.state->max_bpc;
7527 if (connector_type == DRM_MODE_CONNECTOR_eDP &&
7528 (dc_is_dmcu_initialized(adev->dm.dc) || adev->dm.dc->ctx->dmub_srv)) {
7529 drm_object_attach_property(&aconnector->base.base,
7530 adev->mode_info.abm_level_property, 0);
7533 if (connector_type == DRM_MODE_CONNECTOR_HDMIA ||
7534 connector_type == DRM_MODE_CONNECTOR_DisplayPort ||
7535 connector_type == DRM_MODE_CONNECTOR_eDP) {
7536 drm_object_attach_property(
7537 &aconnector->base.base,
7538 dm->ddev->mode_config.hdr_output_metadata_property, 0);
7540 if (!aconnector->mst_port)
7541 drm_connector_attach_vrr_capable_property(&aconnector->base);
7543 #ifdef CONFIG_DRM_AMD_DC_HDCP
7544 if (adev->dm.hdcp_workqueue)
7545 drm_connector_attach_content_protection_property(&aconnector->base, true);
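/*
 * i2c master_xfer hook: translate the drm i2c_msg array into a DC
 * i2c_command payload list and submit it on the DDC channel that backs
 * this connector.
 */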
7550 static int amdgpu_dm_i2c_xfer(struct i2c_adapter *i2c_adap,
7551 struct i2c_msg *msgs, int num)
7553 struct amdgpu_i2c_adapter *i2c = i2c_get_adapdata(i2c_adap);
7554 struct ddc_service *ddc_service = i2c->ddc_service;
7555 struct i2c_command cmd;
7559 cmd.payloads = kcalloc(num, sizeof(struct i2c_payload), GFP_KERNEL);
7564 cmd.number_of_payloads = num;
7565 cmd.engine = I2C_COMMAND_ENGINE_DEFAULT;
7568 for (i = 0; i < num; i++) {
7569 cmd.payloads[i].write = !(msgs[i].flags & I2C_M_RD);
7570 cmd.payloads[i].address = msgs[i].addr;
7571 cmd.payloads[i].length = msgs[i].len;
7572 cmd.payloads[i].data = msgs[i].buf;
7576 ddc_service->ctx->dc,
7577 ddc_service->ddc_pin->hw_info.ddc_channel,
7581 kfree(cmd.payloads);
7585 static u32 amdgpu_dm_i2c_func(struct i2c_adapter *adap)
7587 return I2C_FUNC_I2C | I2C_FUNC_SMBUS_EMUL;
7590 static const struct i2c_algorithm amdgpu_dm_i2c_algo = {
7591 .master_xfer = amdgpu_dm_i2c_xfer,
7592 .functionality = amdgpu_dm_i2c_func,
7595 static struct amdgpu_i2c_adapter *
7596 create_i2c(struct ddc_service *ddc_service,
7600 struct amdgpu_device *adev = ddc_service->ctx->driver_context;
7601 struct amdgpu_i2c_adapter *i2c;
7603 i2c = kzalloc(sizeof(struct amdgpu_i2c_adapter), GFP_KERNEL);
7606 i2c->base.owner = THIS_MODULE;
7607 i2c->base.class = I2C_CLASS_DDC;
7608 i2c->base.dev.parent = &adev->pdev->dev;
7609 i2c->base.algo = &amdgpu_dm_i2c_algo;
7610 snprintf(i2c->base.name, sizeof(i2c->base.name), "AMDGPU DM i2c hw bus %d", link_index);
7611 i2c_set_adapdata(&i2c->base, i2c);
7612 i2c->ddc_service = ddc_service;
7613 i2c->ddc_service->ddc_pin->hw_info.ddc_channel = link_index;
7620 * Note: this function assumes that dc_link_detect() was called for the
7621 * dc_link which will be represented by this aconnector.
7623 static int amdgpu_dm_connector_init(struct amdgpu_display_manager *dm,
7624 struct amdgpu_dm_connector *aconnector,
7625 uint32_t link_index,
7626 struct amdgpu_encoder *aencoder)
7630 struct dc *dc = dm->dc;
7631 struct dc_link *link = dc_get_link_at_index(dc, link_index);
7632 struct amdgpu_i2c_adapter *i2c;
7634 link->priv = aconnector;
7636 DRM_DEBUG_DRIVER("%s()\n", __func__);
7638 i2c = create_i2c(link->ddc, link->link_index, &res);
7640 DRM_ERROR("Failed to create i2c adapter data\n");
7644 aconnector->i2c = i2c;
7645 res = i2c_add_adapter(&i2c->base);
7648 DRM_ERROR("Failed to register hw i2c %d\n", link->link_index);
7652 connector_type = to_drm_connector_type(link->connector_signal);
7654 res = drm_connector_init_with_ddc(
7657 &amdgpu_dm_connector_funcs,
7662 DRM_ERROR("connector_init failed\n");
7663 aconnector->connector_id = -1;
7667 drm_connector_helper_add(
7669 &amdgpu_dm_connector_helper_funcs);
7671 amdgpu_dm_connector_init_helper(
7678 drm_connector_attach_encoder(
7679 &aconnector->base, &aencoder->base);
7681 if (connector_type == DRM_MODE_CONNECTOR_DisplayPort
7682 || connector_type == DRM_MODE_CONNECTOR_eDP)
7683 amdgpu_dm_initialize_dp_connector(dm, aconnector, link->link_index);
7688 aconnector->i2c = NULL;
7693 int amdgpu_dm_get_encoder_crtc_mask(struct amdgpu_device *adev)
7695 switch (adev->mode_info.num_crtc) {
7712 static int amdgpu_dm_encoder_init(struct drm_device *dev,
7713 struct amdgpu_encoder *aencoder,
7714 uint32_t link_index)
7716 struct amdgpu_device *adev = drm_to_adev(dev);
7718 int res = drm_encoder_init(dev,
7720 &amdgpu_dm_encoder_funcs,
7721 DRM_MODE_ENCODER_TMDS,
7724 aencoder->base.possible_crtcs = amdgpu_dm_get_encoder_crtc_mask(adev);
7727 aencoder->encoder_id = link_index;
7729 aencoder->encoder_id = -1;
7731 drm_encoder_helper_add(&aencoder->base, &amdgpu_dm_encoder_helper_funcs);
7736 static void manage_dm_interrupts(struct amdgpu_device *adev,
7737 struct amdgpu_crtc *acrtc,
7741 * We have no guarantee that the frontend index maps to the same
7742 * backend index - some even map to more than one.
7744 * TODO: Use a different interrupt or check DC itself for the mapping.
7747 amdgpu_display_crtc_idx_to_irq_type(
7752 drm_crtc_vblank_on(&acrtc->base);
7755 &adev->pageflip_irq,
7757 #if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
7764 #if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
7772 &adev->pageflip_irq,
7774 drm_crtc_vblank_off(&acrtc->base);
7778 static void dm_update_pflip_irq_state(struct amdgpu_device *adev,
7779 struct amdgpu_crtc *acrtc)
7782 amdgpu_display_crtc_idx_to_irq_type(adev, acrtc->crtc_id);
7785 * This reads the current state for the IRQ and forcibly reapplies
7786 * the setting to hardware.
7788 amdgpu_irq_update(adev, &adev->pageflip_irq, irq_type);
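/*
 * Return true when the scaling mode or the underscan configuration changed
 * between the old and new DM connector state.
 */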
7792 is_scaling_state_different(const struct dm_connector_state *dm_state,
7793 const struct dm_connector_state *old_dm_state)
7795 if (dm_state->scaling != old_dm_state->scaling)
7797 if (!dm_state->underscan_enable && old_dm_state->underscan_enable) {
7798 if (old_dm_state->underscan_hborder != 0 && old_dm_state->underscan_vborder != 0)
7800 } else if (dm_state->underscan_enable && !old_dm_state->underscan_enable) {
7801 if (dm_state->underscan_hborder != 0 && dm_state->underscan_vborder != 0)
7803 } else if (dm_state->underscan_hborder != old_dm_state->underscan_hborder ||
7804 dm_state->underscan_vborder != old_dm_state->underscan_vborder)
7809 #ifdef CONFIG_DRM_AMD_DC_HDCP
7810 static bool is_content_protection_different(struct drm_connector_state *state,
7811 const struct drm_connector_state *old_state,
7812 const struct drm_connector *connector, struct hdcp_workqueue *hdcp_w)
7814 struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
7815 struct dm_connector_state *dm_con_state = to_dm_connector_state(connector->state);
7817 /* Handle: Type0/1 change */
7818 if (old_state->hdcp_content_type != state->hdcp_content_type &&
7819 state->content_protection != DRM_MODE_CONTENT_PROTECTION_UNDESIRED) {
7820 state->content_protection = DRM_MODE_CONTENT_PROTECTION_DESIRED;
7824 /* CP is being re-enabled, ignore this
7826 * Handles: ENABLED -> DESIRED
7828 if (old_state->content_protection == DRM_MODE_CONTENT_PROTECTION_ENABLED &&
7829 state->content_protection == DRM_MODE_CONTENT_PROTECTION_DESIRED) {
7830 state->content_protection = DRM_MODE_CONTENT_PROTECTION_ENABLED;
7834 /* S3 resume case, since old state will always be 0 (UNDESIRED) and the restored state will be ENABLED
7836 * Handles: UNDESIRED -> ENABLED
7838 if (old_state->content_protection == DRM_MODE_CONTENT_PROTECTION_UNDESIRED &&
7839 state->content_protection == DRM_MODE_CONTENT_PROTECTION_ENABLED)
7840 state->content_protection = DRM_MODE_CONTENT_PROTECTION_DESIRED;
7842 /* Check if something is connected or enabled; otherwise we would start HDCP
7843 * with nothing connected/enabled (hot-plug, headless S3, DPMS).
7845 * Handles: DESIRED -> DESIRED (Special case)
7847 if (dm_con_state->update_hdcp && state->content_protection == DRM_MODE_CONTENT_PROTECTION_DESIRED &&
7848 connector->dpms == DRM_MODE_DPMS_ON && aconnector->dc_sink != NULL) {
7849 dm_con_state->update_hdcp = false;
7854 * Handles: UNDESIRED -> UNDESIRED
7855 * DESIRED -> DESIRED
7856 * ENABLED -> ENABLED
7858 if (old_state->content_protection == state->content_protection)
7862 * Handles: UNDESIRED -> DESIRED
7863 * DESIRED -> UNDESIRED
7864 * ENABLED -> UNDESIRED
7866 if (state->content_protection != DRM_MODE_CONTENT_PROTECTION_ENABLED)
7870 * Handles: DESIRED -> ENABLED
7876 static void remove_stream(struct amdgpu_device *adev,
7877 struct amdgpu_crtc *acrtc,
7878 struct dc_stream_state *stream)
7880 /* this is the update mode case */
7882 acrtc->otg_inst = -1;
7883 acrtc->enabled = false;
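/*
 * Translate the cursor plane state into a DC cursor position. Negative
 * on-screen coordinates are folded into the hotspot so the cursor can hang
 * off the top/left edge of the CRTC; positions entirely off screen leave
 * the cursor disabled.
 */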
7886 static int get_cursor_position(struct drm_plane *plane, struct drm_crtc *crtc,
7887 struct dc_cursor_position *position)
7889 struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
7891 int xorigin = 0, yorigin = 0;
7893 if (!crtc || !plane->state->fb)
7896 if ((plane->state->crtc_w > amdgpu_crtc->max_cursor_width) ||
7897 (plane->state->crtc_h > amdgpu_crtc->max_cursor_height)) {
7898 DRM_ERROR("%s: bad cursor width or height %d x %d\n",
7900 plane->state->crtc_w,
7901 plane->state->crtc_h);
7905 x = plane->state->crtc_x;
7906 y = plane->state->crtc_y;
7908 if (x <= -amdgpu_crtc->max_cursor_width ||
7909 y <= -amdgpu_crtc->max_cursor_height)
7913 xorigin = min(-x, amdgpu_crtc->max_cursor_width - 1);
7917 yorigin = min(-y, amdgpu_crtc->max_cursor_height - 1);
7920 position->enable = true;
7921 position->translate_by_source = true;
7924 position->x_hotspot = xorigin;
7925 position->y_hotspot = yorigin;
7930 static void handle_cursor_update(struct drm_plane *plane,
7931 struct drm_plane_state *old_plane_state)
7933 struct amdgpu_device *adev = drm_to_adev(plane->dev);
7934 struct amdgpu_framebuffer *afb = to_amdgpu_framebuffer(plane->state->fb);
7935 struct drm_crtc *crtc = afb ? plane->state->crtc : old_plane_state->crtc;
7936 struct dm_crtc_state *crtc_state = crtc ? to_dm_crtc_state(crtc->state) : NULL;
7937 struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
7938 uint64_t address = afb ? afb->address : 0;
7939 struct dc_cursor_position position = {0};
7940 struct dc_cursor_attributes attributes;
7943 if (!plane->state->fb && !old_plane_state->fb)
7946 DC_LOG_CURSOR("%s: crtc_id=%d with size %d to %d\n",
7948 amdgpu_crtc->crtc_id,
7949 plane->state->crtc_w,
7950 plane->state->crtc_h);
7952 ret = get_cursor_position(plane, crtc, &position);
7956 if (!position.enable) {
7957 /* turn off cursor */
7958 if (crtc_state && crtc_state->stream) {
7959 mutex_lock(&adev->dm.dc_lock);
7960 dc_stream_set_cursor_position(crtc_state->stream,
7962 mutex_unlock(&adev->dm.dc_lock);
7967 amdgpu_crtc->cursor_width = plane->state->crtc_w;
7968 amdgpu_crtc->cursor_height = plane->state->crtc_h;
7970 memset(&attributes, 0, sizeof(attributes));
7971 attributes.address.high_part = upper_32_bits(address);
7972 attributes.address.low_part = lower_32_bits(address);
7973 attributes.width = plane->state->crtc_w;
7974 attributes.height = plane->state->crtc_h;
7975 attributes.color_format = CURSOR_MODE_COLOR_PRE_MULTIPLIED_ALPHA;
7976 attributes.rotation_angle = 0;
7977 attributes.attribute_flags.value = 0;
7979 attributes.pitch = afb->base.pitches[0] / afb->base.format->cpp[0];
7981 if (crtc_state->stream) {
7982 mutex_lock(&adev->dm.dc_lock);
7983 if (!dc_stream_set_cursor_attributes(crtc_state->stream,
7985 DRM_ERROR("DC failed to set cursor attributes\n");
7987 if (!dc_stream_set_cursor_position(crtc_state->stream,
7989 DRM_ERROR("DC failed to set cursor position\n");
7990 mutex_unlock(&adev->dm.dc_lock);
7994 static void prepare_flip_isr(struct amdgpu_crtc *acrtc)
7997 assert_spin_locked(&acrtc->base.dev->event_lock);
7998 WARN_ON(acrtc->event);
8000 acrtc->event = acrtc->base.state->event;
8002 /* Set the flip status */
8003 acrtc->pflip_status = AMDGPU_FLIP_SUBMITTED;
8005 /* Mark this event as consumed */
8006 acrtc->base.state->event = NULL;
8008 DC_LOG_PFLIP("crtc:%d, pflip_stat:AMDGPU_FLIP_SUBMITTED\n",
8012 static void update_freesync_state_on_stream(
8013 struct amdgpu_display_manager *dm,
8014 struct dm_crtc_state *new_crtc_state,
8015 struct dc_stream_state *new_stream,
8016 struct dc_plane_state *surface,
8017 u32 flip_timestamp_in_us)
8019 struct mod_vrr_params vrr_params;
8020 struct dc_info_packet vrr_infopacket = {0};
8021 struct amdgpu_device *adev = dm->adev;
8022 struct amdgpu_crtc *acrtc = to_amdgpu_crtc(new_crtc_state->base.crtc);
8023 unsigned long flags;
8024 bool pack_sdp_v1_3 = false;
8030 * TODO: Determine why min/max totals and vrefresh can be 0 here.
8031 * For now it's sufficient to just guard against these conditions.
8034 if (!new_stream->timing.h_total || !new_stream->timing.v_total)
8037 spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);
8038 vrr_params = acrtc->dm_irq_params.vrr_params;
8041 mod_freesync_handle_preflip(
8042 dm->freesync_module,
8045 flip_timestamp_in_us,
8048 if (adev->family < AMDGPU_FAMILY_AI &&
8049 amdgpu_dm_vrr_active(new_crtc_state)) {
8050 mod_freesync_handle_v_update(dm->freesync_module,
8051 new_stream, &vrr_params);
8053 /* Need to call this before the frame ends. */
8054 dc_stream_adjust_vmin_vmax(dm->dc,
8055 new_crtc_state->stream,
8056 &vrr_params.adjust);
8060 mod_freesync_build_vrr_infopacket(
8061 dm->freesync_module,
8065 TRANSFER_FUNC_UNKNOWN,
8069 new_crtc_state->freesync_timing_changed |=
8070 (memcmp(&acrtc->dm_irq_params.vrr_params.adjust,
8072 sizeof(vrr_params.adjust)) != 0);
8074 new_crtc_state->freesync_vrr_info_changed |=
8075 (memcmp(&new_crtc_state->vrr_infopacket,
8077 sizeof(vrr_infopacket)) != 0);
8079 acrtc->dm_irq_params.vrr_params = vrr_params;
8080 new_crtc_state->vrr_infopacket = vrr_infopacket;
8082 new_stream->adjust = acrtc->dm_irq_params.vrr_params.adjust;
8083 new_stream->vrr_infopacket = vrr_infopacket;
8085 if (new_crtc_state->freesync_vrr_info_changed)
8086 DRM_DEBUG_KMS("VRR packet update: crtc=%u enabled=%d state=%d",
8087 new_crtc_state->base.crtc->base.id,
8088 (int)new_crtc_state->base.vrr_enabled,
8089 (int)vrr_params.state);
8091 spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
8094 static void update_stream_irq_parameters(
8095 struct amdgpu_display_manager *dm,
8096 struct dm_crtc_state *new_crtc_state)
8098 struct dc_stream_state *new_stream = new_crtc_state->stream;
8099 struct mod_vrr_params vrr_params;
8100 struct mod_freesync_config config = new_crtc_state->freesync_config;
8101 struct amdgpu_device *adev = dm->adev;
8102 struct amdgpu_crtc *acrtc = to_amdgpu_crtc(new_crtc_state->base.crtc);
8103 unsigned long flags;
8109 * TODO: Determine why min/max totals and vrefresh can be 0 here.
8110 * For now it's sufficient to just guard against these conditions.
8112 if (!new_stream->timing.h_total || !new_stream->timing.v_total)
8115 spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);
8116 vrr_params = acrtc->dm_irq_params.vrr_params;
8118 if (new_crtc_state->vrr_supported &&
8119 config.min_refresh_in_uhz &&
8120 config.max_refresh_in_uhz) {
8122 * if freesync compatible mode was set, config.state will be set
8125 if (config.state == VRR_STATE_ACTIVE_FIXED && config.fixed_refresh_in_uhz &&
8126 (!drm_atomic_crtc_needs_modeset(&new_crtc_state->base) ||
8127 new_crtc_state->freesync_config.state == VRR_STATE_ACTIVE_FIXED)) {
8128 vrr_params.max_refresh_in_uhz = config.max_refresh_in_uhz;
8129 vrr_params.min_refresh_in_uhz = config.min_refresh_in_uhz;
8130 vrr_params.fixed_refresh_in_uhz = config.fixed_refresh_in_uhz;
8131 vrr_params.state = VRR_STATE_ACTIVE_FIXED;
8133 config.state = new_crtc_state->base.vrr_enabled ?
8134 VRR_STATE_ACTIVE_VARIABLE :
8138 config.state = VRR_STATE_UNSUPPORTED;
8141 mod_freesync_build_vrr_params(dm->freesync_module,
8143 &config, &vrr_params);
8145 new_crtc_state->freesync_timing_changed |=
8146 (memcmp(&acrtc->dm_irq_params.vrr_params.adjust,
8147 &vrr_params.adjust, sizeof(vrr_params.adjust)) != 0);
8149 new_crtc_state->freesync_config = config;
8150 /* Copy state for access from DM IRQ handler */
8151 acrtc->dm_irq_params.freesync_config = config;
8152 acrtc->dm_irq_params.active_planes = new_crtc_state->active_planes;
8153 acrtc->dm_irq_params.vrr_params = vrr_params;
8154 spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
8157 static void amdgpu_dm_handle_vrr_transition(struct dm_crtc_state *old_state,
8158 struct dm_crtc_state *new_state)
8160 bool old_vrr_active = amdgpu_dm_vrr_active(old_state);
8161 bool new_vrr_active = amdgpu_dm_vrr_active(new_state);
8163 if (!old_vrr_active && new_vrr_active) {
8164 /* Transition VRR inactive -> active:
8165 * While VRR is active, we must not disable the vblank irq, as a
8166 * re-enable after disable would compute bogus vblank/pflip
8167 * timestamps if it happened inside the display front porch.
8169 * We also need vupdate irq for the actual core vblank handling
8172 dm_set_vupdate_irq(new_state->base.crtc, true);
8173 drm_crtc_vblank_get(new_state->base.crtc);
8174 DRM_DEBUG_DRIVER("%s: crtc=%u VRR off->on: Get vblank ref\n",
8175 __func__, new_state->base.crtc->base.id);
8176 } else if (old_vrr_active && !new_vrr_active) {
8177 /* Transition VRR active -> inactive:
8178 * Allow vblank irq disable again for fixed refresh rate.
8180 dm_set_vupdate_irq(new_state->base.crtc, false);
8181 drm_crtc_vblank_put(new_state->base.crtc);
8182 DRM_DEBUG_DRIVER("%s: crtc=%u VRR on->off: Drop vblank ref\n",
8183 __func__, new_state->base.crtc->base.id);
8187 static void amdgpu_dm_commit_cursors(struct drm_atomic_state *state)
8189 struct drm_plane *plane;
8190 struct drm_plane_state *old_plane_state;
8194 * TODO: Make this per-stream so we don't issue redundant updates for
8195 * commits with multiple streams.
8197 for_each_old_plane_in_state(state, plane, old_plane_state, i)
8198 if (plane->type == DRM_PLANE_TYPE_CURSOR)
8199 handle_cursor_update(plane, old_plane_state);
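/*
 * Commit all plane updates for @pcrtc: build a dc_surface_update bundle
 * (scaling, plane info and flip address per plane), wait for fences and the
 * target vblank when a page flip is requested, and hand the bundle to DC as
 * a single stream update.
 */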
8202 static void amdgpu_dm_commit_planes(struct drm_atomic_state *state,
8203 struct dc_state *dc_state,
8204 struct drm_device *dev,
8205 struct amdgpu_display_manager *dm,
8206 struct drm_crtc *pcrtc,
8207 bool wait_for_vblank)
8210 uint64_t timestamp_ns;
8211 struct drm_plane *plane;
8212 struct drm_plane_state *old_plane_state, *new_plane_state;
8213 struct amdgpu_crtc *acrtc_attach = to_amdgpu_crtc(pcrtc);
8214 struct drm_crtc_state *new_pcrtc_state =
8215 drm_atomic_get_new_crtc_state(state, pcrtc);
8216 struct dm_crtc_state *acrtc_state = to_dm_crtc_state(new_pcrtc_state);
8217 struct dm_crtc_state *dm_old_crtc_state =
8218 to_dm_crtc_state(drm_atomic_get_old_crtc_state(state, pcrtc));
8219 int planes_count = 0, vpos, hpos;
8221 unsigned long flags;
8222 struct amdgpu_bo *abo;
8223 uint32_t target_vblank, last_flip_vblank;
8224 bool vrr_active = amdgpu_dm_vrr_active(acrtc_state);
8225 bool pflip_present = false;
8227 struct dc_surface_update surface_updates[MAX_SURFACES];
8228 struct dc_plane_info plane_infos[MAX_SURFACES];
8229 struct dc_scaling_info scaling_infos[MAX_SURFACES];
8230 struct dc_flip_addrs flip_addrs[MAX_SURFACES];
8231 struct dc_stream_update stream_update;
8234 bundle = kzalloc(sizeof(*bundle), GFP_KERNEL);
8237 dm_error("Failed to allocate update bundle\n");
8242 * Disable the cursor first if we're disabling all the planes.
8243 * It'll remain on the screen after the planes are re-enabled if we don't.
8246 if (acrtc_state->active_planes == 0)
8247 amdgpu_dm_commit_cursors(state);
8249 /* update planes when needed */
8250 for_each_oldnew_plane_in_state(state, plane, old_plane_state, new_plane_state, i) {
8251 struct drm_crtc *crtc = new_plane_state->crtc;
8252 struct drm_crtc_state *new_crtc_state;
8253 struct drm_framebuffer *fb = new_plane_state->fb;
8254 struct amdgpu_framebuffer *afb = (struct amdgpu_framebuffer *)fb;
8255 bool plane_needs_flip;
8256 struct dc_plane_state *dc_plane;
8257 struct dm_plane_state *dm_new_plane_state = to_dm_plane_state(new_plane_state);
8259 /* Cursor plane is handled after stream updates */
8260 if (plane->type == DRM_PLANE_TYPE_CURSOR)
8263 if (!fb || !crtc || pcrtc != crtc)
8266 new_crtc_state = drm_atomic_get_new_crtc_state(state, crtc);
8267 if (!new_crtc_state->active)
8270 dc_plane = dm_new_plane_state->dc_state;
8272 bundle->surface_updates[planes_count].surface = dc_plane;
8273 if (new_pcrtc_state->color_mgmt_changed) {
8274 bundle->surface_updates[planes_count].gamma = dc_plane->gamma_correction;
8275 bundle->surface_updates[planes_count].in_transfer_func = dc_plane->in_transfer_func;
8276 bundle->surface_updates[planes_count].gamut_remap_matrix = &dc_plane->gamut_remap_matrix;
8279 fill_dc_scaling_info(new_plane_state,
8280 &bundle->scaling_infos[planes_count]);
8282 bundle->surface_updates[planes_count].scaling_info =
8283 &bundle->scaling_infos[planes_count];
8285 plane_needs_flip = old_plane_state->fb && new_plane_state->fb;
8287 pflip_present = pflip_present || plane_needs_flip;
8289 if (!plane_needs_flip) {
8294 abo = gem_to_amdgpu_bo(fb->obj[0]);
8297 * Wait for all fences on this FB. Do a limited wait to avoid
8298 * deadlocking during GPU reset when this fence will not signal
8299 * but we hold the reservation lock for the BO.
8301 r = dma_resv_wait_timeout_rcu(abo->tbo.base.resv, true,
8303 msecs_to_jiffies(5000));
8304 if (unlikely(r <= 0))
8305 DRM_ERROR("Waiting for fences timed out!");
8307 fill_dc_plane_info_and_addr(
8308 dm->adev, new_plane_state,
8310 &bundle->plane_infos[planes_count],
8311 &bundle->flip_addrs[planes_count].address,
8312 afb->tmz_surface, false);
8314 DRM_DEBUG_ATOMIC("plane: id=%d dcc_en=%d\n",
8315 new_plane_state->plane->index,
8316 bundle->plane_infos[planes_count].dcc.enable);
8318 bundle->surface_updates[planes_count].plane_info =
8319 &bundle->plane_infos[planes_count];
8322 * Only allow immediate flips for fast updates that don't
8323 * change FB pitch, DCC state, rotation or mirroring.
8325 bundle->flip_addrs[planes_count].flip_immediate =
8326 crtc->state->async_flip &&
8327 acrtc_state->update_type == UPDATE_TYPE_FAST;
8329 timestamp_ns = ktime_get_ns();
8330 bundle->flip_addrs[planes_count].flip_timestamp_in_us = div_u64(timestamp_ns, 1000);
8331 bundle->surface_updates[planes_count].flip_addr = &bundle->flip_addrs[planes_count];
8332 bundle->surface_updates[planes_count].surface = dc_plane;
8334 if (!bundle->surface_updates[planes_count].surface) {
8335 DRM_ERROR("No surface for CRTC: id=%d\n",
8336 acrtc_attach->crtc_id);
8340 if (plane == pcrtc->primary)
8341 update_freesync_state_on_stream(
8344 acrtc_state->stream,
8346 bundle->flip_addrs[planes_count].flip_timestamp_in_us);
8348 DRM_DEBUG_ATOMIC("%s Flipping to hi: 0x%x, low: 0x%x\n",
8350 bundle->flip_addrs[planes_count].address.grph.addr.high_part,
8351 bundle->flip_addrs[planes_count].address.grph.addr.low_part);
8357 if (pflip_present) {
8359 /* Use old throttling in non-vrr fixed refresh rate mode
8360 * to keep flip scheduling based on target vblank counts
8361 * working in a backwards compatible way, e.g., for
8362 * clients using the GLX_OML_sync_control extension or
8363 * DRI3/Present extension with defined target_msc.
8365 last_flip_vblank = amdgpu_get_vblank_counter_kms(pcrtc);
8368 /* For variable refresh rate mode only:
8369 * Get vblank of last completed flip to avoid > 1 vrr
8370 * flips per video frame by use of throttling, but allow
8371 * flip programming anywhere in the possibly large
8372 * variable vrr vblank interval for fine-grained flip
8373 * timing control and more opportunity to avoid stutter
8374 * on late submission of flips.
8376 spin_lock_irqsave(&pcrtc->dev->event_lock, flags);
8377 last_flip_vblank = acrtc_attach->dm_irq_params.last_flip_vblank;
8378 spin_unlock_irqrestore(&pcrtc->dev->event_lock, flags);
8381 target_vblank = last_flip_vblank + wait_for_vblank;
8384 * Wait until we're out of the vertical blank period before the one
8385 * targeted by the flip
8387 while ((acrtc_attach->enabled &&
8388 (amdgpu_display_get_crtc_scanoutpos(dm->ddev, acrtc_attach->crtc_id,
8389 0, &vpos, &hpos, NULL,
8390 NULL, &pcrtc->hwmode)
8391 & (DRM_SCANOUTPOS_VALID | DRM_SCANOUTPOS_IN_VBLANK)) ==
8392 (DRM_SCANOUTPOS_VALID | DRM_SCANOUTPOS_IN_VBLANK) &&
8393 (int)(target_vblank -
8394 amdgpu_get_vblank_counter_kms(pcrtc)) > 0)) {
8395 usleep_range(1000, 1100);
8399 * Prepare the flip event for the pageflip interrupt to handle.
8401 * This only works in the case where we've already turned on the
8402 * appropriate hardware blocks (e.g. HUBP) so in the transition case
8403 * from 0 -> n planes we have to skip a hardware generated event
8404 * and rely on sending it from software.
8406 if (acrtc_attach->base.state->event &&
8407 acrtc_state->active_planes > 0) {
8408 drm_crtc_vblank_get(pcrtc);
8410 spin_lock_irqsave(&pcrtc->dev->event_lock, flags);
8412 WARN_ON(acrtc_attach->pflip_status != AMDGPU_FLIP_NONE);
8413 prepare_flip_isr(acrtc_attach);
8415 spin_unlock_irqrestore(&pcrtc->dev->event_lock, flags);
8418 if (acrtc_state->stream) {
8419 if (acrtc_state->freesync_vrr_info_changed)
8420 bundle->stream_update.vrr_infopacket =
8421 &acrtc_state->stream->vrr_infopacket;
8425 /* Update the planes if changed or disable if we don't have any. */
8426 if ((planes_count || acrtc_state->active_planes == 0) &&
8427 acrtc_state->stream) {
8428 bundle->stream_update.stream = acrtc_state->stream;
8429 if (new_pcrtc_state->mode_changed) {
8430 bundle->stream_update.src = acrtc_state->stream->src;
8431 bundle->stream_update.dst = acrtc_state->stream->dst;
8434 if (new_pcrtc_state->color_mgmt_changed) {
8436 * TODO: This isn't fully correct since we've actually
8437 * already modified the stream in place.
8439 bundle->stream_update.gamut_remap =
8440 &acrtc_state->stream->gamut_remap_matrix;
8441 bundle->stream_update.output_csc_transform =
8442 &acrtc_state->stream->csc_color_matrix;
8443 bundle->stream_update.out_transfer_func =
8444 acrtc_state->stream->out_transfer_func;
8447 acrtc_state->stream->abm_level = acrtc_state->abm_level;
8448 if (acrtc_state->abm_level != dm_old_crtc_state->abm_level)
8449 bundle->stream_update.abm_level = &acrtc_state->abm_level;
8452 * If FreeSync state on the stream has changed then we need to
8453 * re-adjust the min/max bounds now that DC doesn't handle this
8454 * as part of commit.
8456 if (is_dc_timing_adjust_needed(dm_old_crtc_state, acrtc_state)) {
8457 spin_lock_irqsave(&pcrtc->dev->event_lock, flags);
8458 dc_stream_adjust_vmin_vmax(
8459 dm->dc, acrtc_state->stream,
8460 &acrtc_attach->dm_irq_params.vrr_params.adjust);
8461 spin_unlock_irqrestore(&pcrtc->dev->event_lock, flags);
8463 mutex_lock(&dm->dc_lock);
8464 if ((acrtc_state->update_type > UPDATE_TYPE_FAST) &&
8465 acrtc_state->stream->link->psr_settings.psr_allow_active)
8466 amdgpu_dm_psr_disable(acrtc_state->stream);
8468 dc_commit_updates_for_stream(dm->dc,
8469 bundle->surface_updates,
8471 acrtc_state->stream,
8472 &bundle->stream_update,
8476 * Enable or disable the interrupts on the backend.
8478 * Most pipes are put into power gating when unused.
8480 * When power gating is enabled on a pipe, its interrupt enablement
8481 * state is lost by the time power gating is disabled again.
8483 * So we need to update the IRQ control state in hardware
8484 * whenever the pipe turns on (since it could be previously
8485 * power gated) or off (since some pipes can't be power gated
8488 if (dm_old_crtc_state->active_planes != acrtc_state->active_planes)
8489 dm_update_pflip_irq_state(drm_to_adev(dev),
8492 if ((acrtc_state->update_type > UPDATE_TYPE_FAST) &&
8493 acrtc_state->stream->link->psr_settings.psr_version != DC_PSR_VERSION_UNSUPPORTED &&
8494 !acrtc_state->stream->link->psr_settings.psr_feature_enabled)
8495 amdgpu_dm_link_setup_psr(acrtc_state->stream);
8496 else if ((acrtc_state->update_type == UPDATE_TYPE_FAST) &&
8497 acrtc_state->stream->link->psr_settings.psr_feature_enabled &&
8498 !acrtc_state->stream->link->psr_settings.psr_allow_active) {
8499 amdgpu_dm_psr_enable(acrtc_state->stream);
8502 mutex_unlock(&dm->dc_lock);
8506 * Update cursor state *after* programming all the planes.
8507 * This avoids redundant programming in the case where we're going
8508 * to be disabling a single plane - those pipes are being disabled.
8510 if (acrtc_state->active_planes)
8511 amdgpu_dm_commit_cursors(state);
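/*
 * Notify the audio component about ELD changes: clear the audio
 * instance for connectors leaving a CRTC (or whose CRTC is being
 * reset) and propagate the new audio instance for connectors on
 * CRTCs that went through a modeset and still have a stream.
 */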
8517 static void amdgpu_dm_commit_audio(struct drm_device *dev,
8518 struct drm_atomic_state *state)
8520 struct amdgpu_device *adev = drm_to_adev(dev);
8521 struct amdgpu_dm_connector *aconnector;
8522 struct drm_connector *connector;
8523 struct drm_connector_state *old_con_state, *new_con_state;
8524 struct drm_crtc_state *new_crtc_state;
8525 struct dm_crtc_state *new_dm_crtc_state;
8526 const struct dc_stream_status *status;
8529 /* Notify audio device removals. */
8530 for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) {
8531 if (old_con_state->crtc != new_con_state->crtc) {
8532 /* CRTC changes require notification. */
8536 if (!new_con_state->crtc)
8539 new_crtc_state = drm_atomic_get_new_crtc_state(
8540 state, new_con_state->crtc);
8542 if (!new_crtc_state)
8545 if (!drm_atomic_crtc_needs_modeset(new_crtc_state))
8549 aconnector = to_amdgpu_dm_connector(connector);
8551 mutex_lock(&adev->dm.audio_lock);
8552 inst = aconnector->audio_inst;
8553 aconnector->audio_inst = -1;
8554 mutex_unlock(&adev->dm.audio_lock);
8556 amdgpu_dm_audio_eld_notify(adev, inst);
8559 /* Notify audio device additions. */
8560 for_each_new_connector_in_state(state, connector, new_con_state, i) {
8561 if (!new_con_state->crtc)
8564 new_crtc_state = drm_atomic_get_new_crtc_state(
8565 state, new_con_state->crtc);
8567 if (!new_crtc_state)
8570 if (!drm_atomic_crtc_needs_modeset(new_crtc_state))
8573 new_dm_crtc_state = to_dm_crtc_state(new_crtc_state);
8574 if (!new_dm_crtc_state->stream)
8577 status = dc_stream_get_status(new_dm_crtc_state->stream);
8581 aconnector = to_amdgpu_dm_connector(connector);
8583 mutex_lock(&adev->dm.audio_lock);
8584 inst = status->audio_inst;
8585 aconnector->audio_inst = inst;
8586 mutex_unlock(&adev->dm.audio_lock);
8588 amdgpu_dm_audio_eld_notify(adev, inst);
8593 * amdgpu_dm_crtc_copy_transient_flags - copy mirrored flags from DRM to DC
8594 * @crtc_state: the DRM CRTC state
8595 * @stream_state: the DC stream state.
8597 * Copy the mirrored transient state flags from DRM to DC. It is used to bring
8598 * a dc_stream_state's flags in sync with a drm_crtc_state's flags.
8600 static void amdgpu_dm_crtc_copy_transient_flags(struct drm_crtc_state *crtc_state,
8601 struct dc_stream_state *stream_state)
8603 stream_state->mode_changed = drm_atomic_crtc_needs_modeset(crtc_state);
8607 * amdgpu_dm_atomic_commit_tail() - AMDgpu DM's commit tail implementation.
8608 * @state: The atomic state to commit
8610 * This will tell DC to commit the constructed DC state from atomic_check,
8611 * programming the hardware. Any failure here implies a hardware failure, since
8612 * atomic check should have filtered anything non-kosher.
8614 static void amdgpu_dm_atomic_commit_tail(struct drm_atomic_state *state)
8616 struct drm_device *dev = state->dev;
8617 struct amdgpu_device *adev = drm_to_adev(dev);
8618 struct amdgpu_display_manager *dm = &adev->dm;
8619 struct dm_atomic_state *dm_state;
8620 struct dc_state *dc_state = NULL, *dc_state_temp = NULL;
8622 struct drm_crtc *crtc;
8623 struct drm_crtc_state *old_crtc_state, *new_crtc_state;
8624 unsigned long flags;
8625 bool wait_for_vblank = true;
8626 struct drm_connector *connector;
8627 struct drm_connector_state *old_con_state, *new_con_state;
8628 struct dm_crtc_state *dm_old_crtc_state, *dm_new_crtc_state;
8629 int crtc_disable_count = 0;
8630 bool mode_set_reset_required = false;
8632 trace_amdgpu_dm_atomic_commit_tail_begin(state);
8634 drm_atomic_helper_update_legacy_modeset_state(dev, state);
8636 dm_state = dm_atomic_get_new_state(state);
8637 if (dm_state && dm_state->context) {
8638 dc_state = dm_state->context;
8640 /* No state changes, retain current state. */
8641 dc_state_temp = dc_create_state(dm->dc);
8642 ASSERT(dc_state_temp);
8643 dc_state = dc_state_temp;
8644 dc_resource_state_copy_construct_current(dm->dc, dc_state);
8647 for_each_oldnew_crtc_in_state (state, crtc, old_crtc_state,
8648 new_crtc_state, i) {
8649 struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
8651 dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
8653 if (old_crtc_state->active &&
8654 (!new_crtc_state->active ||
8655 drm_atomic_crtc_needs_modeset(new_crtc_state))) {
8656 manage_dm_interrupts(adev, acrtc, false);
8657 dc_stream_release(dm_old_crtc_state->stream);
8661 drm_atomic_helper_calc_timestamping_constants(state);
8663 /* update changed items */
8664 for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
8665 struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
8667 dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
8668 dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
8671 "amdgpu_crtc id:%d crtc_state_flags: enable:%d, active:%d, "
8672 "planes_changed:%d, mode_changed:%d,active_changed:%d,"
8673 "connectors_changed:%d\n",
8675 new_crtc_state->enable,
8676 new_crtc_state->active,
8677 new_crtc_state->planes_changed,
8678 new_crtc_state->mode_changed,
8679 new_crtc_state->active_changed,
8680 new_crtc_state->connectors_changed);
8682 /* Disable cursor if disabling crtc */
8683 if (old_crtc_state->active && !new_crtc_state->active) {
8684 struct dc_cursor_position position;
8686 memset(&position, 0, sizeof(position));
8687 mutex_lock(&dm->dc_lock);
8688 dc_stream_set_cursor_position(dm_old_crtc_state->stream, &position);
8689 mutex_unlock(&dm->dc_lock);
8692 /* Copy all transient state flags into dc state */
8693 if (dm_new_crtc_state->stream) {
8694 amdgpu_dm_crtc_copy_transient_flags(&dm_new_crtc_state->base,
8695 dm_new_crtc_state->stream);
8698 /* handles headless hotplug case, updating new_state and
8699 * aconnector as needed
8702 if (modeset_required(new_crtc_state, dm_new_crtc_state->stream, dm_old_crtc_state->stream)) {
8704 DRM_DEBUG_ATOMIC("Atomic commit: SET crtc id %d: [%p]\n", acrtc->crtc_id, acrtc);
8706 if (!dm_new_crtc_state->stream) {
8708 * this could happen because of issues with
8709 * userspace notifications delivery.
8710 * In this case userspace tries to set a mode on a
8711 * display which is in fact disconnected.
8712 * dc_sink is NULL in this case on aconnector.
8713 * We expect a reset mode to come soon.
8715 * This can also happen when an unplug is done
8716 * during the resume sequence.
8718 * In this case, we want to pretend we still
8719 * have a sink to keep the pipe running so that
8720 * hw state is consistent with the sw state
8722 DRM_DEBUG_DRIVER("%s: Failed to create new stream for crtc %d\n",
8723 __func__, acrtc->base.base.id);
8727 if (dm_old_crtc_state->stream)
8728 remove_stream(adev, acrtc, dm_old_crtc_state->stream);
8730 pm_runtime_get_noresume(dev->dev);
8732 acrtc->enabled = true;
8733 acrtc->hw_mode = new_crtc_state->mode;
8734 crtc->hwmode = new_crtc_state->mode;
8735 mode_set_reset_required = true;
8736 } else if (modereset_required(new_crtc_state)) {
8737 DRM_DEBUG_ATOMIC("Atomic commit: RESET. crtc id %d:[%p]\n", acrtc->crtc_id, acrtc);
8738 /* i.e. reset mode */
8739 if (dm_old_crtc_state->stream)
8740 remove_stream(adev, acrtc, dm_old_crtc_state->stream);
8742 mode_set_reset_required = true;
8744 } /* for_each_crtc_in_state() */
8747 /* If there is a mode set or reset, disable eDP PSR */
8748 if (mode_set_reset_required)
8749 amdgpu_dm_psr_disable_all(dm);
8751 dm_enable_per_frame_crtc_master_sync(dc_state);
8752 mutex_lock(&dm->dc_lock);
8753 WARN_ON(!dc_commit_state(dm->dc, dc_state));
8754 #if defined(CONFIG_DRM_AMD_DC_DCN)
8755 /* Allow idle optimization when vblank count is 0 for display off */
8756 if (dm->active_vblank_irq_count == 0)
8757 dc_allow_idle_optimizations(dm->dc, true);
8759 mutex_unlock(&dm->dc_lock);
8762 for_each_new_crtc_in_state(state, crtc, new_crtc_state, i) {
8763 struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
8765 dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
8767 if (dm_new_crtc_state->stream != NULL) {
8768 const struct dc_stream_status *status =
8769 dc_stream_get_status(dm_new_crtc_state->stream);
8772 status = dc_stream_get_status_from_state(dc_state,
8773 dm_new_crtc_state->stream);
8775 DC_ERR("got no status for stream %p on acrtc %p\n", dm_new_crtc_state->stream, acrtc);
8777 acrtc->otg_inst = status->primary_otg_inst;
8780 #ifdef CONFIG_DRM_AMD_DC_HDCP
8781 for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) {
8782 struct dm_connector_state *dm_new_con_state = to_dm_connector_state(new_con_state);
8783 struct amdgpu_crtc *acrtc = to_amdgpu_crtc(dm_new_con_state->base.crtc);
8784 struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
8786 new_crtc_state = NULL;
8789 new_crtc_state = drm_atomic_get_new_crtc_state(state, &acrtc->base);
8791 dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
8793 if (dm_new_crtc_state && dm_new_crtc_state->stream == NULL &&
8794 connector->state->content_protection == DRM_MODE_CONTENT_PROTECTION_ENABLED) {
8795 hdcp_reset_display(adev->dm.hdcp_workqueue, aconnector->dc_link->link_index);
8796 new_con_state->content_protection = DRM_MODE_CONTENT_PROTECTION_DESIRED;
8797 dm_new_con_state->update_hdcp = true;
8801 if (is_content_protection_different(new_con_state, old_con_state, connector, adev->dm.hdcp_workqueue))
8802 hdcp_update_display(
8803 adev->dm.hdcp_workqueue, aconnector->dc_link->link_index, aconnector,
8804 new_con_state->hdcp_content_type,
8805 new_con_state->content_protection == DRM_MODE_CONTENT_PROTECTION_DESIRED);
8809 /* Handle connector state changes */
8810 for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) {
8811 struct dm_connector_state *dm_new_con_state = to_dm_connector_state(new_con_state);
8812 struct dm_connector_state *dm_old_con_state = to_dm_connector_state(old_con_state);
8813 struct amdgpu_crtc *acrtc = to_amdgpu_crtc(dm_new_con_state->base.crtc);
8814 struct dc_surface_update dummy_updates[MAX_SURFACES];
8815 struct dc_stream_update stream_update;
8816 struct dc_info_packet hdr_packet;
8817 struct dc_stream_status *status = NULL;
8818 bool abm_changed, hdr_changed, scaling_changed;
8820 memset(&dummy_updates, 0, sizeof(dummy_updates));
8821 memset(&stream_update, 0, sizeof(stream_update));
8824 new_crtc_state = drm_atomic_get_new_crtc_state(state, &acrtc->base);
8825 old_crtc_state = drm_atomic_get_old_crtc_state(state, &acrtc->base);
8828 /* Skip any modesets/resets */
8829 if (!acrtc || drm_atomic_crtc_needs_modeset(new_crtc_state))
8832 dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
8833 dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
8835 scaling_changed = is_scaling_state_different(dm_new_con_state,
8838 abm_changed = dm_new_crtc_state->abm_level !=
8839 dm_old_crtc_state->abm_level;
8842 is_hdr_metadata_different(old_con_state, new_con_state);
8844 if (!scaling_changed && !abm_changed && !hdr_changed)
8847 stream_update.stream = dm_new_crtc_state->stream;
8848 if (scaling_changed) {
8849 update_stream_scaling_settings(&dm_new_con_state->base.crtc->mode,
8850 dm_new_con_state, dm_new_crtc_state->stream);
8852 stream_update.src = dm_new_crtc_state->stream->src;
8853 stream_update.dst = dm_new_crtc_state->stream->dst;
8857 dm_new_crtc_state->stream->abm_level = dm_new_crtc_state->abm_level;
8859 stream_update.abm_level = &dm_new_crtc_state->abm_level;
8863 fill_hdr_info_packet(new_con_state, &hdr_packet);
8864 stream_update.hdr_static_metadata = &hdr_packet;
8867 status = dc_stream_get_status(dm_new_crtc_state->stream);
8869 WARN_ON(!status->plane_count);
8872 * TODO: DC refuses to perform stream updates without a dc_surface_update.
8873 * Here we create an empty update on each plane.
8874 * To fix this, DC should permit updating only stream properties.
8876 for (j = 0; j < status->plane_count; j++)
8877 dummy_updates[j].surface = status->plane_states[0];
8880 mutex_lock(&dm->dc_lock);
8881 dc_commit_updates_for_stream(dm->dc,
8883 status->plane_count,
8884 dm_new_crtc_state->stream,
8887 mutex_unlock(&dm->dc_lock);
8890 /* Count number of newly disabled CRTCs for dropping PM refs later. */
8891 for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state,
8892 new_crtc_state, i) {
8893 if (old_crtc_state->active && !new_crtc_state->active)
8894 crtc_disable_count++;
8896 dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
8897 dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
8899 /* For freesync config update on crtc state and params for irq */
8900 update_stream_irq_parameters(dm, dm_new_crtc_state);
8902 /* Handle vrr on->off / off->on transitions */
8903 amdgpu_dm_handle_vrr_transition(dm_old_crtc_state,
8908 * Enable interrupts for CRTCs that are newly enabled or went through
8909 * a modeset. It was intentionally deferred until after the front end
8910 * state was modified to wait until the OTG was on and so the IRQ
8911 * handlers didn't access stale or invalid state.
8913 for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
8914 struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
8915 #ifdef CONFIG_DEBUG_FS
8916 bool configure_crc = false;
8917 enum amdgpu_dm_pipe_crc_source cur_crc_src;
8919 dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
8921 if (new_crtc_state->active &&
8922 (!old_crtc_state->active ||
8923 drm_atomic_crtc_needs_modeset(new_crtc_state))) {
8924 dc_stream_retain(dm_new_crtc_state->stream);
8925 acrtc->dm_irq_params.stream = dm_new_crtc_state->stream;
8926 manage_dm_interrupts(adev, acrtc, true);
8928 #ifdef CONFIG_DEBUG_FS
8930 * Frontend may have changed so reapply the CRC capture
8931 * settings for the stream.
8933 dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
8934 spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);
8935 cur_crc_src = acrtc->dm_irq_params.crc_src;
8936 spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
8938 if (amdgpu_dm_is_valid_crc_source(cur_crc_src)) {
8939 configure_crc = true;
8940 #if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
8941 if (amdgpu_dm_crc_window_is_activated(crtc))
8942 configure_crc = false;
8947 amdgpu_dm_crtc_configure_crc_source(
8948 crtc, dm_new_crtc_state, cur_crc_src);
8953 for_each_new_crtc_in_state(state, crtc, new_crtc_state, j)
8954 if (new_crtc_state->async_flip)
8955 wait_for_vblank = false;
8957 /* update planes when needed per crtc*/
8958 for_each_new_crtc_in_state(state, crtc, new_crtc_state, j) {
8959 dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
8961 if (dm_new_crtc_state->stream)
8962 amdgpu_dm_commit_planes(state, dc_state, dev,
8963 dm, crtc, wait_for_vblank);
8966 /* Update audio instances for each connector. */
8967 amdgpu_dm_commit_audio(dev, state);
8970 * Send a vblank event for every CRTC event not handled in the flip path,
8971 * and mark the event consumed for drm_atomic_helper_commit_hw_done().
8973 spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);
8974 for_each_new_crtc_in_state(state, crtc, new_crtc_state, i) {
8976 if (new_crtc_state->event)
8977 drm_send_event_locked(dev, &new_crtc_state->event->base);
8979 new_crtc_state->event = NULL;
8981 spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
8983 /* Signal HW programming completion */
8984 drm_atomic_helper_commit_hw_done(state);
8986 if (wait_for_vblank)
8987 drm_atomic_helper_wait_for_flip_done(dev, state);
8989 drm_atomic_helper_cleanup_planes(dev, state);
8991 /* return the stolen vga memory back to VRAM */
8992 if (!adev->mman.keep_stolen_vga_memory)
8993 amdgpu_bo_free_kernel(&adev->mman.stolen_vga_memory, NULL, NULL);
8994 amdgpu_bo_free_kernel(&adev->mman.stolen_extended_memory, NULL, NULL);
8997 * Finally, drop a runtime PM reference for each newly disabled CRTC,
8998 * so we can put the GPU into runtime suspend if we're not driving any displays.
9001 for (i = 0; i < crtc_disable_count; i++)
9002 pm_runtime_put_autosuspend(dev->dev);
9003 pm_runtime_mark_last_busy(dev->dev);
9006 dc_release_state(dc_state_temp);
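/*
 * Build and commit a minimal atomic state (connector, its CRTC and the
 * primary plane) with mode_changed forced, so the previous display
 * configuration is restored without relying on a userspace commit.
 */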
9010 static int dm_force_atomic_commit(struct drm_connector *connector)
9013 struct drm_device *ddev = connector->dev;
9014 struct drm_atomic_state *state = drm_atomic_state_alloc(ddev);
9015 struct amdgpu_crtc *disconnected_acrtc = to_amdgpu_crtc(connector->encoder->crtc);
9016 struct drm_plane *plane = disconnected_acrtc->base.primary;
9017 struct drm_connector_state *conn_state;
9018 struct drm_crtc_state *crtc_state;
9019 struct drm_plane_state *plane_state;
9024 state->acquire_ctx = ddev->mode_config.acquire_ctx;
9026 /* Construct an atomic state to restore previous display setting */
9029 * Attach connectors to drm_atomic_state
9031 conn_state = drm_atomic_get_connector_state(state, connector);
9033 ret = PTR_ERR_OR_ZERO(conn_state);
9037 /* Attach crtc to drm_atomic_state*/
9038 crtc_state = drm_atomic_get_crtc_state(state, &disconnected_acrtc->base);
9040 ret = PTR_ERR_OR_ZERO(crtc_state);
9044 /* force a restore */
9045 crtc_state->mode_changed = true;
9047 /* Attach plane to drm_atomic_state */
9048 plane_state = drm_atomic_get_plane_state(state, plane);
9050 ret = PTR_ERR_OR_ZERO(plane_state);
9054 /* Call commit internally with the state we just constructed */
9055 ret = drm_atomic_commit(state);
9058 drm_atomic_state_put(state);
9060 DRM_ERROR("Restoring old state failed with %i\n", ret);
9066 * This function handles all cases when a set mode does not come upon hotplug.
9067 * This includes when a display is unplugged then plugged back into the
9068 * same port and when running without usermode desktop manager support.
9070 void dm_restore_drm_connector_state(struct drm_device *dev,
9071 struct drm_connector *connector)
9073 struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
9074 struct amdgpu_crtc *disconnected_acrtc;
9075 struct dm_crtc_state *acrtc_state;
9077 if (!aconnector->dc_sink || !connector->state || !connector->encoder)
9080 disconnected_acrtc = to_amdgpu_crtc(connector->encoder->crtc);
9081 if (!disconnected_acrtc)
9084 acrtc_state = to_dm_crtc_state(disconnected_acrtc->base.state);
9085 if (!acrtc_state->stream)
9089 * If the previous sink is not released and different from the current,
9090 * we deduce we are in a state where we cannot rely on a usermode call
9091 * to turn on the display, so we do it here
9093 if (acrtc_state->stream->sink != aconnector->dc_sink)
9094 dm_force_atomic_commit(&aconnector->base);
9098 * Grabs all modesetting locks to serialize against any blocking commits and
9099 * waits for completion of all non-blocking commits.
9101 static int do_aquire_global_lock(struct drm_device *dev,
9102 struct drm_atomic_state *state)
9104 struct drm_crtc *crtc;
9105 struct drm_crtc_commit *commit;
9109 * Adding all modeset locks to acquire_ctx will
9110 * ensure that when the framework releases it, the
9111 * extra locks we take here get released too.
9113 ret = drm_modeset_lock_all_ctx(dev, state->acquire_ctx);
9117 list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
9118 spin_lock(&crtc->commit_lock);
9119 commit = list_first_entry_or_null(&crtc->commit_list,
9120 struct drm_crtc_commit, commit_entry);
9122 drm_crtc_commit_get(commit);
9123 spin_unlock(&crtc->commit_lock);
9129 * Make sure all pending HW programming has completed and page flips are done.
9132 ret = wait_for_completion_interruptible_timeout(&commit->hw_done, 10*HZ);
9135 ret = wait_for_completion_interruptible_timeout(
9136 &commit->flip_done, 10*HZ);
9139 DRM_ERROR("[CRTC:%d:%s] hw_done or flip_done "
9140 "timed out\n", crtc->base.id, crtc->name);
9142 drm_crtc_commit_put(commit);
9145 return ret < 0 ? ret : 0;
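/*
 * Derive the FreeSync configuration for a CRTC from the connector's
 * reported refresh range and the requested mode: variable, fixed
 * (freesync video) or inactive.
 */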
9148 static void get_freesync_config_for_crtc(
9149 struct dm_crtc_state *new_crtc_state,
9150 struct dm_connector_state *new_con_state)
9152 struct mod_freesync_config config = {0};
9153 struct amdgpu_dm_connector *aconnector =
9154 to_amdgpu_dm_connector(new_con_state->base.connector);
9155 struct drm_display_mode *mode = &new_crtc_state->base.mode;
9156 int vrefresh = drm_mode_vrefresh(mode);
9157 bool fs_vid_mode = false;
9159 new_crtc_state->vrr_supported = new_con_state->freesync_capable &&
9160 vrefresh >= aconnector->min_vfreq &&
9161 vrefresh <= aconnector->max_vfreq;
9163 if (new_crtc_state->vrr_supported) {
9164 new_crtc_state->stream->ignore_msa_timing_param = true;
9165 fs_vid_mode = new_crtc_state->freesync_config.state == VRR_STATE_ACTIVE_FIXED;
9167 config.min_refresh_in_uhz = aconnector->min_vfreq * 1000000;
9168 config.max_refresh_in_uhz = aconnector->max_vfreq * 1000000;
9169 config.vsif_supported = true;
9173 config.state = VRR_STATE_ACTIVE_FIXED;
9174 config.fixed_refresh_in_uhz = new_crtc_state->freesync_config.fixed_refresh_in_uhz;
9176 } else if (new_crtc_state->base.vrr_enabled) {
9177 config.state = VRR_STATE_ACTIVE_VARIABLE;
9179 config.state = VRR_STATE_INACTIVE;
9183 new_crtc_state->freesync_config = config;
9186 static void reset_freesync_config_for_crtc(
9187 struct dm_crtc_state *new_crtc_state)
9189 new_crtc_state->vrr_supported = false;
9191 memset(&new_crtc_state->vrr_infopacket, 0,
9192 sizeof(new_crtc_state->vrr_infopacket));
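/*
 * Return true when the old and new modes are identical except for their
 * vertical blanking: vtotal, vsync_start and vsync_end all change while
 * the pixel clock, active size, horizontal timing, vsync pulse width,
 * hskew and vscan stay the same. This is typically a front-porch-only
 * change, which FreeSync can absorb without a full modeset.
 */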
9196 is_timing_unchanged_for_freesync(struct drm_crtc_state *old_crtc_state,
9197 struct drm_crtc_state *new_crtc_state)
9199 struct drm_display_mode old_mode, new_mode;
9201 if (!old_crtc_state || !new_crtc_state)
9204 old_mode = old_crtc_state->mode;
9205 new_mode = new_crtc_state->mode;
9207 if (old_mode.clock == new_mode.clock &&
9208 old_mode.hdisplay == new_mode.hdisplay &&
9209 old_mode.vdisplay == new_mode.vdisplay &&
9210 old_mode.htotal == new_mode.htotal &&
9211 old_mode.vtotal != new_mode.vtotal &&
9212 old_mode.hsync_start == new_mode.hsync_start &&
9213 old_mode.vsync_start != new_mode.vsync_start &&
9214 old_mode.hsync_end == new_mode.hsync_end &&
9215 old_mode.vsync_end != new_mode.vsync_end &&
9216 old_mode.hskew == new_mode.hskew &&
9217 old_mode.vscan == new_mode.vscan &&
9218 (old_mode.vsync_end - old_mode.vsync_start) ==
9219 (new_mode.vsync_end - new_mode.vsync_start))
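/*
 * Latch the CRTC into VRR_STATE_ACTIVE_FIXED and compute the fixed
 * refresh rate in uHz from the new mode:
 *
 *   fixed_refresh_in_uhz = mode->clock (kHz) * 1000 * 1000000
 *                          / (mode->htotal * mode->vtotal)
 */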
9225 static void set_freesync_fixed_config(struct dm_crtc_state *dm_new_crtc_state) {
9226 uint64_t num, den, res;
9227 struct drm_crtc_state *new_crtc_state = &dm_new_crtc_state->base;
9229 dm_new_crtc_state->freesync_config.state = VRR_STATE_ACTIVE_FIXED;
9231 num = (unsigned long long)new_crtc_state->mode.clock * 1000 * 1000000;
9232 den = (unsigned long long)new_crtc_state->mode.htotal *
9233 (unsigned long long)new_crtc_state->mode.vtotal;
9235 res = div_u64(num, den);
9236 dm_new_crtc_state->freesync_config.fixed_refresh_in_uhz = res;
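/*
 * Atomic-check helper for a single CRTC. When enable is false, remove
 * the old dc_stream from the DC context for any disabled or reset CRTC;
 * when enable is true, create and validate a new stream for the sink,
 * add it to the context and apply scaling, ABM, color management and
 * FreeSync settings. Sets *lock_and_validation_needed whenever the
 * global DC state has to be revalidated.
 */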
9239 static int dm_update_crtc_state(struct amdgpu_display_manager *dm,
9240 struct drm_atomic_state *state,
9241 struct drm_crtc *crtc,
9242 struct drm_crtc_state *old_crtc_state,
9243 struct drm_crtc_state *new_crtc_state,
9245 bool *lock_and_validation_needed)
9247 struct dm_atomic_state *dm_state = NULL;
9248 struct dm_crtc_state *dm_old_crtc_state, *dm_new_crtc_state;
9249 struct dc_stream_state *new_stream;
9253 * TODO Move this code into dm_crtc_atomic_check once we get rid of dc_validation_set
9254 * update changed items
9256 struct amdgpu_crtc *acrtc = NULL;
9257 struct amdgpu_dm_connector *aconnector = NULL;
9258 struct drm_connector_state *drm_new_conn_state = NULL, *drm_old_conn_state = NULL;
9259 struct dm_connector_state *dm_new_conn_state = NULL, *dm_old_conn_state = NULL;
9263 dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
9264 dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
9265 acrtc = to_amdgpu_crtc(crtc);
9266 aconnector = amdgpu_dm_find_first_crtc_matching_connector(state, crtc);
9268 /* TODO This hack should go away */
9269 if (aconnector && enable) {
9270 /* Make sure fake sink is created in plug-in scenario */
9271 drm_new_conn_state = drm_atomic_get_new_connector_state(state,
9273 drm_old_conn_state = drm_atomic_get_old_connector_state(state,
9276 if (IS_ERR(drm_new_conn_state)) {
9277 ret = PTR_ERR_OR_ZERO(drm_new_conn_state);
9281 dm_new_conn_state = to_dm_connector_state(drm_new_conn_state);
9282 dm_old_conn_state = to_dm_connector_state(drm_old_conn_state);
9284 if (!drm_atomic_crtc_needs_modeset(new_crtc_state))
9287 new_stream = create_validate_stream_for_sink(aconnector,
9288 &new_crtc_state->mode,
9290 dm_old_crtc_state->stream);
9293 * We can have no stream on ACTION_SET if a display
9294 * was disconnected during S3; in this case it is not an
9295 * error, the OS will be updated after detection and
9296 * will do the right thing on the next atomic commit.
9300 DRM_DEBUG_DRIVER("%s: Failed to create new stream for crtc %d\n",
9301 __func__, acrtc->base.base.id);
9307 * TODO: Check VSDB bits to decide whether this should
9308 * be enabled or not.
9310 new_stream->triggered_crtc_reset.enabled =
9311 dm->force_timing_sync;
9313 dm_new_crtc_state->abm_level = dm_new_conn_state->abm_level;
9315 ret = fill_hdr_info_packet(drm_new_conn_state,
9316 &new_stream->hdr_static_metadata);
9321 * If we already removed the old stream from the context
9322 * (and set the new stream to NULL) then we can't reuse
9323 * the old stream even if the stream and scaling are unchanged.
9324 * We'll hit the BUG_ON and black screen.
9326 * TODO: Refactor this function to allow this check to work
9327 * in all conditions.
9329 if (amdgpu_freesync_vid_mode &&
9330 dm_new_crtc_state->stream &&
9331 is_timing_unchanged_for_freesync(new_crtc_state, old_crtc_state))
9334 if (dm_new_crtc_state->stream &&
9335 dc_is_stream_unchanged(new_stream, dm_old_crtc_state->stream) &&
9336 dc_is_stream_scaling_unchanged(new_stream, dm_old_crtc_state->stream)) {
9337 new_crtc_state->mode_changed = false;
9338 DRM_DEBUG_DRIVER("Mode change not required, setting mode_changed to %d",
9339 new_crtc_state->mode_changed);
9343 /* mode_changed flag may get updated above, need to check again */
9344 if (!drm_atomic_crtc_needs_modeset(new_crtc_state))
9348 "amdgpu_crtc id:%d crtc_state_flags: enable:%d, active:%d, "
9349 "planes_changed:%d, mode_changed:%d,active_changed:%d,"
9350 "connectors_changed:%d\n",
9352 new_crtc_state->enable,
9353 new_crtc_state->active,
9354 new_crtc_state->planes_changed,
9355 new_crtc_state->mode_changed,
9356 new_crtc_state->active_changed,
9357 new_crtc_state->connectors_changed);
9359 /* Remove stream for any changed/disabled CRTC */
9362 if (!dm_old_crtc_state->stream)
9365 if (amdgpu_freesync_vid_mode && dm_new_crtc_state->stream &&
9366 is_timing_unchanged_for_freesync(new_crtc_state,
9368 new_crtc_state->mode_changed = false;
9370 "Mode change not required for front porch change, "
9371 "setting mode_changed to %d",
9372 new_crtc_state->mode_changed);
9374 set_freesync_fixed_config(dm_new_crtc_state);
9377 } else if (amdgpu_freesync_vid_mode && aconnector &&
9378 is_freesync_video_mode(&new_crtc_state->mode,
9380 set_freesync_fixed_config(dm_new_crtc_state);
9383 ret = dm_atomic_get_state(state, &dm_state);
9387 DRM_DEBUG_DRIVER("Disabling DRM crtc: %d\n",
9390 /* i.e. reset mode */
9391 if (dc_remove_stream_from_ctx(
9394 dm_old_crtc_state->stream) != DC_OK) {
9399 dc_stream_release(dm_old_crtc_state->stream);
9400 dm_new_crtc_state->stream = NULL;
9402 reset_freesync_config_for_crtc(dm_new_crtc_state);
9404 *lock_and_validation_needed = true;
9406 } else {/* Add stream for any updated/enabled CRTC */
9408 * Quick fix to prevent a NULL pointer dereference on new_stream when newly
9409 * added MST connectors are not found in the existing crtc_state in chained mode.
9410 * TODO: need to dig out the root cause of that.
9412 if (!aconnector || (!aconnector->dc_sink && aconnector->mst_port))
9415 if (modereset_required(new_crtc_state))
9418 if (modeset_required(new_crtc_state, new_stream,
9419 dm_old_crtc_state->stream)) {
9421 WARN_ON(dm_new_crtc_state->stream);
9423 ret = dm_atomic_get_state(state, &dm_state);
9427 dm_new_crtc_state->stream = new_stream;
9429 dc_stream_retain(new_stream);
9431 DRM_DEBUG_ATOMIC("Enabling DRM crtc: %d\n",
9434 if (dc_add_stream_to_ctx(
9437 dm_new_crtc_state->stream) != DC_OK) {
9442 *lock_and_validation_needed = true;
9447 /* Release extra reference */
9449 dc_stream_release(new_stream);
9452 * We want to do dc stream updates that do not require a
9453 * full modeset below.
9455 if (!(enable && aconnector && new_crtc_state->active))
9458 * Given above conditions, the dc state cannot be NULL because:
9459 * 1. We're in the process of enabling the CRTC (it has just been added
9460 * to the dc context, or is already on the context),
9461 * 2. it has a valid connector attached, and
9462 * 3. it is currently active and enabled.
9463 * => The dc stream state currently exists.
9465 BUG_ON(dm_new_crtc_state->stream == NULL);
9467 /* Scaling or underscan settings */
9468 if (is_scaling_state_different(dm_old_conn_state, dm_new_conn_state))
9469 update_stream_scaling_settings(
9470 &new_crtc_state->mode, dm_new_conn_state, dm_new_crtc_state->stream);
9473 dm_new_crtc_state->abm_level = dm_new_conn_state->abm_level;
9476 * Color management settings. We also update color properties
9477 * when a modeset is needed, to ensure it gets reprogrammed.
9479 if (dm_new_crtc_state->base.color_mgmt_changed ||
9480 drm_atomic_crtc_needs_modeset(new_crtc_state)) {
9481 ret = amdgpu_dm_update_crtc_color_mgmt(dm_new_crtc_state);
9486 /* Update Freesync settings. */
9487 get_freesync_config_for_crtc(dm_new_crtc_state,
9494 dc_stream_release(new_stream);
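/*
 * Decide whether a plane update requires removing and recreating the
 * DC plane state (a "reset"). Besides changes to the plane itself,
 * any added/removed plane, scaling, rotation, blending, format, tiling
 * or DCC change on another plane of the same CRTC forces a reset,
 * since DC currently rebuilds all planes to keep z-order and pipe
 * acquisition correct.
 */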
9498 static bool should_reset_plane(struct drm_atomic_state *state,
9499 struct drm_plane *plane,
9500 struct drm_plane_state *old_plane_state,
9501 struct drm_plane_state *new_plane_state)
9503 struct drm_plane *other;
9504 struct drm_plane_state *old_other_state, *new_other_state;
9505 struct drm_crtc_state *new_crtc_state;
9509 * TODO: Remove this hack once the checks below are sufficient
9510 * to determine when we need to reset all the planes on the CRTC.
9513 if (state->allow_modeset)
9516 /* Exit early if we know that we're adding or removing the plane. */
9517 if (old_plane_state->crtc != new_plane_state->crtc)
9520 /* old crtc == new_crtc == NULL, plane not in context. */
9521 if (!new_plane_state->crtc)
9525 drm_atomic_get_new_crtc_state(state, new_plane_state->crtc);
9527 if (!new_crtc_state)
9530 /* CRTC Degamma changes currently require us to recreate planes. */
9531 if (new_crtc_state->color_mgmt_changed)
9534 if (drm_atomic_crtc_needs_modeset(new_crtc_state))
9538 * If there are any new primary or overlay planes being added or
9539 * removed then the z-order can potentially change. To ensure
9540 * correct z-order and pipe acquisition the current DC architecture
9541 * requires us to remove and recreate all existing planes.
9543 * TODO: Come up with a more elegant solution for this.
9545 for_each_oldnew_plane_in_state(state, other, old_other_state, new_other_state, i) {
9546 struct amdgpu_framebuffer *old_afb, *new_afb;
9547 if (other->type == DRM_PLANE_TYPE_CURSOR)
9550 if (old_other_state->crtc != new_plane_state->crtc &&
9551 new_other_state->crtc != new_plane_state->crtc)
9554 if (old_other_state->crtc != new_other_state->crtc)
9557 /* Src/dst size and scaling updates. */
9558 if (old_other_state->src_w != new_other_state->src_w ||
9559 old_other_state->src_h != new_other_state->src_h ||
9560 old_other_state->crtc_w != new_other_state->crtc_w ||
9561 old_other_state->crtc_h != new_other_state->crtc_h)
9564 /* Rotation / mirroring updates. */
9565 if (old_other_state->rotation != new_other_state->rotation)
9568 /* Blending updates. */
9569 if (old_other_state->pixel_blend_mode !=
9570 new_other_state->pixel_blend_mode)
9573 /* Alpha updates. */
9574 if (old_other_state->alpha != new_other_state->alpha)
9577 /* Colorspace changes. */
9578 if (old_other_state->color_range != new_other_state->color_range ||
9579 old_other_state->color_encoding != new_other_state->color_encoding)
9582 /* Framebuffer checks fall at the end. */
9583 if (!old_other_state->fb || !new_other_state->fb)
9586 /* Pixel format changes can require bandwidth updates. */
9587 if (old_other_state->fb->format != new_other_state->fb->format)
9590 old_afb = (struct amdgpu_framebuffer *)old_other_state->fb;
9591 new_afb = (struct amdgpu_framebuffer *)new_other_state->fb;
9593 /* Tiling and DCC changes also require bandwidth updates. */
9594 if (old_afb->tiling_flags != new_afb->tiling_flags ||
9595 old_afb->base.modifier != new_afb->base.modifier)
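/*
 * Validate a cursor framebuffer against hardware cursor constraints:
 * maximum width/height, no cropping/scaling, a pitch matching the FB
 * width (and one of the supported pitch values), and a linear layout
 * when no format modifier is supplied.
 */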
9602 static int dm_check_cursor_fb(struct amdgpu_crtc *new_acrtc,
9603 struct drm_plane_state *new_plane_state,
9604 struct drm_framebuffer *fb)
9606 struct amdgpu_device *adev = drm_to_adev(new_acrtc->base.dev);
9607 struct amdgpu_framebuffer *afb = to_amdgpu_framebuffer(fb);
9611 if (fb->width > new_acrtc->max_cursor_width ||
9612 fb->height > new_acrtc->max_cursor_height) {
9613 DRM_DEBUG_ATOMIC("Bad cursor FB size %dx%d\n",
9614 new_plane_state->fb->width,
9615 new_plane_state->fb->height);
9618 if (new_plane_state->src_w != fb->width << 16 ||
9619 new_plane_state->src_h != fb->height << 16) {
9620 DRM_DEBUG_ATOMIC("Cropping not supported for cursor plane\n");
9624 /* Pitch in pixels */
9625 pitch = fb->pitches[0] / fb->format->cpp[0];
9627 if (fb->width != pitch) {
9628 DRM_DEBUG_ATOMIC("Cursor FB width %d doesn't match pitch %d",
9637 /* FB pitch is supported by cursor plane */
9640 DRM_DEBUG_ATOMIC("Bad cursor FB pitch %d px\n", pitch);
9644 /* Core DRM takes care of checking FB modifiers, so we only need to
9645 * check tiling flags when the FB doesn't have a modifier. */
9646 if (!(fb->flags & DRM_MODE_FB_MODIFIERS)) {
9647 if (adev->family < AMDGPU_FAMILY_AI) {
9648 linear = AMDGPU_TILING_GET(afb->tiling_flags, ARRAY_MODE) != DC_ARRAY_2D_TILED_THIN1 &&
9649 AMDGPU_TILING_GET(afb->tiling_flags, ARRAY_MODE) != DC_ARRAY_1D_TILED_THIN1 &&
9650 AMDGPU_TILING_GET(afb->tiling_flags, MICRO_TILE_MODE) == 0;
9652 linear = AMDGPU_TILING_GET(afb->tiling_flags, SWIZZLE_MODE) == 0;
9655 DRM_DEBUG_ATOMIC("Cursor FB not linear");
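/*
 * Atomic-check helper for a single plane. Cursor planes are only
 * sanity-checked (position and FB constraints). For other planes,
 * either remove the old dc_plane_state from the DC context or create a
 * new one, fill its attributes and attach it to the stream, marking the
 * commit as needing full DC validation.
 */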
9663 static int dm_update_plane_state(struct dc *dc,
9664 struct drm_atomic_state *state,
9665 struct drm_plane *plane,
9666 struct drm_plane_state *old_plane_state,
9667 struct drm_plane_state *new_plane_state,
9669 bool *lock_and_validation_needed)
9672 struct dm_atomic_state *dm_state = NULL;
9673 struct drm_crtc *new_plane_crtc, *old_plane_crtc;
9674 struct drm_crtc_state *old_crtc_state, *new_crtc_state;
9675 struct dm_crtc_state *dm_new_crtc_state, *dm_old_crtc_state;
9676 struct dm_plane_state *dm_new_plane_state, *dm_old_plane_state;
9677 struct amdgpu_crtc *new_acrtc;
9682 new_plane_crtc = new_plane_state->crtc;
9683 old_plane_crtc = old_plane_state->crtc;
9684 dm_new_plane_state = to_dm_plane_state(new_plane_state);
9685 dm_old_plane_state = to_dm_plane_state(old_plane_state);
9687 if (plane->type == DRM_PLANE_TYPE_CURSOR) {
9688 if (!enable || !new_plane_crtc ||
9689 drm_atomic_plane_disabling(plane->state, new_plane_state))
9692 new_acrtc = to_amdgpu_crtc(new_plane_crtc);
9694 if (new_plane_state->src_x != 0 || new_plane_state->src_y != 0) {
9695 DRM_DEBUG_ATOMIC("Cropping not supported for cursor plane\n");
9699 if (new_plane_state->fb) {
9700 ret = dm_check_cursor_fb(new_acrtc, new_plane_state,
9701 new_plane_state->fb);
9709 needs_reset = should_reset_plane(state, plane, old_plane_state,
9712 /* Remove any changed/removed planes */
9717 if (!old_plane_crtc)
9720 old_crtc_state = drm_atomic_get_old_crtc_state(
9721 state, old_plane_crtc);
9722 dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
9724 if (!dm_old_crtc_state->stream)
9727 DRM_DEBUG_ATOMIC("Disabling DRM plane: %d on DRM crtc %d\n",
9728 plane->base.id, old_plane_crtc->base.id);
9730 ret = dm_atomic_get_state(state, &dm_state);
9734 if (!dc_remove_plane_from_context(
9736 dm_old_crtc_state->stream,
9737 dm_old_plane_state->dc_state,
9738 dm_state->context)) {
9744 dc_plane_state_release(dm_old_plane_state->dc_state);
9745 dm_new_plane_state->dc_state = NULL;
9747 *lock_and_validation_needed = true;
9749 } else { /* Add new planes */
9750 struct dc_plane_state *dc_new_plane_state;
9752 if (drm_atomic_plane_disabling(plane->state, new_plane_state))
9755 if (!new_plane_crtc)
9758 new_crtc_state = drm_atomic_get_new_crtc_state(state, new_plane_crtc);
9759 dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
9761 if (!dm_new_crtc_state->stream)
9767 ret = dm_plane_helper_check_state(new_plane_state, new_crtc_state);
9771 WARN_ON(dm_new_plane_state->dc_state);
9773 dc_new_plane_state = dc_create_plane_state(dc);
9774 if (!dc_new_plane_state)
9777 DRM_DEBUG_ATOMIC("Enabling DRM plane: %d on DRM crtc %d\n",
9778 plane->base.id, new_plane_crtc->base.id);
9780 ret = fill_dc_plane_attributes(
9781 drm_to_adev(new_plane_crtc->dev),
9786 dc_plane_state_release(dc_new_plane_state);
9790 ret = dm_atomic_get_state(state, &dm_state);
9792 dc_plane_state_release(dc_new_plane_state);
9797 * Any atomic check errors that occur after this will
9798 * not need a release. The plane state will be attached
9799 * to the stream, and therefore part of the atomic
9800 * state. It'll be released when the atomic state is
9803 if (!dc_add_plane_to_context(
9805 dm_new_crtc_state->stream,
9807 dm_state->context)) {
9809 dc_plane_state_release(dc_new_plane_state);
9813 dm_new_plane_state->dc_state = dc_new_plane_state;
9815 /* Tell DC to do a full surface update every time there
9816 * is a plane change. Inefficient, but works for now.
9818 dm_new_plane_state->dc_state->update_flags.bits.full_update = 1;
9820 *lock_and_validation_needed = true;
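/*
 * Reject commits where the cursor plane and the primary plane of the
 * same CRTC are scaled differently; see the comment below on how the
 * cursor inherits the pipe's scaling.
 */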
9827 static int dm_check_crtc_cursor(struct drm_atomic_state *state,
9828 struct drm_crtc *crtc,
9829 struct drm_crtc_state *new_crtc_state)
9831 struct drm_plane_state *new_cursor_state, *new_primary_state;
9832 int cursor_scale_w, cursor_scale_h, primary_scale_w, primary_scale_h;
9834 /* On DCE and DCN there is no dedicated hardware cursor plane. We get a
9835 * cursor per pipe but it's going to inherit the scaling and
9836 * positioning from the underlying pipe. Check that the cursor plane's
9837 * scaling matches the primary plane's. */
9839 new_cursor_state = drm_atomic_get_new_plane_state(state, crtc->cursor);
9840 new_primary_state = drm_atomic_get_new_plane_state(state, crtc->primary);
9841 if (!new_cursor_state || !new_primary_state ||
9842 !new_cursor_state->fb || !new_primary_state->fb) {
9846 cursor_scale_w = new_cursor_state->crtc_w * 1000 /
9847 (new_cursor_state->src_w >> 16);
9848 cursor_scale_h = new_cursor_state->crtc_h * 1000 /
9849 (new_cursor_state->src_h >> 16);
9851 primary_scale_w = new_primary_state->crtc_w * 1000 /
9852 (new_primary_state->src_w >> 16);
9853 primary_scale_h = new_primary_state->crtc_h * 1000 /
9854 (new_primary_state->src_h >> 16);
9856 if (cursor_scale_w != primary_scale_w ||
9857 cursor_scale_h != primary_scale_h) {
9858 DRM_DEBUG_ATOMIC("Cursor plane scaling doesn't match primary plane\n");
9865 #if defined(CONFIG_DRM_AMD_DC_DCN)
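/*
 * For a CRTC undergoing a modeset, pull every other CRTC that shares
 * the same MST topology into the atomic state so that DSC bandwidth
 * can be recomputed across the whole topology.
 */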
9866 static int add_affected_mst_dsc_crtcs(struct drm_atomic_state *state, struct drm_crtc *crtc)
9868 struct drm_connector *connector;
9869 struct drm_connector_state *conn_state;
9870 struct amdgpu_dm_connector *aconnector = NULL;
9872 for_each_new_connector_in_state(state, connector, conn_state, i) {
9873 if (conn_state->crtc != crtc)
9876 aconnector = to_amdgpu_dm_connector(connector);
9877 if (!aconnector->port || !aconnector->mst_port)
9886 return drm_dp_mst_add_affected_dsc_crtcs(state, &aconnector->mst_port->mst_mgr);
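/*
 * Reject commits where an enabled overlay plane does not fully cover
 * the primary plane of its CRTC: with the per-pipe hardware cursor, a
 * partially covering overlay cannot be composited consistently.
 */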
9890 static int validate_overlay(struct drm_atomic_state *state)
9893 struct drm_plane *plane;
9894 struct drm_plane_state *old_plane_state, *new_plane_state;
9895 struct drm_plane_state *primary_state, *overlay_state = NULL;
9897 /* Check if primary plane is contained inside overlay */
9898 for_each_oldnew_plane_in_state_reverse(state, plane, old_plane_state, new_plane_state, i) {
9899 if (plane->type == DRM_PLANE_TYPE_OVERLAY) {
9900 if (drm_atomic_plane_disabling(plane->state, new_plane_state))
9903 overlay_state = new_plane_state;
9908 /* check if we're making changes to the overlay plane */
9912 /* check if overlay plane is enabled */
9913 if (!overlay_state->crtc)
9916 /* find the primary plane for the CRTC that the overlay is enabled on */
9917 primary_state = drm_atomic_get_plane_state(state, overlay_state->crtc->primary);
9918 if (IS_ERR(primary_state))
9919 return PTR_ERR(primary_state);
9921 /* check if primary plane is enabled */
9922 if (!primary_state->crtc)
9925 /* Perform the bounds check to ensure the overlay plane covers the primary */
9926 if (primary_state->crtc_x < overlay_state->crtc_x ||
9927 primary_state->crtc_y < overlay_state->crtc_y ||
9928 primary_state->crtc_x + primary_state->crtc_w > overlay_state->crtc_x + overlay_state->crtc_w ||
9929 primary_state->crtc_y + primary_state->crtc_h > overlay_state->crtc_y + overlay_state->crtc_h) {
9930 DRM_DEBUG_ATOMIC("Overlay plane is enabled with hardware cursor but does not fully cover primary plane\n");
9938 * amdgpu_dm_atomic_check() - Atomic check implementation for AMDgpu DM.
9939 * @dev: The DRM device
9940 * @state: The atomic state to commit
9942 * Validate that the given atomic state is programmable by DC into hardware.
9943 * This involves constructing a &struct dc_state reflecting the new hardware
9944 * state we wish to commit, then querying DC to see if it is programmable. It's
9945 * important not to modify the existing DC state. Otherwise, atomic_check
9946 * may unexpectedly commit hardware changes.
9948 * When validating the DC state, it's important that the right locks are
9949 * acquired. For full updates case which removes/adds/updates streams on one
9950 * CRTC while flipping on another CRTC, acquiring global lock will guarantee
9951 * that any such full update commit will wait for completion of any outstanding
9952 * flip using DRMs synchronization events.
9954 * Note that DM adds the affected connectors for all CRTCs in state, when that
9955 * might not seem necessary. This is because DC stream creation requires the
9956 * DC sink, which is tied to the DRM connector state. Cleaning this up should
9957 * be possible but non-trivial - a possible TODO item.
9959 * Return: 0 on success, or a negative error code if validation failed.
9961 static int amdgpu_dm_atomic_check(struct drm_device *dev,
9962 struct drm_atomic_state *state)
9964 struct amdgpu_device *adev = drm_to_adev(dev);
9965 struct dm_atomic_state *dm_state = NULL;
9966 struct dc *dc = adev->dm.dc;
9967 struct drm_connector *connector;
9968 struct drm_connector_state *old_con_state, *new_con_state;
9969 struct drm_crtc *crtc;
9970 struct drm_crtc_state *old_crtc_state, *new_crtc_state;
9971 struct drm_plane *plane;
9972 struct drm_plane_state *old_plane_state, *new_plane_state;
9973 enum dc_status status;
9975 bool lock_and_validation_needed = false;
9976 struct dm_crtc_state *dm_old_crtc_state;
9978 trace_amdgpu_dm_atomic_check_begin(state);
9980 ret = drm_atomic_helper_check_modeset(dev, state);
9984 /* Check connector changes */
9985 for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) {
9986 struct dm_connector_state *dm_old_con_state = to_dm_connector_state(old_con_state);
9987 struct dm_connector_state *dm_new_con_state = to_dm_connector_state(new_con_state);
9989 /* Skip connectors that are disabled or part of modeset already. */
9990 if (!old_con_state->crtc && !new_con_state->crtc)
9993 if (!new_con_state->crtc)
9996 new_crtc_state = drm_atomic_get_crtc_state(state, new_con_state->crtc);
9997 if (IS_ERR(new_crtc_state)) {
9998 ret = PTR_ERR(new_crtc_state);
10002 if (dm_old_con_state->abm_level !=
10003 dm_new_con_state->abm_level)
10004 new_crtc_state->connectors_changed = true;
10007 #if defined(CONFIG_DRM_AMD_DC_DCN)
10008 if (dc_resource_is_dsc_encoding_supported(dc)) {
10009 for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
10010 if (drm_atomic_crtc_needs_modeset(new_crtc_state)) {
10011 ret = add_affected_mst_dsc_crtcs(state, crtc);
10018 for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
10019 dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
10021 if (!drm_atomic_crtc_needs_modeset(new_crtc_state) &&
10022 !new_crtc_state->color_mgmt_changed &&
10023 old_crtc_state->vrr_enabled == new_crtc_state->vrr_enabled &&
10024 dm_old_crtc_state->dsc_force_changed == false)
10027 if (!new_crtc_state->enable)
10030 ret = drm_atomic_add_affected_connectors(state, crtc);
10034 ret = drm_atomic_add_affected_planes(state, crtc);
10038 if (dm_old_crtc_state->dsc_force_changed)
10039 new_crtc_state->mode_changed = true;
10043 * Add all primary and overlay planes on the CRTC to the state
10044 * whenever a plane is enabled to maintain correct z-ordering
10045 * and to enable fast surface updates.
10047 drm_for_each_crtc(crtc, dev) {
10048 bool modified = false;
10050 for_each_oldnew_plane_in_state(state, plane, old_plane_state, new_plane_state, i) {
10051 if (plane->type == DRM_PLANE_TYPE_CURSOR)
10054 if (new_plane_state->crtc == crtc ||
10055 old_plane_state->crtc == crtc) {
10064 drm_for_each_plane_mask(plane, state->dev, crtc->state->plane_mask) {
10065 if (plane->type == DRM_PLANE_TYPE_CURSOR)
10069 drm_atomic_get_plane_state(state, plane);
10071 if (IS_ERR(new_plane_state)) {
10072 ret = PTR_ERR(new_plane_state);
10078 /* Remove existing planes if they are modified */
10079 for_each_oldnew_plane_in_state_reverse(state, plane, old_plane_state, new_plane_state, i) {
10080 ret = dm_update_plane_state(dc, state, plane,
10084 &lock_and_validation_needed);
10089 /* Disable all crtcs which require disable */
10090 for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
10091 ret = dm_update_crtc_state(&adev->dm, state, crtc,
10095 &lock_and_validation_needed);
10100 /* Enable all crtcs which require enable */
10101 for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
10102 ret = dm_update_crtc_state(&adev->dm, state, crtc,
10106 &lock_and_validation_needed);
10111 ret = validate_overlay(state);
10115 /* Add new/modified planes */
10116 for_each_oldnew_plane_in_state_reverse(state, plane, old_plane_state, new_plane_state, i) {
10117 ret = dm_update_plane_state(dc, state, plane,
10121 &lock_and_validation_needed);
10126 /* Run this here since we want to validate the streams we created */
10127 ret = drm_atomic_helper_check_planes(dev, state);
10131 /* Check cursor planes scaling */
10132 for_each_new_crtc_in_state(state, crtc, new_crtc_state, i) {
10133 ret = dm_check_crtc_cursor(state, crtc, new_crtc_state);
10138 if (state->legacy_cursor_update) {
10140 * This is a fast cursor update coming from the plane update
10141 * helper, check if it can be done asynchronously for better
10144 state->async_update =
10145 !drm_atomic_helper_async_check(dev, state);
10148 * Skip the remaining global validation if this is an async
10149 * update. Cursor updates can be done without affecting
10150 * state or bandwidth calcs and this avoids the performance
10151 * penalty of locking the private state object and
10152 * allocating a new dc_state.
10154 if (state->async_update)
10158 /* Check scaling and underscan changes */
10159 /* TODO: Removed scaling changes validation due to inability to commit
10160 * a new stream into context w/o causing a full reset. Need to
10161 * decide how to handle.
10163 for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) {
10164 struct dm_connector_state *dm_old_con_state = to_dm_connector_state(old_con_state);
10165 struct dm_connector_state *dm_new_con_state = to_dm_connector_state(new_con_state);
10166 struct amdgpu_crtc *acrtc = to_amdgpu_crtc(dm_new_con_state->base.crtc);
10168 /* Skip any modesets/resets */
10169 if (!acrtc || drm_atomic_crtc_needs_modeset(
10170 drm_atomic_get_new_crtc_state(state, &acrtc->base)))
10173 /* Skip anything that is not a scaling or underscan change */
10174 if (!is_scaling_state_different(dm_new_con_state, dm_old_con_state))
10177 lock_and_validation_needed = true;
10181 * Streams and planes are reset when there are changes that affect
10182 * bandwidth. Anything that affects bandwidth needs to go through
10183 * DC global validation to ensure that the configuration can be applied
10186 * We have to currently stall out here in atomic_check for outstanding
10187 * commits to finish in this case because our IRQ handlers reference
10188 * DRM state directly - we can end up disabling interrupts too early
10191 * TODO: Remove this stall and drop DM state private objects.
10193 if (lock_and_validation_needed) {
10194 ret = dm_atomic_get_state(state, &dm_state);
10198 ret = do_aquire_global_lock(dev, state);
10202 #if defined(CONFIG_DRM_AMD_DC_DCN)
10203 if (!compute_mst_dsc_configs_for_state(state, dm_state->context))
10206 ret = dm_update_mst_vcpi_slots_for_dsc(state, dm_state->context);
10212 * Perform validation of MST topology in the state:
10213 * We need to perform MST atomic check before calling
10214 * dc_validate_global_state(), or we risk getting stuck in an
10215 * infinite loop and eventually hanging.
10217 ret = drm_dp_mst_atomic_check(state);
10220 status = dc_validate_global_state(dc, dm_state->context, false);
10221 if (status != DC_OK) {
10222 DC_LOG_WARNING("DC global validation failure: %s (%d)",
10223 dc_status_to_str(status), status);
10229 * The commit is a fast update. Fast updates shouldn't change
10230 * the DC context or affect global validation, and they can have their
10231 * commit work done in parallel with other commits not touching
10232 * the same resource. If we have a new DC context as part of
10233 * the DM atomic state from validation we need to free it and
10234 * retain the existing one instead.
10236 * Furthermore, since the DM atomic state only contains the DC
10237 * context and can safely be annulled, we can free the state
10238 * and clear the associated private object now to free
10239 * some memory and avoid a possible use-after-free later.
10242 for (i = 0; i < state->num_private_objs; i++) {
10243 struct drm_private_obj *obj = state->private_objs[i].ptr;
10245 if (obj->funcs == adev->dm.atomic_obj.funcs) {
10246 int j = state->num_private_objs-1;
10248 dm_atomic_destroy_state(obj,
10249 state->private_objs[i].state);
10251 /* If i is not at the end of the array then the
10252 * last element needs to be moved to where i was
10253 * before the array can safely be truncated.
10256 state->private_objs[i] =
10257 state->private_objs[j];
10259 state->private_objs[j].ptr = NULL;
10260 state->private_objs[j].state = NULL;
10261 state->private_objs[j].old_state = NULL;
10262 state->private_objs[j].new_state = NULL;
10264 state->num_private_objs = j;
10270 /* Store the overall update type for use later in atomic check. */
10271 for_each_new_crtc_in_state (state, crtc, new_crtc_state, i) {
10272 struct dm_crtc_state *dm_new_crtc_state =
10273 to_dm_crtc_state(new_crtc_state);
10275 dm_new_crtc_state->update_type = lock_and_validation_needed ?
10280 /* Must be a success */
10283 trace_amdgpu_dm_atomic_check_finish(state, ret);
10288 if (ret == -EDEADLK)
10289 DRM_DEBUG_DRIVER("Atomic check stopped to avoid deadlock.\n");
10290 else if (ret == -EINTR || ret == -EAGAIN || ret == -ERESTARTSYS)
10291 DRM_DEBUG_DRIVER("Atomic check stopped due to signal.\n");
10293 DRM_DEBUG_DRIVER("Atomic check failed with err: %d\n", ret);
10295 trace_amdgpu_dm_atomic_check_finish(state, ret);
10300 static bool is_dp_capable_without_timing_msa(struct dc *dc,
10301 struct amdgpu_dm_connector *amdgpu_dm_connector)
10304 bool capable = false;
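/* The DP_MSA_TIMING_PAR_IGNORED bit in the DOWN_STREAM_PORT_COUNT DPCD
 * register indicates that the sink can ignore MSA timing parameters,
 * a prerequisite for variable refresh rate over DP.
 */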
10306 if (amdgpu_dm_connector->dc_link &&
10307 dm_helpers_dp_read_dpcd(
10309 amdgpu_dm_connector->dc_link,
10310 DP_DOWN_STREAM_PORT_COUNT,
10312 sizeof(dpcd_data))) {
10313 capable = (dpcd_data & DP_MSA_TIMING_PAR_IGNORED) ? true : false;
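/* Stream the CEA extension block to the DMCU firmware parser, 8 bytes at a
 * time, and collect the AMD vendor-specific data block (VSDB) refresh-rate
 * range it reports back.
 */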
10319 static bool parse_edid_cea(struct amdgpu_dm_connector *aconnector,
10320 uint8_t *edid_ext, int len,
10321 struct amdgpu_hdmi_vsdb_info *vsdb_info)
10324 struct amdgpu_device *adev = drm_to_adev(aconnector->base.dev);
10325 struct dc *dc = adev->dm.dc;
10327 /* send extension block to DMCU for parsing */
10328 for (i = 0; i < len; i += 8) {
10332 /* send 8 bytes at a time */
10333 if (!dc_edid_parser_send_cea(dc, i, len, &edid_ext[i], 8))
10337 /* EDID block send completed, expect result */
10338 int version, min_rate, max_rate;
10340 res = dc_edid_parser_recv_amd_vsdb(dc, &version, &min_rate, &max_rate);
10342 /* amd vsdb found */
10343 vsdb_info->freesync_supported = 1;
10344 vsdb_info->amd_vsdb_version = version;
10345 vsdb_info->min_refresh_rate_hz = min_rate;
10346 vsdb_info->max_refresh_rate_hz = max_rate;
10354 res = dc_edid_parser_recv_cea_ack(dc, &offset);
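/* Locate the CEA-861 extension block in @edid (open-coding
 * drm_find_cea_extension()) and hand it to parse_edid_cea(). Returns the
 * extension index when a valid AMD VSDB is found, -ENODEV otherwise.
 */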
10362 static int parse_hdmi_amd_vsdb(struct amdgpu_dm_connector *aconnector,
10363 struct edid *edid, struct amdgpu_hdmi_vsdb_info *vsdb_info)
10365 uint8_t *edid_ext = NULL;
10367 bool valid_vsdb_found = false;
10369 /*----- drm_find_cea_extension() -----*/
10370 /* No EDID or EDID extensions */
10371 if (edid == NULL || edid->extensions == 0)
10374 /* Find CEA extension */
10375 for (i = 0; i < edid->extensions; i++) {
10376 edid_ext = (uint8_t *)edid + EDID_LENGTH * (i + 1);
10377 if (edid_ext[0] == CEA_EXT)
10381 if (i == edid->extensions)
10384 /*----- cea_db_offsets() -----*/
10385 if (edid_ext[0] != CEA_EXT)
10388 valid_vsdb_found = parse_edid_cea(aconnector, edid_ext, EDID_LENGTH, vsdb_info);
10390 return valid_vsdb_found ? i : -ENODEV;
10393 void amdgpu_dm_update_freesync_caps(struct drm_connector *connector,
10397 struct detailed_timing *timing;
10398 struct detailed_non_pixel *data;
10399 struct detailed_data_monitor_range *range;
10400 struct amdgpu_dm_connector *amdgpu_dm_connector =
10401 to_amdgpu_dm_connector(connector);
10402 struct dm_connector_state *dm_con_state = NULL;
10404 struct drm_device *dev = connector->dev;
10405 struct amdgpu_device *adev = drm_to_adev(dev);
10406 bool freesync_capable = false;
10407 struct amdgpu_hdmi_vsdb_info vsdb_info = {0};
10409 if (!connector->state) {
10410 DRM_ERROR("%s - Connector has no state", __func__);
10415 dm_con_state = to_dm_connector_state(connector->state);
10417 amdgpu_dm_connector->min_vfreq = 0;
10418 amdgpu_dm_connector->max_vfreq = 0;
10419 amdgpu_dm_connector->pixel_clock_mhz = 0;
10424 dm_con_state = to_dm_connector_state(connector->state);
10426 if (!amdgpu_dm_connector->dc_sink) {
10427 DRM_ERROR("dc_sink NULL, could not add FreeSync module.\n");
10430 if (!adev->dm.freesync_module)
10434 if (amdgpu_dm_connector->dc_sink->sink_signal == SIGNAL_TYPE_DISPLAY_PORT
10435 || amdgpu_dm_connector->dc_sink->sink_signal == SIGNAL_TYPE_EDP) {
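/* For DP/eDP sinks, the EDID monitor range descriptor is only consulted
 * when the sink can ignore MSA timing parameters.
 */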
10436 bool edid_check_required = false;
10439 edid_check_required = is_dp_capable_without_timing_msa(
10441 amdgpu_dm_connector);
10444 if (edid_check_required && (edid->version > 1 ||
10445 (edid->version == 1 && edid->revision > 1))) {
10446 for (i = 0; i < 4; i++) {
10448 timing = &edid->detailed_timings[i];
10449 data = &timing->data.other_data;
10450 range = &data->data.range;
10452 * Check if monitor has continuous frequency mode
10454 if (data->type != EDID_DETAIL_MONITOR_RANGE)
10457 * Check for flag range limits only. If flag == 1 then
10458 * no additional timing information is provided.
10459 * Default GTF, GTF secondary curve and CVT are not supported.
10462 if (range->flags != 1)
10465 amdgpu_dm_connector->min_vfreq = range->min_vfreq;
10466 amdgpu_dm_connector->max_vfreq = range->max_vfreq;
10467 amdgpu_dm_connector->pixel_clock_mhz =
10468 range->pixel_clock_mhz * 10;
10470 connector->display_info.monitor_range.min_vfreq = range->min_vfreq;
10471 connector->display_info.monitor_range.max_vfreq = range->max_vfreq;
10476 if (amdgpu_dm_connector->max_vfreq -
10477 amdgpu_dm_connector->min_vfreq > 10) {
10479 freesync_capable = true;
10482 } else if (edid && amdgpu_dm_connector->dc_sink->sink_signal == SIGNAL_TYPE_HDMI_TYPE_A) {
10483 i = parse_hdmi_amd_vsdb(amdgpu_dm_connector, edid, &vsdb_info);
10484 if (i >= 0 && vsdb_info.freesync_supported) {
10485 timing = &edid->detailed_timings[i];
10486 data = &timing->data.other_data;
10488 amdgpu_dm_connector->min_vfreq = vsdb_info.min_refresh_rate_hz;
10489 amdgpu_dm_connector->max_vfreq = vsdb_info.max_refresh_rate_hz;
10490 if (amdgpu_dm_connector->max_vfreq - amdgpu_dm_connector->min_vfreq > 10)
10491 freesync_capable = true;
10493 connector->display_info.monitor_range.min_vfreq = vsdb_info.min_refresh_rate_hz;
10494 connector->display_info.monitor_range.max_vfreq = vsdb_info.max_refresh_rate_hz;
10500 dm_con_state->freesync_capable = freesync_capable;
10502 if (connector->vrr_capable_property)
10503 drm_connector_set_vrr_capable_property(connector,
10507 static void amdgpu_dm_set_psr_caps(struct dc_link *link)
10509 uint8_t dpcd_data[EDP_PSR_RECEIVER_CAP_SIZE];
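/* Read the sink's PSR capability block from the DP_PSR_SUPPORT DPCD register;
 * PSR is only treated as usable when a non-zero PSR version is reported.
 */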
10511 if (!(link->connector_signal & SIGNAL_TYPE_EDP))
10513 if (link->type == dc_connection_none)
10515 if (dm_helpers_dp_read_dpcd(NULL, link, DP_PSR_SUPPORT,
10516 dpcd_data, sizeof(dpcd_data))) {
10517 link->dpcd_caps.psr_caps.psr_version = dpcd_data[0];
10519 if (dpcd_data[0] == 0) {
10520 link->psr_settings.psr_version = DC_PSR_VERSION_UNSUPPORTED;
10521 link->psr_settings.psr_feature_enabled = false;
10523 link->psr_settings.psr_version = DC_PSR_VERSION_1;
10524 link->psr_settings.psr_feature_enabled = true;
10527 DRM_INFO("PSR support: %d\n", link->psr_settings.psr_feature_enabled);
10532 * amdgpu_dm_link_setup_psr() - configure PSR link
10533 * @stream: stream state
10535 * Return: true on success
10537 static bool amdgpu_dm_link_setup_psr(struct dc_stream_state *stream)
10539 struct dc_link *link = NULL;
10540 struct psr_config psr_config = {0};
10541 struct psr_context psr_context = {0};
10544 if (stream == NULL)
10547 link = stream->link;
10549 psr_config.psr_version = link->dpcd_caps.psr_caps.psr_version;
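/* Only program PSR on the link when the sink reported a non-zero PSR version
 * in its DPCD capabilities.
 */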
10551 if (psr_config.psr_version > 0) {
10552 psr_config.psr_exit_link_training_required = 0x1;
10553 psr_config.psr_frame_capture_indication_req = 0;
10554 psr_config.psr_rfb_setup_time = 0x37;
10555 psr_config.psr_sdp_transmit_line_num_deadline = 0x20;
10556 psr_config.allow_smu_optimizations = 0x0;
10558 ret = dc_link_setup_psr(link, stream, &psr_config, &psr_context);
10561 DRM_DEBUG_DRIVER("PSR link: %d\n", link->psr_settings.psr_feature_enabled);
10567 * amdgpu_dm_psr_enable() - enable PSR f/w
10568 * @stream: stream state
10570 * Return: true on success
10572 bool amdgpu_dm_psr_enable(struct dc_stream_state *stream)
10574 struct dc_link *link = stream->link;
10575 unsigned int vsync_rate_hz = 0;
10576 struct dc_static_screen_params params = {0};
10577 /* Calculate the number of static frames before generating an interrupt to FW. */
10580 /* Init fail-safe of 2 static frames */
10581 unsigned int num_frames_static = 2;
10583 DRM_DEBUG_DRIVER("Enabling psr...\n");
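/* Refresh rate in Hz = pixel clock / (h_total * v_total);
 * pix_clk_100hz is in units of 100 Hz, hence the multiply by 100.
 */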
10585 vsync_rate_hz = div64_u64(div64_u64((
10586 stream->timing.pix_clk_100hz * 100),
10587 stream->timing.v_total),
10588 stream->timing.h_total);
10591 * Calculate number of frames such that at least 30 ms of time has passed.
10594 if (vsync_rate_hz != 0) {
10595 unsigned int frame_time_microsec = 1000000 / vsync_rate_hz;
10596 num_frames_static = (30000 / frame_time_microsec) + 1;
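/* Cursor, overlay and surface updates all count as screen activity and reset
 * the static-frame counter.
 */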
10599 params.triggers.cursor_update = true;
10600 params.triggers.overlay_update = true;
10601 params.triggers.surface_update = true;
10602 params.num_frames = num_frames_static;
10604 dc_stream_set_static_screen_params(link->ctx->dc,
10608 return dc_link_set_psr_allow_active(link, true, false, false);
10612 * amdgpu_dm_psr_disable() - disable PSR f/w
10613 * @stream: stream state
10615 * Return: true on success
10617 static bool amdgpu_dm_psr_disable(struct dc_stream_state *stream)
10620 DRM_DEBUG_DRIVER("Disabling psr...\n");
10622 return dc_link_set_psr_allow_active(stream->link, false, true, false);
10626 * amdgpu_dm_psr_disable_all() - disable PSR f/w
10627 * if PSR is enabled on any stream
10629 * Return: true on success
10631 static bool amdgpu_dm_psr_disable_all(struct amdgpu_display_manager *dm)
10633 DRM_DEBUG_DRIVER("Disabling psr if psr is enabled on any stream\n");
10634 return dc_set_psr_allow_active(dm->dc, false);
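/* Apply adev->dm.force_timing_sync to every stream in the current DC state and
 * retrigger per-frame CRTC synchronization.
 */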
10637 void amdgpu_dm_trigger_timing_sync(struct drm_device *dev)
10639 struct amdgpu_device *adev = drm_to_adev(dev);
10640 struct dc *dc = adev->dm.dc;
10643 mutex_lock(&adev->dm.dc_lock);
10644 if (dc->current_state) {
10645 for (i = 0; i < dc->current_state->stream_count; ++i)
10646 dc->current_state->streams[i]
10647 ->triggered_crtc_reset.enabled =
10648 adev->dm.force_timing_sync;
10650 dm_enable_per_frame_crtc_master_sync(dc->current_state);
10651 dc_trigger_sync(dc, dc->current_state);
10653 mutex_unlock(&adev->dm.dc_lock);
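/* Register write helper used by DC: flags writes to address 0 when
 * DM_CHECK_ADDR_0 is defined and records every write in the amdgpu_dc_wreg
 * tracepoint.
 */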
10656 void dm_write_reg_func(const struct dc_context *ctx, uint32_t address,
10657 uint32_t value, const char *func_name)
10659 #ifdef DM_CHECK_ADDR_0
10660 if (address == 0) {
10661 DC_ERR("invalid register write; address = 0\n");
10665 cgs_write_register(ctx->cgs_device, address, value);
10666 trace_amdgpu_dc_wreg(&ctx->perf_trace->write_count, address, value);
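/* Register read counterpart of dm_write_reg_func(); records every read in the
 * amdgpu_dc_rreg tracepoint.
 */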
10669 uint32_t dm_read_reg_func(const struct dc_context *ctx, uint32_t address,
10670 const char *func_name)
10673 #ifdef DM_CHECK_ADDR_0
10674 if (address == 0) {
10675 DC_ERR("invalid register read; address = 0\n");
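/* Skip the direct register access while the DMUB register helper is gathering
 * commands for offload (and is not burst-writing them).
 */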
10680 if (ctx->dmub_srv &&
10681 ctx->dmub_srv->reg_helper_offload.gather_in_progress &&
10682 !ctx->dmub_srv->reg_helper_offload.should_burst_write) {
10687 value = cgs_read_register(ctx->cgs_device, address);
10689 trace_amdgpu_dc_rreg(&ctx->perf_trace->read_count, address, value);