2 * Copyright 2015 Advanced Micro Devices, Inc.
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
26 /* The caprices of the preprocessor require that this be declared right here */
27 #define CREATE_TRACE_POINTS
29 #include "dm_services_types.h"
31 #include "dc/inc/core_types.h"
32 #include "dal_asic_id.h"
33 #include "dmub/dmub_srv.h"
34 #include "dc/inc/hw/dmcu.h"
35 #include "dc/inc/hw/abm.h"
36 #include "dc/dc_dmub_srv.h"
37 #include "dc/dc_edid_parser.h"
38 #include "dc/dc_stat.h"
39 #include "amdgpu_dm_trace.h"
43 #include "amdgpu_display.h"
44 #include "amdgpu_ucode.h"
46 #include "amdgpu_dm.h"
47 #ifdef CONFIG_DRM_AMD_DC_HDCP
48 #include "amdgpu_dm_hdcp.h"
49 #include <drm/drm_hdcp.h>
51 #include "amdgpu_pm.h"
53 #include "amd_shared.h"
54 #include "amdgpu_dm_irq.h"
55 #include "dm_helpers.h"
56 #include "amdgpu_dm_mst_types.h"
57 #if defined(CONFIG_DEBUG_FS)
58 #include "amdgpu_dm_debugfs.h"
61 #include "ivsrcid/ivsrcid_vislands30.h"
63 #include "i2caux_interface.h"
64 #include <linux/module.h>
65 #include <linux/moduleparam.h>
66 #include <linux/types.h>
67 #include <linux/pm_runtime.h>
68 #include <linux/pci.h>
69 #include <linux/firmware.h>
70 #include <linux/component.h>
72 #include <drm/drm_atomic.h>
73 #include <drm/drm_atomic_uapi.h>
74 #include <drm/drm_atomic_helper.h>
75 #include <drm/drm_dp_mst_helper.h>
76 #include <drm/drm_fb_helper.h>
77 #include <drm/drm_fourcc.h>
78 #include <drm/drm_edid.h>
79 #include <drm/drm_vblank.h>
80 #include <drm/drm_audio_component.h>
82 #if defined(CONFIG_DRM_AMD_DC_DCN)
83 #include "ivsrcid/dcn/irqsrcs_dcn_1_0.h"
85 #include "dcn/dcn_1_0_offset.h"
86 #include "dcn/dcn_1_0_sh_mask.h"
87 #include "soc15_hw_ip.h"
88 #include "vega10_ip_offset.h"
90 #include "soc15_common.h"
93 #include "modules/inc/mod_freesync.h"
94 #include "modules/power/power_helpers.h"
95 #include "modules/inc/mod_info_packet.h"
97 #define FIRMWARE_RENOIR_DMUB "amdgpu/renoir_dmcub.bin"
98 MODULE_FIRMWARE(FIRMWARE_RENOIR_DMUB);
99 #define FIRMWARE_SIENNA_CICHLID_DMUB "amdgpu/sienna_cichlid_dmcub.bin"
100 MODULE_FIRMWARE(FIRMWARE_SIENNA_CICHLID_DMUB);
101 #define FIRMWARE_NAVY_FLOUNDER_DMUB "amdgpu/navy_flounder_dmcub.bin"
102 MODULE_FIRMWARE(FIRMWARE_NAVY_FLOUNDER_DMUB);
103 #define FIRMWARE_GREEN_SARDINE_DMUB "amdgpu/green_sardine_dmcub.bin"
104 MODULE_FIRMWARE(FIRMWARE_GREEN_SARDINE_DMUB);
105 #define FIRMWARE_VANGOGH_DMUB "amdgpu/vangogh_dmcub.bin"
106 MODULE_FIRMWARE(FIRMWARE_VANGOGH_DMUB);
107 #define FIRMWARE_DIMGREY_CAVEFISH_DMUB "amdgpu/dimgrey_cavefish_dmcub.bin"
108 MODULE_FIRMWARE(FIRMWARE_DIMGREY_CAVEFISH_DMUB);
109 #define FIRMWARE_BEIGE_GOBY_DMUB "amdgpu/beige_goby_dmcub.bin"
110 MODULE_FIRMWARE(FIRMWARE_BEIGE_GOBY_DMUB);
112 #define FIRMWARE_RAVEN_DMCU "amdgpu/raven_dmcu.bin"
113 MODULE_FIRMWARE(FIRMWARE_RAVEN_DMCU);
115 #define FIRMWARE_NAVI12_DMCU "amdgpu/navi12_dmcu.bin"
116 MODULE_FIRMWARE(FIRMWARE_NAVI12_DMCU);
118 /* Number of bytes in PSP header for firmware. */
119 #define PSP_HEADER_BYTES 0x100
121 /* Number of bytes in PSP footer for firmware. */
122 #define PSP_FOOTER_BYTES 0x100
127 * The AMDgpu display manager, **amdgpu_dm** (or even simpler,
128 * **dm**) sits between DRM and DC. It acts as a liaison, converting DRM
129 * requests into DC requests, and DC responses into DRM responses.
131 * The root control structure is &struct amdgpu_display_manager.
134 /* basic init/fini API */
135 static int amdgpu_dm_init(struct amdgpu_device *adev);
136 static void amdgpu_dm_fini(struct amdgpu_device *adev);
137 static bool is_freesync_video_mode(const struct drm_display_mode *mode, struct amdgpu_dm_connector *aconnector);
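/* Map the DP dongle type reported in DPCD to the DRM subconnector type. */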
139 static enum drm_mode_subconnector get_subconnector_type(struct dc_link *link)
141 switch (link->dpcd_caps.dongle_type) {
142 case DISPLAY_DONGLE_NONE:
143 return DRM_MODE_SUBCONNECTOR_Native;
144 case DISPLAY_DONGLE_DP_VGA_CONVERTER:
145 return DRM_MODE_SUBCONNECTOR_VGA;
146 case DISPLAY_DONGLE_DP_DVI_CONVERTER:
147 case DISPLAY_DONGLE_DP_DVI_DONGLE:
148 return DRM_MODE_SUBCONNECTOR_DVID;
149 case DISPLAY_DONGLE_DP_HDMI_CONVERTER:
150 case DISPLAY_DONGLE_DP_HDMI_DONGLE:
151 return DRM_MODE_SUBCONNECTOR_HDMIA;
152 case DISPLAY_DONGLE_DP_HDMI_MISMATCHED_DONGLE:
154 return DRM_MODE_SUBCONNECTOR_Unknown;
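/* Keep the DP connector's dp_subconnector property in sync with the
 * currently detected dongle type.
 */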
158 static void update_subconnector_property(struct amdgpu_dm_connector *aconnector)
160 struct dc_link *link = aconnector->dc_link;
161 struct drm_connector *connector = &aconnector->base;
162 enum drm_mode_subconnector subconnector = DRM_MODE_SUBCONNECTOR_Unknown;
164 if (connector->connector_type != DRM_MODE_CONNECTOR_DisplayPort)
167 if (aconnector->dc_sink)
168 subconnector = get_subconnector_type(link);
170 drm_object_property_set_value(&connector->base,
171 connector->dev->mode_config.dp_subconnector_property,
176 * initializes drm_device display related structures, based on the information
177 * provided by DAL. The drm structures are: drm_crtc, drm_connector,
178 * drm_encoder, drm_mode_config
180 * Returns 0 on success
182 static int amdgpu_dm_initialize_drm_device(struct amdgpu_device *adev);
183 /* removes and deallocates the drm structures, created by the above function */
184 static void amdgpu_dm_destroy_drm_device(struct amdgpu_display_manager *dm);
186 static int amdgpu_dm_plane_init(struct amdgpu_display_manager *dm,
187 struct drm_plane *plane,
188 unsigned long possible_crtcs,
189 const struct dc_plane_cap *plane_cap);
190 static int amdgpu_dm_crtc_init(struct amdgpu_display_manager *dm,
191 struct drm_plane *plane,
192 uint32_t link_index);
193 static int amdgpu_dm_connector_init(struct amdgpu_display_manager *dm,
194 struct amdgpu_dm_connector *amdgpu_dm_connector,
196 struct amdgpu_encoder *amdgpu_encoder);
197 static int amdgpu_dm_encoder_init(struct drm_device *dev,
198 struct amdgpu_encoder *aencoder,
199 uint32_t link_index);
201 static int amdgpu_dm_connector_get_modes(struct drm_connector *connector);
203 static void amdgpu_dm_atomic_commit_tail(struct drm_atomic_state *state);
205 static int amdgpu_dm_atomic_check(struct drm_device *dev,
206 struct drm_atomic_state *state);
208 static void handle_cursor_update(struct drm_plane *plane,
209 struct drm_plane_state *old_plane_state);
211 static void amdgpu_dm_set_psr_caps(struct dc_link *link);
212 static bool amdgpu_dm_psr_enable(struct dc_stream_state *stream);
213 static bool amdgpu_dm_link_setup_psr(struct dc_stream_state *stream);
214 static bool amdgpu_dm_psr_disable(struct dc_stream_state *stream);
215 static bool amdgpu_dm_psr_disable_all(struct amdgpu_display_manager *dm);
217 static const struct drm_format_info *
218 amd_get_format_info(const struct drm_mode_fb_cmd2 *cmd);
221 is_timing_unchanged_for_freesync(struct drm_crtc_state *old_crtc_state,
222 struct drm_crtc_state *new_crtc_state);
224 * dm_vblank_get_counter
227 * Get counter for number of vertical blanks
230 * struct amdgpu_device *adev - [in] desired amdgpu device
231 * int crtc - [in] which CRTC to get the counter from
234 * Counter for vertical blanks
236 static u32 dm_vblank_get_counter(struct amdgpu_device *adev, int crtc)
238 if (crtc >= adev->mode_info.num_crtc)
241 struct amdgpu_crtc *acrtc = adev->mode_info.crtcs[crtc];
243 if (acrtc->dm_irq_params.stream == NULL) {
244 DRM_ERROR("dc_stream_state is NULL for crtc '%d'!\n",
249 return dc_stream_get_vblank_counter(acrtc->dm_irq_params.stream);
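/* Read the current scanout position of a CRTC and pack it into the legacy
 * register-style format expected by the base driver.
 */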
253 static int dm_crtc_get_scanoutpos(struct amdgpu_device *adev, int crtc,
254 u32 *vbl, u32 *position)
256 uint32_t v_blank_start, v_blank_end, h_position, v_position;
258 if ((crtc < 0) || (crtc >= adev->mode_info.num_crtc))
261 struct amdgpu_crtc *acrtc = adev->mode_info.crtcs[crtc];
263 if (acrtc->dm_irq_params.stream == NULL) {
264 DRM_ERROR("dc_stream_state is NULL for crtc '%d'!\n",
270 * TODO rework base driver to use values directly.
271 * for now parse it back into reg-format
273 dc_stream_get_scanoutpos(acrtc->dm_irq_params.stream,
279 *position = v_position | (h_position << 16);
280 *vbl = v_blank_start | (v_blank_end << 16);
286 static bool dm_is_idle(void *handle)
292 static int dm_wait_for_idle(void *handle)
298 static bool dm_check_soft_reset(void *handle)
303 static int dm_soft_reset(void *handle)
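/* Find the amdgpu_crtc driving the given OTG instance; falls back to the
 * first CRTC when otg_inst is -1.
 */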
309 static struct amdgpu_crtc *
310 get_crtc_by_otg_inst(struct amdgpu_device *adev,
313 struct drm_device *dev = adev_to_drm(adev);
314 struct drm_crtc *crtc;
315 struct amdgpu_crtc *amdgpu_crtc;
317 if (otg_inst == -1) {
319 return adev->mode_info.crtcs[0];
322 list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
323 amdgpu_crtc = to_amdgpu_crtc(crtc);
325 if (amdgpu_crtc->otg_inst == otg_inst)
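/* VRR is considered active when the freesync state is either variable or
 * fixed refresh.
 */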
332 static inline bool amdgpu_dm_vrr_active_irq(struct amdgpu_crtc *acrtc)
334 return acrtc->dm_irq_params.freesync_config.state ==
335 VRR_STATE_ACTIVE_VARIABLE ||
336 acrtc->dm_irq_params.freesync_config.state ==
337 VRR_STATE_ACTIVE_FIXED;
340 static inline bool amdgpu_dm_vrr_active(struct dm_crtc_state *dm_state)
342 return dm_state->freesync_config.state == VRR_STATE_ACTIVE_VARIABLE ||
343 dm_state->freesync_config.state == VRR_STATE_ACTIVE_FIXED;
346 static inline bool is_dc_timing_adjust_needed(struct dm_crtc_state *old_state,
347 struct dm_crtc_state *new_state)
349 if (new_state->freesync_config.state == VRR_STATE_ACTIVE_FIXED)
351 else if (amdgpu_dm_vrr_active(old_state) != amdgpu_dm_vrr_active(new_state))
358 * dm_pflip_high_irq() - Handle pageflip interrupt
359 * @interrupt_params: ignored
361 * Handles the pageflip interrupt by notifying all interested parties
362 * that the pageflip has been completed.
364 static void dm_pflip_high_irq(void *interrupt_params)
366 struct amdgpu_crtc *amdgpu_crtc;
367 struct common_irq_params *irq_params = interrupt_params;
368 struct amdgpu_device *adev = irq_params->adev;
370 struct drm_pending_vblank_event *e;
371 uint32_t vpos, hpos, v_blank_start, v_blank_end;
374 amdgpu_crtc = get_crtc_by_otg_inst(adev, irq_params->irq_src - IRQ_TYPE_PFLIP);
376 /* IRQ could occur when in initial stage */
377 /* TODO work and BO cleanup */
378 if (amdgpu_crtc == NULL) {
379 DC_LOG_PFLIP("CRTC is null, returning.\n");
383 spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);
385 if (amdgpu_crtc->pflip_status != AMDGPU_FLIP_SUBMITTED) {
386 DC_LOG_PFLIP("amdgpu_crtc->pflip_status = %d !=AMDGPU_FLIP_SUBMITTED(%d) on crtc:%d[%p] \n",
387 amdgpu_crtc->pflip_status,
388 AMDGPU_FLIP_SUBMITTED,
389 amdgpu_crtc->crtc_id,
391 spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
395 /* page flip completed. */
396 e = amdgpu_crtc->event;
397 amdgpu_crtc->event = NULL;
402 vrr_active = amdgpu_dm_vrr_active_irq(amdgpu_crtc);
404 /* Fixed refresh rate, or VRR scanout position outside front-porch? */
406 !dc_stream_get_scanoutpos(amdgpu_crtc->dm_irq_params.stream, &v_blank_start,
407 &v_blank_end, &hpos, &vpos) ||
408 (vpos < v_blank_start)) {
409 /* Update to correct count and vblank timestamp if racing with
410 * vblank irq. This also updates to the correct vblank timestamp
411 * even in VRR mode, as scanout is past the front-porch atm.
413 drm_crtc_accurate_vblank_count(&amdgpu_crtc->base);
415 /* Wake up userspace by sending the pageflip event with proper
416 * count and timestamp of vblank of flip completion.
419 drm_crtc_send_vblank_event(&amdgpu_crtc->base, e);
421 /* Event sent, so done with vblank for this flip */
422 drm_crtc_vblank_put(&amdgpu_crtc->base);
425 /* VRR active and inside front-porch: vblank count and
426 * timestamp for pageflip event will only be up to date after
427 * drm_crtc_handle_vblank() has been executed from late vblank
428 * irq handler after start of back-porch (vline 0). We queue the
429 * pageflip event for send-out by drm_crtc_handle_vblank() with
430 * updated timestamp and count, once it runs after us.
432 * We need to open-code this instead of using the helper
433 * drm_crtc_arm_vblank_event(), as that helper would
434 * call drm_crtc_accurate_vblank_count(), which we must
435 * not call in VRR mode while we are in front-porch!
438 /* sequence will be replaced by real count during send-out. */
439 e->sequence = drm_crtc_vblank_count(&amdgpu_crtc->base);
440 e->pipe = amdgpu_crtc->crtc_id;
442 list_add_tail(&e->base.link, &adev_to_drm(adev)->vblank_event_list);
446 /* Keep track of vblank of this flip for flip throttling. We use the
447 * cooked hw counter, as that one is incremented at the start of this vblank
448 * of pageflip completion, so last_flip_vblank is the forbidden count
449 * for queueing new pageflips if vsync + VRR is enabled.
451 amdgpu_crtc->dm_irq_params.last_flip_vblank =
452 amdgpu_get_vblank_counter_kms(&amdgpu_crtc->base);
454 amdgpu_crtc->pflip_status = AMDGPU_FLIP_NONE;
455 spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
457 DC_LOG_PFLIP("crtc:%d[%p], pflip_stat:AMDGPU_FLIP_NONE, vrr[%d]-fp %d\n",
458 amdgpu_crtc->crtc_id, amdgpu_crtc,
459 vrr_active, (int) !e);
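/**
 * dm_vupdate_high_irq() - Handles VUPDATE interrupt
 * @interrupt_params: used for determining the CRTC instance
 *
 * Traces the measured refresh rate, performs core vblank handling while VRR
 * is active (vblank timestamps are only valid after the front-porch here)
 * and runs BTR processing for pre-DCE12 ASICs.
 */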
462 static void dm_vupdate_high_irq(void *interrupt_params)
464 struct common_irq_params *irq_params = interrupt_params;
465 struct amdgpu_device *adev = irq_params->adev;
466 struct amdgpu_crtc *acrtc;
467 struct drm_device *drm_dev;
468 struct drm_vblank_crtc *vblank;
469 ktime_t frame_duration_ns, previous_timestamp;
473 acrtc = get_crtc_by_otg_inst(adev, irq_params->irq_src - IRQ_TYPE_VUPDATE);
476 vrr_active = amdgpu_dm_vrr_active_irq(acrtc);
477 drm_dev = acrtc->base.dev;
478 vblank = &drm_dev->vblank[acrtc->base.index];
479 previous_timestamp = atomic64_read(&irq_params->previous_timestamp);
480 frame_duration_ns = vblank->time - previous_timestamp;
482 if (frame_duration_ns > 0) {
483 trace_amdgpu_refresh_rate_track(acrtc->base.index,
485 ktime_divns(NSEC_PER_SEC, frame_duration_ns));
486 atomic64_set(&irq_params->previous_timestamp, vblank->time);
489 DC_LOG_VBLANK("crtc:%d, vupdate-vrr:%d\n",
493 /* Core vblank handling is done here after the end of the front-porch in
494 * vrr mode, as vblank timestamping only gives valid results once we are
495 * past the front-porch. This will also deliver
496 * page-flip completion events that have been queued to us
497 * if a pageflip happened inside front-porch.
500 drm_crtc_handle_vblank(&acrtc->base);
502 /* BTR processing for pre-DCE12 ASICs */
503 if (acrtc->dm_irq_params.stream &&
504 adev->family < AMDGPU_FAMILY_AI) {
505 spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);
506 mod_freesync_handle_v_update(
507 adev->dm.freesync_module,
508 acrtc->dm_irq_params.stream,
509 &acrtc->dm_irq_params.vrr_params);
511 dc_stream_adjust_vmin_vmax(
513 acrtc->dm_irq_params.stream,
514 &acrtc->dm_irq_params.vrr_params.adjust);
515 spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
522 * dm_crtc_high_irq() - Handles CRTC interrupt
523 * @interrupt_params: used for determining the CRTC instance
525 * Handles the CRTC/VSYNC interrupt by notifying DRM's VBLANK
528 static void dm_crtc_high_irq(void *interrupt_params)
530 struct common_irq_params *irq_params = interrupt_params;
531 struct amdgpu_device *adev = irq_params->adev;
532 struct amdgpu_crtc *acrtc;
536 acrtc = get_crtc_by_otg_inst(adev, irq_params->irq_src - IRQ_TYPE_VBLANK);
540 vrr_active = amdgpu_dm_vrr_active_irq(acrtc);
542 DC_LOG_VBLANK("crtc:%d, vupdate-vrr:%d, planes:%d\n", acrtc->crtc_id,
543 vrr_active, acrtc->dm_irq_params.active_planes);
546 * Core vblank handling at the start of the front-porch is only possible
547 * in non-vrr mode, as that is the only case where vblank timestamping
548 * gives valid results while still inside the front-porch. Otherwise defer
549 * it to dm_vupdate_high_irq after the end of the front-porch.
552 drm_crtc_handle_vblank(&acrtc->base);
555 * The following must happen at the start of vblank, for crc
556 * computation and below-the-range btr support in vrr mode.
558 amdgpu_dm_crtc_handle_crc_irq(&acrtc->base);
560 /* BTR updates need to happen before VUPDATE on Vega and above. */
561 if (adev->family < AMDGPU_FAMILY_AI)
564 spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);
566 if (acrtc->dm_irq_params.stream &&
567 acrtc->dm_irq_params.vrr_params.supported &&
568 acrtc->dm_irq_params.freesync_config.state ==
569 VRR_STATE_ACTIVE_VARIABLE) {
570 mod_freesync_handle_v_update(adev->dm.freesync_module,
571 acrtc->dm_irq_params.stream,
572 &acrtc->dm_irq_params.vrr_params);
574 dc_stream_adjust_vmin_vmax(adev->dm.dc, acrtc->dm_irq_params.stream,
575 &acrtc->dm_irq_params.vrr_params.adjust);
579 * If there aren't any active_planes then DCH HUBP may be clock-gated.
580 * In that case, pageflip completion interrupts won't fire and pageflip
581 * completion events won't get delivered. Prevent this by sending
582 * pending pageflip events from here if a flip is still pending.
584 * If any planes are enabled, use dm_pflip_high_irq() instead, to
585 * avoid race conditions between flip programming and completion,
586 * which could cause too early flip completion events.
588 if (adev->family >= AMDGPU_FAMILY_RV &&
589 acrtc->pflip_status == AMDGPU_FLIP_SUBMITTED &&
590 acrtc->dm_irq_params.active_planes == 0) {
592 drm_crtc_send_vblank_event(&acrtc->base, acrtc->event);
594 drm_crtc_vblank_put(&acrtc->base);
596 acrtc->pflip_status = AMDGPU_FLIP_NONE;
599 spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
602 #if defined(CONFIG_DRM_AMD_DC_DCN)
604 * dm_dcn_vertical_interrupt0_high_irq() - Handles OTG Vertical interrupt0 for
605 * DCN generation ASICs
606 * @interrupt_params: interrupt parameters
608 * Used to set crc window/read out crc value at vertical line 0 position
610 #if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
611 static void dm_dcn_vertical_interrupt0_high_irq(void *interrupt_params)
613 struct common_irq_params *irq_params = interrupt_params;
614 struct amdgpu_device *adev = irq_params->adev;
615 struct amdgpu_crtc *acrtc;
617 acrtc = get_crtc_by_otg_inst(adev, irq_params->irq_src - IRQ_TYPE_VLINE0);
622 amdgpu_dm_crtc_handle_crc_window_irq(&acrtc->base);
627 * dm_dmub_outbox1_low_irq() - Handles Outbox interrupt
628 * @interrupt_params: used for determining the Outbox instance
630 * Handles the Outbox Interrupt
633 #define DMUB_TRACE_MAX_READ 64
634 static void dm_dmub_outbox1_low_irq(void *interrupt_params)
636 struct dmub_notification notify;
637 struct common_irq_params *irq_params = interrupt_params;
638 struct amdgpu_device *adev = irq_params->adev;
639 struct amdgpu_display_manager *dm = &adev->dm;
640 struct dmcub_trace_buf_entry entry = { 0 };
643 if (dc_enable_dmub_notifications(adev->dm.dc)) {
644 if (irq_params->irq_src == DC_IRQ_SOURCE_DMCUB_OUTBOX) {
646 dc_stat_get_dmub_notification(adev->dm.dc, &notify);
647 } while (notify.pending_notification);
649 if (adev->dm.dmub_notify)
650 memcpy(adev->dm.dmub_notify, &notify, sizeof(struct dmub_notification));
651 if (notify.type == DMUB_NOTIFICATION_AUX_REPLY)
652 complete(&adev->dm.dmub_aux_transfer_done);
653 // TODO : HPD Implementation
656 DRM_ERROR("DM: Failed to receive correct outbox IRQ !");
662 if (dc_dmub_srv_get_dmub_outbox0_msg(dm->dc, &entry)) {
663 trace_amdgpu_dmub_trace_high_irq(entry.trace_code, entry.tick_count,
664 entry.param0, entry.param1);
666 DRM_DEBUG_DRIVER("trace_code:%u, tick_count:%u, param0:%u, param1:%u\n",
667 entry.trace_code, entry.tick_count, entry.param0, entry.param1);
673 } while (count <= DMUB_TRACE_MAX_READ);
675 ASSERT(count <= DMUB_TRACE_MAX_READ);
679 static int dm_set_clockgating_state(void *handle,
680 enum amd_clockgating_state state)
685 static int dm_set_powergating_state(void *handle,
686 enum amd_powergating_state state)
691 /* Prototypes of private functions */
692 static int dm_early_init(void* handle);
694 /* Allocate memory for FBC compressed data */
695 static void amdgpu_dm_fbc_init(struct drm_connector *connector)
697 struct drm_device *dev = connector->dev;
698 struct amdgpu_device *adev = drm_to_adev(dev);
699 struct dm_compressor_info *compressor = &adev->dm.compressor;
700 struct amdgpu_dm_connector *aconn = to_amdgpu_dm_connector(connector);
701 struct drm_display_mode *mode;
702 unsigned long max_size = 0;
704 if (adev->dm.dc->fbc_compressor == NULL)
707 if (aconn->dc_link->connector_signal != SIGNAL_TYPE_EDP)
710 if (compressor->bo_ptr)
714 list_for_each_entry(mode, &connector->modes, head) {
715 if (max_size < mode->htotal * mode->vtotal)
716 max_size = mode->htotal * mode->vtotal;
720 int r = amdgpu_bo_create_kernel(adev, max_size * 4, PAGE_SIZE,
721 AMDGPU_GEM_DOMAIN_GTT, &compressor->bo_ptr,
722 &compressor->gpu_addr, &compressor->cpu_addr);
725 DRM_ERROR("DM: Failed to initialize FBC\n");
727 adev->dm.dc->ctx->fbc_gpu_addr = compressor->gpu_addr;
728 DRM_INFO("DM: FBC alloc %lu\n", max_size*4);
735 static int amdgpu_dm_audio_component_get_eld(struct device *kdev, int port,
736 int pipe, bool *enabled,
737 unsigned char *buf, int max_bytes)
739 struct drm_device *dev = dev_get_drvdata(kdev);
740 struct amdgpu_device *adev = drm_to_adev(dev);
741 struct drm_connector *connector;
742 struct drm_connector_list_iter conn_iter;
743 struct amdgpu_dm_connector *aconnector;
748 mutex_lock(&adev->dm.audio_lock);
750 drm_connector_list_iter_begin(dev, &conn_iter);
751 drm_for_each_connector_iter(connector, &conn_iter) {
752 aconnector = to_amdgpu_dm_connector(connector);
753 if (aconnector->audio_inst != port)
757 ret = drm_eld_size(connector->eld);
758 memcpy(buf, connector->eld, min(max_bytes, ret));
762 drm_connector_list_iter_end(&conn_iter);
764 mutex_unlock(&adev->dm.audio_lock);
766 DRM_DEBUG_KMS("Get ELD : idx=%d ret=%d en=%d\n", port, ret, *enabled);
771 static const struct drm_audio_component_ops amdgpu_dm_audio_component_ops = {
772 .get_eld = amdgpu_dm_audio_component_get_eld,
775 static int amdgpu_dm_audio_component_bind(struct device *kdev,
776 struct device *hda_kdev, void *data)
778 struct drm_device *dev = dev_get_drvdata(kdev);
779 struct amdgpu_device *adev = drm_to_adev(dev);
780 struct drm_audio_component *acomp = data;
782 acomp->ops = &amdgpu_dm_audio_component_ops;
784 adev->dm.audio_component = acomp;
789 static void amdgpu_dm_audio_component_unbind(struct device *kdev,
790 struct device *hda_kdev, void *data)
792 struct drm_device *dev = dev_get_drvdata(kdev);
793 struct amdgpu_device *adev = drm_to_adev(dev);
794 struct drm_audio_component *acomp = data;
798 adev->dm.audio_component = NULL;
801 static const struct component_ops amdgpu_dm_audio_component_bind_ops = {
802 .bind = amdgpu_dm_audio_component_bind,
803 .unbind = amdgpu_dm_audio_component_unbind,
806 static int amdgpu_dm_audio_init(struct amdgpu_device *adev)
813 adev->mode_info.audio.enabled = true;
815 adev->mode_info.audio.num_pins = adev->dm.dc->res_pool->audio_count;
817 for (i = 0; i < adev->mode_info.audio.num_pins; i++) {
818 adev->mode_info.audio.pin[i].channels = -1;
819 adev->mode_info.audio.pin[i].rate = -1;
820 adev->mode_info.audio.pin[i].bits_per_sample = -1;
821 adev->mode_info.audio.pin[i].status_bits = 0;
822 adev->mode_info.audio.pin[i].category_code = 0;
823 adev->mode_info.audio.pin[i].connected = false;
824 adev->mode_info.audio.pin[i].id =
825 adev->dm.dc->res_pool->audios[i]->inst;
826 adev->mode_info.audio.pin[i].offset = 0;
829 ret = component_add(adev->dev, &amdgpu_dm_audio_component_bind_ops);
833 adev->dm.audio_registered = true;
838 static void amdgpu_dm_audio_fini(struct amdgpu_device *adev)
843 if (!adev->mode_info.audio.enabled)
846 if (adev->dm.audio_registered) {
847 component_del(adev->dev, &amdgpu_dm_audio_component_bind_ops);
848 adev->dm.audio_registered = false;
851 /* TODO: Disable audio? */
853 adev->mode_info.audio.enabled = false;
856 static void amdgpu_dm_audio_eld_notify(struct amdgpu_device *adev, int pin)
858 struct drm_audio_component *acomp = adev->dm.audio_component;
860 if (acomp && acomp->audio_ops && acomp->audio_ops->pin_eld_notify) {
861 DRM_DEBUG_KMS("Notify ELD: %d\n", pin);
863 acomp->audio_ops->pin_eld_notify(acomp->audio_ops->audio_ptr,
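/* Copy the DMUB firmware and VBIOS into the reserved framebuffer windows,
 * initialize the DMUB hardware, wait for the firmware to auto-load and
 * create the DC-side DMUB service.
 */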
868 static int dm_dmub_hw_init(struct amdgpu_device *adev)
870 const struct dmcub_firmware_header_v1_0 *hdr;
871 struct dmub_srv *dmub_srv = adev->dm.dmub_srv;
872 struct dmub_srv_fb_info *fb_info = adev->dm.dmub_fb_info;
873 const struct firmware *dmub_fw = adev->dm.dmub_fw;
874 struct dmcu *dmcu = adev->dm.dc->res_pool->dmcu;
875 struct abm *abm = adev->dm.dc->res_pool->abm;
876 struct dmub_srv_hw_params hw_params;
877 enum dmub_status status;
878 const unsigned char *fw_inst_const, *fw_bss_data;
879 uint32_t i, fw_inst_const_size, fw_bss_data_size;
883 /* DMUB isn't supported on the ASIC. */
887 DRM_ERROR("No framebuffer info for DMUB service.\n");
892 /* Firmware required for DMUB support. */
893 DRM_ERROR("No firmware provided for DMUB.\n");
897 status = dmub_srv_has_hw_support(dmub_srv, &has_hw_support);
898 if (status != DMUB_STATUS_OK) {
899 DRM_ERROR("Error checking HW support for DMUB: %d\n", status);
903 if (!has_hw_support) {
904 DRM_INFO("DMUB unsupported on ASIC\n");
908 hdr = (const struct dmcub_firmware_header_v1_0 *)dmub_fw->data;
910 fw_inst_const = dmub_fw->data +
911 le32_to_cpu(hdr->header.ucode_array_offset_bytes) +
914 fw_bss_data = dmub_fw->data +
915 le32_to_cpu(hdr->header.ucode_array_offset_bytes) +
916 le32_to_cpu(hdr->inst_const_bytes);
918 /* Copy firmware and bios info into FB memory. */
919 fw_inst_const_size = le32_to_cpu(hdr->inst_const_bytes) -
920 PSP_HEADER_BYTES - PSP_FOOTER_BYTES;
922 fw_bss_data_size = le32_to_cpu(hdr->bss_data_bytes);
924 /* if adev->firmware.load_type == AMDGPU_FW_LOAD_PSP,
925 * amdgpu_ucode_init_single_fw will load dmub firmware
926 * fw_inst_const part to cw0; otherwise, the firmware back door load
927 * will be done by dm_dmub_hw_init
929 if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP) {
930 memcpy(fb_info->fb[DMUB_WINDOW_0_INST_CONST].cpu_addr, fw_inst_const,
934 if (fw_bss_data_size)
935 memcpy(fb_info->fb[DMUB_WINDOW_2_BSS_DATA].cpu_addr,
936 fw_bss_data, fw_bss_data_size);
938 /* Copy firmware bios info into FB memory. */
939 memcpy(fb_info->fb[DMUB_WINDOW_3_VBIOS].cpu_addr, adev->bios,
942 /* Reset regions that need to be reset. */
943 memset(fb_info->fb[DMUB_WINDOW_4_MAILBOX].cpu_addr, 0,
944 fb_info->fb[DMUB_WINDOW_4_MAILBOX].size);
946 memset(fb_info->fb[DMUB_WINDOW_5_TRACEBUFF].cpu_addr, 0,
947 fb_info->fb[DMUB_WINDOW_5_TRACEBUFF].size);
949 memset(fb_info->fb[DMUB_WINDOW_6_FW_STATE].cpu_addr, 0,
950 fb_info->fb[DMUB_WINDOW_6_FW_STATE].size);
952 /* Initialize hardware. */
953 memset(&hw_params, 0, sizeof(hw_params));
954 hw_params.fb_base = adev->gmc.fb_start;
955 hw_params.fb_offset = adev->gmc.aper_base;
957 /* backdoor load firmware and trigger dmub running */
958 if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP)
959 hw_params.load_inst_const = true;
962 hw_params.psp_version = dmcu->psp_version;
964 for (i = 0; i < fb_info->num_fb; ++i)
965 hw_params.fb[i] = &fb_info->fb[i];
967 status = dmub_srv_hw_init(dmub_srv, &hw_params);
968 if (status != DMUB_STATUS_OK) {
969 DRM_ERROR("Error initializing DMUB HW: %d\n", status);
973 /* Wait for firmware load to finish. */
974 status = dmub_srv_wait_for_auto_load(dmub_srv, 100000);
975 if (status != DMUB_STATUS_OK)
976 DRM_WARN("Wait for DMUB auto-load failed: %d\n", status);
978 /* Init DMCU and ABM if available. */
980 dmcu->funcs->dmcu_init(dmcu);
981 abm->dmcu_is_running = dmcu->funcs->is_dmcu_initialized(dmcu);
984 adev->dm.dc->ctx->dmub_srv = dc_dmub_srv_create(adev->dm.dc, dmub_srv);
985 if (!adev->dm.dc->ctx->dmub_srv) {
986 DRM_ERROR("Couldn't allocate DC DMUB server!\n");
990 DRM_INFO("DMUB hardware initialized: version=0x%08X\n",
991 adev->dm.dmcub_fw_version);
996 #if defined(CONFIG_DRM_AMD_DC_DCN)
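/* Translate the GMC system aperture, AGP window and GART page table layout
 * into the dc_phy_addr_space_config consumed by dc_setup_system_context().
 */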
997 static void mmhub_read_system_context(struct amdgpu_device *adev, struct dc_phy_addr_space_config *pa_config)
1000 uint32_t logical_addr_low;
1001 uint32_t logical_addr_high;
1002 uint32_t agp_base, agp_bot, agp_top;
1003 PHYSICAL_ADDRESS_LOC page_table_start, page_table_end, page_table_base;
1005 logical_addr_low = min(adev->gmc.fb_start, adev->gmc.agp_start) >> 18;
1006 pt_base = amdgpu_gmc_pd_addr(adev->gart.bo);
1008 if (adev->apu_flags & AMD_APU_IS_RAVEN2)
1010 * Raven2 has a HW issue that prevents it from using vram that lies
1011 * beyond MC_VM_SYSTEM_APERTURE_HIGH_ADDR. As a workaround, increase the
1012 * system aperture high address (add 1) to get rid of the VM fault and
1013 * hardware hang.
1015 logical_addr_high = max((adev->gmc.fb_end >> 18) + 0x1, adev->gmc.agp_end >> 18);
1017 logical_addr_high = max(adev->gmc.fb_end, adev->gmc.agp_end) >> 18;
1020 agp_bot = adev->gmc.agp_start >> 24;
1021 agp_top = adev->gmc.agp_end >> 24;
1024 page_table_start.high_part = (u32)(adev->gmc.gart_start >> 44) & 0xF;
1025 page_table_start.low_part = (u32)(adev->gmc.gart_start >> 12);
1026 page_table_end.high_part = (u32)(adev->gmc.gart_end >> 44) & 0xF;
1027 page_table_end.low_part = (u32)(adev->gmc.gart_end >> 12);
1028 page_table_base.high_part = upper_32_bits(pt_base) & 0xF;
1029 page_table_base.low_part = lower_32_bits(pt_base);
1031 pa_config->system_aperture.start_addr = (uint64_t)logical_addr_low << 18;
1032 pa_config->system_aperture.end_addr = (uint64_t)logical_addr_high << 18;
1034 pa_config->system_aperture.agp_base = (uint64_t)agp_base << 24 ;
1035 pa_config->system_aperture.agp_bot = (uint64_t)agp_bot << 24;
1036 pa_config->system_aperture.agp_top = (uint64_t)agp_top << 24;
1038 pa_config->system_aperture.fb_base = adev->gmc.fb_start;
1039 pa_config->system_aperture.fb_offset = adev->gmc.aper_base;
1040 pa_config->system_aperture.fb_top = adev->gmc.fb_end;
1042 pa_config->gart_config.page_table_start_addr = page_table_start.quad_part << 12;
1043 pa_config->gart_config.page_table_end_addr = page_table_end.quad_part << 12;
1044 pa_config->gart_config.page_table_base_addr = page_table_base.quad_part;
1046 pa_config->is_hvm_enabled = 0;
1050 #if defined(CONFIG_DRM_AMD_DC_DCN)
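/* Deferred vblank work: track how many vblank IRQs are active and only
 * allow DC idle optimizations (MALL) while none are.
 */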
1051 static void event_mall_stutter(struct work_struct *work)
1054 struct vblank_workqueue *vblank_work = container_of(work, struct vblank_workqueue, mall_work);
1055 struct amdgpu_display_manager *dm = vblank_work->dm;
1057 mutex_lock(&dm->dc_lock);
1059 if (vblank_work->enable)
1060 dm->active_vblank_irq_count++;
1061 else if (dm->active_vblank_irq_count)
1062 dm->active_vblank_irq_count--;
1064 dc_allow_idle_optimizations(dm->dc, dm->active_vblank_irq_count == 0);
1066 DRM_DEBUG_KMS("Allow idle optimizations (MALL): %d\n", dm->active_vblank_irq_count == 0);
1068 mutex_unlock(&dm->dc_lock);
1071 static struct vblank_workqueue *vblank_create_workqueue(struct amdgpu_device *adev, struct dc *dc)
1074 int max_caps = dc->caps.max_links;
1075 struct vblank_workqueue *vblank_work;
1078 vblank_work = kcalloc(max_caps, sizeof(*vblank_work), GFP_KERNEL);
1079 if (ZERO_OR_NULL_PTR(vblank_work)) {
1084 for (i = 0; i < max_caps; i++)
1085 INIT_WORK(&vblank_work[i].mall_work, event_mall_stutter);
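/* One-time DM bring-up: create the DC core and its helper modules (DMUB,
 * freesync, HDCP, vblank work) and register the DRM display structures.
 */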
1090 static int amdgpu_dm_init(struct amdgpu_device *adev)
1092 struct dc_init_data init_data;
1093 #ifdef CONFIG_DRM_AMD_DC_HDCP
1094 struct dc_callback_init init_params;
1098 adev->dm.ddev = adev_to_drm(adev);
1099 adev->dm.adev = adev;
1101 /* Zero all the fields */
1102 memset(&init_data, 0, sizeof(init_data));
1103 #ifdef CONFIG_DRM_AMD_DC_HDCP
1104 memset(&init_params, 0, sizeof(init_params));
1107 mutex_init(&adev->dm.dc_lock);
1108 mutex_init(&adev->dm.audio_lock);
1109 #if defined(CONFIG_DRM_AMD_DC_DCN)
1110 spin_lock_init(&adev->dm.vblank_lock);
1113 if (amdgpu_dm_irq_init(adev)) {
1114 DRM_ERROR("amdgpu: failed to initialize DM IRQ support.\n");
1118 init_data.asic_id.chip_family = adev->family;
1120 init_data.asic_id.pci_revision_id = adev->pdev->revision;
1121 init_data.asic_id.hw_internal_rev = adev->external_rev_id;
1123 init_data.asic_id.vram_width = adev->gmc.vram_width;
1124 /* TODO: initialize init_data.asic_id.vram_type here!!!! */
1125 init_data.asic_id.atombios_base_address =
1126 adev->mode_info.atom_context->bios;
1128 init_data.driver = adev;
1130 adev->dm.cgs_device = amdgpu_cgs_create_device(adev);
1132 if (!adev->dm.cgs_device) {
1133 DRM_ERROR("amdgpu: failed to create cgs device.\n");
1137 init_data.cgs_device = adev->dm.cgs_device;
1139 init_data.dce_environment = DCE_ENV_PRODUCTION_DRV;
1141 switch (adev->asic_type) {
1146 init_data.flags.gpu_vm_support = true;
1147 if (ASICREV_IS_GREEN_SARDINE(adev->external_rev_id))
1148 init_data.flags.disable_dmcu = true;
1150 #if defined(CONFIG_DRM_AMD_DC_DCN)
1152 init_data.flags.gpu_vm_support = true;
1159 if (amdgpu_dc_feature_mask & DC_FBC_MASK)
1160 init_data.flags.fbc_support = true;
1162 if (amdgpu_dc_feature_mask & DC_MULTI_MON_PP_MCLK_SWITCH_MASK)
1163 init_data.flags.multi_mon_pp_mclk_switch = true;
1165 if (amdgpu_dc_feature_mask & DC_DISABLE_FRACTIONAL_PWM_MASK)
1166 init_data.flags.disable_fractional_pwm = true;
1168 init_data.flags.power_down_display_on_boot = true;
1170 INIT_LIST_HEAD(&adev->dm.da_list);
1171 /* Display Core create. */
1172 adev->dm.dc = dc_create(&init_data);
1175 DRM_INFO("Display Core initialized with v%s!\n", DC_VER);
1177 DRM_INFO("Display Core failed to initialize with v%s!\n", DC_VER);
1181 if (amdgpu_dc_debug_mask & DC_DISABLE_PIPE_SPLIT) {
1182 adev->dm.dc->debug.force_single_disp_pipe_split = false;
1183 adev->dm.dc->debug.pipe_split_policy = MPC_SPLIT_AVOID;
1186 if (adev->asic_type != CHIP_CARRIZO && adev->asic_type != CHIP_STONEY)
1187 adev->dm.dc->debug.disable_stutter = amdgpu_pp_feature_mask & PP_STUTTER_MODE ? false : true;
1189 if (amdgpu_dc_debug_mask & DC_DISABLE_STUTTER)
1190 adev->dm.dc->debug.disable_stutter = true;
1192 if (amdgpu_dc_debug_mask & DC_DISABLE_DSC)
1193 adev->dm.dc->debug.disable_dsc = true;
1195 if (amdgpu_dc_debug_mask & DC_DISABLE_CLOCK_GATING)
1196 adev->dm.dc->debug.disable_clock_gate = true;
1198 r = dm_dmub_hw_init(adev);
1200 DRM_ERROR("DMUB interface failed to initialize: status=%d\n", r);
1204 dc_hardware_init(adev->dm.dc);
1206 #if defined(CONFIG_DRM_AMD_DC_DCN)
1207 if (adev->apu_flags) {
1208 struct dc_phy_addr_space_config pa_config;
1210 mmhub_read_system_context(adev, &pa_config);
1212 // Call the DC init_memory func
1213 dc_setup_system_context(adev->dm.dc, &pa_config);
1217 adev->dm.freesync_module = mod_freesync_create(adev->dm.dc);
1218 if (!adev->dm.freesync_module) {
1220 "amdgpu: failed to initialize freesync_module.\n");
1222 DRM_DEBUG_DRIVER("amdgpu: freesync_module init done %p.\n",
1223 adev->dm.freesync_module);
1225 amdgpu_dm_init_color_mod();
1227 #if defined(CONFIG_DRM_AMD_DC_DCN)
1228 if (adev->dm.dc->caps.max_links > 0) {
1229 adev->dm.vblank_workqueue = vblank_create_workqueue(adev, adev->dm.dc);
1231 if (!adev->dm.vblank_workqueue)
1232 DRM_ERROR("amdgpu: failed to initialize vblank_workqueue.\n");
1234 DRM_DEBUG_DRIVER("amdgpu: vblank_workqueue init done %p.\n", adev->dm.vblank_workqueue);
1238 #ifdef CONFIG_DRM_AMD_DC_HDCP
1239 if (adev->dm.dc->caps.max_links > 0 && adev->asic_type >= CHIP_RAVEN) {
1240 adev->dm.hdcp_workqueue = hdcp_create_workqueue(adev, &init_params.cp_psp, adev->dm.dc);
1242 if (!adev->dm.hdcp_workqueue)
1243 DRM_ERROR("amdgpu: failed to initialize hdcp_workqueue.\n");
1245 DRM_DEBUG_DRIVER("amdgpu: hdcp_workqueue init done %p.\n", adev->dm.hdcp_workqueue);
1247 dc_init_callbacks(adev->dm.dc, &init_params);
1250 #if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
1251 adev->dm.crc_rd_wrk = amdgpu_dm_crtc_secure_display_create_work();
1253 if (dc_enable_dmub_notifications(adev->dm.dc)) {
1254 init_completion(&adev->dm.dmub_aux_transfer_done);
1255 adev->dm.dmub_notify = kzalloc(sizeof(struct dmub_notification), GFP_KERNEL);
1256 if (!adev->dm.dmub_notify) {
1257 DRM_INFO("amdgpu: fail to allocate adev->dm.dmub_notify");
1260 amdgpu_dm_outbox_init(adev);
1263 if (amdgpu_dm_initialize_drm_device(adev)) {
1265 "amdgpu: failed to initialize sw for display support.\n");
1269 /* create fake encoders for MST */
1270 dm_dp_create_fake_mst_encoders(adev);
1272 /* TODO: Add_display_info? */
1274 /* TODO use dynamic cursor width */
1275 adev_to_drm(adev)->mode_config.cursor_width = adev->dm.dc->caps.max_cursor_size;
1276 adev_to_drm(adev)->mode_config.cursor_height = adev->dm.dc->caps.max_cursor_size;
1278 if (drm_vblank_init(adev_to_drm(adev), adev->dm.display_indexes_num)) {
1280 "amdgpu: failed to initialize sw for display support.\n");
1285 DRM_DEBUG_DRIVER("KMS initialized.\n");
1289 amdgpu_dm_fini(adev);
1294 static void amdgpu_dm_fini(struct amdgpu_device *adev)
1298 for (i = 0; i < adev->dm.display_indexes_num; i++) {
1299 drm_encoder_cleanup(&adev->dm.mst_encoders[i].base);
1302 amdgpu_dm_audio_fini(adev);
1304 amdgpu_dm_destroy_drm_device(&adev->dm);
1306 #if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
1307 if (adev->dm.crc_rd_wrk) {
1308 flush_work(&adev->dm.crc_rd_wrk->notify_ta_work);
1309 kfree(adev->dm.crc_rd_wrk);
1310 adev->dm.crc_rd_wrk = NULL;
1313 #ifdef CONFIG_DRM_AMD_DC_HDCP
1314 if (adev->dm.hdcp_workqueue) {
1315 hdcp_destroy(&adev->dev->kobj, adev->dm.hdcp_workqueue);
1316 adev->dm.hdcp_workqueue = NULL;
1320 dc_deinit_callbacks(adev->dm.dc);
1323 #if defined(CONFIG_DRM_AMD_DC_DCN)
1324 if (adev->dm.vblank_workqueue) {
1325 adev->dm.vblank_workqueue->dm = NULL;
1326 kfree(adev->dm.vblank_workqueue);
1327 adev->dm.vblank_workqueue = NULL;
1331 if (adev->dm.dc->ctx->dmub_srv) {
1332 dc_dmub_srv_destroy(&adev->dm.dc->ctx->dmub_srv);
1333 adev->dm.dc->ctx->dmub_srv = NULL;
1336 if (dc_enable_dmub_notifications(adev->dm.dc)) {
1337 kfree(adev->dm.dmub_notify);
1338 adev->dm.dmub_notify = NULL;
1341 if (adev->dm.dmub_bo)
1342 amdgpu_bo_free_kernel(&adev->dm.dmub_bo,
1343 &adev->dm.dmub_bo_gpu_addr,
1344 &adev->dm.dmub_bo_cpu_addr);
1346 /* DC Destroy TODO: Replace destroy DAL */
1348 dc_destroy(&adev->dm.dc);
1350 * TODO: pageflip, vblank interrupt
1352 * amdgpu_dm_irq_fini(adev);
1355 if (adev->dm.cgs_device) {
1356 amdgpu_cgs_destroy_device(adev->dm.cgs_device);
1357 adev->dm.cgs_device = NULL;
1359 if (adev->dm.freesync_module) {
1360 mod_freesync_destroy(adev->dm.freesync_module);
1361 adev->dm.freesync_module = NULL;
1364 mutex_destroy(&adev->dm.audio_lock);
1365 mutex_destroy(&adev->dm.dc_lock);
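/* Pick the DMCU firmware image for this ASIC, validate it and register it
 * with the PSP loader; a missing DMCU firmware is not treated as an error.
 */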
1370 static int load_dmcu_fw(struct amdgpu_device *adev)
1372 const char *fw_name_dmcu = NULL;
1374 const struct dmcu_firmware_header_v1_0 *hdr;
1376 switch (adev->asic_type) {
1377 #if defined(CONFIG_DRM_AMD_DC_SI)
1392 case CHIP_POLARIS11:
1393 case CHIP_POLARIS10:
1394 case CHIP_POLARIS12:
1402 case CHIP_SIENNA_CICHLID:
1403 case CHIP_NAVY_FLOUNDER:
1404 case CHIP_DIMGREY_CAVEFISH:
1405 case CHIP_BEIGE_GOBY:
1409 fw_name_dmcu = FIRMWARE_NAVI12_DMCU;
1412 if (ASICREV_IS_PICASSO(adev->external_rev_id))
1413 fw_name_dmcu = FIRMWARE_RAVEN_DMCU;
1414 else if (ASICREV_IS_RAVEN2(adev->external_rev_id))
1415 fw_name_dmcu = FIRMWARE_RAVEN_DMCU;
1420 DRM_ERROR("Unsupported ASIC type: 0x%X\n", adev->asic_type);
1424 if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP) {
1425 DRM_DEBUG_KMS("dm: DMCU firmware not supported on direct or SMU loading\n");
1429 r = request_firmware_direct(&adev->dm.fw_dmcu, fw_name_dmcu, adev->dev);
1431 /* DMCU firmware is not necessary, so don't raise a fuss if it's missing */
1432 DRM_DEBUG_KMS("dm: DMCU firmware not found\n");
1433 adev->dm.fw_dmcu = NULL;
1437 dev_err(adev->dev, "amdgpu_dm: Can't load firmware \"%s\"\n",
1442 r = amdgpu_ucode_validate(adev->dm.fw_dmcu);
1444 dev_err(adev->dev, "amdgpu_dm: Can't validate firmware \"%s\"\n",
1446 release_firmware(adev->dm.fw_dmcu);
1447 adev->dm.fw_dmcu = NULL;
1451 hdr = (const struct dmcu_firmware_header_v1_0 *)adev->dm.fw_dmcu->data;
1452 adev->firmware.ucode[AMDGPU_UCODE_ID_DMCU_ERAM].ucode_id = AMDGPU_UCODE_ID_DMCU_ERAM;
1453 adev->firmware.ucode[AMDGPU_UCODE_ID_DMCU_ERAM].fw = adev->dm.fw_dmcu;
1454 adev->firmware.fw_size +=
1455 ALIGN(le32_to_cpu(hdr->header.ucode_size_bytes) - le32_to_cpu(hdr->intv_size_bytes), PAGE_SIZE);
1457 adev->firmware.ucode[AMDGPU_UCODE_ID_DMCU_INTV].ucode_id = AMDGPU_UCODE_ID_DMCU_INTV;
1458 adev->firmware.ucode[AMDGPU_UCODE_ID_DMCU_INTV].fw = adev->dm.fw_dmcu;
1459 adev->firmware.fw_size +=
1460 ALIGN(le32_to_cpu(hdr->intv_size_bytes), PAGE_SIZE);
1462 adev->dm.dmcu_fw_version = le32_to_cpu(hdr->header.ucode_version);
1464 DRM_DEBUG_KMS("PSP loading DMCU firmware\n");
1469 static uint32_t amdgpu_dm_dmub_reg_read(void *ctx, uint32_t address)
1471 struct amdgpu_device *adev = ctx;
1473 return dm_read_reg(adev->dm.dc->ctx, address);
1476 static void amdgpu_dm_dmub_reg_write(void *ctx, uint32_t address,
1479 struct amdgpu_device *adev = ctx;
1481 return dm_write_reg(adev->dm.dc->ctx, address, value);
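/* Software-side DMUB setup: request and validate the firmware, create the
 * DMUB service, size its regions and back them with a VRAM buffer.
 */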
1484 static int dm_dmub_sw_init(struct amdgpu_device *adev)
1486 struct dmub_srv_create_params create_params;
1487 struct dmub_srv_region_params region_params;
1488 struct dmub_srv_region_info region_info;
1489 struct dmub_srv_fb_params fb_params;
1490 struct dmub_srv_fb_info *fb_info;
1491 struct dmub_srv *dmub_srv;
1492 const struct dmcub_firmware_header_v1_0 *hdr;
1493 const char *fw_name_dmub;
1494 enum dmub_asic dmub_asic;
1495 enum dmub_status status;
1498 switch (adev->asic_type) {
1500 dmub_asic = DMUB_ASIC_DCN21;
1501 fw_name_dmub = FIRMWARE_RENOIR_DMUB;
1502 if (ASICREV_IS_GREEN_SARDINE(adev->external_rev_id))
1503 fw_name_dmub = FIRMWARE_GREEN_SARDINE_DMUB;
1505 case CHIP_SIENNA_CICHLID:
1506 dmub_asic = DMUB_ASIC_DCN30;
1507 fw_name_dmub = FIRMWARE_SIENNA_CICHLID_DMUB;
1509 case CHIP_NAVY_FLOUNDER:
1510 dmub_asic = DMUB_ASIC_DCN30;
1511 fw_name_dmub = FIRMWARE_NAVY_FLOUNDER_DMUB;
1514 dmub_asic = DMUB_ASIC_DCN301;
1515 fw_name_dmub = FIRMWARE_VANGOGH_DMUB;
1517 case CHIP_DIMGREY_CAVEFISH:
1518 dmub_asic = DMUB_ASIC_DCN302;
1519 fw_name_dmub = FIRMWARE_DIMGREY_CAVEFISH_DMUB;
1521 case CHIP_BEIGE_GOBY:
1522 dmub_asic = DMUB_ASIC_DCN303;
1523 fw_name_dmub = FIRMWARE_BEIGE_GOBY_DMUB;
1527 /* ASIC doesn't support DMUB. */
1531 r = request_firmware_direct(&adev->dm.dmub_fw, fw_name_dmub, adev->dev);
1533 DRM_ERROR("DMUB firmware loading failed: %d\n", r);
1537 r = amdgpu_ucode_validate(adev->dm.dmub_fw);
1539 DRM_ERROR("Couldn't validate DMUB firmware: %d\n", r);
1543 hdr = (const struct dmcub_firmware_header_v1_0 *)adev->dm.dmub_fw->data;
1545 if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) {
1546 adev->firmware.ucode[AMDGPU_UCODE_ID_DMCUB].ucode_id =
1547 AMDGPU_UCODE_ID_DMCUB;
1548 adev->firmware.ucode[AMDGPU_UCODE_ID_DMCUB].fw =
1550 adev->firmware.fw_size +=
1551 ALIGN(le32_to_cpu(hdr->inst_const_bytes), PAGE_SIZE);
1553 DRM_INFO("Loading DMUB firmware via PSP: version=0x%08X\n",
1554 adev->dm.dmcub_fw_version);
1557 adev->dm.dmcub_fw_version = le32_to_cpu(hdr->header.ucode_version);
1559 adev->dm.dmub_srv = kzalloc(sizeof(*adev->dm.dmub_srv), GFP_KERNEL);
1560 dmub_srv = adev->dm.dmub_srv;
1563 DRM_ERROR("Failed to allocate DMUB service!\n");
1567 memset(&create_params, 0, sizeof(create_params));
1568 create_params.user_ctx = adev;
1569 create_params.funcs.reg_read = amdgpu_dm_dmub_reg_read;
1570 create_params.funcs.reg_write = amdgpu_dm_dmub_reg_write;
1571 create_params.asic = dmub_asic;
1573 /* Create the DMUB service. */
1574 status = dmub_srv_create(dmub_srv, &create_params);
1575 if (status != DMUB_STATUS_OK) {
1576 DRM_ERROR("Error creating DMUB service: %d\n", status);
1580 /* Calculate the size of all the regions for the DMUB service. */
1581 memset(&region_params, 0, sizeof(region_params));
1583 region_params.inst_const_size = le32_to_cpu(hdr->inst_const_bytes) -
1584 PSP_HEADER_BYTES - PSP_FOOTER_BYTES;
1585 region_params.bss_data_size = le32_to_cpu(hdr->bss_data_bytes);
1586 region_params.vbios_size = adev->bios_size;
1587 region_params.fw_bss_data = region_params.bss_data_size ?
1588 adev->dm.dmub_fw->data +
1589 le32_to_cpu(hdr->header.ucode_array_offset_bytes) +
1590 le32_to_cpu(hdr->inst_const_bytes) : NULL;
1591 region_params.fw_inst_const =
1592 adev->dm.dmub_fw->data +
1593 le32_to_cpu(hdr->header.ucode_array_offset_bytes) +
1596 status = dmub_srv_calc_region_info(dmub_srv, &region_params,
1599 if (status != DMUB_STATUS_OK) {
1600 DRM_ERROR("Error calculating DMUB region info: %d\n", status);
1605 * Allocate a framebuffer based on the total size of all the regions.
1606 * TODO: Move this into GART.
1608 r = amdgpu_bo_create_kernel(adev, region_info.fb_size, PAGE_SIZE,
1609 AMDGPU_GEM_DOMAIN_VRAM, &adev->dm.dmub_bo,
1610 &adev->dm.dmub_bo_gpu_addr,
1611 &adev->dm.dmub_bo_cpu_addr);
1615 /* Rebase the regions on the framebuffer address. */
1616 memset(&fb_params, 0, sizeof(fb_params));
1617 fb_params.cpu_addr = adev->dm.dmub_bo_cpu_addr;
1618 fb_params.gpu_addr = adev->dm.dmub_bo_gpu_addr;
1619 fb_params.region_info = &region_info;
1621 adev->dm.dmub_fb_info =
1622 kzalloc(sizeof(*adev->dm.dmub_fb_info), GFP_KERNEL);
1623 fb_info = adev->dm.dmub_fb_info;
1627 "Failed to allocate framebuffer info for DMUB service!\n");
1631 status = dmub_srv_calc_fb_info(dmub_srv, &fb_params, fb_info);
1632 if (status != DMUB_STATUS_OK) {
1633 DRM_ERROR("Error calculating DMUB FB info: %d\n", status);
1640 static int dm_sw_init(void *handle)
1642 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1645 r = dm_dmub_sw_init(adev);
1649 return load_dmcu_fw(adev);
1652 static int dm_sw_fini(void *handle)
1654 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1656 kfree(adev->dm.dmub_fb_info);
1657 adev->dm.dmub_fb_info = NULL;
1659 if (adev->dm.dmub_srv) {
1660 dmub_srv_destroy(adev->dm.dmub_srv);
1661 adev->dm.dmub_srv = NULL;
1664 release_firmware(adev->dm.dmub_fw);
1665 adev->dm.dmub_fw = NULL;
1667 release_firmware(adev->dm.fw_dmcu);
1668 adev->dm.fw_dmcu = NULL;
1673 static int detect_mst_link_for_all_connectors(struct drm_device *dev)
1675 struct amdgpu_dm_connector *aconnector;
1676 struct drm_connector *connector;
1677 struct drm_connector_list_iter iter;
1680 drm_connector_list_iter_begin(dev, &iter);
1681 drm_for_each_connector_iter(connector, &iter) {
1682 aconnector = to_amdgpu_dm_connector(connector);
1683 if (aconnector->dc_link->type == dc_connection_mst_branch &&
1684 aconnector->mst_mgr.aux) {
1685 DRM_DEBUG_DRIVER("DM_MST: starting TM on aconnector: %p [id: %d]\n",
1687 aconnector->base.base.id);
1689 ret = drm_dp_mst_topology_mgr_set_mst(&aconnector->mst_mgr, true);
1691 DRM_ERROR("DM_MST: Failed to start MST\n");
1692 aconnector->dc_link->type =
1693 dc_connection_single;
1698 drm_connector_list_iter_end(&iter);
1703 static int dm_late_init(void *handle)
1705 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1707 struct dmcu_iram_parameters params;
1708 unsigned int linear_lut[16];
1710 struct dmcu *dmcu = NULL;
1713 dmcu = adev->dm.dc->res_pool->dmcu;
1715 for (i = 0; i < 16; i++)
1716 linear_lut[i] = 0xFFFF * i / 15;
1719 params.backlight_ramping_start = 0xCCCC;
1720 params.backlight_ramping_reduction = 0xCCCCCCCC;
1721 params.backlight_lut_array_size = 16;
1722 params.backlight_lut_array = linear_lut;
1724 /* Min backlight level after ABM reduction; don't allow below 1%
1725 * 0xFFFF x 0.01 = 0x28F
1727 params.min_abm_backlight = 0x28F;
1729 /* In the case where ABM is implemented on dmcub,
1730 * the dmcu object will be NULL.
1731 * ABM 2.4 and up are implemented on dmcub.
1734 ret = dmcu_load_iram(dmcu, params);
1735 else if (adev->dm.dc->ctx->dmub_srv)
1736 ret = dmub_init_abm_config(adev->dm.dc->res_pool, params);
1741 return detect_mst_link_for_all_connectors(adev_to_drm(adev));
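/* Suspend or resume the MST topology managers around S3. If a manager
 * fails to resume, MST is torn down and a hotplug event is sent so that
 * userspace re-probes the topology.
 */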
1744 static void s3_handle_mst(struct drm_device *dev, bool suspend)
1746 struct amdgpu_dm_connector *aconnector;
1747 struct drm_connector *connector;
1748 struct drm_connector_list_iter iter;
1749 struct drm_dp_mst_topology_mgr *mgr;
1751 bool need_hotplug = false;
1753 drm_connector_list_iter_begin(dev, &iter);
1754 drm_for_each_connector_iter(connector, &iter) {
1755 aconnector = to_amdgpu_dm_connector(connector);
1756 if (aconnector->dc_link->type != dc_connection_mst_branch ||
1757 aconnector->mst_port)
1760 mgr = &aconnector->mst_mgr;
1763 drm_dp_mst_topology_mgr_suspend(mgr);
1765 ret = drm_dp_mst_topology_mgr_resume(mgr, true);
1767 drm_dp_mst_topology_mgr_set_mst(mgr, false);
1768 need_hotplug = true;
1772 drm_connector_list_iter_end(&iter);
1775 drm_kms_helper_hotplug_event(dev);
1778 static int amdgpu_dm_smu_write_watermarks_table(struct amdgpu_device *adev)
1780 struct smu_context *smu = &adev->smu;
1783 if (!is_support_sw_smu(adev))
1786 /* This interface is for dGPU Navi1x. The Linux dc-pplib interface depends
1787 * on the Windows driver dc implementation.
1788 * For Navi1x, clock settings of dcn watermarks are fixed. the settings
1789 * should be passed to smu during boot up and resume from s3.
1790 * boot up: dc calculate dcn watermark clock settings within dc_create,
1791 * dcn20_resource_construct
1792 * then call pplib functions below to pass the settings to smu:
1793 * smu_set_watermarks_for_clock_ranges
1794 * smu_set_watermarks_table
1795 * navi10_set_watermarks_table
1796 * smu_write_watermarks_table
1798 * For Renoir, clock settings of dcn watermark are also fixed values.
1799 * dc has implemented a different flow for the Windows driver:
1800 * dc_hardware_init / dc_set_power_state
1805 * smu_set_watermarks_for_clock_ranges
1806 * renoir_set_watermarks_table
1807 * smu_write_watermarks_table
1810 * dc_hardware_init -> amdgpu_dm_init
1811 * dc_set_power_state --> dm_resume
1813 * therefore, this function applies to navi10/12/14 but not Renoir
1816 switch (adev->asic_type) {
1825 ret = smu_write_watermarks_table(smu);
1827 DRM_ERROR("Failed to update WMTABLE!\n");
1835 * dm_hw_init() - Initialize DC device
1836 * @handle: The base driver device containing the amdgpu_dm device.
1838 * Initialize the &struct amdgpu_display_manager device. This involves calling
1839 * the initializers of each DM component, then populating the struct with them.
1841 * Although the function implies hardware initialization, both hardware and
1842 * software are initialized here. Splitting them out to their relevant init
1843 * hooks is a future TODO item.
1845 * Some notable things that are initialized here:
1847 * - Display Core, both software and hardware
1848 * - DC modules that we need (freesync and color management)
1849 * - DRM software states
1850 * - Interrupt sources and handlers
1852 * - Debug FS entries, if enabled
1854 static int dm_hw_init(void *handle)
1856 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1857 /* Create DAL display manager */
1858 amdgpu_dm_init(adev);
1859 amdgpu_dm_hpd_init(adev);
1865 * dm_hw_fini() - Teardown DC device
1866 * @handle: The base driver device containing the amdgpu_dm device.
1868 * Teardown components within &struct amdgpu_display_manager that require
1869 * cleanup. This involves cleaning up the DRM device, DC, and any modules that
1870 * were loaded. Also flush IRQ workqueues and disable them.
1872 static int dm_hw_fini(void *handle)
1874 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1876 amdgpu_dm_hpd_fini(adev);
1878 amdgpu_dm_irq_fini(adev);
1879 amdgpu_dm_fini(adev);
1884 static int dm_enable_vblank(struct drm_crtc *crtc);
1885 static void dm_disable_vblank(struct drm_crtc *crtc);
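/* Enable or disable pageflip and vblank interrupts for every stream in the
 * given DC state; used when suspending and resuming around a GPU reset.
 */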
1887 static void dm_gpureset_toggle_interrupts(struct amdgpu_device *adev,
1888 struct dc_state *state, bool enable)
1890 enum dc_irq_source irq_source;
1891 struct amdgpu_crtc *acrtc;
1895 for (i = 0; i < state->stream_count; i++) {
1896 acrtc = get_crtc_by_otg_inst(
1897 adev, state->stream_status[i].primary_otg_inst);
1899 if (acrtc && state->stream_status[i].plane_count != 0) {
1900 irq_source = IRQ_TYPE_PFLIP + acrtc->otg_inst;
1901 rc = dc_interrupt_set(adev->dm.dc, irq_source, enable) ? 0 : -EBUSY;
1902 DRM_DEBUG_VBL("crtc %d - vupdate irq %sabling: r=%d\n",
1903 acrtc->crtc_id, enable ? "en" : "dis", rc);
1905 DRM_WARN("Failed to %s pflip interrupts\n",
1906 enable ? "enable" : "disable");
1909 rc = dm_enable_vblank(&acrtc->base);
1911 DRM_WARN("Failed to enable vblank interrupts\n");
1913 dm_disable_vblank(&acrtc->base);
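/* Commit a DC state with all planes and streams removed; used from
 * dm_suspend() during GPU reset.
 */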
1921 static enum dc_status amdgpu_dm_commit_zero_streams(struct dc *dc)
1923 struct dc_state *context = NULL;
1924 enum dc_status res = DC_ERROR_UNEXPECTED;
1926 struct dc_stream_state *del_streams[MAX_PIPES];
1927 int del_streams_count = 0;
1929 memset(del_streams, 0, sizeof(del_streams));
1931 context = dc_create_state(dc);
1932 if (context == NULL)
1933 goto context_alloc_fail;
1935 dc_resource_state_copy_construct_current(dc, context);
1937 /* First remove from context all streams */
1938 for (i = 0; i < context->stream_count; i++) {
1939 struct dc_stream_state *stream = context->streams[i];
1941 del_streams[del_streams_count++] = stream;
1944 /* Remove all planes for removed streams and then remove the streams */
1945 for (i = 0; i < del_streams_count; i++) {
1946 if (!dc_rem_all_planes_for_stream(dc, del_streams[i], context)) {
1947 res = DC_FAIL_DETACH_SURFACES;
1951 res = dc_remove_stream_from_ctx(dc, context, del_streams[i]);
1957 res = dc_validate_global_state(dc, context, false);
1960 DRM_ERROR("%s:resource validation failed, dc_status:%d\n", __func__, res);
1964 res = dc_commit_state(dc, context);
1967 dc_release_state(context);
1973 static int dm_suspend(void *handle)
1975 struct amdgpu_device *adev = handle;
1976 struct amdgpu_display_manager *dm = &adev->dm;
1979 if (amdgpu_in_reset(adev)) {
1980 mutex_lock(&dm->dc_lock);
1982 #if defined(CONFIG_DRM_AMD_DC_DCN)
1983 dc_allow_idle_optimizations(adev->dm.dc, false);
1986 dm->cached_dc_state = dc_copy_state(dm->dc->current_state);
1988 dm_gpureset_toggle_interrupts(adev, dm->cached_dc_state, false);
1990 amdgpu_dm_commit_zero_streams(dm->dc);
1992 amdgpu_dm_irq_suspend(adev);
1997 WARN_ON(adev->dm.cached_state);
1998 adev->dm.cached_state = drm_atomic_helper_suspend(adev_to_drm(adev));
2000 s3_handle_mst(adev_to_drm(adev), true);
2002 amdgpu_dm_irq_suspend(adev);
2005 dc_set_power_state(dm->dc, DC_ACPI_CM_POWER_STATE_D3);
2010 static struct amdgpu_dm_connector *
2011 amdgpu_dm_find_first_crtc_matching_connector(struct drm_atomic_state *state,
2012 struct drm_crtc *crtc)
2015 struct drm_connector_state *new_con_state;
2016 struct drm_connector *connector;
2017 struct drm_crtc *crtc_from_state;
2019 for_each_new_connector_in_state(state, connector, new_con_state, i) {
2020 crtc_from_state = new_con_state->crtc;
2022 if (crtc_from_state == crtc)
2023 return to_amdgpu_dm_connector(connector);
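/* Emulate a link detection: release the previous sink, create a new one
 * matching the connector signal type and read the EDID through dm_helpers.
 */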
2029 static void emulated_link_detect(struct dc_link *link)
2031 struct dc_sink_init_data sink_init_data = { 0 };
2032 struct display_sink_capability sink_caps = { 0 };
2033 enum dc_edid_status edid_status;
2034 struct dc_context *dc_ctx = link->ctx;
2035 struct dc_sink *sink = NULL;
2036 struct dc_sink *prev_sink = NULL;
2038 link->type = dc_connection_none;
2039 prev_sink = link->local_sink;
2042 dc_sink_release(prev_sink);
2044 switch (link->connector_signal) {
2045 case SIGNAL_TYPE_HDMI_TYPE_A: {
2046 sink_caps.transaction_type = DDC_TRANSACTION_TYPE_I2C;
2047 sink_caps.signal = SIGNAL_TYPE_HDMI_TYPE_A;
2051 case SIGNAL_TYPE_DVI_SINGLE_LINK: {
2052 sink_caps.transaction_type = DDC_TRANSACTION_TYPE_I2C;
2053 sink_caps.signal = SIGNAL_TYPE_DVI_SINGLE_LINK;
2057 case SIGNAL_TYPE_DVI_DUAL_LINK: {
2058 sink_caps.transaction_type = DDC_TRANSACTION_TYPE_I2C;
2059 sink_caps.signal = SIGNAL_TYPE_DVI_DUAL_LINK;
2063 case SIGNAL_TYPE_LVDS: {
2064 sink_caps.transaction_type = DDC_TRANSACTION_TYPE_I2C;
2065 sink_caps.signal = SIGNAL_TYPE_LVDS;
2069 case SIGNAL_TYPE_EDP: {
2070 sink_caps.transaction_type =
2071 DDC_TRANSACTION_TYPE_I2C_OVER_AUX;
2072 sink_caps.signal = SIGNAL_TYPE_EDP;
2076 case SIGNAL_TYPE_DISPLAY_PORT: {
2077 sink_caps.transaction_type =
2078 DDC_TRANSACTION_TYPE_I2C_OVER_AUX;
2079 sink_caps.signal = SIGNAL_TYPE_VIRTUAL;
2084 DC_ERROR("Invalid connector type! signal:%d\n",
2085 link->connector_signal);
2089 sink_init_data.link = link;
2090 sink_init_data.sink_signal = sink_caps.signal;
2092 sink = dc_sink_create(&sink_init_data);
2094 DC_ERROR("Failed to create sink!\n");
2098 /* dc_sink_create returns a new reference */
2099 link->local_sink = sink;
2101 edid_status = dm_helpers_read_local_edid(
2106 if (edid_status != EDID_OK)
2107 DC_ERROR("Failed to read EDID");
2111 static void dm_gpureset_commit_state(struct dc_state *dc_state,
2112 struct amdgpu_display_manager *dm)
2115 struct dc_surface_update surface_updates[MAX_SURFACES];
2116 struct dc_plane_info plane_infos[MAX_SURFACES];
2117 struct dc_scaling_info scaling_infos[MAX_SURFACES];
2118 struct dc_flip_addrs flip_addrs[MAX_SURFACES];
2119 struct dc_stream_update stream_update;
2123 bundle = kzalloc(sizeof(*bundle), GFP_KERNEL);
2126 dm_error("Failed to allocate update bundle\n");
2130 for (k = 0; k < dc_state->stream_count; k++) {
2131 bundle->stream_update.stream = dc_state->streams[k];
2133 for (m = 0; m < dc_state->stream_status->plane_count; m++) {
2134 bundle->surface_updates[m].surface =
2135 dc_state->stream_status->plane_states[m];
2136 bundle->surface_updates[m].surface->force_full_update =
2139 dc_commit_updates_for_stream(
2140 dm->dc, bundle->surface_updates,
2141 dc_state->stream_status->plane_count,
2142 dc_state->streams[k], &bundle->stream_update, dc_state);
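/*
 * Force DPMS off on the stream currently driven by @link: find the stream via
 * dc_stream_find_from_link() and commit a stream update with dpms_off set,
 * while holding the DM's DC lock.
 */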
2151 static void dm_set_dpms_off(struct dc_link *link)
2153 struct dc_stream_state *stream_state;
2154 struct amdgpu_dm_connector *aconnector = link->priv;
2155 struct amdgpu_device *adev = drm_to_adev(aconnector->base.dev);
2156 struct dc_stream_update stream_update;
2157 bool dpms_off = true;
2159 memset(&stream_update, 0, sizeof(stream_update));
2160 stream_update.dpms_off = &dpms_off;
2162 mutex_lock(&adev->dm.dc_lock);
2163 stream_state = dc_stream_find_from_link(link);
2165 if (stream_state == NULL) {
2166 DRM_DEBUG_DRIVER("Error finding stream state associated with link!\n");
2167 mutex_unlock(&adev->dm.dc_lock);
2171 stream_update.stream = stream_state;
2172 dc_commit_updates_for_stream(stream_state->ctx->dc, NULL, 0,
2173 stream_state, &stream_update,
2174 stream_state->ctx->dc->current_state);
2175 mutex_unlock(&adev->dm.dc_lock);
2178 static int dm_resume(void *handle)
2180 struct amdgpu_device *adev = handle;
2181 struct drm_device *ddev = adev_to_drm(adev);
2182 struct amdgpu_display_manager *dm = &adev->dm;
2183 struct amdgpu_dm_connector *aconnector;
2184 struct drm_connector *connector;
2185 struct drm_connector_list_iter iter;
2186 struct drm_crtc *crtc;
2187 struct drm_crtc_state *new_crtc_state;
2188 struct dm_crtc_state *dm_new_crtc_state;
2189 struct drm_plane *plane;
2190 struct drm_plane_state *new_plane_state;
2191 struct dm_plane_state *dm_new_plane_state;
2192 struct dm_atomic_state *dm_state = to_dm_atomic_state(dm->atomic_obj.state);
2193 enum dc_connection_type new_connection_type = dc_connection_none;
2194 struct dc_state *dc_state;
2197 if (amdgpu_in_reset(adev)) {
2198 dc_state = dm->cached_dc_state;
2200 r = dm_dmub_hw_init(adev);
2202 DRM_ERROR("DMUB interface failed to initialize: status=%d\n", r);
2204 dc_set_power_state(dm->dc, DC_ACPI_CM_POWER_STATE_D0);
2207 amdgpu_dm_irq_resume_early(adev);
2209 for (i = 0; i < dc_state->stream_count; i++) {
2210 dc_state->streams[i]->mode_changed = true;
2211 for (j = 0; j < dc_state->stream_status->plane_count; j++) {
2212 dc_state->stream_status->plane_states[j]->update_flags.raw
2217 WARN_ON(!dc_commit_state(dm->dc, dc_state));
2219 dm_gpureset_commit_state(dm->cached_dc_state, dm);
2221 dm_gpureset_toggle_interrupts(adev, dm->cached_dc_state, true);
2223 dc_release_state(dm->cached_dc_state);
2224 dm->cached_dc_state = NULL;
2226 amdgpu_dm_irq_resume_late(adev);
2228 mutex_unlock(&dm->dc_lock);
2232 /* Recreate dc_state - DC invalidates it when setting power state to S3. */
2233 dc_release_state(dm_state->context);
2234 dm_state->context = dc_create_state(dm->dc);
2235 /* TODO: Remove dc_state->dccg, use dc->dccg directly. */
2236 dc_resource_state_construct(dm->dc, dm_state->context);
2238 /* Before powering on DC we need to re-initialize DMUB. */
2239 r = dm_dmub_hw_init(adev);
2241 DRM_ERROR("DMUB interface failed to initialize: status=%d\n", r);
2243 /* power on hardware */
2244 dc_set_power_state(dm->dc, DC_ACPI_CM_POWER_STATE_D0);
2246 /* program HPD filter */
2250 * Early enable HPD Rx IRQ; this should be done before setting the mode, as short
2251 * pulse interrupts are used for MST
2253 amdgpu_dm_irq_resume_early(adev);
2255 /* On resume we need to rewrite the MSTM control bits to enable MST */
2256 s3_handle_mst(ddev, false);
2259 drm_connector_list_iter_begin(ddev, &iter);
2260 drm_for_each_connector_iter(connector, &iter) {
2261 aconnector = to_amdgpu_dm_connector(connector);
2264 * This is the case when traversing through already created
2265 * MST connectors; they should be skipped.
2267 if (aconnector->mst_port)
2270 mutex_lock(&aconnector->hpd_lock);
2271 if (!dc_link_detect_sink(aconnector->dc_link, &new_connection_type))
2272 DRM_ERROR("KMS: Failed to detect connector\n");
2274 if (aconnector->base.force && new_connection_type == dc_connection_none)
2275 emulated_link_detect(aconnector->dc_link);
2277 dc_link_detect(aconnector->dc_link, DETECT_REASON_HPD);
2279 if (aconnector->fake_enable && aconnector->dc_link->local_sink)
2280 aconnector->fake_enable = false;
2282 if (aconnector->dc_sink)
2283 dc_sink_release(aconnector->dc_sink);
2284 aconnector->dc_sink = NULL;
2285 amdgpu_dm_update_connector_after_detect(aconnector);
2286 mutex_unlock(&aconnector->hpd_lock);
2288 drm_connector_list_iter_end(&iter);
2290 /* Force mode set in atomic commit */
2291 for_each_new_crtc_in_state(dm->cached_state, crtc, new_crtc_state, i)
2292 new_crtc_state->active_changed = true;
2295 * atomic_check is expected to create the dc states. We need to release
2296 * them here, since they were duplicated as part of the suspend procedure.
2299 for_each_new_crtc_in_state(dm->cached_state, crtc, new_crtc_state, i) {
2300 dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
2301 if (dm_new_crtc_state->stream) {
2302 WARN_ON(kref_read(&dm_new_crtc_state->stream->refcount) > 1);
2303 dc_stream_release(dm_new_crtc_state->stream);
2304 dm_new_crtc_state->stream = NULL;
2308 for_each_new_plane_in_state(dm->cached_state, plane, new_plane_state, i) {
2309 dm_new_plane_state = to_dm_plane_state(new_plane_state);
2310 if (dm_new_plane_state->dc_state) {
2311 WARN_ON(kref_read(&dm_new_plane_state->dc_state->refcount) > 1);
2312 dc_plane_state_release(dm_new_plane_state->dc_state);
2313 dm_new_plane_state->dc_state = NULL;
2317 drm_atomic_helper_resume(ddev, dm->cached_state);
2319 dm->cached_state = NULL;
2321 amdgpu_dm_irq_resume_late(adev);
2323 amdgpu_dm_smu_write_watermarks_table(adev);
2331 * DM (and consequently DC) is registered in the amdgpu base driver as an IP
2332 * block. When CONFIG_DRM_AMD_DC is enabled, the DM device IP block is added to
2333 * the base driver's device list to be initialized and torn down accordingly.
2335 * The functions to do so are provided as hooks in &struct amd_ip_funcs.
2338 static const struct amd_ip_funcs amdgpu_dm_funcs = {
2340 .early_init = dm_early_init,
2341 .late_init = dm_late_init,
2342 .sw_init = dm_sw_init,
2343 .sw_fini = dm_sw_fini,
2344 .hw_init = dm_hw_init,
2345 .hw_fini = dm_hw_fini,
2346 .suspend = dm_suspend,
2347 .resume = dm_resume,
2348 .is_idle = dm_is_idle,
2349 .wait_for_idle = dm_wait_for_idle,
2350 .check_soft_reset = dm_check_soft_reset,
2351 .soft_reset = dm_soft_reset,
2352 .set_clockgating_state = dm_set_clockgating_state,
2353 .set_powergating_state = dm_set_powergating_state,
2356 const struct amdgpu_ip_block_version dm_ip_block =
2358 .type = AMD_IP_BLOCK_TYPE_DCE,
2362 .funcs = &amdgpu_dm_funcs,
2372 static const struct drm_mode_config_funcs amdgpu_dm_mode_funcs = {
2373 .fb_create = amdgpu_display_user_framebuffer_create,
2374 .get_format_info = amd_get_format_info,
2375 .output_poll_changed = drm_fb_helper_output_poll_changed,
2376 .atomic_check = amdgpu_dm_atomic_check,
2377 .atomic_commit = drm_atomic_helper_commit,
2380 static struct drm_mode_config_helper_funcs amdgpu_dm_mode_config_helperfuncs = {
2381 .atomic_commit_tail = amdgpu_dm_atomic_commit_tail
2384 static void update_connector_ext_caps(struct amdgpu_dm_connector *aconnector)
2386 u32 max_cll, min_cll, max, min, q, r;
2387 struct amdgpu_dm_backlight_caps *caps;
2388 struct amdgpu_display_manager *dm;
2389 struct drm_connector *conn_base;
2390 struct amdgpu_device *adev;
2391 struct dc_link *link = NULL;
2392 static const u8 pre_computed_values[] = {
2393 50, 51, 52, 53, 55, 56, 57, 58, 59, 61, 62, 63, 65, 66, 68, 69,
2394 71, 72, 74, 75, 77, 79, 81, 82, 84, 86, 88, 90, 92, 94, 96, 98};
2396 if (!aconnector || !aconnector->dc_link)
2399 link = aconnector->dc_link;
2400 if (link->connector_signal != SIGNAL_TYPE_EDP)
2403 conn_base = &aconnector->base;
2404 adev = drm_to_adev(conn_base->dev);
2406 caps = &dm->backlight_caps;
2407 caps->ext_caps = &aconnector->dc_link->dpcd_sink_ext_caps;
2408 caps->aux_support = false;
2409 max_cll = conn_base->hdr_sink_metadata.hdmi_type1.max_cll;
2410 min_cll = conn_base->hdr_sink_metadata.hdmi_type1.min_cll;
2412 if (caps->ext_caps->bits.oled == 1 ||
2413 caps->ext_caps->bits.sdr_aux_backlight_control == 1 ||
2414 caps->ext_caps->bits.hdr_aux_backlight_control == 1)
2415 caps->aux_support = true;
2417 if (amdgpu_backlight == 0)
2418 caps->aux_support = false;
2419 else if (amdgpu_backlight == 1)
2420 caps->aux_support = true;
2422 /* From the specification (CTA-861-G), for calculating the maximum
2423 * luminance we need to use:
2424 * Luminance = 50*2**(CV/32)
2425 * Where CV is a one-byte value.
2426 * Evaluating this expression would require floating-point precision;
2427 * to avoid that complexity, we take advantage of the fact that CV is
2428 * divided by a constant. From Euclid's division algorithm, we know that
2429 * CV can be written as: CV = 32*q + r. Next, we replace CV in the
2430 * Luminance expression and get 50*(2**q)*(2**(r/32)), hence we just
2431 * need to pre-compute the values of 50*(2**(r/32)). For pre-computing
2432 * those values we just used the following Ruby line:
2433 * (0...32).each {|cv| puts (50*2**(cv/32.0)).round}
2434 * The results of the above expressions can be verified at
2435 * pre_computed_values.
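 *
 * As a rough worked example, with q = max_cll / 32 and r = max_cll % 32:
 * max_cll = 100 gives q = 3 and r = 4, so max = (1 << 3) * 55 = 440, close
 * to the exact 50*2**(100/32) ~= 436.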
2439 max = (1 << q) * pre_computed_values[r];
2441 // min luminance: maxLum * (CV/255)^2 / 100
2442 q = DIV_ROUND_CLOSEST(min_cll, 255);
2443 min = max * DIV_ROUND_CLOSEST((q * q), 100);
2445 caps->aux_max_input_signal = max;
2446 caps->aux_min_input_signal = min;
2449 void amdgpu_dm_update_connector_after_detect(
2450 struct amdgpu_dm_connector *aconnector)
2452 struct drm_connector *connector = &aconnector->base;
2453 struct drm_device *dev = connector->dev;
2454 struct dc_sink *sink;
2456 /* MST handled by drm_mst framework */
2457 if (aconnector->mst_mgr.mst_state == true)
2460 sink = aconnector->dc_link->local_sink;
2462 dc_sink_retain(sink);
2465 * An EDID-managed connector gets its first update only in the mode_valid hook;
2466 * the connector sink is then set to either a fake or a physical sink, depending
2467 * on link status. Skip if this was already done during boot.
2469 if (aconnector->base.force != DRM_FORCE_UNSPECIFIED
2470 && aconnector->dc_em_sink) {
2473 * For S3 resume with headless, use the emulated sink (dc_em_sink) to fake a
2474 * stream, because on resume connector->sink is set to NULL
2476 mutex_lock(&dev->mode_config.mutex);
2479 if (aconnector->dc_sink) {
2480 amdgpu_dm_update_freesync_caps(connector, NULL);
2482 * The retain and release below are used to
2483 * bump up the refcount for the sink, because the link doesn't point
2484 * to it anymore after disconnect; on the next crtc-to-connector
2485 * reshuffle by UMD we would otherwise get an unwanted dc_sink release.
2487 dc_sink_release(aconnector->dc_sink);
2489 aconnector->dc_sink = sink;
2490 dc_sink_retain(aconnector->dc_sink);
2491 amdgpu_dm_update_freesync_caps(connector,
2494 amdgpu_dm_update_freesync_caps(connector, NULL);
2495 if (!aconnector->dc_sink) {
2496 aconnector->dc_sink = aconnector->dc_em_sink;
2497 dc_sink_retain(aconnector->dc_sink);
2501 mutex_unlock(&dev->mode_config.mutex);
2504 dc_sink_release(sink);
2509 * TODO: temporary guard until a proper fix is found;
2510 * if this sink is an MST sink, we should not do anything
2512 if (sink && sink->sink_signal == SIGNAL_TYPE_DISPLAY_PORT_MST) {
2513 dc_sink_release(sink);
2517 if (aconnector->dc_sink == sink) {
2519 * We got a DP short pulse (Link Loss, DP CTS, etc...).
2522 DRM_DEBUG_DRIVER("DCHPD: connector_id=%d: dc_sink didn't change.\n",
2523 aconnector->connector_id);
2525 dc_sink_release(sink);
2529 DRM_DEBUG_DRIVER("DCHPD: connector_id=%d: Old sink=%p New sink=%p\n",
2530 aconnector->connector_id, aconnector->dc_sink, sink);
2532 mutex_lock(&dev->mode_config.mutex);
2535 * 1. Update status of the drm connector
2536 * 2. Send an event and let userspace tell us what to do
2540 * TODO: check if we still need the S3 mode update workaround.
2541 * If yes, put it here.
2543 if (aconnector->dc_sink) {
2544 amdgpu_dm_update_freesync_caps(connector, NULL);
2545 dc_sink_release(aconnector->dc_sink);
2548 aconnector->dc_sink = sink;
2549 dc_sink_retain(aconnector->dc_sink);
2550 if (sink->dc_edid.length == 0) {
2551 aconnector->edid = NULL;
2552 if (aconnector->dc_link->aux_mode) {
2553 drm_dp_cec_unset_edid(
2554 &aconnector->dm_dp_aux.aux);
2558 (struct edid *)sink->dc_edid.raw_edid;
2560 drm_connector_update_edid_property(connector,
2562 if (aconnector->dc_link->aux_mode)
2563 drm_dp_cec_set_edid(&aconnector->dm_dp_aux.aux,
2567 amdgpu_dm_update_freesync_caps(connector, aconnector->edid);
2568 update_connector_ext_caps(aconnector);
2570 drm_dp_cec_unset_edid(&aconnector->dm_dp_aux.aux);
2571 amdgpu_dm_update_freesync_caps(connector, NULL);
2572 drm_connector_update_edid_property(connector, NULL);
2573 aconnector->num_modes = 0;
2574 dc_sink_release(aconnector->dc_sink);
2575 aconnector->dc_sink = NULL;
2576 aconnector->edid = NULL;
2577 #ifdef CONFIG_DRM_AMD_DC_HDCP
2578 /* Set CP to DESIRED if it was ENABLED, so we can re-enable it again on hotplug */
2579 if (connector->state->content_protection == DRM_MODE_CONTENT_PROTECTION_ENABLED)
2580 connector->state->content_protection = DRM_MODE_CONTENT_PROTECTION_DESIRED;
2584 mutex_unlock(&dev->mode_config.mutex);
2586 update_subconnector_property(aconnector);
2589 dc_sink_release(sink);
2592 static void handle_hpd_irq(void *param)
2594 struct amdgpu_dm_connector *aconnector = (struct amdgpu_dm_connector *)param;
2595 struct drm_connector *connector = &aconnector->base;
2596 struct drm_device *dev = connector->dev;
2597 enum dc_connection_type new_connection_type = dc_connection_none;
2598 struct amdgpu_device *adev = drm_to_adev(dev);
2599 #ifdef CONFIG_DRM_AMD_DC_HDCP
2600 struct dm_connector_state *dm_con_state = to_dm_connector_state(connector->state);
2603 if (adev->dm.disable_hpd_irq)
2607 * In case of failure, or for MST, there is no need to update the connector status
2608 * or notify the OS, since (in the MST case) MST does this in its own context.
2610 mutex_lock(&aconnector->hpd_lock);
2612 #ifdef CONFIG_DRM_AMD_DC_HDCP
2613 if (adev->dm.hdcp_workqueue) {
2614 hdcp_reset_display(adev->dm.hdcp_workqueue, aconnector->dc_link->link_index);
2615 dm_con_state->update_hdcp = true;
2618 if (aconnector->fake_enable)
2619 aconnector->fake_enable = false;
2621 if (!dc_link_detect_sink(aconnector->dc_link, &new_connection_type))
2622 DRM_ERROR("KMS: Failed to detect connector\n");
2624 if (aconnector->base.force && new_connection_type == dc_connection_none) {
2625 emulated_link_detect(aconnector->dc_link);
2628 drm_modeset_lock_all(dev);
2629 dm_restore_drm_connector_state(dev, connector);
2630 drm_modeset_unlock_all(dev);
2632 if (aconnector->base.force == DRM_FORCE_UNSPECIFIED)
2633 drm_kms_helper_hotplug_event(dev);
2635 } else if (dc_link_detect(aconnector->dc_link, DETECT_REASON_HPD)) {
2636 if (new_connection_type == dc_connection_none &&
2637 aconnector->dc_link->type == dc_connection_none)
2638 dm_set_dpms_off(aconnector->dc_link);
2640 amdgpu_dm_update_connector_after_detect(aconnector);
2642 drm_modeset_lock_all(dev);
2643 dm_restore_drm_connector_state(dev, connector);
2644 drm_modeset_unlock_all(dev);
2646 if (aconnector->base.force == DRM_FORCE_UNSPECIFIED)
2647 drm_kms_helper_hotplug_event(dev);
2649 mutex_unlock(&aconnector->hpd_lock);
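/*
 * Service an MST short-pulse interrupt: read the ESI bytes from the sink's
 * DPCD, hand them to the MST topology manager, acknowledge the handled IRQ
 * bits back over DPCD, and repeat while new IRQs are reported (bounded by
 * max_process_count).
 */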
2653 static void dm_handle_hpd_rx_irq(struct amdgpu_dm_connector *aconnector)
2655 uint8_t esi[DP_PSR_ERROR_STATUS - DP_SINK_COUNT_ESI] = { 0 };
2657 bool new_irq_handled = false;
2659 int dpcd_bytes_to_read;
2661 const int max_process_count = 30;
2662 int process_count = 0;
2664 const struct dc_link_status *link_status = dc_link_get_status(aconnector->dc_link);
2666 if (link_status->dpcd_caps->dpcd_rev.raw < 0x12) {
2667 dpcd_bytes_to_read = DP_LANE0_1_STATUS - DP_SINK_COUNT;
2668 /* DPCD 0x200 - 0x201 for downstream IRQ */
2669 dpcd_addr = DP_SINK_COUNT;
2671 dpcd_bytes_to_read = DP_PSR_ERROR_STATUS - DP_SINK_COUNT_ESI;
2672 /* DPCD 0x2002 - 0x2005 for downstream IRQ */
2673 dpcd_addr = DP_SINK_COUNT_ESI;
2676 dret = drm_dp_dpcd_read(
2677 &aconnector->dm_dp_aux.aux,
2680 dpcd_bytes_to_read);
2682 while (dret == dpcd_bytes_to_read &&
2683 process_count < max_process_count) {
2689 DRM_DEBUG_DRIVER("ESI %02x %02x %02x\n", esi[0], esi[1], esi[2]);
2690 /* handle HPD short pulse irq */
2691 if (aconnector->mst_mgr.mst_state)
2693 &aconnector->mst_mgr,
2697 if (new_irq_handled) {
2698 /* ACK at DPCD to notify downstream */
2699 const int ack_dpcd_bytes_to_write =
2700 dpcd_bytes_to_read - 1;
2702 for (retry = 0; retry < 3; retry++) {
2705 wret = drm_dp_dpcd_write(
2706 &aconnector->dm_dp_aux.aux,
2709 ack_dpcd_bytes_to_write);
2710 if (wret == ack_dpcd_bytes_to_write)
2714 /* check if there is new irq to be handled */
2715 dret = drm_dp_dpcd_read(
2716 &aconnector->dm_dp_aux.aux,
2719 dpcd_bytes_to_read);
2721 new_irq_handled = false;
2727 if (process_count == max_process_count)
2728 DRM_DEBUG_DRIVER("Loop exceeded max iterations\n");
2731 static void handle_hpd_rx_irq(void *param)
2733 struct amdgpu_dm_connector *aconnector = (struct amdgpu_dm_connector *)param;
2734 struct drm_connector *connector = &aconnector->base;
2735 struct drm_device *dev = connector->dev;
2736 struct dc_link *dc_link = aconnector->dc_link;
2737 bool is_mst_root_connector = aconnector->mst_mgr.mst_state;
2738 bool result = false;
2739 enum dc_connection_type new_connection_type = dc_connection_none;
2740 struct amdgpu_device *adev = drm_to_adev(dev);
2741 union hpd_irq_data hpd_irq_data;
2743 memset(&hpd_irq_data, 0, sizeof(hpd_irq_data));
2745 if (adev->dm.disable_hpd_irq)
2750 * TODO: Temporarily add a mutex to keep the hpd interrupt from causing a gpio
2751 * conflict; once the i2c helper is implemented, this mutex should be retired.
2754 mutex_lock(&aconnector->hpd_lock);
2756 read_hpd_rx_irq_data(dc_link, &hpd_irq_data);
2758 if ((dc_link->cur_link_settings.lane_count != LANE_COUNT_UNKNOWN) ||
2759 (dc_link->type == dc_connection_mst_branch)) {
2760 if (hpd_irq_data.bytes.device_service_irq.bits.UP_REQ_MSG_RDY) {
2762 dm_handle_hpd_rx_irq(aconnector);
2764 } else if (hpd_irq_data.bytes.device_service_irq.bits.DOWN_REP_MSG_RDY) {
2766 dm_handle_hpd_rx_irq(aconnector);
2771 if (!amdgpu_in_reset(adev)) {
2772 mutex_lock(&adev->dm.dc_lock);
2773 #ifdef CONFIG_DRM_AMD_DC_HDCP
2774 result = dc_link_handle_hpd_rx_irq(dc_link, &hpd_irq_data, NULL);
2776 result = dc_link_handle_hpd_rx_irq(dc_link, NULL, NULL);
2778 mutex_unlock(&adev->dm.dc_lock);
2782 if (result && !is_mst_root_connector) {
2783 /* Downstream Port status changed. */
2784 if (!dc_link_detect_sink(dc_link, &new_connection_type))
2785 DRM_ERROR("KMS: Failed to detect connector\n");
2787 if (aconnector->base.force && new_connection_type == dc_connection_none) {
2788 emulated_link_detect(dc_link);
2790 if (aconnector->fake_enable)
2791 aconnector->fake_enable = false;
2793 amdgpu_dm_update_connector_after_detect(aconnector);
2796 drm_modeset_lock_all(dev);
2797 dm_restore_drm_connector_state(dev, connector);
2798 drm_modeset_unlock_all(dev);
2800 drm_kms_helper_hotplug_event(dev);
2801 } else if (dc_link_detect(dc_link, DETECT_REASON_HPDRX)) {
2803 if (aconnector->fake_enable)
2804 aconnector->fake_enable = false;
2806 amdgpu_dm_update_connector_after_detect(aconnector);
2809 drm_modeset_lock_all(dev);
2810 dm_restore_drm_connector_state(dev, connector);
2811 drm_modeset_unlock_all(dev);
2813 drm_kms_helper_hotplug_event(dev);
2816 #ifdef CONFIG_DRM_AMD_DC_HDCP
2817 if (hpd_irq_data.bytes.device_service_irq.bits.CP_IRQ) {
2818 if (adev->dm.hdcp_workqueue)
2819 hdcp_handle_cpirq(adev->dm.hdcp_workqueue, aconnector->base.index);
2823 if (dc_link->type != dc_connection_mst_branch)
2824 drm_dp_cec_irq(&aconnector->dm_dp_aux.aux);
2826 mutex_unlock(&aconnector->hpd_lock);
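/*
 * Walk the connector list and register low-IRQ-context handlers for each
 * link's HPD interrupt source and, where available, its HPD RX (DP short
 * pulse) interrupt source.
 */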
2829 static void register_hpd_handlers(struct amdgpu_device *adev)
2831 struct drm_device *dev = adev_to_drm(adev);
2832 struct drm_connector *connector;
2833 struct amdgpu_dm_connector *aconnector;
2834 const struct dc_link *dc_link;
2835 struct dc_interrupt_params int_params = {0};
2837 int_params.requested_polarity = INTERRUPT_POLARITY_DEFAULT;
2838 int_params.current_polarity = INTERRUPT_POLARITY_DEFAULT;
2840 list_for_each_entry(connector,
2841 &dev->mode_config.connector_list, head) {
2843 aconnector = to_amdgpu_dm_connector(connector);
2844 dc_link = aconnector->dc_link;
2846 if (DC_IRQ_SOURCE_INVALID != dc_link->irq_source_hpd) {
2847 int_params.int_context = INTERRUPT_LOW_IRQ_CONTEXT;
2848 int_params.irq_source = dc_link->irq_source_hpd;
2850 amdgpu_dm_irq_register_interrupt(adev, &int_params,
2852 (void *) aconnector);
2855 if (DC_IRQ_SOURCE_INVALID != dc_link->irq_source_hpd_rx) {
2857 /* Also register for DP short pulse (hpd_rx). */
2858 int_params.int_context = INTERRUPT_LOW_IRQ_CONTEXT;
2859 int_params.irq_source = dc_link->irq_source_hpd_rx;
2861 amdgpu_dm_irq_register_interrupt(adev, &int_params,
2863 (void *) aconnector);
2868 #if defined(CONFIG_DRM_AMD_DC_SI)
2869 /* Register IRQ sources and initialize IRQ callbacks */
2870 static int dce60_register_irq_handlers(struct amdgpu_device *adev)
2872 struct dc *dc = adev->dm.dc;
2873 struct common_irq_params *c_irq_params;
2874 struct dc_interrupt_params int_params = {0};
2877 unsigned client_id = AMDGPU_IRQ_CLIENTID_LEGACY;
2879 int_params.requested_polarity = INTERRUPT_POLARITY_DEFAULT;
2880 int_params.current_polarity = INTERRUPT_POLARITY_DEFAULT;
2883 * Actions of amdgpu_irq_add_id():
2884 * 1. Register a set() function with base driver.
2885 * Base driver will call set() function to enable/disable an
2886 * interrupt in DC hardware.
2887 * 2. Register amdgpu_dm_irq_handler().
2888 * Base driver will call amdgpu_dm_irq_handler() for ALL interrupts
2889 * coming from DC hardware.
2890 * amdgpu_dm_irq_handler() will re-direct the interrupt to DC
2891 * for acknowledging and handling. */
2893 /* Use VBLANK interrupt */
2894 for (i = 0; i < adev->mode_info.num_crtc; i++) {
2895 r = amdgpu_irq_add_id(adev, client_id, i + 1, &adev->crtc_irq);
2897 DRM_ERROR("Failed to add crtc irq id!\n");
2901 int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
2902 int_params.irq_source =
2903 dc_interrupt_to_irq_source(dc, i + 1, 0);
2905 c_irq_params = &adev->dm.vblank_params[int_params.irq_source - DC_IRQ_SOURCE_VBLANK1];
2907 c_irq_params->adev = adev;
2908 c_irq_params->irq_src = int_params.irq_source;
2910 amdgpu_dm_irq_register_interrupt(adev, &int_params,
2911 dm_crtc_high_irq, c_irq_params);
2914 /* Use GRPH_PFLIP interrupt */
2915 for (i = VISLANDS30_IV_SRCID_D1_GRPH_PFLIP;
2916 i <= VISLANDS30_IV_SRCID_D6_GRPH_PFLIP; i += 2) {
2917 r = amdgpu_irq_add_id(adev, client_id, i, &adev->pageflip_irq);
2919 DRM_ERROR("Failed to add page flip irq id!\n");
2923 int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
2924 int_params.irq_source =
2925 dc_interrupt_to_irq_source(dc, i, 0);
2927 c_irq_params = &adev->dm.pflip_params[int_params.irq_source - DC_IRQ_SOURCE_PFLIP_FIRST];
2929 c_irq_params->adev = adev;
2930 c_irq_params->irq_src = int_params.irq_source;
2932 amdgpu_dm_irq_register_interrupt(adev, &int_params,
2933 dm_pflip_high_irq, c_irq_params);
2938 r = amdgpu_irq_add_id(adev, client_id,
2939 VISLANDS30_IV_SRCID_HOTPLUG_DETECT_A, &adev->hpd_irq);
2941 DRM_ERROR("Failed to add hpd irq id!\n");
2945 register_hpd_handlers(adev);
2951 /* Register IRQ sources and initialize IRQ callbacks */
2952 static int dce110_register_irq_handlers(struct amdgpu_device *adev)
2954 struct dc *dc = adev->dm.dc;
2955 struct common_irq_params *c_irq_params;
2956 struct dc_interrupt_params int_params = {0};
2959 unsigned client_id = AMDGPU_IRQ_CLIENTID_LEGACY;
2961 if (adev->asic_type >= CHIP_VEGA10)
2962 client_id = SOC15_IH_CLIENTID_DCE;
2964 int_params.requested_polarity = INTERRUPT_POLARITY_DEFAULT;
2965 int_params.current_polarity = INTERRUPT_POLARITY_DEFAULT;
2968 * Actions of amdgpu_irq_add_id():
2969 * 1. Register a set() function with base driver.
2970 * Base driver will call set() function to enable/disable an
2971 * interrupt in DC hardware.
2972 * 2. Register amdgpu_dm_irq_handler().
2973 * Base driver will call amdgpu_dm_irq_handler() for ALL interrupts
2974 * coming from DC hardware.
2975 * amdgpu_dm_irq_handler() will re-direct the interrupt to DC
2976 * for acknowledging and handling. */
2978 /* Use VBLANK interrupt */
2979 for (i = VISLANDS30_IV_SRCID_D1_VERTICAL_INTERRUPT0; i <= VISLANDS30_IV_SRCID_D6_VERTICAL_INTERRUPT0; i++) {
2980 r = amdgpu_irq_add_id(adev, client_id, i, &adev->crtc_irq);
2982 DRM_ERROR("Failed to add crtc irq id!\n");
2986 int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
2987 int_params.irq_source =
2988 dc_interrupt_to_irq_source(dc, i, 0);
2990 c_irq_params = &adev->dm.vblank_params[int_params.irq_source - DC_IRQ_SOURCE_VBLANK1];
2992 c_irq_params->adev = adev;
2993 c_irq_params->irq_src = int_params.irq_source;
2995 amdgpu_dm_irq_register_interrupt(adev, &int_params,
2996 dm_crtc_high_irq, c_irq_params);
2999 /* Use VUPDATE interrupt */
3000 for (i = VISLANDS30_IV_SRCID_D1_V_UPDATE_INT; i <= VISLANDS30_IV_SRCID_D6_V_UPDATE_INT; i += 2) {
3001 r = amdgpu_irq_add_id(adev, client_id, i, &adev->vupdate_irq);
3003 DRM_ERROR("Failed to add vupdate irq id!\n");
3007 int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
3008 int_params.irq_source =
3009 dc_interrupt_to_irq_source(dc, i, 0);
3011 c_irq_params = &adev->dm.vupdate_params[int_params.irq_source - DC_IRQ_SOURCE_VUPDATE1];
3013 c_irq_params->adev = adev;
3014 c_irq_params->irq_src = int_params.irq_source;
3016 amdgpu_dm_irq_register_interrupt(adev, &int_params,
3017 dm_vupdate_high_irq, c_irq_params);
3020 /* Use GRPH_PFLIP interrupt */
3021 for (i = VISLANDS30_IV_SRCID_D1_GRPH_PFLIP;
3022 i <= VISLANDS30_IV_SRCID_D6_GRPH_PFLIP; i += 2) {
3023 r = amdgpu_irq_add_id(adev, client_id, i, &adev->pageflip_irq);
3025 DRM_ERROR("Failed to add page flip irq id!\n");
3029 int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
3030 int_params.irq_source =
3031 dc_interrupt_to_irq_source(dc, i, 0);
3033 c_irq_params = &adev->dm.pflip_params[int_params.irq_source - DC_IRQ_SOURCE_PFLIP_FIRST];
3035 c_irq_params->adev = adev;
3036 c_irq_params->irq_src = int_params.irq_source;
3038 amdgpu_dm_irq_register_interrupt(adev, &int_params,
3039 dm_pflip_high_irq, c_irq_params);
3044 r = amdgpu_irq_add_id(adev, client_id,
3045 VISLANDS30_IV_SRCID_HOTPLUG_DETECT_A, &adev->hpd_irq);
3047 DRM_ERROR("Failed to add hpd irq id!\n");
3051 register_hpd_handlers(adev);
3056 #if defined(CONFIG_DRM_AMD_DC_DCN)
3057 /* Register IRQ sources and initialize IRQ callbacks */
3058 static int dcn10_register_irq_handlers(struct amdgpu_device *adev)
3060 struct dc *dc = adev->dm.dc;
3061 struct common_irq_params *c_irq_params;
3062 struct dc_interrupt_params int_params = {0};
3065 #if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
3066 static const unsigned int vrtl_int_srcid[] = {
3067 DCN_1_0__SRCID__OTG1_VERTICAL_INTERRUPT0_CONTROL,
3068 DCN_1_0__SRCID__OTG2_VERTICAL_INTERRUPT0_CONTROL,
3069 DCN_1_0__SRCID__OTG3_VERTICAL_INTERRUPT0_CONTROL,
3070 DCN_1_0__SRCID__OTG4_VERTICAL_INTERRUPT0_CONTROL,
3071 DCN_1_0__SRCID__OTG5_VERTICAL_INTERRUPT0_CONTROL,
3072 DCN_1_0__SRCID__OTG6_VERTICAL_INTERRUPT0_CONTROL
3076 int_params.requested_polarity = INTERRUPT_POLARITY_DEFAULT;
3077 int_params.current_polarity = INTERRUPT_POLARITY_DEFAULT;
3080 * Actions of amdgpu_irq_add_id():
3081 * 1. Register a set() function with base driver.
3082 * Base driver will call set() function to enable/disable an
3083 * interrupt in DC hardware.
3084 * 2. Register amdgpu_dm_irq_handler().
3085 * Base driver will call amdgpu_dm_irq_handler() for ALL interrupts
3086 * coming from DC hardware.
3087 * amdgpu_dm_irq_handler() will re-direct the interrupt to DC
3088 * for acknowledging and handling.
3091 /* Use VSTARTUP interrupt */
3092 for (i = DCN_1_0__SRCID__DC_D1_OTG_VSTARTUP;
3093 i <= DCN_1_0__SRCID__DC_D1_OTG_VSTARTUP + adev->mode_info.num_crtc - 1;
3095 r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_DCE, i, &adev->crtc_irq);
3098 DRM_ERROR("Failed to add crtc irq id!\n");
3102 int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
3103 int_params.irq_source =
3104 dc_interrupt_to_irq_source(dc, i, 0);
3106 c_irq_params = &adev->dm.vblank_params[int_params.irq_source - DC_IRQ_SOURCE_VBLANK1];
3108 c_irq_params->adev = adev;
3109 c_irq_params->irq_src = int_params.irq_source;
3111 amdgpu_dm_irq_register_interrupt(
3112 adev, &int_params, dm_crtc_high_irq, c_irq_params);
3115 /* Use otg vertical line interrupt */
3116 #if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
3117 for (i = 0; i <= adev->mode_info.num_crtc - 1; i++) {
3118 r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_DCE,
3119 vrtl_int_srcid[i], &adev->vline0_irq);
3122 DRM_ERROR("Failed to add vline0 irq id!\n");
3126 int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
3127 int_params.irq_source =
3128 dc_interrupt_to_irq_source(dc, vrtl_int_srcid[i], 0);
3130 if (int_params.irq_source == DC_IRQ_SOURCE_INVALID) {
3131 DRM_ERROR("Failed to register vline0 irq %d!\n", vrtl_int_srcid[i]);
3135 c_irq_params = &adev->dm.vline0_params[int_params.irq_source
3136 - DC_IRQ_SOURCE_DC1_VLINE0];
3138 c_irq_params->adev = adev;
3139 c_irq_params->irq_src = int_params.irq_source;
3141 amdgpu_dm_irq_register_interrupt(adev, &int_params,
3142 dm_dcn_vertical_interrupt0_high_irq, c_irq_params);
3146 /* Use VUPDATE_NO_LOCK interrupt on DCN, which seems to correspond to
3147 * the regular VUPDATE interrupt on DCE. We want DC_IRQ_SOURCE_VUPDATEx
3148 * to trigger at end of each vblank, regardless of state of the lock,
3149 * matching DCE behaviour.
3151 for (i = DCN_1_0__SRCID__OTG0_IHC_V_UPDATE_NO_LOCK_INTERRUPT;
3152 i <= DCN_1_0__SRCID__OTG0_IHC_V_UPDATE_NO_LOCK_INTERRUPT + adev->mode_info.num_crtc - 1;
3154 r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_DCE, i, &adev->vupdate_irq);
3157 DRM_ERROR("Failed to add vupdate irq id!\n");
3161 int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
3162 int_params.irq_source =
3163 dc_interrupt_to_irq_source(dc, i, 0);
3165 c_irq_params = &adev->dm.vupdate_params[int_params.irq_source - DC_IRQ_SOURCE_VUPDATE1];
3167 c_irq_params->adev = adev;
3168 c_irq_params->irq_src = int_params.irq_source;
3170 amdgpu_dm_irq_register_interrupt(adev, &int_params,
3171 dm_vupdate_high_irq, c_irq_params);
3174 /* Use GRPH_PFLIP interrupt */
3175 for (i = DCN_1_0__SRCID__HUBP0_FLIP_INTERRUPT;
3176 i <= DCN_1_0__SRCID__HUBP0_FLIP_INTERRUPT + adev->mode_info.num_crtc - 1;
3178 r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_DCE, i, &adev->pageflip_irq);
3180 DRM_ERROR("Failed to add page flip irq id!\n");
3184 int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
3185 int_params.irq_source =
3186 dc_interrupt_to_irq_source(dc, i, 0);
3188 c_irq_params = &adev->dm.pflip_params[int_params.irq_source - DC_IRQ_SOURCE_PFLIP_FIRST];
3190 c_irq_params->adev = adev;
3191 c_irq_params->irq_src = int_params.irq_source;
3193 amdgpu_dm_irq_register_interrupt(adev, &int_params,
3194 dm_pflip_high_irq, c_irq_params);
3199 r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_DCE, DCN_1_0__SRCID__DC_HPD1_INT,
3202 DRM_ERROR("Failed to add hpd irq id!\n");
3206 register_hpd_handlers(adev);
3210 /* Register Outbox IRQ sources and initialize IRQ callbacks */
3211 static int register_outbox_irq_handlers(struct amdgpu_device *adev)
3213 struct dc *dc = adev->dm.dc;
3214 struct common_irq_params *c_irq_params;
3215 struct dc_interrupt_params int_params = {0};
3218 int_params.requested_polarity = INTERRUPT_POLARITY_DEFAULT;
3219 int_params.current_polarity = INTERRUPT_POLARITY_DEFAULT;
3221 r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_DCE, DCN_1_0__SRCID__DMCUB_OUTBOX_LOW_PRIORITY_READY_INT,
3222 &adev->dmub_outbox_irq);
3224 DRM_ERROR("Failed to add outbox irq id!\n");
3228 if (dc->ctx->dmub_srv) {
3229 i = DCN_1_0__SRCID__DMCUB_OUTBOX_LOW_PRIORITY_READY_INT;
3230 int_params.int_context = INTERRUPT_LOW_IRQ_CONTEXT;
3231 int_params.irq_source =
3232 dc_interrupt_to_irq_source(dc, i, 0);
3234 c_irq_params = &adev->dm.dmub_outbox_params[0];
3236 c_irq_params->adev = adev;
3237 c_irq_params->irq_src = int_params.irq_source;
3239 amdgpu_dm_irq_register_interrupt(adev, &int_params,
3240 dm_dmub_outbox1_low_irq, c_irq_params);
3248 * Acquires the lock for the atomic state object and returns
3249 * the new atomic state.
3251 * This should only be called during atomic check.
3253 static int dm_atomic_get_state(struct drm_atomic_state *state,
3254 struct dm_atomic_state **dm_state)
3256 struct drm_device *dev = state->dev;
3257 struct amdgpu_device *adev = drm_to_adev(dev);
3258 struct amdgpu_display_manager *dm = &adev->dm;
3259 struct drm_private_state *priv_state;
3264 priv_state = drm_atomic_get_private_obj_state(state, &dm->atomic_obj);
3265 if (IS_ERR(priv_state))
3266 return PTR_ERR(priv_state);
3268 *dm_state = to_dm_atomic_state(priv_state);
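/*
 * Return the DM private object state already contained in @state, or NULL if
 * the DM private object is not part of this atomic state. Unlike
 * dm_atomic_get_state(), this does not acquire the private object lock.
 */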
3273 static struct dm_atomic_state *
3274 dm_atomic_get_new_state(struct drm_atomic_state *state)
3276 struct drm_device *dev = state->dev;
3277 struct amdgpu_device *adev = drm_to_adev(dev);
3278 struct amdgpu_display_manager *dm = &adev->dm;
3279 struct drm_private_obj *obj;
3280 struct drm_private_state *new_obj_state;
3283 for_each_new_private_obj_in_state(state, obj, new_obj_state, i) {
3284 if (obj->funcs == dm->atomic_obj.funcs)
3285 return to_dm_atomic_state(new_obj_state);
3291 static struct drm_private_state *
3292 dm_atomic_duplicate_state(struct drm_private_obj *obj)
3294 struct dm_atomic_state *old_state, *new_state;
3296 new_state = kzalloc(sizeof(*new_state), GFP_KERNEL);
3300 __drm_atomic_helper_private_obj_duplicate_state(obj, &new_state->base);
3302 old_state = to_dm_atomic_state(obj->state);
3304 if (old_state && old_state->context)
3305 new_state->context = dc_copy_state(old_state->context);
3307 if (!new_state->context) {
3312 return &new_state->base;
3315 static void dm_atomic_destroy_state(struct drm_private_obj *obj,
3316 struct drm_private_state *state)
3318 struct dm_atomic_state *dm_state = to_dm_atomic_state(state);
3320 if (dm_state && dm_state->context)
3321 dc_release_state(dm_state->context);
3326 static struct drm_private_state_funcs dm_atomic_state_funcs = {
3327 .atomic_duplicate_state = dm_atomic_duplicate_state,
3328 .atomic_destroy_state = dm_atomic_destroy_state,
3331 static int amdgpu_dm_mode_config_init(struct amdgpu_device *adev)
3333 struct dm_atomic_state *state;
3336 adev->mode_info.mode_config_initialized = true;
3338 adev_to_drm(adev)->mode_config.funcs = (void *)&amdgpu_dm_mode_funcs;
3339 adev_to_drm(adev)->mode_config.helper_private = &amdgpu_dm_mode_config_helperfuncs;
3341 adev_to_drm(adev)->mode_config.max_width = 16384;
3342 adev_to_drm(adev)->mode_config.max_height = 16384;
3344 adev_to_drm(adev)->mode_config.preferred_depth = 24;
3345 adev_to_drm(adev)->mode_config.prefer_shadow = 1;
3346 /* indicates support for immediate flip */
3347 adev_to_drm(adev)->mode_config.async_page_flip = true;
3349 adev_to_drm(adev)->mode_config.fb_base = adev->gmc.aper_base;
3351 state = kzalloc(sizeof(*state), GFP_KERNEL);
3355 state->context = dc_create_state(adev->dm.dc);
3356 if (!state->context) {
3361 dc_resource_state_copy_construct_current(adev->dm.dc, state->context);
3363 drm_atomic_private_obj_init(adev_to_drm(adev),
3364 &adev->dm.atomic_obj,
3366 &dm_atomic_state_funcs);
3368 r = amdgpu_display_modeset_create_props(adev);
3370 dc_release_state(state->context);
3375 r = amdgpu_dm_audio_init(adev);
3377 dc_release_state(state->context);
3385 #define AMDGPU_DM_DEFAULT_MIN_BACKLIGHT 12
3386 #define AMDGPU_DM_DEFAULT_MAX_BACKLIGHT 255
3387 #define AUX_BL_DEFAULT_TRANSITION_TIME_MS 50
3389 #if defined(CONFIG_BACKLIGHT_CLASS_DEVICE) ||\
3390 defined(CONFIG_BACKLIGHT_CLASS_DEVICE_MODULE)
3392 static void amdgpu_dm_update_backlight_caps(struct amdgpu_display_manager *dm)
3394 #if defined(CONFIG_ACPI)
3395 struct amdgpu_dm_backlight_caps caps;
3397 memset(&caps, 0, sizeof(caps));
3399 if (dm->backlight_caps.caps_valid)
3402 amdgpu_acpi_get_backlight_caps(dm->adev, &caps);
3403 if (caps.caps_valid) {
3404 dm->backlight_caps.caps_valid = true;
3405 if (caps.aux_support)
3407 dm->backlight_caps.min_input_signal = caps.min_input_signal;
3408 dm->backlight_caps.max_input_signal = caps.max_input_signal;
3410 dm->backlight_caps.min_input_signal =
3411 AMDGPU_DM_DEFAULT_MIN_BACKLIGHT;
3412 dm->backlight_caps.max_input_signal =
3413 AMDGPU_DM_DEFAULT_MAX_BACKLIGHT;
3416 if (dm->backlight_caps.aux_support)
3419 dm->backlight_caps.min_input_signal = AMDGPU_DM_DEFAULT_MIN_BACKLIGHT;
3420 dm->backlight_caps.max_input_signal = AMDGPU_DM_DEFAULT_MAX_BACKLIGHT;
3424 static int get_brightness_range(const struct amdgpu_dm_backlight_caps *caps,
3425 unsigned *min, unsigned *max)
3430 if (caps->aux_support) {
3431 // Firmware limits are in nits, DC API wants millinits.
3432 *max = 1000 * caps->aux_max_input_signal;
3433 *min = 1000 * caps->aux_min_input_signal;
3435 // Firmware limits are 8-bit, PWM control is 16-bit.
3436 *max = 0x101 * caps->max_input_signal;
3437 *min = 0x101 * caps->min_input_signal;
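/*
 * Rough worked example for the user -> firmware rescale below (assuming
 * AMDGPU_MAX_BL_LEVEL == 255 and the default 8-bit caps of 12..255, so
 * min = 12 * 0x101 = 3084 and max = 255 * 0x101 = 65535): a user brightness
 * of 128 maps to 3084 + DIV_ROUND_CLOSEST((65535 - 3084) * 128, 255) = 34432.
 */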
3442 static u32 convert_brightness_from_user(const struct amdgpu_dm_backlight_caps *caps,
3443 uint32_t brightness)
3447 if (!get_brightness_range(caps, &min, &max))
3450 // Rescale 0..255 to min..max
3451 return min + DIV_ROUND_CLOSEST((max - min) * brightness,
3452 AMDGPU_MAX_BL_LEVEL);
3455 static u32 convert_brightness_to_user(const struct amdgpu_dm_backlight_caps *caps,
3456 uint32_t brightness)
3460 if (!get_brightness_range(caps, &min, &max))
3463 if (brightness < min)
3465 // Rescale min..max to 0..255
3466 return DIV_ROUND_CLOSEST(AMDGPU_MAX_BL_LEVEL * (brightness - min),
3470 static int amdgpu_dm_backlight_update_status(struct backlight_device *bd)
3472 struct amdgpu_display_manager *dm = bl_get_data(bd);
3473 struct amdgpu_dm_backlight_caps caps;
3474 struct dc_link *link[AMDGPU_DM_MAX_NUM_EDP];
3479 amdgpu_dm_update_backlight_caps(dm);
3480 caps = dm->backlight_caps;
3482 for (i = 0; i < dm->num_of_edps; i++)
3483 link[i] = (struct dc_link *)dm->backlight_link[i];
3485 brightness = convert_brightness_from_user(&caps, bd->props.brightness);
3486 // Change brightness based on AUX property
3487 if (caps.aux_support) {
3488 for (i = 0; i < dm->num_of_edps; i++) {
3489 rc = dc_link_set_backlight_level_nits(link[i], true, brightness,
3490 AUX_BL_DEFAULT_TRANSITION_TIME_MS);
3492 DRM_ERROR("DM: Failed to update backlight via AUX on eDP[%d]\n", i);
3497 for (i = 0; i < dm->num_of_edps; i++) {
3498 rc = dc_link_set_backlight_level(dm->backlight_link[i], brightness, 0);
3500 DRM_ERROR("DM: Failed to update backlight on eDP[%d]\n", i);
3509 static int amdgpu_dm_backlight_get_brightness(struct backlight_device *bd)
3511 struct amdgpu_display_manager *dm = bl_get_data(bd);
3512 struct amdgpu_dm_backlight_caps caps;
3514 amdgpu_dm_update_backlight_caps(dm);
3515 caps = dm->backlight_caps;
3517 if (caps.aux_support) {
3518 struct dc_link *link = (struct dc_link *)dm->backlight_link[0];
3522 rc = dc_link_get_backlight_level_nits(link, &avg, &peak);
3524 return bd->props.brightness;
3525 return convert_brightness_to_user(&caps, avg);
3527 int ret = dc_link_get_backlight_level(dm->backlight_link[0]);
3529 if (ret == DC_ERROR_UNEXPECTED)
3530 return bd->props.brightness;
3531 return convert_brightness_to_user(&caps, ret);
3535 static const struct backlight_ops amdgpu_dm_backlight_ops = {
3536 .options = BL_CORE_SUSPENDRESUME,
3537 .get_brightness = amdgpu_dm_backlight_get_brightness,
3538 .update_status = amdgpu_dm_backlight_update_status,
3542 amdgpu_dm_register_backlight_device(struct amdgpu_display_manager *dm)
3545 struct backlight_properties props = { 0 };
3547 amdgpu_dm_update_backlight_caps(dm);
3549 props.max_brightness = AMDGPU_MAX_BL_LEVEL;
3550 props.brightness = AMDGPU_MAX_BL_LEVEL;
3551 props.type = BACKLIGHT_RAW;
3553 snprintf(bl_name, sizeof(bl_name), "amdgpu_bl%d",
3554 adev_to_drm(dm->adev)->primary->index);
3556 dm->backlight_dev = backlight_device_register(bl_name,
3557 adev_to_drm(dm->adev)->dev,
3559 &amdgpu_dm_backlight_ops,
3562 if (IS_ERR(dm->backlight_dev))
3563 DRM_ERROR("DM: Backlight registration failed!\n");
3565 DRM_DEBUG_DRIVER("DM: Registered Backlight device: %s\n", bl_name);
3570 static int initialize_plane(struct amdgpu_display_manager *dm,
3571 struct amdgpu_mode_info *mode_info, int plane_id,
3572 enum drm_plane_type plane_type,
3573 const struct dc_plane_cap *plane_cap)
3575 struct drm_plane *plane;
3576 unsigned long possible_crtcs;
3579 plane = kzalloc(sizeof(struct drm_plane), GFP_KERNEL);
3581 DRM_ERROR("KMS: Failed to allocate plane\n");
3584 plane->type = plane_type;
3587 * HACK: IGT tests expect that the primary plane for a CRTC
3588 * can only have one possible CRTC. Only expose support for
3589 * any CRTC if the plane is not going to be used as a primary plane
3590 * for a CRTC - e.g. for overlay or underlay planes.
3592 possible_crtcs = 1 << plane_id;
3593 if (plane_id >= dm->dc->caps.max_streams)
3594 possible_crtcs = 0xff;
3596 ret = amdgpu_dm_plane_init(dm, plane, possible_crtcs, plane_cap);
3599 DRM_ERROR("KMS: Failed to initialize plane\n");
3605 mode_info->planes[plane_id] = plane;
3611 static void register_backlight_device(struct amdgpu_display_manager *dm,
3612 struct dc_link *link)
3614 #if defined(CONFIG_BACKLIGHT_CLASS_DEVICE) ||\
3615 defined(CONFIG_BACKLIGHT_CLASS_DEVICE_MODULE)
3617 if ((link->connector_signal & (SIGNAL_TYPE_EDP | SIGNAL_TYPE_LVDS)) &&
3618 link->type != dc_connection_none) {
3620 * Even if registration fails, we should continue with
3621 * DM initialization, because not having backlight control
3622 * is better than a black screen.
3624 if (!dm->backlight_dev)
3625 amdgpu_dm_register_backlight_device(dm);
3627 if (dm->backlight_dev) {
3628 dm->backlight_link[dm->num_of_edps] = link;
3637 * In this architecture, the association
3638 * connector -> encoder -> crtc
3639 * is not really required. The crtc and connector will hold the
3640 * display_index as an abstraction to use with the DAL component.
3642 * Returns 0 on success
3644 static int amdgpu_dm_initialize_drm_device(struct amdgpu_device *adev)
3646 struct amdgpu_display_manager *dm = &adev->dm;
3648 struct amdgpu_dm_connector *aconnector = NULL;
3649 struct amdgpu_encoder *aencoder = NULL;
3650 struct amdgpu_mode_info *mode_info = &adev->mode_info;
3652 int32_t primary_planes;
3653 enum dc_connection_type new_connection_type = dc_connection_none;
3654 const struct dc_plane_cap *plane;
3656 dm->display_indexes_num = dm->dc->caps.max_streams;
3657 /* Update the actually used number of CRTCs */
3658 adev->mode_info.num_crtc = adev->dm.display_indexes_num;
3660 link_cnt = dm->dc->caps.max_links;
3661 if (amdgpu_dm_mode_config_init(dm->adev)) {
3662 DRM_ERROR("DM: Failed to initialize mode config\n");
3666 /* There is one primary plane per CRTC */
3667 primary_planes = dm->dc->caps.max_streams;
3668 ASSERT(primary_planes <= AMDGPU_MAX_PLANES);
3671 * Initialize primary planes, implicit planes for legacy IOCTLS.
3672 * Order is reversed to match iteration order in atomic check.
3674 for (i = (primary_planes - 1); i >= 0; i--) {
3675 plane = &dm->dc->caps.planes[i];
3677 if (initialize_plane(dm, mode_info, i,
3678 DRM_PLANE_TYPE_PRIMARY, plane)) {
3679 DRM_ERROR("KMS: Failed to initialize primary plane\n");
3685 * Initialize overlay planes, index starting after primary planes.
3686 * These planes have a higher DRM index than the primary planes since
3687 * they should be considered as having a higher z-order.
3688 * Order is reversed to match iteration order in atomic check.
3690 * Only support DCN for now, and only expose one so we don't encourage
3691 * userspace to use up all the pipes.
3693 for (i = 0; i < dm->dc->caps.max_planes; ++i) {
3694 struct dc_plane_cap *plane = &dm->dc->caps.planes[i];
3696 if (plane->type != DC_PLANE_TYPE_DCN_UNIVERSAL)
3699 if (!plane->blends_with_above || !plane->blends_with_below)
3702 if (!plane->pixel_format_support.argb8888)
3705 if (initialize_plane(dm, NULL, primary_planes + i,
3706 DRM_PLANE_TYPE_OVERLAY, plane)) {
3707 DRM_ERROR("KMS: Failed to initialize overlay plane\n");
3711 /* Only create one overlay plane. */
3715 for (i = 0; i < dm->dc->caps.max_streams; i++)
3716 if (amdgpu_dm_crtc_init(dm, mode_info->planes[i], i)) {
3717 DRM_ERROR("KMS: Failed to initialize crtc\n");
3721 #if defined(CONFIG_DRM_AMD_DC_DCN)
3722 /* Use Outbox interrupt */
3723 switch (adev->asic_type) {
3724 case CHIP_SIENNA_CICHLID:
3725 case CHIP_NAVY_FLOUNDER:
3727 if (register_outbox_irq_handlers(dm->adev)) {
3728 DRM_ERROR("DM: Failed to initialize IRQ\n");
3733 DRM_DEBUG_KMS("Unsupported ASIC type for outbox: 0x%X\n", adev->asic_type);
3737 /* loops over all connectors on the board */
3738 for (i = 0; i < link_cnt; i++) {
3739 struct dc_link *link = NULL;
3741 if (i > AMDGPU_DM_MAX_DISPLAY_INDEX) {
3743 "KMS: Cannot support more than %d display indexes\n",
3744 AMDGPU_DM_MAX_DISPLAY_INDEX);
3748 aconnector = kzalloc(sizeof(*aconnector), GFP_KERNEL);
3752 aencoder = kzalloc(sizeof(*aencoder), GFP_KERNEL);
3756 if (amdgpu_dm_encoder_init(dm->ddev, aencoder, i)) {
3757 DRM_ERROR("KMS: Failed to initialize encoder\n");
3761 if (amdgpu_dm_connector_init(dm, aconnector, i, aencoder)) {
3762 DRM_ERROR("KMS: Failed to initialize connector\n");
3766 link = dc_get_link_at_index(dm->dc, i);
3768 if (!dc_link_detect_sink(link, &new_connection_type))
3769 DRM_ERROR("KMS: Failed to detect connector\n");
3771 if (aconnector->base.force && new_connection_type == dc_connection_none) {
3772 emulated_link_detect(link);
3773 amdgpu_dm_update_connector_after_detect(aconnector);
3775 } else if (dc_link_detect(link, DETECT_REASON_BOOT)) {
3776 amdgpu_dm_update_connector_after_detect(aconnector);
3777 register_backlight_device(dm, link);
3778 if (amdgpu_dc_feature_mask & DC_PSR_MASK)
3779 amdgpu_dm_set_psr_caps(link);
3785 /* Software is initialized. Now we can register interrupt handlers. */
3786 switch (adev->asic_type) {
3787 #if defined(CONFIG_DRM_AMD_DC_SI)
3792 if (dce60_register_irq_handlers(dm->adev)) {
3793 DRM_ERROR("DM: Failed to initialize IRQ\n");
3807 case CHIP_POLARIS11:
3808 case CHIP_POLARIS10:
3809 case CHIP_POLARIS12:
3814 if (dce110_register_irq_handlers(dm->adev)) {
3815 DRM_ERROR("DM: Failed to initialize IRQ\n");
3819 #if defined(CONFIG_DRM_AMD_DC_DCN)
3825 case CHIP_SIENNA_CICHLID:
3826 case CHIP_NAVY_FLOUNDER:
3827 case CHIP_DIMGREY_CAVEFISH:
3828 case CHIP_BEIGE_GOBY:
3830 if (dcn10_register_irq_handlers(dm->adev)) {
3831 DRM_ERROR("DM: Failed to initialize IRQ\n");
3837 DRM_ERROR("Unsupported ASIC type: 0x%X\n", adev->asic_type);
3849 static void amdgpu_dm_destroy_drm_device(struct amdgpu_display_manager *dm)
3851 drm_mode_config_cleanup(dm->ddev);
3852 drm_atomic_private_obj_fini(&dm->atomic_obj);
3856 /******************************************************************************
3857 * amdgpu_display_funcs functions
3858 *****************************************************************************/
3861 * dm_bandwidth_update - program display watermarks
3863 * @adev: amdgpu_device pointer
3865 * Calculate and program the display watermarks and line buffer allocation.
3867 static void dm_bandwidth_update(struct amdgpu_device *adev)
3869 /* TODO: implement later */
3872 static const struct amdgpu_display_funcs dm_display_funcs = {
3873 .bandwidth_update = dm_bandwidth_update, /* called unconditionally */
3874 .vblank_get_counter = dm_vblank_get_counter,/* called unconditionally */
3875 .backlight_set_level = NULL, /* never called for DC */
3876 .backlight_get_level = NULL, /* never called for DC */
3877 .hpd_sense = NULL,/* called unconditionally */
3878 .hpd_set_polarity = NULL, /* called unconditionally */
3879 .hpd_get_gpio_reg = NULL, /* VBIOS parsing. DAL does it. */
3880 .page_flip_get_scanoutpos =
3881 dm_crtc_get_scanoutpos,/* called unconditionally */
3882 .add_encoder = NULL, /* VBIOS parsing. DAL does it. */
3883 .add_connector = NULL, /* VBIOS parsing. DAL does it. */
3886 #if defined(CONFIG_DEBUG_KERNEL_DC)
3888 static ssize_t s3_debug_store(struct device *device,
3889 struct device_attribute *attr,
3895 struct drm_device *drm_dev = dev_get_drvdata(device);
3896 struct amdgpu_device *adev = drm_to_adev(drm_dev);
3898 ret = kstrtoint(buf, 0, &s3_state);
3903 drm_kms_helper_hotplug_event(adev_to_drm(adev));
3908 return ret == 0 ? count : 0;
3911 DEVICE_ATTR_WO(s3_debug);
3915 static int dm_early_init(void *handle)
3917 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
3919 switch (adev->asic_type) {
3920 #if defined(CONFIG_DRM_AMD_DC_SI)
3924 adev->mode_info.num_crtc = 6;
3925 adev->mode_info.num_hpd = 6;
3926 adev->mode_info.num_dig = 6;
3929 adev->mode_info.num_crtc = 2;
3930 adev->mode_info.num_hpd = 2;
3931 adev->mode_info.num_dig = 2;
3936 adev->mode_info.num_crtc = 6;
3937 adev->mode_info.num_hpd = 6;
3938 adev->mode_info.num_dig = 6;
3941 adev->mode_info.num_crtc = 4;
3942 adev->mode_info.num_hpd = 6;
3943 adev->mode_info.num_dig = 7;
3947 adev->mode_info.num_crtc = 2;
3948 adev->mode_info.num_hpd = 6;
3949 adev->mode_info.num_dig = 6;
3953 adev->mode_info.num_crtc = 6;
3954 adev->mode_info.num_hpd = 6;
3955 adev->mode_info.num_dig = 7;
3958 adev->mode_info.num_crtc = 3;
3959 adev->mode_info.num_hpd = 6;
3960 adev->mode_info.num_dig = 9;
3963 adev->mode_info.num_crtc = 2;
3964 adev->mode_info.num_hpd = 6;
3965 adev->mode_info.num_dig = 9;
3967 case CHIP_POLARIS11:
3968 case CHIP_POLARIS12:
3969 adev->mode_info.num_crtc = 5;
3970 adev->mode_info.num_hpd = 5;
3971 adev->mode_info.num_dig = 5;
3973 case CHIP_POLARIS10:
3975 adev->mode_info.num_crtc = 6;
3976 adev->mode_info.num_hpd = 6;
3977 adev->mode_info.num_dig = 6;
3982 adev->mode_info.num_crtc = 6;
3983 adev->mode_info.num_hpd = 6;
3984 adev->mode_info.num_dig = 6;
3986 #if defined(CONFIG_DRM_AMD_DC_DCN)
3990 adev->mode_info.num_crtc = 4;
3991 adev->mode_info.num_hpd = 4;
3992 adev->mode_info.num_dig = 4;
3996 case CHIP_SIENNA_CICHLID:
3997 case CHIP_NAVY_FLOUNDER:
3998 adev->mode_info.num_crtc = 6;
3999 adev->mode_info.num_hpd = 6;
4000 adev->mode_info.num_dig = 6;
4003 case CHIP_DIMGREY_CAVEFISH:
4004 adev->mode_info.num_crtc = 5;
4005 adev->mode_info.num_hpd = 5;
4006 adev->mode_info.num_dig = 5;
4008 case CHIP_BEIGE_GOBY:
4009 adev->mode_info.num_crtc = 2;
4010 adev->mode_info.num_hpd = 2;
4011 adev->mode_info.num_dig = 2;
4015 DRM_ERROR("Unsupported ASIC type: 0x%X\n", adev->asic_type);
4019 amdgpu_dm_set_irq_funcs(adev);
4021 if (adev->mode_info.funcs == NULL)
4022 adev->mode_info.funcs = &dm_display_funcs;
4025 * Note: Do NOT change adev->audio_endpt_rreg and
4026 * adev->audio_endpt_wreg because they are initialised in
4027 * amdgpu_device_init()
4029 #if defined(CONFIG_DEBUG_KERNEL_DC)
4031 adev_to_drm(adev)->dev,
4032 &dev_attr_s3_debug);
4038 static bool modeset_required(struct drm_crtc_state *crtc_state,
4039 struct dc_stream_state *new_stream,
4040 struct dc_stream_state *old_stream)
4042 return crtc_state->active && drm_atomic_crtc_needs_modeset(crtc_state);
4045 static bool modereset_required(struct drm_crtc_state *crtc_state)
4047 return !crtc_state->active && drm_atomic_crtc_needs_modeset(crtc_state);
4050 static void amdgpu_dm_encoder_destroy(struct drm_encoder *encoder)
4052 drm_encoder_cleanup(encoder);
4056 static const struct drm_encoder_funcs amdgpu_dm_encoder_funcs = {
4057 .destroy = amdgpu_dm_encoder_destroy,
4061 static void get_min_max_dc_plane_scaling(struct drm_device *dev,
4062 struct drm_framebuffer *fb,
4063 int *min_downscale, int *max_upscale)
4065 struct amdgpu_device *adev = drm_to_adev(dev);
4066 struct dc *dc = adev->dm.dc;
4067 /* Caps for all supported planes are the same on DCE and DCN 1 - 3 */
4068 struct dc_plane_cap *plane_cap = &dc->caps.planes[0];
4070 switch (fb->format->format) {
4071 case DRM_FORMAT_P010:
4072 case DRM_FORMAT_NV12:
4073 case DRM_FORMAT_NV21:
4074 *max_upscale = plane_cap->max_upscale_factor.nv12;
4075 *min_downscale = plane_cap->max_downscale_factor.nv12;
4078 case DRM_FORMAT_XRGB16161616F:
4079 case DRM_FORMAT_ARGB16161616F:
4080 case DRM_FORMAT_XBGR16161616F:
4081 case DRM_FORMAT_ABGR16161616F:
4082 *max_upscale = plane_cap->max_upscale_factor.fp16;
4083 *min_downscale = plane_cap->max_downscale_factor.fp16;
4087 *max_upscale = plane_cap->max_upscale_factor.argb8888;
4088 *min_downscale = plane_cap->max_downscale_factor.argb8888;
4093 * A factor of 1 in the plane_cap means that scaling is not allowed, i.e. use a
4094 * scaling factor of 1.0 == 1000 units.
4096 if (*max_upscale == 1)
4097 *max_upscale = 1000;
4099 if (*min_downscale == 1)
4100 *min_downscale = 1000;
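/*
 * The scaling limits above and below are expressed in units of 0.001, so a
 * value of 1000 means 1.0x. The fallback limits used in fill_dc_scaling_info
 * when no plane caps are available (min_downscale = 250, max_upscale = 16000)
 * therefore allow shrinking to 1/4 of the source size and enlarging up to 16x.
 */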
4104 static int fill_dc_scaling_info(const struct drm_plane_state *state,
4105 struct dc_scaling_info *scaling_info)
4107 int scale_w, scale_h, min_downscale, max_upscale;
4109 memset(scaling_info, 0, sizeof(*scaling_info));
4111 /* Source is fixed 16.16 but we ignore mantissa for now... */
4112 scaling_info->src_rect.x = state->src_x >> 16;
4113 scaling_info->src_rect.y = state->src_y >> 16;
4116 * For reasons we don't (yet) fully understand a non-zero
4117 * src_y coordinate into an NV12 buffer can cause a
4118 * system hang. To avoid hangs (and maybe be overly cautious)
4119 * let's reject both non-zero src_x and src_y.
4121 * We currently know of only one use-case to reproduce a
4122 * scenario with non-zero src_x and src_y for NV12, which
4123 * is to gesture the YouTube Android app into full screen
4127 state->fb->format->format == DRM_FORMAT_NV12 &&
4128 (scaling_info->src_rect.x != 0 ||
4129 scaling_info->src_rect.y != 0))
4132 scaling_info->src_rect.width = state->src_w >> 16;
4133 if (scaling_info->src_rect.width == 0)
4136 scaling_info->src_rect.height = state->src_h >> 16;
4137 if (scaling_info->src_rect.height == 0)
4140 scaling_info->dst_rect.x = state->crtc_x;
4141 scaling_info->dst_rect.y = state->crtc_y;
4143 if (state->crtc_w == 0)
4146 scaling_info->dst_rect.width = state->crtc_w;
4148 if (state->crtc_h == 0)
4151 scaling_info->dst_rect.height = state->crtc_h;
4153 /* DRM doesn't specify clipping on destination output. */
4154 scaling_info->clip_rect = scaling_info->dst_rect;
4156 /* Validate scaling per-format with DC plane caps */
4157 if (state->plane && state->plane->dev && state->fb) {
4158 get_min_max_dc_plane_scaling(state->plane->dev, state->fb,
4159 &min_downscale, &max_upscale);
4161 min_downscale = 250;
4162 max_upscale = 16000;
4165 scale_w = scaling_info->dst_rect.width * 1000 /
4166 scaling_info->src_rect.width;
4168 if (scale_w < min_downscale || scale_w > max_upscale)
4171 scale_h = scaling_info->dst_rect.height * 1000 /
4172 scaling_info->src_rect.height;
4174 if (scale_h < min_downscale || scale_h > max_upscale)
4178 * The "scaling_quality" can be ignored for now; quality = 0 makes DC
4179 * assume reasonable defaults based on the format.
4186 fill_gfx8_tiling_info_from_flags(union dc_tiling_info *tiling_info,
4187 uint64_t tiling_flags)
4189 /* Fill GFX8 params */
4190 if (AMDGPU_TILING_GET(tiling_flags, ARRAY_MODE) == DC_ARRAY_2D_TILED_THIN1) {
4191 unsigned int bankw, bankh, mtaspect, tile_split, num_banks;
4193 bankw = AMDGPU_TILING_GET(tiling_flags, BANK_WIDTH);
4194 bankh = AMDGPU_TILING_GET(tiling_flags, BANK_HEIGHT);
4195 mtaspect = AMDGPU_TILING_GET(tiling_flags, MACRO_TILE_ASPECT);
4196 tile_split = AMDGPU_TILING_GET(tiling_flags, TILE_SPLIT);
4197 num_banks = AMDGPU_TILING_GET(tiling_flags, NUM_BANKS);
4199 /* XXX fix me for VI */
4200 tiling_info->gfx8.num_banks = num_banks;
4201 tiling_info->gfx8.array_mode =
4202 DC_ARRAY_2D_TILED_THIN1;
4203 tiling_info->gfx8.tile_split = tile_split;
4204 tiling_info->gfx8.bank_width = bankw;
4205 tiling_info->gfx8.bank_height = bankh;
4206 tiling_info->gfx8.tile_aspect = mtaspect;
4207 tiling_info->gfx8.tile_mode =
4208 DC_ADDR_SURF_MICRO_TILING_DISPLAY;
4209 } else if (AMDGPU_TILING_GET(tiling_flags, ARRAY_MODE)
4210 == DC_ARRAY_1D_TILED_THIN1) {
4211 tiling_info->gfx8.array_mode = DC_ARRAY_1D_TILED_THIN1;
4214 tiling_info->gfx8.pipe_config =
4215 AMDGPU_TILING_GET(tiling_flags, PIPE_CONFIG);
4219 fill_gfx9_tiling_info_from_device(const struct amdgpu_device *adev,
4220 union dc_tiling_info *tiling_info)
4222 tiling_info->gfx9.num_pipes =
4223 adev->gfx.config.gb_addr_config_fields.num_pipes;
4224 tiling_info->gfx9.num_banks =
4225 adev->gfx.config.gb_addr_config_fields.num_banks;
4226 tiling_info->gfx9.pipe_interleave =
4227 adev->gfx.config.gb_addr_config_fields.pipe_interleave_size;
4228 tiling_info->gfx9.num_shader_engines =
4229 adev->gfx.config.gb_addr_config_fields.num_se;
4230 tiling_info->gfx9.max_compressed_frags =
4231 adev->gfx.config.gb_addr_config_fields.max_compress_frags;
4232 tiling_info->gfx9.num_rb_per_se =
4233 adev->gfx.config.gb_addr_config_fields.num_rb_per_se;
4234 tiling_info->gfx9.shaderEnable = 1;
4235 if (adev->asic_type == CHIP_SIENNA_CICHLID ||
4236 adev->asic_type == CHIP_NAVY_FLOUNDER ||
4237 adev->asic_type == CHIP_DIMGREY_CAVEFISH ||
4238 adev->asic_type == CHIP_BEIGE_GOBY ||
4239 adev->asic_type == CHIP_VANGOGH)
4240 tiling_info->gfx9.num_pkrs = adev->gfx.config.gb_addr_config_fields.num_pkrs;
4244 validate_dcc(struct amdgpu_device *adev,
4245 const enum surface_pixel_format format,
4246 const enum dc_rotation_angle rotation,
4247 const union dc_tiling_info *tiling_info,
4248 const struct dc_plane_dcc_param *dcc,
4249 const struct dc_plane_address *address,
4250 const struct plane_size *plane_size)
4252 struct dc *dc = adev->dm.dc;
4253 struct dc_dcc_surface_param input;
4254 struct dc_surface_dcc_cap output;
4256 memset(&input, 0, sizeof(input));
4257 memset(&output, 0, sizeof(output));
4262 if (format >= SURFACE_PIXEL_FORMAT_VIDEO_BEGIN ||
4263 !dc->cap_funcs.get_dcc_compression_cap)
4266 input.format = format;
4267 input.surface_size.width = plane_size->surface_size.width;
4268 input.surface_size.height = plane_size->surface_size.height;
4269 input.swizzle_mode = tiling_info->gfx9.swizzle;
4271 if (rotation == ROTATION_ANGLE_0 || rotation == ROTATION_ANGLE_180)
4272 input.scan = SCAN_DIRECTION_HORIZONTAL;
4273 else if (rotation == ROTATION_ANGLE_90 || rotation == ROTATION_ANGLE_270)
4274 input.scan = SCAN_DIRECTION_VERTICAL;
4276 if (!dc->cap_funcs.get_dcc_compression_cap(dc, &input, &output))
4279 if (!output.capable)
4282 if (dcc->independent_64b_blks == 0 &&
4283 output.grph.rgb.independent_64b_blks != 0)
4290 modifier_has_dcc(uint64_t modifier)
4292 return IS_AMD_FMT_MOD(modifier) && AMD_FMT_MOD_GET(DCC, modifier);
4296 modifier_gfx9_swizzle_mode(uint64_t modifier)
4298 if (modifier == DRM_FORMAT_MOD_LINEAR)
4301 return AMD_FMT_MOD_GET(TILE, modifier);
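/*
 * Illustrative example for the modifier helpers above (hypothetical
 * modifier value, not from the original source): a GFX9 S_X swizzle with
 * DCC could be encoded as
 *
 *   uint64_t mod = AMD_FMT_MOD |
 *                  AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S_X) |
 *                  AMD_FMT_MOD_SET(DCC, 1);
 *
 * for which modifier_has_dcc(mod) is true and
 * modifier_gfx9_swizzle_mode(mod) extracts AMD_FMT_MOD_TILE_GFX9_64K_S_X,
 * whereas DRM_FORMAT_MOD_LINEAR is not an AMD modifier and carries no DCC.
 */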
4304 static const struct drm_format_info *
4305 amd_get_format_info(const struct drm_mode_fb_cmd2 *cmd)
4307 return amdgpu_lookup_format_info(cmd->pixel_format, cmd->modifier[0]);
4311 fill_gfx9_tiling_info_from_modifier(const struct amdgpu_device *adev,
4312 union dc_tiling_info *tiling_info,
4315 unsigned int mod_bank_xor_bits = AMD_FMT_MOD_GET(BANK_XOR_BITS, modifier);
4316 unsigned int mod_pipe_xor_bits = AMD_FMT_MOD_GET(PIPE_XOR_BITS, modifier);
4317 unsigned int pkrs_log2 = AMD_FMT_MOD_GET(PACKERS, modifier);
4318 unsigned int pipes_log2 = min(4u, mod_pipe_xor_bits);
4320 fill_gfx9_tiling_info_from_device(adev, tiling_info);
4322 if (!IS_AMD_FMT_MOD(modifier))
4325 tiling_info->gfx9.num_pipes = 1u << pipes_log2;
4326 tiling_info->gfx9.num_shader_engines = 1u << (mod_pipe_xor_bits - pipes_log2);
4328 if (adev->family >= AMDGPU_FAMILY_NV) {
4329 tiling_info->gfx9.num_pkrs = 1u << pkrs_log2;
4331 tiling_info->gfx9.num_banks = 1u << mod_bank_xor_bits;
4333 /* for DCC we know it isn't rb aligned, so rb_per_se doesn't matter. */
4337 enum dm_micro_swizzle {
4338 MICRO_SWIZZLE_Z = 0,
4339 MICRO_SWIZZLE_S = 1,
4340 MICRO_SWIZZLE_D = 2,
4344 static bool dm_plane_format_mod_supported(struct drm_plane *plane,
4348 struct amdgpu_device *adev = drm_to_adev(plane->dev);
4349 const struct drm_format_info *info = drm_format_info(format);
4352 enum dm_micro_swizzle microtile = modifier_gfx9_swizzle_mode(modifier) & 3;
4358 * We always have to allow these modifiers:
4359 * 1. Core DRM checks for LINEAR support if userspace does not provide modifiers.
4360 * 2. Not passing any modifiers is the same as explicitly passing INVALID.
4362 if (modifier == DRM_FORMAT_MOD_LINEAR ||
4363 modifier == DRM_FORMAT_MOD_INVALID) {
4367 /* Check that the modifier is on the list of the plane's supported modifiers. */
4368 for (i = 0; i < plane->modifier_count; i++) {
4369 if (modifier == plane->modifiers[i])
4372 if (i == plane->modifier_count)
4376 * For D swizzle the canonical modifier depends on the bpp, so check
4379 if (AMD_FMT_MOD_GET(TILE_VERSION, modifier) == AMD_FMT_MOD_TILE_VER_GFX9 &&
4380 adev->family >= AMDGPU_FAMILY_NV) {
4381 if (microtile == MICRO_SWIZZLE_D && info->cpp[0] == 4)
4385 if (adev->family >= AMDGPU_FAMILY_RV && microtile == MICRO_SWIZZLE_D &&
4389 if (modifier_has_dcc(modifier)) {
4390 /* Per radeonsi comments 16/64 bpp are more complicated. */
4391 if (info->cpp[0] != 4)
4393 /* We support multi-planar formats, but not when combined with
4394 * additional DCC metadata planes. */
4395 if (info->num_planes > 1)
4403 add_modifier(uint64_t **mods, uint64_t *size, uint64_t *cap, uint64_t mod)
4408 if (*cap - *size < 1) {
4409 uint64_t new_cap = *cap * 2;
4410 uint64_t *new_mods = kmalloc(new_cap * sizeof(uint64_t), GFP_KERNEL);
4418 memcpy(new_mods, *mods, sizeof(uint64_t) * *size);
4424 (*mods)[*size] = mod;
4429 add_gfx9_modifiers(const struct amdgpu_device *adev,
4430 uint64_t **mods, uint64_t *size, uint64_t *capacity)
4432 int pipes = ilog2(adev->gfx.config.gb_addr_config_fields.num_pipes);
4433 int pipe_xor_bits = min(8, pipes +
4434 ilog2(adev->gfx.config.gb_addr_config_fields.num_se));
4435 int bank_xor_bits = min(8 - pipe_xor_bits,
4436 ilog2(adev->gfx.config.gb_addr_config_fields.num_banks));
4437 int rb = ilog2(adev->gfx.config.gb_addr_config_fields.num_se) +
4438 ilog2(adev->gfx.config.gb_addr_config_fields.num_rb_per_se);
4441 if (adev->family == AMDGPU_FAMILY_RV) {
4442 /* Raven2 and later */
4443 bool has_constant_encode = adev->asic_type > CHIP_RAVEN || adev->external_rev_id >= 0x81;
4446 * No _D DCC swizzles yet because we only allow 32bpp, which
4447 * doesn't support _D on DCN
4450 if (has_constant_encode) {
4451 add_modifier(mods, size, capacity, AMD_FMT_MOD |
4452 AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S_X) |
4453 AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9) |
4454 AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
4455 AMD_FMT_MOD_SET(BANK_XOR_BITS, bank_xor_bits) |
4456 AMD_FMT_MOD_SET(DCC, 1) |
4457 AMD_FMT_MOD_SET(DCC_INDEPENDENT_64B, 1) |
4458 AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_64B) |
4459 AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 1));
4462 add_modifier(mods, size, capacity, AMD_FMT_MOD |
4463 AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S_X) |
4464 AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9) |
4465 AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
4466 AMD_FMT_MOD_SET(BANK_XOR_BITS, bank_xor_bits) |
4467 AMD_FMT_MOD_SET(DCC, 1) |
4468 AMD_FMT_MOD_SET(DCC_INDEPENDENT_64B, 1) |
4469 AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_64B) |
4470 AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 0));
4472 if (has_constant_encode) {
4473 add_modifier(mods, size, capacity, AMD_FMT_MOD |
4474 AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S_X) |
4475 AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9) |
4476 AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
4477 AMD_FMT_MOD_SET(BANK_XOR_BITS, bank_xor_bits) |
4478 AMD_FMT_MOD_SET(DCC, 1) |
4479 AMD_FMT_MOD_SET(DCC_RETILE, 1) |
4480 AMD_FMT_MOD_SET(DCC_INDEPENDENT_64B, 1) |
4481 AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_64B) |
4483 AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 1) |
4484 AMD_FMT_MOD_SET(RB, rb) |
4485 AMD_FMT_MOD_SET(PIPE, pipes));
4488 add_modifier(mods, size, capacity, AMD_FMT_MOD |
4489 AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S_X) |
4490 AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9) |
4491 AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
4492 AMD_FMT_MOD_SET(BANK_XOR_BITS, bank_xor_bits) |
4493 AMD_FMT_MOD_SET(DCC, 1) |
4494 AMD_FMT_MOD_SET(DCC_RETILE, 1) |
4495 AMD_FMT_MOD_SET(DCC_INDEPENDENT_64B, 1) |
4496 AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_64B) |
4497 AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 0) |
4498 AMD_FMT_MOD_SET(RB, rb) |
4499 AMD_FMT_MOD_SET(PIPE, pipes));
4503 * Only supported for 64bpp on Raven, will be filtered on format in
4504 * dm_plane_format_mod_supported.
4506 add_modifier(mods, size, capacity, AMD_FMT_MOD |
4507 AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_D_X) |
4508 AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9) |
4509 AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
4510 AMD_FMT_MOD_SET(BANK_XOR_BITS, bank_xor_bits));
4512 if (adev->family == AMDGPU_FAMILY_RV) {
4513 add_modifier(mods, size, capacity, AMD_FMT_MOD |
4514 AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S_X) |
4515 AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9) |
4516 AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
4517 AMD_FMT_MOD_SET(BANK_XOR_BITS, bank_xor_bits));
4521 * Only supported for 64bpp on Raven, will be filtered on format in
4522 * dm_plane_format_mod_supported.
4524 add_modifier(mods, size, capacity, AMD_FMT_MOD |
4525 AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_D) |
4526 AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9));
4528 if (adev->family == AMDGPU_FAMILY_RV) {
4529 add_modifier(mods, size, capacity, AMD_FMT_MOD |
4530 AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S) |
4531 AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9));
4536 add_gfx10_1_modifiers(const struct amdgpu_device *adev,
4537 uint64_t **mods, uint64_t *size, uint64_t *capacity)
4539 int pipe_xor_bits = ilog2(adev->gfx.config.gb_addr_config_fields.num_pipes);
4541 add_modifier(mods, size, capacity, AMD_FMT_MOD |
4542 AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_R_X) |
4543 AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10) |
4544 AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
4545 AMD_FMT_MOD_SET(DCC, 1) |
4546 AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 1) |
4547 AMD_FMT_MOD_SET(DCC_INDEPENDENT_64B, 1) |
4548 AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_64B));
4550 add_modifier(mods, size, capacity, AMD_FMT_MOD |
4551 AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_R_X) |
4552 AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10) |
4553 AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
4554 AMD_FMT_MOD_SET(DCC, 1) |
4555 AMD_FMT_MOD_SET(DCC_RETILE, 1) |
4556 AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 1) |
4557 AMD_FMT_MOD_SET(DCC_INDEPENDENT_64B, 1) |
4558 AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_64B));
4560 add_modifier(mods, size, capacity, AMD_FMT_MOD |
4561 AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_R_X) |
4562 AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10) |
4563 AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits));
4565 add_modifier(mods, size, capacity, AMD_FMT_MOD |
4566 AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S_X) |
4567 AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10) |
4568 AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits));
4571 /* Only supported for 64bpp, will be filtered in dm_plane_format_mod_supported */
4572 add_modifier(mods, size, capacity, AMD_FMT_MOD |
4573 AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_D) |
4574 AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9));
4576 add_modifier(mods, size, capacity, AMD_FMT_MOD |
4577 AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S) |
4578 AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9));
4582 add_gfx10_3_modifiers(const struct amdgpu_device *adev,
4583 uint64_t **mods, uint64_t *size, uint64_t *capacity)
4585 int pipe_xor_bits = ilog2(adev->gfx.config.gb_addr_config_fields.num_pipes);
4586 int pkrs = ilog2(adev->gfx.config.gb_addr_config_fields.num_pkrs);
4588 add_modifier(mods, size, capacity, AMD_FMT_MOD |
4589 AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_R_X) |
4590 AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10_RBPLUS) |
4591 AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
4592 AMD_FMT_MOD_SET(PACKERS, pkrs) |
4593 AMD_FMT_MOD_SET(DCC, 1) |
4594 AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 1) |
4595 AMD_FMT_MOD_SET(DCC_INDEPENDENT_64B, 1) |
4596 AMD_FMT_MOD_SET(DCC_INDEPENDENT_128B, 1) |
4597 AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_64B));
4599 add_modifier(mods, size, capacity, AMD_FMT_MOD |
4600 AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_R_X) |
4601 AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10_RBPLUS) |
4602 AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
4603 AMD_FMT_MOD_SET(PACKERS, pkrs) |
4604 AMD_FMT_MOD_SET(DCC, 1) |
4605 AMD_FMT_MOD_SET(DCC_RETILE, 1) |
4606 AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 1) |
4607 AMD_FMT_MOD_SET(DCC_INDEPENDENT_64B, 1) |
4608 AMD_FMT_MOD_SET(DCC_INDEPENDENT_128B, 1) |
4609 AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_64B));
4611 add_modifier(mods, size, capacity, AMD_FMT_MOD |
4612 AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_R_X) |
4613 AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10_RBPLUS) |
4614 AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
4615 AMD_FMT_MOD_SET(PACKERS, pkrs));
4617 add_modifier(mods, size, capacity, AMD_FMT_MOD |
4618 AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S_X) |
4619 AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10_RBPLUS) |
4620 AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
4621 AMD_FMT_MOD_SET(PACKERS, pkrs));
4623 /* Only supported for 64bpp, will be filtered in dm_plane_format_mod_supported */
4624 add_modifier(mods, size, capacity, AMD_FMT_MOD |
4625 AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_D) |
4626 AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9));
4628 add_modifier(mods, size, capacity, AMD_FMT_MOD |
4629 AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S) |
4630 AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9));
4634 get_plane_modifiers(const struct amdgpu_device *adev, unsigned int plane_type, uint64_t **mods)
4636 uint64_t size = 0, capacity = 128;
4639 /* We have not hooked up any pre-GFX9 modifiers. */
4640 if (adev->family < AMDGPU_FAMILY_AI)
4643 *mods = kmalloc(capacity * sizeof(uint64_t), GFP_KERNEL);
4645 if (plane_type == DRM_PLANE_TYPE_CURSOR) {
4646 add_modifier(mods, &size, &capacity, DRM_FORMAT_MOD_LINEAR);
4647 add_modifier(mods, &size, &capacity, DRM_FORMAT_MOD_INVALID);
4648 return *mods ? 0 : -ENOMEM;
4651 switch (adev->family) {
4652 case AMDGPU_FAMILY_AI:
4653 case AMDGPU_FAMILY_RV:
4654 add_gfx9_modifiers(adev, mods, &size, &capacity);
4656 case AMDGPU_FAMILY_NV:
4657 case AMDGPU_FAMILY_VGH:
4658 if (adev->asic_type >= CHIP_SIENNA_CICHLID)
4659 add_gfx10_3_modifiers(adev, mods, &size, &capacity);
4661 add_gfx10_1_modifiers(adev, mods, &size, &capacity);
4665 add_modifier(mods, &size, &capacity, DRM_FORMAT_MOD_LINEAR);
4667 /* INVALID marks the end of the list. */
4668 add_modifier(mods, &size, &capacity, DRM_FORMAT_MOD_INVALID);
4677 fill_gfx9_plane_attributes_from_modifiers(struct amdgpu_device *adev,
4678 const struct amdgpu_framebuffer *afb,
4679 const enum surface_pixel_format format,
4680 const enum dc_rotation_angle rotation,
4681 const struct plane_size *plane_size,
4682 union dc_tiling_info *tiling_info,
4683 struct dc_plane_dcc_param *dcc,
4684 struct dc_plane_address *address,
4685 const bool force_disable_dcc)
4687 const uint64_t modifier = afb->base.modifier;
4690 fill_gfx9_tiling_info_from_modifier(adev, tiling_info, modifier);
4691 tiling_info->gfx9.swizzle = modifier_gfx9_swizzle_mode(modifier);
4693 if (modifier_has_dcc(modifier) && !force_disable_dcc) {
4694 uint64_t dcc_address = afb->address + afb->base.offsets[1];
4697 dcc->meta_pitch = afb->base.pitches[1];
4698 dcc->independent_64b_blks = AMD_FMT_MOD_GET(DCC_INDEPENDENT_64B, modifier);
4700 address->grph.meta_addr.low_part = lower_32_bits(dcc_address);
4701 address->grph.meta_addr.high_part = upper_32_bits(dcc_address);
4704 ret = validate_dcc(adev, format, rotation, tiling_info, dcc, address, plane_size);
4712 fill_plane_buffer_attributes(struct amdgpu_device *adev,
4713 const struct amdgpu_framebuffer *afb,
4714 const enum surface_pixel_format format,
4715 const enum dc_rotation_angle rotation,
4716 const uint64_t tiling_flags,
4717 union dc_tiling_info *tiling_info,
4718 struct plane_size *plane_size,
4719 struct dc_plane_dcc_param *dcc,
4720 struct dc_plane_address *address,
4722 bool force_disable_dcc)
4724 const struct drm_framebuffer *fb = &afb->base;
4727 memset(tiling_info, 0, sizeof(*tiling_info));
4728 memset(plane_size, 0, sizeof(*plane_size));
4729 memset(dcc, 0, sizeof(*dcc));
4730 memset(address, 0, sizeof(*address));
4732 address->tmz_surface = tmz_surface;
4734 if (format < SURFACE_PIXEL_FORMAT_VIDEO_BEGIN) {
4735 uint64_t addr = afb->address + fb->offsets[0];
4737 plane_size->surface_size.x = 0;
4738 plane_size->surface_size.y = 0;
4739 plane_size->surface_size.width = fb->width;
4740 plane_size->surface_size.height = fb->height;
4741 plane_size->surface_pitch =
4742 fb->pitches[0] / fb->format->cpp[0];
4744 address->type = PLN_ADDR_TYPE_GRAPHICS;
4745 address->grph.addr.low_part = lower_32_bits(addr);
4746 address->grph.addr.high_part = upper_32_bits(addr);
4747 } else if (format < SURFACE_PIXEL_FORMAT_INVALID) {
4748 uint64_t luma_addr = afb->address + fb->offsets[0];
4749 uint64_t chroma_addr = afb->address + fb->offsets[1];
4751 plane_size->surface_size.x = 0;
4752 plane_size->surface_size.y = 0;
4753 plane_size->surface_size.width = fb->width;
4754 plane_size->surface_size.height = fb->height;
4755 plane_size->surface_pitch =
4756 fb->pitches[0] / fb->format->cpp[0];
4758 plane_size->chroma_size.x = 0;
4759 plane_size->chroma_size.y = 0;
4760 /* TODO: set these based on surface format */
4761 plane_size->chroma_size.width = fb->width / 2;
4762 plane_size->chroma_size.height = fb->height / 2;
4764 plane_size->chroma_pitch =
4765 fb->pitches[1] / fb->format->cpp[1];
4767 address->type = PLN_ADDR_TYPE_VIDEO_PROGRESSIVE;
4768 address->video_progressive.luma_addr.low_part =
4769 lower_32_bits(luma_addr);
4770 address->video_progressive.luma_addr.high_part =
4771 upper_32_bits(luma_addr);
4772 address->video_progressive.chroma_addr.low_part =
4773 lower_32_bits(chroma_addr);
4774 address->video_progressive.chroma_addr.high_part =
4775 upper_32_bits(chroma_addr);
4778 if (adev->family >= AMDGPU_FAMILY_AI) {
4779 ret = fill_gfx9_plane_attributes_from_modifiers(adev, afb, format,
4780 rotation, plane_size,
4787 fill_gfx8_tiling_info_from_flags(tiling_info, tiling_flags);
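/*
 * Illustrative example for fill_plane_buffer_attributes() above (values
 * assumed, not from the original source): for a 1920x1080 NV12
 * framebuffer, the luma plane sits at fb->offsets[0] and the interleaved
 * CbCr plane at fb->offsets[1]; the chroma size is computed as 960x540
 * (half the luma dimensions for 4:2:0) and chroma_pitch is
 * fb->pitches[1] divided by the chroma cpp.
 */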
4794 fill_blending_from_plane_state(const struct drm_plane_state *plane_state,
4795 bool *per_pixel_alpha, bool *global_alpha,
4796 int *global_alpha_value)
4798 *per_pixel_alpha = false;
4799 *global_alpha = false;
4800 *global_alpha_value = 0xff;
4802 if (plane_state->plane->type != DRM_PLANE_TYPE_OVERLAY)
4805 if (plane_state->pixel_blend_mode == DRM_MODE_BLEND_PREMULTI) {
4806 static const uint32_t alpha_formats[] = {
4807 DRM_FORMAT_ARGB8888,
4808 DRM_FORMAT_RGBA8888,
4809 DRM_FORMAT_ABGR8888,
4811 uint32_t format = plane_state->fb->format->format;
4814 for (i = 0; i < ARRAY_SIZE(alpha_formats); ++i) {
4815 if (format == alpha_formats[i]) {
4816 *per_pixel_alpha = true;
4822 if (plane_state->alpha < 0xffff) {
4823 *global_alpha = true;
4824 *global_alpha_value = plane_state->alpha >> 8;
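/*
 * Illustrative example for fill_blending_from_plane_state() above (values
 * assumed, not from the original source): plane_state->alpha is the
 * 16-bit DRM plane alpha, where 0xffff means fully opaque. A value of
 * 0x8080 is below 0xffff, so global alpha is enabled and the value handed
 * to DC is 0x8080 >> 8 = 0x80, i.e. roughly 50%.
 */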
4829 fill_plane_color_attributes(const struct drm_plane_state *plane_state,
4830 const enum surface_pixel_format format,
4831 enum dc_color_space *color_space)
4835 *color_space = COLOR_SPACE_SRGB;
4837 /* DRM color properties only affect non-RGB formats. */
4838 if (format < SURFACE_PIXEL_FORMAT_VIDEO_BEGIN)
4841 full_range = (plane_state->color_range == DRM_COLOR_YCBCR_FULL_RANGE);
4843 switch (plane_state->color_encoding) {
4844 case DRM_COLOR_YCBCR_BT601:
4846 *color_space = COLOR_SPACE_YCBCR601;
4848 *color_space = COLOR_SPACE_YCBCR601_LIMITED;
4851 case DRM_COLOR_YCBCR_BT709:
4853 *color_space = COLOR_SPACE_YCBCR709;
4855 *color_space = COLOR_SPACE_YCBCR709_LIMITED;
4858 case DRM_COLOR_YCBCR_BT2020:
4860 *color_space = COLOR_SPACE_2020_YCBCR;
4873 fill_dc_plane_info_and_addr(struct amdgpu_device *adev,
4874 const struct drm_plane_state *plane_state,
4875 const uint64_t tiling_flags,
4876 struct dc_plane_info *plane_info,
4877 struct dc_plane_address *address,
4879 bool force_disable_dcc)
4881 const struct drm_framebuffer *fb = plane_state->fb;
4882 const struct amdgpu_framebuffer *afb =
4883 to_amdgpu_framebuffer(plane_state->fb);
4886 memset(plane_info, 0, sizeof(*plane_info));
4888 switch (fb->format->format) {
4890 plane_info->format =
4891 SURFACE_PIXEL_FORMAT_GRPH_PALETA_256_COLORS;
4893 case DRM_FORMAT_RGB565:
4894 plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_RGB565;
4896 case DRM_FORMAT_XRGB8888:
4897 case DRM_FORMAT_ARGB8888:
4898 plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ARGB8888;
4900 case DRM_FORMAT_XRGB2101010:
4901 case DRM_FORMAT_ARGB2101010:
4902 plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ARGB2101010;
4904 case DRM_FORMAT_XBGR2101010:
4905 case DRM_FORMAT_ABGR2101010:
4906 plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ABGR2101010;
4908 case DRM_FORMAT_XBGR8888:
4909 case DRM_FORMAT_ABGR8888:
4910 plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ABGR8888;
4912 case DRM_FORMAT_NV21:
4913 plane_info->format = SURFACE_PIXEL_FORMAT_VIDEO_420_YCbCr;
4915 case DRM_FORMAT_NV12:
4916 plane_info->format = SURFACE_PIXEL_FORMAT_VIDEO_420_YCrCb;
4918 case DRM_FORMAT_P010:
4919 plane_info->format = SURFACE_PIXEL_FORMAT_VIDEO_420_10bpc_YCrCb;
4921 case DRM_FORMAT_XRGB16161616F:
4922 case DRM_FORMAT_ARGB16161616F:
4923 plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ARGB16161616F;
4925 case DRM_FORMAT_XBGR16161616F:
4926 case DRM_FORMAT_ABGR16161616F:
4927 plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ABGR16161616F;
4931 "Unsupported screen format %p4cc\n",
4932 &fb->format->format);
4936 switch (plane_state->rotation & DRM_MODE_ROTATE_MASK) {
4937 case DRM_MODE_ROTATE_0:
4938 plane_info->rotation = ROTATION_ANGLE_0;
4940 case DRM_MODE_ROTATE_90:
4941 plane_info->rotation = ROTATION_ANGLE_90;
4943 case DRM_MODE_ROTATE_180:
4944 plane_info->rotation = ROTATION_ANGLE_180;
4946 case DRM_MODE_ROTATE_270:
4947 plane_info->rotation = ROTATION_ANGLE_270;
4950 plane_info->rotation = ROTATION_ANGLE_0;
4954 plane_info->visible = true;
4955 plane_info->stereo_format = PLANE_STEREO_FORMAT_NONE;
4957 plane_info->layer_index = 0;
4959 ret = fill_plane_color_attributes(plane_state, plane_info->format,
4960 &plane_info->color_space);
4964 ret = fill_plane_buffer_attributes(adev, afb, plane_info->format,
4965 plane_info->rotation, tiling_flags,
4966 &plane_info->tiling_info,
4967 &plane_info->plane_size,
4968 &plane_info->dcc, address, tmz_surface,
4973 fill_blending_from_plane_state(
4974 plane_state, &plane_info->per_pixel_alpha,
4975 &plane_info->global_alpha, &plane_info->global_alpha_value);
4980 static int fill_dc_plane_attributes(struct amdgpu_device *adev,
4981 struct dc_plane_state *dc_plane_state,
4982 struct drm_plane_state *plane_state,
4983 struct drm_crtc_state *crtc_state)
4985 struct dm_crtc_state *dm_crtc_state = to_dm_crtc_state(crtc_state);
4986 struct amdgpu_framebuffer *afb = (struct amdgpu_framebuffer *)plane_state->fb;
4987 struct dc_scaling_info scaling_info;
4988 struct dc_plane_info plane_info;
4990 bool force_disable_dcc = false;
4992 ret = fill_dc_scaling_info(plane_state, &scaling_info);
4996 dc_plane_state->src_rect = scaling_info.src_rect;
4997 dc_plane_state->dst_rect = scaling_info.dst_rect;
4998 dc_plane_state->clip_rect = scaling_info.clip_rect;
4999 dc_plane_state->scaling_quality = scaling_info.scaling_quality;
5001 force_disable_dcc = adev->asic_type == CHIP_RAVEN && adev->in_suspend;
5002 ret = fill_dc_plane_info_and_addr(adev, plane_state,
5005 &dc_plane_state->address,
5011 dc_plane_state->format = plane_info.format;
5012 dc_plane_state->color_space = plane_info.color_space;
5014 dc_plane_state->plane_size = plane_info.plane_size;
5015 dc_plane_state->rotation = plane_info.rotation;
5016 dc_plane_state->horizontal_mirror = plane_info.horizontal_mirror;
5017 dc_plane_state->stereo_format = plane_info.stereo_format;
5018 dc_plane_state->tiling_info = plane_info.tiling_info;
5019 dc_plane_state->visible = plane_info.visible;
5020 dc_plane_state->per_pixel_alpha = plane_info.per_pixel_alpha;
5021 dc_plane_state->global_alpha = plane_info.global_alpha;
5022 dc_plane_state->global_alpha_value = plane_info.global_alpha_value;
5023 dc_plane_state->dcc = plane_info.dcc;
5024 dc_plane_state->layer_index = plane_info.layer_index; // Always returns 0
5025 dc_plane_state->flip_int_enabled = true;
5028 * Always set input transfer function, since plane state is refreshed
5031 ret = amdgpu_dm_update_plane_color_mgmt(dm_crtc_state, dc_plane_state);
5038 static void update_stream_scaling_settings(const struct drm_display_mode *mode,
5039 const struct dm_connector_state *dm_state,
5040 struct dc_stream_state *stream)
5042 enum amdgpu_rmx_type rmx_type;
5044 struct rect src = { 0 }; /* viewport in composition space */
5045 struct rect dst = { 0 }; /* stream addressable area */
5047 /* no mode. nothing to be done */
5051 /* Full screen scaling by default */
5052 src.width = mode->hdisplay;
5053 src.height = mode->vdisplay;
5054 dst.width = stream->timing.h_addressable;
5055 dst.height = stream->timing.v_addressable;
5058 rmx_type = dm_state->scaling;
5059 if (rmx_type == RMX_ASPECT || rmx_type == RMX_OFF) {
5060 if (src.width * dst.height <
5061 src.height * dst.width) {
5062 /* height needs less upscaling/more downscaling */
5063 dst.width = src.width *
5064 dst.height / src.height;
5066 /* width needs less upscaling/more downscaling */
5067 dst.height = src.height *
5068 dst.width / src.width;
5070 } else if (rmx_type == RMX_CENTER) {
5074 dst.x = (stream->timing.h_addressable - dst.width) / 2;
5075 dst.y = (stream->timing.v_addressable - dst.height) / 2;
5077 if (dm_state->underscan_enable) {
5078 dst.x += dm_state->underscan_hborder / 2;
5079 dst.y += dm_state->underscan_vborder / 2;
5080 dst.width -= dm_state->underscan_hborder;
5081 dst.height -= dm_state->underscan_vborder;
5088 DRM_DEBUG_KMS("Destination Rectangle x:%d y:%d width:%d height:%d\n",
5089 dst.x, dst.y, dst.width, dst.height);
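/*
 * Illustrative example for update_stream_scaling_settings() above (values
 * assumed, not from the original source): a 1920x1080 mode on a 1680x1050
 * panel with RMX_ASPECT keeps the destination width at 1680 and shrinks
 * the height to 1080 * 1680 / 1920 = 945; the destination is then
 * centered at dst.y = (1050 - 945) / 2 = 52, letterboxing the image.
 */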
5093 static enum dc_color_depth
5094 convert_color_depth_from_display_info(const struct drm_connector *connector,
5095 bool is_y420, int requested_bpc)
5102 /* Cap display bpc based on HDMI 2.0 HF-VSDB */
5103 if (connector->display_info.hdmi.y420_dc_modes & DRM_EDID_YCBCR420_DC_48)
5105 else if (connector->display_info.hdmi.y420_dc_modes & DRM_EDID_YCBCR420_DC_36)
5107 else if (connector->display_info.hdmi.y420_dc_modes & DRM_EDID_YCBCR420_DC_30)
5110 bpc = (uint8_t)connector->display_info.bpc;
5111 /* Assume 8 bpc by default if no bpc is specified. */
5112 bpc = bpc ? bpc : 8;
5115 if (requested_bpc > 0) {
5117 * Cap display bpc based on the user requested value.
5119 * The value for state->max_bpc may not be correctly updated
5120 * depending on when the connector gets added to the state
5121 * or if this was called outside of atomic check, so it
5122 * can't be used directly.
5124 bpc = min_t(u8, bpc, requested_bpc);
5126 /* Round down to the nearest even number. */
5127 bpc = bpc - (bpc & 1);
5133 * Temporary workaround: DRM doesn't parse color depth for
5134 * EDID revisions before 1.4.
5135 * TODO: Fix EDID parsing
5137 return COLOR_DEPTH_888;
5139 return COLOR_DEPTH_666;
5141 return COLOR_DEPTH_888;
5143 return COLOR_DEPTH_101010;
5145 return COLOR_DEPTH_121212;
5147 return COLOR_DEPTH_141414;
5149 return COLOR_DEPTH_161616;
5151 return COLOR_DEPTH_UNDEFINED;
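/*
 * Illustrative example for convert_color_depth_from_display_info() above
 * (values assumed, not from the original source): a sink reporting 12 bpc
 * in its EDID with a user-requested max bpc of 10 gives min(12, 10) = 10,
 * which is already even and maps to COLOR_DEPTH_101010; a requested value
 * of 11 would first be rounded down to 10 by the "bpc - (bpc & 1)" step.
 */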
5155 static enum dc_aspect_ratio
5156 get_aspect_ratio(const struct drm_display_mode *mode_in)
5158 /* 1-1 mapping, since both enums follow the HDMI spec. */
5159 return (enum dc_aspect_ratio) mode_in->picture_aspect_ratio;
5162 static enum dc_color_space
5163 get_output_color_space(const struct dc_crtc_timing *dc_crtc_timing)
5165 enum dc_color_space color_space = COLOR_SPACE_SRGB;
5167 switch (dc_crtc_timing->pixel_encoding) {
5168 case PIXEL_ENCODING_YCBCR422:
5169 case PIXEL_ENCODING_YCBCR444:
5170 case PIXEL_ENCODING_YCBCR420:
5173 * 27030 kHz is the separation point between HDTV and SDTV
5174 * according to the HDMI spec; we use YCbCr709 and YCbCr601 respectively
5177 if (dc_crtc_timing->pix_clk_100hz > 270300) {
5178 if (dc_crtc_timing->flags.Y_ONLY)
5180 COLOR_SPACE_YCBCR709_LIMITED;
5182 color_space = COLOR_SPACE_YCBCR709;
5184 if (dc_crtc_timing->flags.Y_ONLY)
5186 COLOR_SPACE_YCBCR601_LIMITED;
5188 color_space = COLOR_SPACE_YCBCR601;
5193 case PIXEL_ENCODING_RGB:
5194 color_space = COLOR_SPACE_SRGB;
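/*
 * Illustrative example for get_output_color_space() above (values
 * assumed, not from the original source): the 270300 threshold is in
 * units of 100 Hz, i.e. 27.03 MHz. A 1080p60 timing with a 148.5 MHz
 * pixel clock (pix_clk_100hz = 1485000) is above the threshold and gets
 * YCbCr709, while an SDTV timing below 27.03 MHz gets YCbCr601.
 */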
5205 static bool adjust_colour_depth_from_display_info(
5206 struct dc_crtc_timing *timing_out,
5207 const struct drm_display_info *info)
5209 enum dc_color_depth depth = timing_out->display_color_depth;
5212 normalized_clk = timing_out->pix_clk_100hz / 10;
5213 /* YCbCr 4:2:0 requires additional adjustment of 1/2 */
5214 if (timing_out->pixel_encoding == PIXEL_ENCODING_YCBCR420)
5215 normalized_clk /= 2;
5216 /* Adjust the pixel clock per the HDMI spec, based on colour depth */
5218 case COLOR_DEPTH_888:
5220 case COLOR_DEPTH_101010:
5221 normalized_clk = (normalized_clk * 30) / 24;
5223 case COLOR_DEPTH_121212:
5224 normalized_clk = (normalized_clk * 36) / 24;
5226 case COLOR_DEPTH_161616:
5227 normalized_clk = (normalized_clk * 48) / 24;
5230 /* The above depths are the only ones valid for HDMI. */
5233 if (normalized_clk <= info->max_tmds_clock) {
5234 timing_out->display_color_depth = depth;
5237 } while (--depth > COLOR_DEPTH_666);
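/*
 * Illustrative example for adjust_colour_depth_from_display_info() above
 * (values assumed, not from the original source): a 4k60 RGB timing has a
 * normalized_clk of roughly 594000 kHz. At 10 bpc this becomes
 * 594000 * 30 / 24 = 742500 kHz, which exceeds a typical HDMI 2.0
 * max_tmds_clock of 600000 kHz, so the loop retries at 8 bpc, where
 * 594000 kHz fits and COLOR_DEPTH_888 is accepted.
 */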
5241 static void fill_stream_properties_from_drm_display_mode(
5242 struct dc_stream_state *stream,
5243 const struct drm_display_mode *mode_in,
5244 const struct drm_connector *connector,
5245 const struct drm_connector_state *connector_state,
5246 const struct dc_stream_state *old_stream,
5249 struct dc_crtc_timing *timing_out = &stream->timing;
5250 const struct drm_display_info *info = &connector->display_info;
5251 struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
5252 struct hdmi_vendor_infoframe hv_frame;
5253 struct hdmi_avi_infoframe avi_frame;
5255 memset(&hv_frame, 0, sizeof(hv_frame));
5256 memset(&avi_frame, 0, sizeof(avi_frame));
5258 timing_out->h_border_left = 0;
5259 timing_out->h_border_right = 0;
5260 timing_out->v_border_top = 0;
5261 timing_out->v_border_bottom = 0;
5262 /* TODO: un-hardcode */
5263 if (drm_mode_is_420_only(info, mode_in)
5264 && stream->signal == SIGNAL_TYPE_HDMI_TYPE_A)
5265 timing_out->pixel_encoding = PIXEL_ENCODING_YCBCR420;
5266 else if (drm_mode_is_420_also(info, mode_in)
5267 && aconnector->force_yuv420_output)
5268 timing_out->pixel_encoding = PIXEL_ENCODING_YCBCR420;
5269 else if ((connector->display_info.color_formats & DRM_COLOR_FORMAT_YCRCB444)
5270 && stream->signal == SIGNAL_TYPE_HDMI_TYPE_A)
5271 timing_out->pixel_encoding = PIXEL_ENCODING_YCBCR444;
5273 timing_out->pixel_encoding = PIXEL_ENCODING_RGB;
5275 timing_out->timing_3d_format = TIMING_3D_FORMAT_NONE;
5276 timing_out->display_color_depth = convert_color_depth_from_display_info(
5278 (timing_out->pixel_encoding == PIXEL_ENCODING_YCBCR420),
5280 timing_out->scan_type = SCANNING_TYPE_NODATA;
5281 timing_out->hdmi_vic = 0;
5284 timing_out->vic = old_stream->timing.vic;
5285 timing_out->flags.HSYNC_POSITIVE_POLARITY = old_stream->timing.flags.HSYNC_POSITIVE_POLARITY;
5286 timing_out->flags.VSYNC_POSITIVE_POLARITY = old_stream->timing.flags.VSYNC_POSITIVE_POLARITY;
5288 timing_out->vic = drm_match_cea_mode(mode_in);
5289 if (mode_in->flags & DRM_MODE_FLAG_PHSYNC)
5290 timing_out->flags.HSYNC_POSITIVE_POLARITY = 1;
5291 if (mode_in->flags & DRM_MODE_FLAG_PVSYNC)
5292 timing_out->flags.VSYNC_POSITIVE_POLARITY = 1;
5295 if (stream->signal == SIGNAL_TYPE_HDMI_TYPE_A) {
5296 drm_hdmi_avi_infoframe_from_display_mode(&avi_frame, (struct drm_connector *)connector, mode_in);
5297 timing_out->vic = avi_frame.video_code;
5298 drm_hdmi_vendor_infoframe_from_display_mode(&hv_frame, (struct drm_connector *)connector, mode_in);
5299 timing_out->hdmi_vic = hv_frame.vic;
5302 if (is_freesync_video_mode(mode_in, aconnector)) {
5303 timing_out->h_addressable = mode_in->hdisplay;
5304 timing_out->h_total = mode_in->htotal;
5305 timing_out->h_sync_width = mode_in->hsync_end - mode_in->hsync_start;
5306 timing_out->h_front_porch = mode_in->hsync_start - mode_in->hdisplay;
5307 timing_out->v_total = mode_in->vtotal;
5308 timing_out->v_addressable = mode_in->vdisplay;
5309 timing_out->v_front_porch = mode_in->vsync_start - mode_in->vdisplay;
5310 timing_out->v_sync_width = mode_in->vsync_end - mode_in->vsync_start;
5311 timing_out->pix_clk_100hz = mode_in->clock * 10;
5313 timing_out->h_addressable = mode_in->crtc_hdisplay;
5314 timing_out->h_total = mode_in->crtc_htotal;
5315 timing_out->h_sync_width = mode_in->crtc_hsync_end - mode_in->crtc_hsync_start;
5316 timing_out->h_front_porch = mode_in->crtc_hsync_start - mode_in->crtc_hdisplay;
5317 timing_out->v_total = mode_in->crtc_vtotal;
5318 timing_out->v_addressable = mode_in->crtc_vdisplay;
5319 timing_out->v_front_porch = mode_in->crtc_vsync_start - mode_in->crtc_vdisplay;
5320 timing_out->v_sync_width = mode_in->crtc_vsync_end - mode_in->crtc_vsync_start;
5321 timing_out->pix_clk_100hz = mode_in->crtc_clock * 10;
5324 timing_out->aspect_ratio = get_aspect_ratio(mode_in);
5326 stream->output_color_space = get_output_color_space(timing_out);
5328 stream->out_transfer_func->type = TF_TYPE_PREDEFINED;
5329 stream->out_transfer_func->tf = TRANSFER_FUNCTION_SRGB;
5330 if (stream->signal == SIGNAL_TYPE_HDMI_TYPE_A) {
5331 if (!adjust_colour_depth_from_display_info(timing_out, info) &&
5332 drm_mode_is_420_also(info, mode_in) &&
5333 timing_out->pixel_encoding != PIXEL_ENCODING_YCBCR420) {
5334 timing_out->pixel_encoding = PIXEL_ENCODING_YCBCR420;
5335 adjust_colour_depth_from_display_info(timing_out, info);
5340 static void fill_audio_info(struct audio_info *audio_info,
5341 const struct drm_connector *drm_connector,
5342 const struct dc_sink *dc_sink)
5345 int cea_revision = 0;
5346 const struct dc_edid_caps *edid_caps = &dc_sink->edid_caps;
5348 audio_info->manufacture_id = edid_caps->manufacturer_id;
5349 audio_info->product_id = edid_caps->product_id;
5351 cea_revision = drm_connector->display_info.cea_rev;
5353 strscpy(audio_info->display_name,
5354 edid_caps->display_name,
5355 AUDIO_INFO_DISPLAY_NAME_SIZE_IN_CHARS);
5357 if (cea_revision >= 3) {
5358 audio_info->mode_count = edid_caps->audio_mode_count;
5360 for (i = 0; i < audio_info->mode_count; ++i) {
5361 audio_info->modes[i].format_code =
5362 (enum audio_format_code)
5363 (edid_caps->audio_modes[i].format_code);
5364 audio_info->modes[i].channel_count =
5365 edid_caps->audio_modes[i].channel_count;
5366 audio_info->modes[i].sample_rates.all =
5367 edid_caps->audio_modes[i].sample_rate;
5368 audio_info->modes[i].sample_size =
5369 edid_caps->audio_modes[i].sample_size;
5373 audio_info->flags.all = edid_caps->speaker_flags;
5375 /* TODO: We only check for the progressive mode, check for interlace mode too */
5376 if (drm_connector->latency_present[0]) {
5377 audio_info->video_latency = drm_connector->video_latency[0];
5378 audio_info->audio_latency = drm_connector->audio_latency[0];
5381 /* TODO: For DP, video and audio latency should be calculated from DPCD caps */
5386 copy_crtc_timing_for_drm_display_mode(const struct drm_display_mode *src_mode,
5387 struct drm_display_mode *dst_mode)
5389 dst_mode->crtc_hdisplay = src_mode->crtc_hdisplay;
5390 dst_mode->crtc_vdisplay = src_mode->crtc_vdisplay;
5391 dst_mode->crtc_clock = src_mode->crtc_clock;
5392 dst_mode->crtc_hblank_start = src_mode->crtc_hblank_start;
5393 dst_mode->crtc_hblank_end = src_mode->crtc_hblank_end;
5394 dst_mode->crtc_hsync_start = src_mode->crtc_hsync_start;
5395 dst_mode->crtc_hsync_end = src_mode->crtc_hsync_end;
5396 dst_mode->crtc_htotal = src_mode->crtc_htotal;
5397 dst_mode->crtc_hskew = src_mode->crtc_hskew;
5398 dst_mode->crtc_vblank_start = src_mode->crtc_vblank_start;
5399 dst_mode->crtc_vblank_end = src_mode->crtc_vblank_end;
5400 dst_mode->crtc_vsync_start = src_mode->crtc_vsync_start;
5401 dst_mode->crtc_vsync_end = src_mode->crtc_vsync_end;
5402 dst_mode->crtc_vtotal = src_mode->crtc_vtotal;
5406 decide_crtc_timing_for_drm_display_mode(struct drm_display_mode *drm_mode,
5407 const struct drm_display_mode *native_mode,
5410 if (scale_enabled) {
5411 copy_crtc_timing_for_drm_display_mode(native_mode, drm_mode);
5412 } else if (native_mode->clock == drm_mode->clock &&
5413 native_mode->htotal == drm_mode->htotal &&
5414 native_mode->vtotal == drm_mode->vtotal) {
5415 copy_crtc_timing_for_drm_display_mode(native_mode, drm_mode);
5417 /* neither scaling enabled nor an amdgpu-inserted mode; no need to patch */
5421 static struct dc_sink *
5422 create_fake_sink(struct amdgpu_dm_connector *aconnector)
5424 struct dc_sink_init_data sink_init_data = { 0 };
5425 struct dc_sink *sink = NULL;
5426 sink_init_data.link = aconnector->dc_link;
5427 sink_init_data.sink_signal = aconnector->dc_link->connector_signal;
5429 sink = dc_sink_create(&sink_init_data);
5431 DRM_ERROR("Failed to create sink!\n");
5434 sink->sink_signal = SIGNAL_TYPE_VIRTUAL;
5439 static void set_multisync_trigger_params(
5440 struct dc_stream_state *stream)
5442 struct dc_stream_state *master = NULL;
5444 if (stream->triggered_crtc_reset.enabled) {
5445 master = stream->triggered_crtc_reset.event_source;
5446 stream->triggered_crtc_reset.event =
5447 master->timing.flags.VSYNC_POSITIVE_POLARITY ?
5448 CRTC_EVENT_VSYNC_RISING : CRTC_EVENT_VSYNC_FALLING;
5449 stream->triggered_crtc_reset.delay = TRIGGER_DELAY_NEXT_PIXEL;
5453 static void set_master_stream(struct dc_stream_state *stream_set[],
5456 int j, highest_rfr = 0, master_stream = 0;
5458 for (j = 0; j < stream_count; j++) {
5459 if (stream_set[j] && stream_set[j]->triggered_crtc_reset.enabled) {
5460 int refresh_rate = 0;
5462 refresh_rate = (stream_set[j]->timing.pix_clk_100hz*100)/
5463 (stream_set[j]->timing.h_total*stream_set[j]->timing.v_total);
5464 if (refresh_rate > highest_rfr) {
5465 highest_rfr = refresh_rate;
5470 for (j = 0; j < stream_count; j++) {
5472 stream_set[j]->triggered_crtc_reset.event_source = stream_set[master_stream];
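/*
 * Illustrative example for set_master_stream() above (values assumed, not
 * from the original source): a 1080p60 stream has pix_clk_100hz = 1485000,
 * h_total = 2200 and v_total = 1125, so its computed rate is
 * (1485000 * 100) / (2200 * 1125) = 60; the enabled stream with the
 * highest such rate is picked as the event source for the triggered CRTC
 * reset of the others.
 */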
5476 static void dm_enable_per_frame_crtc_master_sync(struct dc_state *context)
5479 struct dc_stream_state *stream;
5481 if (context->stream_count < 2)
5483 for (i = 0; i < context->stream_count ; i++) {
5484 if (!context->streams[i])
5487 * TODO: add a function to read AMD VSDB bits and set
5488 * crtc_sync_master.multi_sync_enabled flag
5489 * For now it's set to false
5493 set_master_stream(context->streams, context->stream_count);
5495 for (i = 0; i < context->stream_count ; i++) {
5496 stream = context->streams[i];
5501 set_multisync_trigger_params(stream);
5505 static struct drm_display_mode *
5506 get_highest_refresh_rate_mode(struct amdgpu_dm_connector *aconnector,
5507 bool use_probed_modes)
5509 struct drm_display_mode *m, *m_pref = NULL;
5510 u16 current_refresh, highest_refresh;
5511 struct list_head *list_head = use_probed_modes ?
5512 &aconnector->base.probed_modes :
5513 &aconnector->base.modes;
5515 if (aconnector->freesync_vid_base.clock != 0)
5516 return &aconnector->freesync_vid_base;
5518 /* Find the preferred mode */
5519 list_for_each_entry (m, list_head, head) {
5520 if (m->type & DRM_MODE_TYPE_PREFERRED) {
5527 /* Probably an EDID with no preferred mode. Fall back to the first entry */
5528 m_pref = list_first_entry_or_null(
5529 &aconnector->base.modes, struct drm_display_mode, head);
5531 DRM_DEBUG_DRIVER("No preferred mode found in EDID\n");
5536 highest_refresh = drm_mode_vrefresh(m_pref);
5539 * Find the mode with the highest refresh rate at the same resolution.
5540 * For some monitors, the preferred mode is not the mode with the
5541 * highest supported refresh rate.
5543 list_for_each_entry (m, list_head, head) {
5544 current_refresh = drm_mode_vrefresh(m);
5546 if (m->hdisplay == m_pref->hdisplay &&
5547 m->vdisplay == m_pref->vdisplay &&
5548 highest_refresh < current_refresh) {
5549 highest_refresh = current_refresh;
5554 aconnector->freesync_vid_base = *m_pref;
5558 static bool is_freesync_video_mode(const struct drm_display_mode *mode,
5559 struct amdgpu_dm_connector *aconnector)
5561 struct drm_display_mode *high_mode;
5564 high_mode = get_highest_refresh_rate_mode(aconnector, false);
5565 if (!high_mode || !mode)
5568 timing_diff = high_mode->vtotal - mode->vtotal;
5570 if (high_mode->clock == 0 || high_mode->clock != mode->clock ||
5571 high_mode->hdisplay != mode->hdisplay ||
5572 high_mode->vdisplay != mode->vdisplay ||
5573 high_mode->hsync_start != mode->hsync_start ||
5574 high_mode->hsync_end != mode->hsync_end ||
5575 high_mode->htotal != mode->htotal ||
5576 high_mode->hskew != mode->hskew ||
5577 high_mode->vscan != mode->vscan ||
5578 high_mode->vsync_start - mode->vsync_start != timing_diff ||
5579 high_mode->vsync_end - mode->vsync_end != timing_diff)
5585 static struct dc_stream_state *
5586 create_stream_for_sink(struct amdgpu_dm_connector *aconnector,
5587 const struct drm_display_mode *drm_mode,
5588 const struct dm_connector_state *dm_state,
5589 const struct dc_stream_state *old_stream,
5592 struct drm_display_mode *preferred_mode = NULL;
5593 struct drm_connector *drm_connector;
5594 const struct drm_connector_state *con_state =
5595 dm_state ? &dm_state->base : NULL;
5596 struct dc_stream_state *stream = NULL;
5597 struct drm_display_mode mode = *drm_mode;
5598 struct drm_display_mode saved_mode;
5599 struct drm_display_mode *freesync_mode = NULL;
5600 bool native_mode_found = false;
5601 bool recalculate_timing = dm_state ? (dm_state->scaling != RMX_OFF) : false;
5603 int preferred_refresh = 0;
5604 #if defined(CONFIG_DRM_AMD_DC_DCN)
5605 struct dsc_dec_dpcd_caps dsc_caps;
5606 uint32_t link_bandwidth_kbps;
5608 struct dc_sink *sink = NULL;
5610 memset(&saved_mode, 0, sizeof(saved_mode));
5612 if (aconnector == NULL) {
5613 DRM_ERROR("aconnector is NULL!\n");
5617 drm_connector = &aconnector->base;
5619 if (!aconnector->dc_sink) {
5620 sink = create_fake_sink(aconnector);
5624 sink = aconnector->dc_sink;
5625 dc_sink_retain(sink);
5628 stream = dc_create_stream_for_sink(sink);
5630 if (stream == NULL) {
5631 DRM_ERROR("Failed to create stream for sink!\n");
5635 stream->dm_stream_context = aconnector;
5637 stream->timing.flags.LTE_340MCSC_SCRAMBLE =
5638 drm_connector->display_info.hdmi.scdc.scrambling.low_rates;
5640 list_for_each_entry(preferred_mode, &aconnector->base.modes, head) {
5641 /* Search for preferred mode */
5642 if (preferred_mode->type & DRM_MODE_TYPE_PREFERRED) {
5643 native_mode_found = true;
5647 if (!native_mode_found)
5648 preferred_mode = list_first_entry_or_null(
5649 &aconnector->base.modes,
5650 struct drm_display_mode,
5653 mode_refresh = drm_mode_vrefresh(&mode);
5655 if (preferred_mode == NULL) {
5657 * This may not be an error: the use case is when we have no
5658 * usermode calls to reset and set the mode upon hotplug. In this
5659 * case, we call set mode ourselves to restore the previous mode,
5660 * and the mode list may not be filled in yet.
5662 DRM_DEBUG_DRIVER("No preferred mode found\n");
5664 recalculate_timing |= amdgpu_freesync_vid_mode &&
5665 is_freesync_video_mode(&mode, aconnector);
5666 if (recalculate_timing) {
5667 freesync_mode = get_highest_refresh_rate_mode(aconnector, false);
5669 mode = *freesync_mode;
5671 decide_crtc_timing_for_drm_display_mode(
5672 &mode, preferred_mode,
5673 dm_state ? (dm_state->scaling != RMX_OFF) : false);
5676 preferred_refresh = drm_mode_vrefresh(preferred_mode);
5679 if (recalculate_timing)
5680 drm_mode_set_crtcinfo(&saved_mode, 0);
5682 drm_mode_set_crtcinfo(&mode, 0);
5685 * If scaling is enabled and refresh rate didn't change
5686 * we copy the vic and polarities of the old timings
5688 if (!recalculate_timing || mode_refresh != preferred_refresh)
5689 fill_stream_properties_from_drm_display_mode(
5690 stream, &mode, &aconnector->base, con_state, NULL,
5693 fill_stream_properties_from_drm_display_mode(
5694 stream, &mode, &aconnector->base, con_state, old_stream,
5697 stream->timing.flags.DSC = 0;
5699 if (aconnector->dc_link && sink->sink_signal == SIGNAL_TYPE_DISPLAY_PORT) {
5700 #if defined(CONFIG_DRM_AMD_DC_DCN)
5701 dc_dsc_parse_dsc_dpcd(aconnector->dc_link->ctx->dc,
5702 aconnector->dc_link->dpcd_caps.dsc_caps.dsc_basic_caps.raw,
5703 aconnector->dc_link->dpcd_caps.dsc_caps.dsc_branch_decoder_caps.raw,
5705 link_bandwidth_kbps = dc_link_bandwidth_kbps(aconnector->dc_link,
5706 dc_link_get_link_cap(aconnector->dc_link));
5708 if (aconnector->dsc_settings.dsc_force_enable != DSC_CLK_FORCE_DISABLE && dsc_caps.is_dsc_supported) {
5709 /* Set DSC policy according to dsc_clock_en */
5710 dc_dsc_policy_set_enable_dsc_when_not_needed(
5711 aconnector->dsc_settings.dsc_force_enable == DSC_CLK_FORCE_ENABLE);
5713 if (dc_dsc_compute_config(aconnector->dc_link->ctx->dc->res_pool->dscs[0],
5715 aconnector->dc_link->ctx->dc->debug.dsc_min_slice_height_override,
5717 link_bandwidth_kbps,
5719 &stream->timing.dsc_cfg))
5720 stream->timing.flags.DSC = 1;
5721 /* Overwrite the stream flag if DSC is enabled through debugfs */
5722 if (aconnector->dsc_settings.dsc_force_enable == DSC_CLK_FORCE_ENABLE)
5723 stream->timing.flags.DSC = 1;
5725 if (stream->timing.flags.DSC && aconnector->dsc_settings.dsc_num_slices_h)
5726 stream->timing.dsc_cfg.num_slices_h = aconnector->dsc_settings.dsc_num_slices_h;
5728 if (stream->timing.flags.DSC && aconnector->dsc_settings.dsc_num_slices_v)
5729 stream->timing.dsc_cfg.num_slices_v = aconnector->dsc_settings.dsc_num_slices_v;
5731 if (stream->timing.flags.DSC && aconnector->dsc_settings.dsc_bits_per_pixel)
5732 stream->timing.dsc_cfg.bits_per_pixel = aconnector->dsc_settings.dsc_bits_per_pixel;
5737 update_stream_scaling_settings(&mode, dm_state, stream);
5740 &stream->audio_info,
5744 update_stream_signal(stream, sink);
5746 if (stream->signal == SIGNAL_TYPE_HDMI_TYPE_A)
5747 mod_build_hf_vsif_infopacket(stream, &stream->vsp_infopacket);
5749 if (stream->link->psr_settings.psr_feature_enabled) {
5751 // Decide whether the stream supports VSC SDP colorimetry
5752 // before building the VSC info packet.
5754 stream->use_vsc_sdp_for_colorimetry = false;
5755 if (aconnector->dc_sink->sink_signal == SIGNAL_TYPE_DISPLAY_PORT_MST) {
5756 stream->use_vsc_sdp_for_colorimetry =
5757 aconnector->dc_sink->is_vsc_sdp_colorimetry_supported;
5759 if (stream->link->dpcd_caps.dprx_feature.bits.VSC_SDP_COLORIMETRY_SUPPORTED)
5760 stream->use_vsc_sdp_for_colorimetry = true;
5762 mod_build_vsc_infopacket(stream, &stream->vsc_infopacket);
5765 dc_sink_release(sink);
5770 static void amdgpu_dm_crtc_destroy(struct drm_crtc *crtc)
5772 drm_crtc_cleanup(crtc);
5776 static void dm_crtc_destroy_state(struct drm_crtc *crtc,
5777 struct drm_crtc_state *state)
5779 struct dm_crtc_state *cur = to_dm_crtc_state(state);
5781 /* TODO: destroy dc_stream objects once the stream object is flattened */
5783 dc_stream_release(cur->stream);
5786 __drm_atomic_helper_crtc_destroy_state(state);
5792 static void dm_crtc_reset_state(struct drm_crtc *crtc)
5794 struct dm_crtc_state *state;
5797 dm_crtc_destroy_state(crtc, crtc->state);
5799 state = kzalloc(sizeof(*state), GFP_KERNEL);
5800 if (WARN_ON(!state))
5803 __drm_atomic_helper_crtc_reset(crtc, &state->base);
5806 static struct drm_crtc_state *
5807 dm_crtc_duplicate_state(struct drm_crtc *crtc)
5809 struct dm_crtc_state *state, *cur;
5811 cur = to_dm_crtc_state(crtc->state);
5813 if (WARN_ON(!crtc->state))
5816 state = kzalloc(sizeof(*state), GFP_KERNEL);
5820 __drm_atomic_helper_crtc_duplicate_state(crtc, &state->base);
5823 state->stream = cur->stream;
5824 dc_stream_retain(state->stream);
5827 state->active_planes = cur->active_planes;
5828 state->vrr_infopacket = cur->vrr_infopacket;
5829 state->abm_level = cur->abm_level;
5830 state->vrr_supported = cur->vrr_supported;
5831 state->freesync_config = cur->freesync_config;
5832 state->cm_has_degamma = cur->cm_has_degamma;
5833 state->cm_is_degamma_srgb = cur->cm_is_degamma_srgb;
5834 /* TODO: duplicate dc_stream once the stream object is flattened */
5836 return &state->base;
5839 #ifdef CONFIG_DRM_AMD_SECURE_DISPLAY
5840 static int amdgpu_dm_crtc_late_register(struct drm_crtc *crtc)
5842 crtc_debugfs_init(crtc);
5848 static inline int dm_set_vupdate_irq(struct drm_crtc *crtc, bool enable)
5850 enum dc_irq_source irq_source;
5851 struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
5852 struct amdgpu_device *adev = drm_to_adev(crtc->dev);
5855 irq_source = IRQ_TYPE_VUPDATE + acrtc->otg_inst;
5857 rc = dc_interrupt_set(adev->dm.dc, irq_source, enable) ? 0 : -EBUSY;
5859 DRM_DEBUG_VBL("crtc %d - vupdate irq %sabling: r=%d\n",
5860 acrtc->crtc_id, enable ? "en" : "dis", rc);
5864 static inline int dm_set_vblank(struct drm_crtc *crtc, bool enable)
5866 enum dc_irq_source irq_source;
5867 struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
5868 struct amdgpu_device *adev = drm_to_adev(crtc->dev);
5869 struct dm_crtc_state *acrtc_state = to_dm_crtc_state(crtc->state);
5870 #if defined(CONFIG_DRM_AMD_DC_DCN)
5871 struct amdgpu_display_manager *dm = &adev->dm;
5872 unsigned long flags;
5877 /* vblank irq on -> Only need vupdate irq in vrr mode */
5878 if (amdgpu_dm_vrr_active(acrtc_state))
5879 rc = dm_set_vupdate_irq(crtc, true);
5881 /* vblank irq off -> vupdate irq off */
5882 rc = dm_set_vupdate_irq(crtc, false);
5888 irq_source = IRQ_TYPE_VBLANK + acrtc->otg_inst;
5890 if (!dc_interrupt_set(adev->dm.dc, irq_source, enable))
5893 if (amdgpu_in_reset(adev))
5896 #if defined(CONFIG_DRM_AMD_DC_DCN)
5897 spin_lock_irqsave(&dm->vblank_lock, flags);
5898 dm->vblank_workqueue->dm = dm;
5899 dm->vblank_workqueue->otg_inst = acrtc->otg_inst;
5900 dm->vblank_workqueue->enable = enable;
5901 spin_unlock_irqrestore(&dm->vblank_lock, flags);
5902 schedule_work(&dm->vblank_workqueue->mall_work);
5908 static int dm_enable_vblank(struct drm_crtc *crtc)
5910 return dm_set_vblank(crtc, true);
5913 static void dm_disable_vblank(struct drm_crtc *crtc)
5915 dm_set_vblank(crtc, false);
5918 /* Only the options currently available for the driver are implemented */
5919 static const struct drm_crtc_funcs amdgpu_dm_crtc_funcs = {
5920 .reset = dm_crtc_reset_state,
5921 .destroy = amdgpu_dm_crtc_destroy,
5922 .set_config = drm_atomic_helper_set_config,
5923 .page_flip = drm_atomic_helper_page_flip,
5924 .atomic_duplicate_state = dm_crtc_duplicate_state,
5925 .atomic_destroy_state = dm_crtc_destroy_state,
5926 .set_crc_source = amdgpu_dm_crtc_set_crc_source,
5927 .verify_crc_source = amdgpu_dm_crtc_verify_crc_source,
5928 .get_crc_sources = amdgpu_dm_crtc_get_crc_sources,
5929 .get_vblank_counter = amdgpu_get_vblank_counter_kms,
5930 .enable_vblank = dm_enable_vblank,
5931 .disable_vblank = dm_disable_vblank,
5932 .get_vblank_timestamp = drm_crtc_vblank_helper_get_vblank_timestamp,
5933 #if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
5934 .late_register = amdgpu_dm_crtc_late_register,
5938 static enum drm_connector_status
5939 amdgpu_dm_connector_detect(struct drm_connector *connector, bool force)
5942 struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
5946 * 1. This interface is NOT called in context of HPD irq.
5947 * 2. This interface *is called* in the context of a user-mode ioctl,
5948 * which makes it a bad place for *any* MST-related activity.
5951 if (aconnector->base.force == DRM_FORCE_UNSPECIFIED &&
5952 !aconnector->fake_enable)
5953 connected = (aconnector->dc_sink != NULL);
5955 connected = (aconnector->base.force == DRM_FORCE_ON);
5957 update_subconnector_property(aconnector);
5959 return (connected ? connector_status_connected :
5960 connector_status_disconnected);
5963 int amdgpu_dm_connector_atomic_set_property(struct drm_connector *connector,
5964 struct drm_connector_state *connector_state,
5965 struct drm_property *property,
5968 struct drm_device *dev = connector->dev;
5969 struct amdgpu_device *adev = drm_to_adev(dev);
5970 struct dm_connector_state *dm_old_state =
5971 to_dm_connector_state(connector->state);
5972 struct dm_connector_state *dm_new_state =
5973 to_dm_connector_state(connector_state);
5977 if (property == dev->mode_config.scaling_mode_property) {
5978 enum amdgpu_rmx_type rmx_type;
5981 case DRM_MODE_SCALE_CENTER:
5982 rmx_type = RMX_CENTER;
5984 case DRM_MODE_SCALE_ASPECT:
5985 rmx_type = RMX_ASPECT;
5987 case DRM_MODE_SCALE_FULLSCREEN:
5988 rmx_type = RMX_FULL;
5990 case DRM_MODE_SCALE_NONE:
5996 if (dm_old_state->scaling == rmx_type)
5999 dm_new_state->scaling = rmx_type;
6001 } else if (property == adev->mode_info.underscan_hborder_property) {
6002 dm_new_state->underscan_hborder = val;
6004 } else if (property == adev->mode_info.underscan_vborder_property) {
6005 dm_new_state->underscan_vborder = val;
6007 } else if (property == adev->mode_info.underscan_property) {
6008 dm_new_state->underscan_enable = val;
6010 } else if (property == adev->mode_info.abm_level_property) {
6011 dm_new_state->abm_level = val;
6018 int amdgpu_dm_connector_atomic_get_property(struct drm_connector *connector,
6019 const struct drm_connector_state *state,
6020 struct drm_property *property,
6023 struct drm_device *dev = connector->dev;
6024 struct amdgpu_device *adev = drm_to_adev(dev);
6025 struct dm_connector_state *dm_state =
6026 to_dm_connector_state(state);
6029 if (property == dev->mode_config.scaling_mode_property) {
6030 switch (dm_state->scaling) {
6032 *val = DRM_MODE_SCALE_CENTER;
6035 *val = DRM_MODE_SCALE_ASPECT;
6038 *val = DRM_MODE_SCALE_FULLSCREEN;
6042 *val = DRM_MODE_SCALE_NONE;
6046 } else if (property == adev->mode_info.underscan_hborder_property) {
6047 *val = dm_state->underscan_hborder;
6049 } else if (property == adev->mode_info.underscan_vborder_property) {
6050 *val = dm_state->underscan_vborder;
6052 } else if (property == adev->mode_info.underscan_property) {
6053 *val = dm_state->underscan_enable;
6055 } else if (property == adev->mode_info.abm_level_property) {
6056 *val = dm_state->abm_level;
6063 static void amdgpu_dm_connector_unregister(struct drm_connector *connector)
6065 struct amdgpu_dm_connector *amdgpu_dm_connector = to_amdgpu_dm_connector(connector);
6067 drm_dp_aux_unregister(&amdgpu_dm_connector->dm_dp_aux.aux);
6070 static void amdgpu_dm_connector_destroy(struct drm_connector *connector)
6072 struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
6073 const struct dc_link *link = aconnector->dc_link;
6074 struct amdgpu_device *adev = drm_to_adev(connector->dev);
6075 struct amdgpu_display_manager *dm = &adev->dm;
6078 * Call only if mst_mgr was initialized before, since it's not done
6079 * for all connector types.
6081 if (aconnector->mst_mgr.dev)
6082 drm_dp_mst_topology_mgr_destroy(&aconnector->mst_mgr);
6084 #if defined(CONFIG_BACKLIGHT_CLASS_DEVICE) ||\
6085 defined(CONFIG_BACKLIGHT_CLASS_DEVICE_MODULE)
6087 if ((link->connector_signal & (SIGNAL_TYPE_EDP | SIGNAL_TYPE_LVDS)) &&
6088 link->type != dc_connection_none &&
6089 dm->backlight_dev) {
6090 backlight_device_unregister(dm->backlight_dev);
6091 dm->backlight_dev = NULL;
6095 if (aconnector->dc_em_sink)
6096 dc_sink_release(aconnector->dc_em_sink);
6097 aconnector->dc_em_sink = NULL;
6098 if (aconnector->dc_sink)
6099 dc_sink_release(aconnector->dc_sink);
6100 aconnector->dc_sink = NULL;
6102 drm_dp_cec_unregister_connector(&aconnector->dm_dp_aux.aux);
6103 drm_connector_unregister(connector);
6104 drm_connector_cleanup(connector);
6105 if (aconnector->i2c) {
6106 i2c_del_adapter(&aconnector->i2c->base);
6107 kfree(aconnector->i2c);
6109 kfree(aconnector->dm_dp_aux.aux.name);
6114 void amdgpu_dm_connector_funcs_reset(struct drm_connector *connector)
6116 struct dm_connector_state *state =
6117 to_dm_connector_state(connector->state);
6119 if (connector->state)
6120 __drm_atomic_helper_connector_destroy_state(connector->state);
6124 state = kzalloc(sizeof(*state), GFP_KERNEL);
6127 state->scaling = RMX_OFF;
6128 state->underscan_enable = false;
6129 state->underscan_hborder = 0;
6130 state->underscan_vborder = 0;
6131 state->base.max_requested_bpc = 8;
6132 state->vcpi_slots = 0;
6134 if (connector->connector_type == DRM_MODE_CONNECTOR_eDP)
6135 state->abm_level = amdgpu_dm_abm_level;
6137 __drm_atomic_helper_connector_reset(connector, &state->base);
6141 struct drm_connector_state *
6142 amdgpu_dm_connector_atomic_duplicate_state(struct drm_connector *connector)
6144 struct dm_connector_state *state =
6145 to_dm_connector_state(connector->state);
6147 struct dm_connector_state *new_state =
6148 kmemdup(state, sizeof(*state), GFP_KERNEL);
6153 __drm_atomic_helper_connector_duplicate_state(connector, &new_state->base);
6155 new_state->freesync_capable = state->freesync_capable;
6156 new_state->abm_level = state->abm_level;
6157 new_state->scaling = state->scaling;
6158 new_state->underscan_enable = state->underscan_enable;
6159 new_state->underscan_hborder = state->underscan_hborder;
6160 new_state->underscan_vborder = state->underscan_vborder;
6161 new_state->vcpi_slots = state->vcpi_slots;
6162 new_state->pbn = state->pbn;
6163 return &new_state->base;
6167 amdgpu_dm_connector_late_register(struct drm_connector *connector)
6169 struct amdgpu_dm_connector *amdgpu_dm_connector =
6170 to_amdgpu_dm_connector(connector);
6173 if ((connector->connector_type == DRM_MODE_CONNECTOR_DisplayPort) ||
6174 (connector->connector_type == DRM_MODE_CONNECTOR_eDP)) {
6175 amdgpu_dm_connector->dm_dp_aux.aux.dev = connector->kdev;
6176 r = drm_dp_aux_register(&amdgpu_dm_connector->dm_dp_aux.aux);
6181 #if defined(CONFIG_DEBUG_FS)
6182 connector_debugfs_init(amdgpu_dm_connector);
6188 static const struct drm_connector_funcs amdgpu_dm_connector_funcs = {
6189 .reset = amdgpu_dm_connector_funcs_reset,
6190 .detect = amdgpu_dm_connector_detect,
6191 .fill_modes = drm_helper_probe_single_connector_modes,
6192 .destroy = amdgpu_dm_connector_destroy,
6193 .atomic_duplicate_state = amdgpu_dm_connector_atomic_duplicate_state,
6194 .atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
6195 .atomic_set_property = amdgpu_dm_connector_atomic_set_property,
6196 .atomic_get_property = amdgpu_dm_connector_atomic_get_property,
6197 .late_register = amdgpu_dm_connector_late_register,
6198 .early_unregister = amdgpu_dm_connector_unregister
6201 static int get_modes(struct drm_connector *connector)
6203 return amdgpu_dm_connector_get_modes(connector);
6206 static void create_eml_sink(struct amdgpu_dm_connector *aconnector)
6208 struct dc_sink_init_data init_params = {
6209 .link = aconnector->dc_link,
6210 .sink_signal = SIGNAL_TYPE_VIRTUAL
6214 if (!aconnector->base.edid_blob_ptr) {
6215 		DRM_ERROR("No EDID firmware found on connector: %s, forcing to OFF!\n",
6216 aconnector->base.name);
6218 aconnector->base.force = DRM_FORCE_OFF;
6219 aconnector->base.override_edid = false;
6223 edid = (struct edid *) aconnector->base.edid_blob_ptr->data;
6225 aconnector->edid = edid;
6227 aconnector->dc_em_sink = dc_link_add_remote_sink(
6228 aconnector->dc_link,
6230 (edid->extensions + 1) * EDID_LENGTH,
6233 if (aconnector->base.force == DRM_FORCE_ON) {
6234 aconnector->dc_sink = aconnector->dc_link->local_sink ?
6235 aconnector->dc_link->local_sink :
6236 aconnector->dc_em_sink;
6237 dc_sink_retain(aconnector->dc_sink);
6241 static void handle_edid_mgmt(struct amdgpu_dm_connector *aconnector)
6243 struct dc_link *link = (struct dc_link *)aconnector->dc_link;
6246	 * In case of a headless boot with force on for a DP-managed connector,
6247	 * those settings have to be != 0 to get an initial modeset.
6249 if (link->connector_signal == SIGNAL_TYPE_DISPLAY_PORT) {
6250 link->verified_link_cap.lane_count = LANE_COUNT_FOUR;
6251 link->verified_link_cap.link_rate = LINK_RATE_HIGH2;
6255 aconnector->base.override_edid = true;
6256 create_eml_sink(aconnector);
6259 static struct dc_stream_state *
6260 create_validate_stream_for_sink(struct amdgpu_dm_connector *aconnector,
6261 const struct drm_display_mode *drm_mode,
6262 const struct dm_connector_state *dm_state,
6263 const struct dc_stream_state *old_stream)
6265 struct drm_connector *connector = &aconnector->base;
6266 struct amdgpu_device *adev = drm_to_adev(connector->dev);
6267 struct dc_stream_state *stream;
6268 const struct drm_connector_state *drm_state = dm_state ? &dm_state->base : NULL;
6269 int requested_bpc = drm_state ? drm_state->max_requested_bpc : 8;
6270 enum dc_status dc_result = DC_OK;
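	/*
	 * Build a stream for the sink and validate it against DC. If
	 * validation fails, retry with progressively lower colour depth
	 * (requested_bpc drops in steps of 2 down to 6), since lower bpc
	 * typically needs less link bandwidth; if encoder validation still
	 * fails, a single retry forcing YCbCr 4:2:0 output follows below.
	 */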
6273 stream = create_stream_for_sink(aconnector, drm_mode,
6274 dm_state, old_stream,
6276 if (stream == NULL) {
6277 DRM_ERROR("Failed to create stream for sink!\n");
6281 dc_result = dc_validate_stream(adev->dm.dc, stream);
6283 if (dc_result != DC_OK) {
6284 DRM_DEBUG_KMS("Mode %dx%d (clk %d) failed DC validation with error %d (%s)\n",
6289 dc_status_to_str(dc_result));
6291 dc_stream_release(stream);
6293 requested_bpc -= 2; /* lower bpc to retry validation */
6296 } while (stream == NULL && requested_bpc >= 6);
6298 if (dc_result == DC_FAIL_ENC_VALIDATE && !aconnector->force_yuv420_output) {
6299 DRM_DEBUG_KMS("Retry forcing YCbCr420 encoding\n");
6301 aconnector->force_yuv420_output = true;
6302 stream = create_validate_stream_for_sink(aconnector, drm_mode,
6303 dm_state, old_stream);
6304 aconnector->force_yuv420_output = false;
6310 enum drm_mode_status amdgpu_dm_connector_mode_valid(struct drm_connector *connector,
6311 struct drm_display_mode *mode)
6313 int result = MODE_ERROR;
6314 struct dc_sink *dc_sink;
6315 /* TODO: Unhardcode stream count */
6316 struct dc_stream_state *stream;
6317 struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
6319 if ((mode->flags & DRM_MODE_FLAG_INTERLACE) ||
6320 (mode->flags & DRM_MODE_FLAG_DBLSCAN))
6324	 * Only run this the first time mode_valid is called to initialize
6327 if (aconnector->base.force != DRM_FORCE_UNSPECIFIED &&
6328 !aconnector->dc_em_sink)
6329 handle_edid_mgmt(aconnector);
6331 dc_sink = to_amdgpu_dm_connector(connector)->dc_sink;
6333 if (dc_sink == NULL && aconnector->base.force != DRM_FORCE_ON_DIGITAL &&
6334 aconnector->base.force != DRM_FORCE_ON) {
6335 DRM_ERROR("dc_sink is NULL!\n");
6339 stream = create_validate_stream_for_sink(aconnector, mode, NULL, NULL);
6341 dc_stream_release(stream);
6346	/* TODO: error handling */
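/*
 * Pack the connector's HDR output metadata into a DC info packet. The
 * payload is the 26-byte HDR static metadata body of the Dynamic Range
 * and Mastering InfoFrame; only the header differs between HDMI
 * (InfoFrame header) and DP/eDP (SDP header), as built in the switch below.
 */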
6350 static int fill_hdr_info_packet(const struct drm_connector_state *state,
6351 struct dc_info_packet *out)
6353 struct hdmi_drm_infoframe frame;
6354 unsigned char buf[30]; /* 26 + 4 */
6358 memset(out, 0, sizeof(*out));
6360 if (!state->hdr_output_metadata)
6363 ret = drm_hdmi_infoframe_set_hdr_metadata(&frame, state);
6367 len = hdmi_drm_infoframe_pack_only(&frame, buf, sizeof(buf));
6371 /* Static metadata is a fixed 26 bytes + 4 byte header. */
6375 /* Prepare the infopacket for DC. */
6376 switch (state->connector->connector_type) {
6377 case DRM_MODE_CONNECTOR_HDMIA:
6378 out->hb0 = 0x87; /* type */
6379 out->hb1 = 0x01; /* version */
6380 out->hb2 = 0x1A; /* length */
6381 out->sb[0] = buf[3]; /* checksum */
6385 case DRM_MODE_CONNECTOR_DisplayPort:
6386 case DRM_MODE_CONNECTOR_eDP:
6387 out->hb0 = 0x00; /* sdp id, zero */
6388 out->hb1 = 0x87; /* type */
6389 out->hb2 = 0x1D; /* payload len - 1 */
6390 out->hb3 = (0x13 << 2); /* sdp version */
6391 out->sb[0] = 0x01; /* version */
6392 out->sb[1] = 0x1A; /* length */
6400 memcpy(&out->sb[i], &buf[4], 26);
6403 print_hex_dump(KERN_DEBUG, "HDR SB:", DUMP_PREFIX_NONE, 16, 1, out->sb,
6404 sizeof(out->sb), false);
6410 is_hdr_metadata_different(const struct drm_connector_state *old_state,
6411 const struct drm_connector_state *new_state)
6413 struct drm_property_blob *old_blob = old_state->hdr_output_metadata;
6414 struct drm_property_blob *new_blob = new_state->hdr_output_metadata;
6416 if (old_blob != new_blob) {
6417 if (old_blob && new_blob &&
6418 old_blob->length == new_blob->length)
6419 return memcmp(old_blob->data, new_blob->data,
6429 amdgpu_dm_connector_atomic_check(struct drm_connector *conn,
6430 struct drm_atomic_state *state)
6432 struct drm_connector_state *new_con_state =
6433 drm_atomic_get_new_connector_state(state, conn);
6434 struct drm_connector_state *old_con_state =
6435 drm_atomic_get_old_connector_state(state, conn);
6436 struct drm_crtc *crtc = new_con_state->crtc;
6437 struct drm_crtc_state *new_crtc_state;
6440 trace_amdgpu_dm_connector_atomic_check(new_con_state);
6445 if (is_hdr_metadata_different(old_con_state, new_con_state)) {
6446 struct dc_info_packet hdr_infopacket;
6448 ret = fill_hdr_info_packet(new_con_state, &hdr_infopacket);
6452 new_crtc_state = drm_atomic_get_crtc_state(state, crtc);
6453 if (IS_ERR(new_crtc_state))
6454 return PTR_ERR(new_crtc_state);
6457 * DC considers the stream backends changed if the
6458 * static metadata changes. Forcing the modeset also
6459 * gives a simple way for userspace to switch from
6460 * 8bpc to 10bpc when setting the metadata to enter
6463 * Changing the static metadata after it's been
6464 * set is permissible, however. So only force a
6465 * modeset if we're entering or exiting HDR.
6467 new_crtc_state->mode_changed =
6468 !old_con_state->hdr_output_metadata ||
6469 !new_con_state->hdr_output_metadata;
6475 static const struct drm_connector_helper_funcs
6476 amdgpu_dm_connector_helper_funcs = {
6478 * If hotplugging a second bigger display in FB Con mode, bigger resolution
6479 * modes will be filtered by drm_mode_validate_size(), and those modes
6480	 * are missing after the user starts lightdm. So we need to renew the modes
6481	 * list in the get_modes callback, not just return the modes count
6483 .get_modes = get_modes,
6484 .mode_valid = amdgpu_dm_connector_mode_valid,
6485 .atomic_check = amdgpu_dm_connector_atomic_check,
6488 static void dm_crtc_helper_disable(struct drm_crtc *crtc)
6492 static int count_crtc_active_planes(struct drm_crtc_state *new_crtc_state)
6494 struct drm_atomic_state *state = new_crtc_state->state;
6495 struct drm_plane *plane;
6498 drm_for_each_plane_mask(plane, state->dev, new_crtc_state->plane_mask) {
6499 struct drm_plane_state *new_plane_state;
6501 /* Cursor planes are "fake". */
6502 if (plane->type == DRM_PLANE_TYPE_CURSOR)
6505 new_plane_state = drm_atomic_get_new_plane_state(state, plane);
6507 if (!new_plane_state) {
6509			 * The plane is enabled on the CRTC and hasn't changed
6510 * state. This means that it previously passed
6511 * validation and is therefore enabled.
6517 /* We need a framebuffer to be considered enabled. */
6518 num_active += (new_plane_state->fb != NULL);
6524 static void dm_update_crtc_active_planes(struct drm_crtc *crtc,
6525 struct drm_crtc_state *new_crtc_state)
6527 struct dm_crtc_state *dm_new_crtc_state =
6528 to_dm_crtc_state(new_crtc_state);
6530 dm_new_crtc_state->active_planes = 0;
6532 if (!dm_new_crtc_state->stream)
6535 dm_new_crtc_state->active_planes =
6536 count_crtc_active_planes(new_crtc_state);
6539 static int dm_crtc_helper_atomic_check(struct drm_crtc *crtc,
6540 struct drm_atomic_state *state)
6542 struct drm_crtc_state *crtc_state = drm_atomic_get_new_crtc_state(state,
6544 struct amdgpu_device *adev = drm_to_adev(crtc->dev);
6545 struct dc *dc = adev->dm.dc;
6546 struct dm_crtc_state *dm_crtc_state = to_dm_crtc_state(crtc_state);
6549 trace_amdgpu_dm_crtc_atomic_check(crtc_state);
6551 dm_update_crtc_active_planes(crtc, crtc_state);
6553 if (unlikely(!dm_crtc_state->stream &&
6554 modeset_required(crtc_state, NULL, dm_crtc_state->stream))) {
6560 * We require the primary plane to be enabled whenever the CRTC is, otherwise
6561 * drm_mode_cursor_universal may end up trying to enable the cursor plane while all other
6562 * planes are disabled, which is not supported by the hardware. And there is legacy
6563 * userspace which stops using the HW cursor altogether in response to the resulting EINVAL.
6565 if (crtc_state->enable &&
6566 !(crtc_state->plane_mask & drm_plane_mask(crtc->primary))) {
6567 DRM_DEBUG_ATOMIC("Can't enable a CRTC without enabling the primary plane\n");
6571 /* In some use cases, like reset, no stream is attached */
6572 if (!dm_crtc_state->stream)
6575 if (dc_validate_stream(dc, dm_crtc_state->stream) == DC_OK)
6578 DRM_DEBUG_ATOMIC("Failed DC stream validation\n");
6582 static bool dm_crtc_helper_mode_fixup(struct drm_crtc *crtc,
6583 const struct drm_display_mode *mode,
6584 struct drm_display_mode *adjusted_mode)
6589 static const struct drm_crtc_helper_funcs amdgpu_dm_crtc_helper_funcs = {
6590 .disable = dm_crtc_helper_disable,
6591 .atomic_check = dm_crtc_helper_atomic_check,
6592 .mode_fixup = dm_crtc_helper_mode_fixup,
6593 .get_scanout_position = amdgpu_crtc_get_scanout_position,
6596 static void dm_encoder_helper_disable(struct drm_encoder *encoder)
6601 static int convert_dc_color_depth_into_bpc(enum dc_color_depth display_color_depth)
6603 switch (display_color_depth) {
6604 case COLOR_DEPTH_666:
6606 case COLOR_DEPTH_888:
6608 case COLOR_DEPTH_101010:
6610 case COLOR_DEPTH_121212:
6612 case COLOR_DEPTH_141414:
6614 case COLOR_DEPTH_161616:
6622 static int dm_encoder_helper_atomic_check(struct drm_encoder *encoder,
6623 struct drm_crtc_state *crtc_state,
6624 struct drm_connector_state *conn_state)
6626 struct drm_atomic_state *state = crtc_state->state;
6627 struct drm_connector *connector = conn_state->connector;
6628 struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
6629 struct dm_connector_state *dm_new_connector_state = to_dm_connector_state(conn_state);
6630 const struct drm_display_mode *adjusted_mode = &crtc_state->adjusted_mode;
6631 struct drm_dp_mst_topology_mgr *mst_mgr;
6632 struct drm_dp_mst_port *mst_port;
6633 enum dc_color_depth color_depth;
6635 bool is_y420 = false;
6637 if (!aconnector->port || !aconnector->dc_sink)
6640 mst_port = aconnector->port;
6641 mst_mgr = &aconnector->mst_port->mst_mgr;
6643 if (!crtc_state->connectors_changed && !crtc_state->mode_changed)
6646 if (!state->duplicated) {
6647 int max_bpc = conn_state->max_requested_bpc;
6648 is_y420 = drm_mode_is_420_also(&connector->display_info, adjusted_mode) &&
6649 aconnector->force_yuv420_output;
6650 color_depth = convert_color_depth_from_display_info(connector,
6653 bpp = convert_dc_color_depth_into_bpc(color_depth) * 3;
6654 clock = adjusted_mode->clock;
6655 dm_new_connector_state->pbn = drm_dp_calc_pbn_mode(clock, bpp, false);
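		/*
		 * PBN (Payload Bandwidth Number) is the DP MST bandwidth unit
		 * (one PBN is 54/64 MBytes/sec per the MST spec). It is derived
		 * from pixel clock and bpp here, then converted into VC payload
		 * time slots by the atomic helper below.
		 */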
6657 dm_new_connector_state->vcpi_slots = drm_dp_atomic_find_vcpi_slots(state,
6660 dm_new_connector_state->pbn,
6661 dm_mst_get_pbn_divider(aconnector->dc_link));
6662 if (dm_new_connector_state->vcpi_slots < 0) {
6663 DRM_DEBUG_ATOMIC("failed finding vcpi slots: %d\n", (int)dm_new_connector_state->vcpi_slots);
6664 return dm_new_connector_state->vcpi_slots;
6669 const struct drm_encoder_helper_funcs amdgpu_dm_encoder_helper_funcs = {
6670 .disable = dm_encoder_helper_disable,
6671 .atomic_check = dm_encoder_helper_atomic_check
6674 #if defined(CONFIG_DRM_AMD_DC_DCN)
6675 static int dm_update_mst_vcpi_slots_for_dsc(struct drm_atomic_state *state,
6676 struct dc_state *dc_state)
6678 struct dc_stream_state *stream = NULL;
6679 struct drm_connector *connector;
6680 struct drm_connector_state *new_con_state;
6681 struct amdgpu_dm_connector *aconnector;
6682 struct dm_connector_state *dm_conn_state;
6683 int i, j, clock, bpp;
6684 int vcpi, pbn_div, pbn = 0;
6686 for_each_new_connector_in_state(state, connector, new_con_state, i) {
6688 aconnector = to_amdgpu_dm_connector(connector);
6690 if (!aconnector->port)
6693 if (!new_con_state || !new_con_state->crtc)
6696 dm_conn_state = to_dm_connector_state(new_con_state);
6698 for (j = 0; j < dc_state->stream_count; j++) {
6699 stream = dc_state->streams[j];
6703 if ((struct amdgpu_dm_connector*)stream->dm_stream_context == aconnector)
6712 if (stream->timing.flags.DSC != 1) {
6713 drm_dp_mst_atomic_enable_dsc(state,
6721 pbn_div = dm_mst_get_pbn_divider(stream->link);
6722 bpp = stream->timing.dsc_cfg.bits_per_pixel;
6723 clock = stream->timing.pix_clk_100hz / 10;
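		/*
		 * dsc_cfg.bits_per_pixel is in units of 1/16 bpp (hence the
		 * dsc=true flag passed to drm_dp_calc_pbn_mode()), and
		 * pix_clk_100hz is in 100 Hz units, so /10 yields kHz.
		 */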
6724 pbn = drm_dp_calc_pbn_mode(clock, bpp, true);
6725 vcpi = drm_dp_mst_atomic_enable_dsc(state,
6732 dm_conn_state->pbn = pbn;
6733 dm_conn_state->vcpi_slots = vcpi;
6739 static void dm_drm_plane_reset(struct drm_plane *plane)
6741 struct dm_plane_state *amdgpu_state = NULL;
6744 plane->funcs->atomic_destroy_state(plane, plane->state);
6746 amdgpu_state = kzalloc(sizeof(*amdgpu_state), GFP_KERNEL);
6747 WARN_ON(amdgpu_state == NULL);
6750 __drm_atomic_helper_plane_reset(plane, &amdgpu_state->base);
6753 static struct drm_plane_state *
6754 dm_drm_plane_duplicate_state(struct drm_plane *plane)
6756 struct dm_plane_state *dm_plane_state, *old_dm_plane_state;
6758 old_dm_plane_state = to_dm_plane_state(plane->state);
6759 dm_plane_state = kzalloc(sizeof(*dm_plane_state), GFP_KERNEL);
6760 if (!dm_plane_state)
6763 __drm_atomic_helper_plane_duplicate_state(plane, &dm_plane_state->base);
6765 if (old_dm_plane_state->dc_state) {
6766 dm_plane_state->dc_state = old_dm_plane_state->dc_state;
6767 dc_plane_state_retain(dm_plane_state->dc_state);
6770 return &dm_plane_state->base;
6773 static void dm_drm_plane_destroy_state(struct drm_plane *plane,
6774 struct drm_plane_state *state)
6776 struct dm_plane_state *dm_plane_state = to_dm_plane_state(state);
6778 if (dm_plane_state->dc_state)
6779 dc_plane_state_release(dm_plane_state->dc_state);
6781 drm_atomic_helper_plane_destroy_state(plane, state);
6784 static const struct drm_plane_funcs dm_plane_funcs = {
6785 .update_plane = drm_atomic_helper_update_plane,
6786 .disable_plane = drm_atomic_helper_disable_plane,
6787 .destroy = drm_primary_helper_destroy,
6788 .reset = dm_drm_plane_reset,
6789 .atomic_duplicate_state = dm_drm_plane_duplicate_state,
6790 .atomic_destroy_state = dm_drm_plane_destroy_state,
6791 .format_mod_supported = dm_plane_format_mod_supported,
6794 static int dm_plane_helper_prepare_fb(struct drm_plane *plane,
6795 struct drm_plane_state *new_state)
6797 struct amdgpu_framebuffer *afb;
6798 struct drm_gem_object *obj;
6799 struct amdgpu_device *adev;
6800 struct amdgpu_bo *rbo;
6801 struct dm_plane_state *dm_plane_state_new, *dm_plane_state_old;
6802 struct list_head list;
6803 struct ttm_validate_buffer tv;
6804 struct ww_acquire_ctx ticket;
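	/*
	 * Scanout buffers must stay resident while DC uses them: reserve
	 * the BO, pin it in a supported domain (cursors must be in VRAM),
	 * and grab its GPU address via amdgpu_bo_gpu_offset() below.
	 */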
6808 if (!new_state->fb) {
6809 DRM_DEBUG_KMS("No FB bound\n");
6813 afb = to_amdgpu_framebuffer(new_state->fb);
6814 obj = new_state->fb->obj[0];
6815 rbo = gem_to_amdgpu_bo(obj);
6816 adev = amdgpu_ttm_adev(rbo->tbo.bdev);
6817 INIT_LIST_HEAD(&list);
6821 list_add(&tv.head, &list);
6823 r = ttm_eu_reserve_buffers(&ticket, &list, false, NULL);
6825 dev_err(adev->dev, "fail to reserve bo (%d)\n", r);
6829 if (plane->type != DRM_PLANE_TYPE_CURSOR)
6830 domain = amdgpu_display_supported_domains(adev, rbo->flags);
6832 domain = AMDGPU_GEM_DOMAIN_VRAM;
6834 r = amdgpu_bo_pin(rbo, domain);
6835 if (unlikely(r != 0)) {
6836 if (r != -ERESTARTSYS)
6837 DRM_ERROR("Failed to pin framebuffer with error %d\n", r);
6838 ttm_eu_backoff_reservation(&ticket, &list);
6842 r = amdgpu_ttm_alloc_gart(&rbo->tbo);
6843 if (unlikely(r != 0)) {
6844 amdgpu_bo_unpin(rbo);
6845 ttm_eu_backoff_reservation(&ticket, &list);
6846 DRM_ERROR("%p bind failed\n", rbo);
6850 ttm_eu_backoff_reservation(&ticket, &list);
6852 afb->address = amdgpu_bo_gpu_offset(rbo);
6857 * We don't do surface updates on planes that have been newly created,
6858 * but we also don't have the afb->address during atomic check.
6860 * Fill in buffer attributes depending on the address here, but only on
6861 * newly created planes since they're not being used by DC yet and this
6862 * won't modify global state.
6864 dm_plane_state_old = to_dm_plane_state(plane->state);
6865 dm_plane_state_new = to_dm_plane_state(new_state);
6867 if (dm_plane_state_new->dc_state &&
6868 dm_plane_state_old->dc_state != dm_plane_state_new->dc_state) {
6869 struct dc_plane_state *plane_state =
6870 dm_plane_state_new->dc_state;
6871 bool force_disable_dcc = !plane_state->dcc.enable;
6873 fill_plane_buffer_attributes(
6874 adev, afb, plane_state->format, plane_state->rotation,
6876 &plane_state->tiling_info, &plane_state->plane_size,
6877 &plane_state->dcc, &plane_state->address,
6878 afb->tmz_surface, force_disable_dcc);
6884 static void dm_plane_helper_cleanup_fb(struct drm_plane *plane,
6885 struct drm_plane_state *old_state)
6887 struct amdgpu_bo *rbo;
6893 rbo = gem_to_amdgpu_bo(old_state->fb->obj[0]);
6894 r = amdgpu_bo_reserve(rbo, false);
6896 DRM_ERROR("failed to reserve rbo before unpin\n");
6900 amdgpu_bo_unpin(rbo);
6901 amdgpu_bo_unreserve(rbo);
6902 amdgpu_bo_unref(&rbo);
6905 static int dm_plane_helper_check_state(struct drm_plane_state *state,
6906 struct drm_crtc_state *new_crtc_state)
6908 struct drm_framebuffer *fb = state->fb;
6909 int min_downscale, max_upscale;
6911 int max_scale = INT_MAX;
6913 /* Plane enabled? Validate viewport and get scaling factors from plane caps. */
6914 if (fb && state->crtc) {
6915 /* Validate viewport to cover the case when only the position changes */
6916 if (state->plane->type != DRM_PLANE_TYPE_CURSOR) {
6917 int viewport_width = state->crtc_w;
6918 int viewport_height = state->crtc_h;
6920 if (state->crtc_x < 0)
6921 viewport_width += state->crtc_x;
6922 else if (state->crtc_x + state->crtc_w > new_crtc_state->mode.crtc_hdisplay)
6923 viewport_width = new_crtc_state->mode.crtc_hdisplay - state->crtc_x;
6925 if (state->crtc_y < 0)
6926 viewport_height += state->crtc_y;
6927 else if (state->crtc_y + state->crtc_h > new_crtc_state->mode.crtc_vdisplay)
6928 viewport_height = new_crtc_state->mode.crtc_vdisplay - state->crtc_y;
6930 if (viewport_width < 0 || viewport_height < 0) {
6931 DRM_DEBUG_ATOMIC("Plane completely outside of screen\n");
6933 } else if (viewport_width < MIN_VIEWPORT_SIZE*2) { /* x2 for width is because of pipe-split. */
6934 DRM_DEBUG_ATOMIC("Viewport width %d smaller than %d\n", viewport_width, MIN_VIEWPORT_SIZE*2);
6936 } else if (viewport_height < MIN_VIEWPORT_SIZE) {
6937 DRM_DEBUG_ATOMIC("Viewport height %d smaller than %d\n", viewport_height, MIN_VIEWPORT_SIZE);
6943 /* Get min/max allowed scaling factors from plane caps. */
6944 get_min_max_dc_plane_scaling(state->crtc->dev, fb,
6945 &min_downscale, &max_upscale);
6947 * Convert to drm convention: 16.16 fixed point, instead of dc's
6948 * 1.0 == 1000. Also drm scaling is src/dst instead of dc's
6949 * dst/src, so min_scale = 1.0 / max_upscale, etc.
6951 min_scale = (1000 << 16) / max_upscale;
6952 max_scale = (1000 << 16) / min_downscale;
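		/*
		 * Worked example (illustrative numbers): a 16x max upscale is
		 * 16000 in DC's 1.0 == 1000 convention, so
		 * min_scale = (1000 << 16) / 16000 = 4096, i.e. 1/16 in 16.16
		 * fixed point.
		 */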
6955 return drm_atomic_helper_check_plane_state(
6956 state, new_crtc_state, min_scale, max_scale, true, true);
6959 static int dm_plane_atomic_check(struct drm_plane *plane,
6960 struct drm_atomic_state *state)
6962 struct drm_plane_state *new_plane_state = drm_atomic_get_new_plane_state(state,
6964 struct amdgpu_device *adev = drm_to_adev(plane->dev);
6965 struct dc *dc = adev->dm.dc;
6966 struct dm_plane_state *dm_plane_state;
6967 struct dc_scaling_info scaling_info;
6968 struct drm_crtc_state *new_crtc_state;
6971 trace_amdgpu_dm_plane_atomic_check(new_plane_state);
6973 dm_plane_state = to_dm_plane_state(new_plane_state);
6975 if (!dm_plane_state->dc_state)
6979 drm_atomic_get_new_crtc_state(state,
6980 new_plane_state->crtc);
6981 if (!new_crtc_state)
6984 ret = dm_plane_helper_check_state(new_plane_state, new_crtc_state);
6988 ret = fill_dc_scaling_info(new_plane_state, &scaling_info);
6992 if (dc_validate_plane(dc, dm_plane_state->dc_state) == DC_OK)
6998 static int dm_plane_atomic_async_check(struct drm_plane *plane,
6999 struct drm_atomic_state *state)
7001 /* Only support async updates on cursor planes. */
7002 if (plane->type != DRM_PLANE_TYPE_CURSOR)
7008 static void dm_plane_atomic_async_update(struct drm_plane *plane,
7009 struct drm_atomic_state *state)
7011 struct drm_plane_state *new_state = drm_atomic_get_new_plane_state(state,
7013 struct drm_plane_state *old_state =
7014 drm_atomic_get_old_plane_state(state, plane);
7016 trace_amdgpu_dm_atomic_update_cursor(new_state);
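	/*
	 * An async (cursor-only) update bypasses the full atomic commit:
	 * fold the new framebuffer and coordinates into the current plane
	 * state and reprogram the cursor immediately.
	 */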
7018 swap(plane->state->fb, new_state->fb);
7020 plane->state->src_x = new_state->src_x;
7021 plane->state->src_y = new_state->src_y;
7022 plane->state->src_w = new_state->src_w;
7023 plane->state->src_h = new_state->src_h;
7024 plane->state->crtc_x = new_state->crtc_x;
7025 plane->state->crtc_y = new_state->crtc_y;
7026 plane->state->crtc_w = new_state->crtc_w;
7027 plane->state->crtc_h = new_state->crtc_h;
7029 handle_cursor_update(plane, old_state);
7032 static const struct drm_plane_helper_funcs dm_plane_helper_funcs = {
7033 .prepare_fb = dm_plane_helper_prepare_fb,
7034 .cleanup_fb = dm_plane_helper_cleanup_fb,
7035 .atomic_check = dm_plane_atomic_check,
7036 .atomic_async_check = dm_plane_atomic_async_check,
7037 .atomic_async_update = dm_plane_atomic_async_update
7041 * TODO: these are currently initialized to rgb formats only.
7042 * For future use cases we should either initialize them dynamically based on
7043 * plane capabilities, or initialize this array to all formats, so internal drm
7044 * check will succeed, and let DC implement proper check
7046 static const uint32_t rgb_formats[] = {
7047 DRM_FORMAT_XRGB8888,
7048 DRM_FORMAT_ARGB8888,
7049 DRM_FORMAT_RGBA8888,
7050 DRM_FORMAT_XRGB2101010,
7051 DRM_FORMAT_XBGR2101010,
7052 DRM_FORMAT_ARGB2101010,
7053 DRM_FORMAT_ABGR2101010,
7054 DRM_FORMAT_XBGR8888,
7055 DRM_FORMAT_ABGR8888,
7059 static const uint32_t overlay_formats[] = {
7060 DRM_FORMAT_XRGB8888,
7061 DRM_FORMAT_ARGB8888,
7062 DRM_FORMAT_RGBA8888,
7063 DRM_FORMAT_XBGR8888,
7064 DRM_FORMAT_ABGR8888,
7068 static const u32 cursor_formats[] = {
7072 static int get_plane_formats(const struct drm_plane *plane,
7073 const struct dc_plane_cap *plane_cap,
7074 uint32_t *formats, int max_formats)
7076 int i, num_formats = 0;
7079 * TODO: Query support for each group of formats directly from
7080 * DC plane caps. This will require adding more formats to the
7084 switch (plane->type) {
7085 case DRM_PLANE_TYPE_PRIMARY:
7086 for (i = 0; i < ARRAY_SIZE(rgb_formats); ++i) {
7087 if (num_formats >= max_formats)
7090 formats[num_formats++] = rgb_formats[i];
7093 if (plane_cap && plane_cap->pixel_format_support.nv12)
7094 formats[num_formats++] = DRM_FORMAT_NV12;
7095 if (plane_cap && plane_cap->pixel_format_support.p010)
7096 formats[num_formats++] = DRM_FORMAT_P010;
7097 if (plane_cap && plane_cap->pixel_format_support.fp16) {
7098 formats[num_formats++] = DRM_FORMAT_XRGB16161616F;
7099 formats[num_formats++] = DRM_FORMAT_ARGB16161616F;
7100 formats[num_formats++] = DRM_FORMAT_XBGR16161616F;
7101 formats[num_formats++] = DRM_FORMAT_ABGR16161616F;
7105 case DRM_PLANE_TYPE_OVERLAY:
7106 for (i = 0; i < ARRAY_SIZE(overlay_formats); ++i) {
7107 if (num_formats >= max_formats)
7110 formats[num_formats++] = overlay_formats[i];
7114 case DRM_PLANE_TYPE_CURSOR:
7115 for (i = 0; i < ARRAY_SIZE(cursor_formats); ++i) {
7116 if (num_formats >= max_formats)
7119 formats[num_formats++] = cursor_formats[i];
7127 static int amdgpu_dm_plane_init(struct amdgpu_display_manager *dm,
7128 struct drm_plane *plane,
7129 unsigned long possible_crtcs,
7130 const struct dc_plane_cap *plane_cap)
7132 uint32_t formats[32];
7135 unsigned int supported_rotations;
7136 uint64_t *modifiers = NULL;
7138 num_formats = get_plane_formats(plane, plane_cap, formats,
7139 ARRAY_SIZE(formats));
7141 res = get_plane_modifiers(dm->adev, plane->type, &modifiers);
7145 res = drm_universal_plane_init(adev_to_drm(dm->adev), plane, possible_crtcs,
7146 &dm_plane_funcs, formats, num_formats,
7147 modifiers, plane->type, NULL);
7152 if (plane->type == DRM_PLANE_TYPE_OVERLAY &&
7153 plane_cap && plane_cap->per_pixel_alpha) {
7154 unsigned int blend_caps = BIT(DRM_MODE_BLEND_PIXEL_NONE) |
7155 BIT(DRM_MODE_BLEND_PREMULTI);
7157 drm_plane_create_alpha_property(plane);
7158 drm_plane_create_blend_mode_property(plane, blend_caps);
7161 if (plane->type == DRM_PLANE_TYPE_PRIMARY &&
7163 (plane_cap->pixel_format_support.nv12 ||
7164 plane_cap->pixel_format_support.p010)) {
7165 /* This only affects YUV formats. */
7166 drm_plane_create_color_properties(
7168 BIT(DRM_COLOR_YCBCR_BT601) |
7169 BIT(DRM_COLOR_YCBCR_BT709) |
7170 BIT(DRM_COLOR_YCBCR_BT2020),
7171 BIT(DRM_COLOR_YCBCR_LIMITED_RANGE) |
7172 BIT(DRM_COLOR_YCBCR_FULL_RANGE),
7173 DRM_COLOR_YCBCR_BT709, DRM_COLOR_YCBCR_LIMITED_RANGE);
7176 supported_rotations =
7177 DRM_MODE_ROTATE_0 | DRM_MODE_ROTATE_90 |
7178 DRM_MODE_ROTATE_180 | DRM_MODE_ROTATE_270;
7180 if (dm->adev->asic_type >= CHIP_BONAIRE &&
7181 plane->type != DRM_PLANE_TYPE_CURSOR)
7182 drm_plane_create_rotation_property(plane, DRM_MODE_ROTATE_0,
7183 supported_rotations);
7185 drm_plane_helper_add(plane, &dm_plane_helper_funcs);
7187 /* Create (reset) the plane state */
7188 if (plane->funcs->reset)
7189 plane->funcs->reset(plane);
7194 static int amdgpu_dm_crtc_init(struct amdgpu_display_manager *dm,
7195 struct drm_plane *plane,
7196 uint32_t crtc_index)
7198 struct amdgpu_crtc *acrtc = NULL;
7199 struct drm_plane *cursor_plane;
7203 cursor_plane = kzalloc(sizeof(*cursor_plane), GFP_KERNEL);
7207 cursor_plane->type = DRM_PLANE_TYPE_CURSOR;
7208 res = amdgpu_dm_plane_init(dm, cursor_plane, 0, NULL);
7210 acrtc = kzalloc(sizeof(struct amdgpu_crtc), GFP_KERNEL);
7214 res = drm_crtc_init_with_planes(
7219 &amdgpu_dm_crtc_funcs, NULL);
7224 drm_crtc_helper_add(&acrtc->base, &amdgpu_dm_crtc_helper_funcs);
7226 /* Create (reset) the plane state */
7227 if (acrtc->base.funcs->reset)
7228 acrtc->base.funcs->reset(&acrtc->base);
7230 acrtc->max_cursor_width = dm->adev->dm.dc->caps.max_cursor_size;
7231 acrtc->max_cursor_height = dm->adev->dm.dc->caps.max_cursor_size;
7233 acrtc->crtc_id = crtc_index;
7234 acrtc->base.enabled = false;
7235 acrtc->otg_inst = -1;
7237 dm->adev->mode_info.crtcs[crtc_index] = acrtc;
7238 drm_crtc_enable_color_mgmt(&acrtc->base, MAX_COLOR_LUT_ENTRIES,
7239 true, MAX_COLOR_LUT_ENTRIES);
7240 drm_mode_crtc_set_gamma_size(&acrtc->base, MAX_COLOR_LEGACY_LUT_ENTRIES);
7246 kfree(cursor_plane);
7251 static int to_drm_connector_type(enum signal_type st)
7254 case SIGNAL_TYPE_HDMI_TYPE_A:
7255 return DRM_MODE_CONNECTOR_HDMIA;
7256 case SIGNAL_TYPE_EDP:
7257 return DRM_MODE_CONNECTOR_eDP;
7258 case SIGNAL_TYPE_LVDS:
7259 return DRM_MODE_CONNECTOR_LVDS;
7260 case SIGNAL_TYPE_RGB:
7261 return DRM_MODE_CONNECTOR_VGA;
7262 case SIGNAL_TYPE_DISPLAY_PORT:
7263 case SIGNAL_TYPE_DISPLAY_PORT_MST:
7264 return DRM_MODE_CONNECTOR_DisplayPort;
7265 case SIGNAL_TYPE_DVI_DUAL_LINK:
7266 case SIGNAL_TYPE_DVI_SINGLE_LINK:
7267 return DRM_MODE_CONNECTOR_DVID;
7268 case SIGNAL_TYPE_VIRTUAL:
7269 return DRM_MODE_CONNECTOR_VIRTUAL;
7272 return DRM_MODE_CONNECTOR_Unknown;
7276 static struct drm_encoder *amdgpu_dm_connector_to_encoder(struct drm_connector *connector)
7278 struct drm_encoder *encoder;
7280 /* There is only one encoder per connector */
7281 drm_connector_for_each_possible_encoder(connector, encoder)
7287 static void amdgpu_dm_get_native_mode(struct drm_connector *connector)
7289 struct drm_encoder *encoder;
7290 struct amdgpu_encoder *amdgpu_encoder;
7292 encoder = amdgpu_dm_connector_to_encoder(connector);
7294 if (encoder == NULL)
7297 amdgpu_encoder = to_amdgpu_encoder(encoder);
7299 amdgpu_encoder->native_mode.clock = 0;
7301 if (!list_empty(&connector->probed_modes)) {
7302 struct drm_display_mode *preferred_mode = NULL;
7304 list_for_each_entry(preferred_mode,
7305 &connector->probed_modes,
7307 if (preferred_mode->type & DRM_MODE_TYPE_PREFERRED)
7308 amdgpu_encoder->native_mode = *preferred_mode;
7316 static struct drm_display_mode *
7317 amdgpu_dm_create_common_mode(struct drm_encoder *encoder,
7319 int hdisplay, int vdisplay)
7321 struct drm_device *dev = encoder->dev;
7322 struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
7323 struct drm_display_mode *mode = NULL;
7324 struct drm_display_mode *native_mode = &amdgpu_encoder->native_mode;
7326 mode = drm_mode_duplicate(dev, native_mode);
7331 mode->hdisplay = hdisplay;
7332 mode->vdisplay = vdisplay;
7333 mode->type &= ~DRM_MODE_TYPE_PREFERRED;
7334 strscpy(mode->name, name, DRM_DISPLAY_MODE_LEN);
7340 static void amdgpu_dm_connector_add_common_modes(struct drm_encoder *encoder,
7341 struct drm_connector *connector)
7343 struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
7344 struct drm_display_mode *mode = NULL;
7345 struct drm_display_mode *native_mode = &amdgpu_encoder->native_mode;
7346 struct amdgpu_dm_connector *amdgpu_dm_connector =
7347 to_amdgpu_dm_connector(connector);
7351 char name[DRM_DISPLAY_MODE_LEN];
7354 } common_modes[] = {
7355 { "640x480", 640, 480},
7356 { "800x600", 800, 600},
7357 { "1024x768", 1024, 768},
7358 { "1280x720", 1280, 720},
7359 { "1280x800", 1280, 800},
7360 {"1280x1024", 1280, 1024},
7361 { "1440x900", 1440, 900},
7362 {"1680x1050", 1680, 1050},
7363 {"1600x1200", 1600, 1200},
7364 {"1920x1080", 1920, 1080},
7365 {"1920x1200", 1920, 1200}
7368 n = ARRAY_SIZE(common_modes);
7370 for (i = 0; i < n; i++) {
7371 struct drm_display_mode *curmode = NULL;
7372 bool mode_existed = false;
7374 if (common_modes[i].w > native_mode->hdisplay ||
7375 common_modes[i].h > native_mode->vdisplay ||
7376 (common_modes[i].w == native_mode->hdisplay &&
7377 common_modes[i].h == native_mode->vdisplay))
7380 list_for_each_entry(curmode, &connector->probed_modes, head) {
7381 if (common_modes[i].w == curmode->hdisplay &&
7382 common_modes[i].h == curmode->vdisplay) {
7383 mode_existed = true;
7391 mode = amdgpu_dm_create_common_mode(encoder,
7392 common_modes[i].name, common_modes[i].w,
7394 drm_mode_probed_add(connector, mode);
7395 amdgpu_dm_connector->num_modes++;
7399 static void amdgpu_dm_connector_ddc_get_modes(struct drm_connector *connector,
7402 struct amdgpu_dm_connector *amdgpu_dm_connector =
7403 to_amdgpu_dm_connector(connector);
7406 /* empty probed_modes */
7407 INIT_LIST_HEAD(&connector->probed_modes);
7408 amdgpu_dm_connector->num_modes =
7409 drm_add_edid_modes(connector, edid);
7411 	/* Sort the probed modes before calling
7412 	 * amdgpu_dm_get_native_mode(), since the EDID can have
7413 * more than one preferred mode. The modes that are
7414 * later in the probed mode list could be of higher
7415 * and preferred resolution. For example, 3840x2160
7416 * resolution in base EDID preferred timing and 4096x2160
7417 * preferred resolution in DID extension block later.
7419 drm_mode_sort(&connector->probed_modes);
7420 amdgpu_dm_get_native_mode(connector);
7422 /* Freesync capabilities are reset by calling
7423 * drm_add_edid_modes() and need to be
7426 amdgpu_dm_update_freesync_caps(connector, edid);
7428 amdgpu_dm_connector->num_modes = 0;
7432 static bool is_duplicate_mode(struct amdgpu_dm_connector *aconnector,
7433 struct drm_display_mode *mode)
7435 struct drm_display_mode *m;
7437 list_for_each_entry (m, &aconnector->base.probed_modes, head) {
7438 if (drm_mode_equal(m, mode))
7445 static uint add_fs_modes(struct amdgpu_dm_connector *aconnector)
7447 const struct drm_display_mode *m;
7448 struct drm_display_mode *new_mode;
7450 uint32_t new_modes_count = 0;
7452 /* Standard FPS values
7461 * 60 - Commonly used
7462 * 48,72,96 - Multiples of 24
7464 const uint32_t common_rates[] = { 23976, 24000, 25000, 29970, 30000,
7465 48000, 50000, 60000, 72000, 96000 };
7468 * Find mode with highest refresh rate with the same resolution
7469 * as the preferred mode. Some monitors report a preferred mode
7470 * with lower resolution than the highest refresh rate supported.
7473 m = get_highest_refresh_rate_mode(aconnector, true);
7477 for (i = 0; i < ARRAY_SIZE(common_rates); i++) {
7478 uint64_t target_vtotal, target_vtotal_diff;
7481 if (drm_mode_vrefresh(m) * 1000 < common_rates[i])
7484 if (common_rates[i] < aconnector->min_vfreq * 1000 ||
7485 common_rates[i] > aconnector->max_vfreq * 1000)
7488 num = (unsigned long long)m->clock * 1000 * 1000;
7489 den = common_rates[i] * (unsigned long long)m->htotal;
7490 target_vtotal = div_u64(num, den);
7491 target_vtotal_diff = target_vtotal - m->vtotal;
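		/*
		 * With pixel clock and htotal unchanged,
		 * refresh [mHz] = clock [kHz] * 1000 * 1000 / (htotal * vtotal),
		 * so target_vtotal is the vtotal that hits common_rates[i];
		 * the difference is applied to the vertical timings below.
		 */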
7493 /* Check for illegal modes */
7494 if (m->vsync_start + target_vtotal_diff < m->vdisplay ||
7495 m->vsync_end + target_vtotal_diff < m->vsync_start ||
7496 m->vtotal + target_vtotal_diff < m->vsync_end)
7499 new_mode = drm_mode_duplicate(aconnector->base.dev, m);
7503 new_mode->vtotal += (u16)target_vtotal_diff;
7504 new_mode->vsync_start += (u16)target_vtotal_diff;
7505 new_mode->vsync_end += (u16)target_vtotal_diff;
7506 new_mode->type &= ~DRM_MODE_TYPE_PREFERRED;
7507 new_mode->type |= DRM_MODE_TYPE_DRIVER;
7509 if (!is_duplicate_mode(aconnector, new_mode)) {
7510 drm_mode_probed_add(&aconnector->base, new_mode);
7511 new_modes_count += 1;
7513 drm_mode_destroy(aconnector->base.dev, new_mode);
7516 return new_modes_count;
7519 static void amdgpu_dm_connector_add_freesync_modes(struct drm_connector *connector,
7522 struct amdgpu_dm_connector *amdgpu_dm_connector =
7523 to_amdgpu_dm_connector(connector);
7525 if (!(amdgpu_freesync_vid_mode && edid))
7528 if (amdgpu_dm_connector->max_vfreq - amdgpu_dm_connector->min_vfreq > 10)
7529 amdgpu_dm_connector->num_modes +=
7530 add_fs_modes(amdgpu_dm_connector);
7533 static int amdgpu_dm_connector_get_modes(struct drm_connector *connector)
7535 struct amdgpu_dm_connector *amdgpu_dm_connector =
7536 to_amdgpu_dm_connector(connector);
7537 struct drm_encoder *encoder;
7538 struct edid *edid = amdgpu_dm_connector->edid;
7540 encoder = amdgpu_dm_connector_to_encoder(connector);
7542 if (!drm_edid_is_valid(edid)) {
7543 amdgpu_dm_connector->num_modes =
7544 drm_add_modes_noedid(connector, 640, 480);
7546 amdgpu_dm_connector_ddc_get_modes(connector, edid);
7547 amdgpu_dm_connector_add_common_modes(encoder, connector);
7548 amdgpu_dm_connector_add_freesync_modes(connector, edid);
7550 amdgpu_dm_fbc_init(connector);
7552 return amdgpu_dm_connector->num_modes;
7555 void amdgpu_dm_connector_init_helper(struct amdgpu_display_manager *dm,
7556 struct amdgpu_dm_connector *aconnector,
7558 struct dc_link *link,
7561 struct amdgpu_device *adev = drm_to_adev(dm->ddev);
7564 * Some of the properties below require access to state, like bpc.
7565 * Allocate some default initial connector state with our reset helper.
7567 if (aconnector->base.funcs->reset)
7568 aconnector->base.funcs->reset(&aconnector->base);
7570 aconnector->connector_id = link_index;
7571 aconnector->dc_link = link;
7572 aconnector->base.interlace_allowed = false;
7573 aconnector->base.doublescan_allowed = false;
7574 aconnector->base.stereo_allowed = false;
7575 aconnector->base.dpms = DRM_MODE_DPMS_OFF;
7576 aconnector->hpd.hpd = AMDGPU_HPD_NONE; /* not used */
7577 aconnector->audio_inst = -1;
7578 mutex_init(&aconnector->hpd_lock);
7581	 * Configure HPD hot plug support: connector->polled defaults to 0,
7582	 * which means HPD hot plug is not supported
7584 switch (connector_type) {
7585 case DRM_MODE_CONNECTOR_HDMIA:
7586 aconnector->base.polled = DRM_CONNECTOR_POLL_HPD;
7587 aconnector->base.ycbcr_420_allowed =
7588 link->link_enc->features.hdmi_ycbcr420_supported ? true : false;
7590 case DRM_MODE_CONNECTOR_DisplayPort:
7591 aconnector->base.polled = DRM_CONNECTOR_POLL_HPD;
7592 aconnector->base.ycbcr_420_allowed =
7593 link->link_enc->features.dp_ycbcr420_supported ? true : false;
7595 case DRM_MODE_CONNECTOR_DVID:
7596 aconnector->base.polled = DRM_CONNECTOR_POLL_HPD;
7602 drm_object_attach_property(&aconnector->base.base,
7603 dm->ddev->mode_config.scaling_mode_property,
7604 DRM_MODE_SCALE_NONE);
7606 drm_object_attach_property(&aconnector->base.base,
7607 adev->mode_info.underscan_property,
7609 drm_object_attach_property(&aconnector->base.base,
7610 adev->mode_info.underscan_hborder_property,
7612 drm_object_attach_property(&aconnector->base.base,
7613 adev->mode_info.underscan_vborder_property,
7616 if (!aconnector->mst_port)
7617 drm_connector_attach_max_bpc_property(&aconnector->base, 8, 16);
7619 /* This defaults to the max in the range, but we want 8bpc for non-edp. */
7620 aconnector->base.state->max_bpc = (connector_type == DRM_MODE_CONNECTOR_eDP) ? 16 : 8;
7621 aconnector->base.state->max_requested_bpc = aconnector->base.state->max_bpc;
7623 if (connector_type == DRM_MODE_CONNECTOR_eDP &&
7624 (dc_is_dmcu_initialized(adev->dm.dc) || adev->dm.dc->ctx->dmub_srv)) {
7625 drm_object_attach_property(&aconnector->base.base,
7626 adev->mode_info.abm_level_property, 0);
7629 if (connector_type == DRM_MODE_CONNECTOR_HDMIA ||
7630 connector_type == DRM_MODE_CONNECTOR_DisplayPort ||
7631 connector_type == DRM_MODE_CONNECTOR_eDP) {
7632 drm_object_attach_property(
7633 &aconnector->base.base,
7634 dm->ddev->mode_config.hdr_output_metadata_property, 0);
7636 if (!aconnector->mst_port)
7637 drm_connector_attach_vrr_capable_property(&aconnector->base);
7639 #ifdef CONFIG_DRM_AMD_DC_HDCP
7640 if (adev->dm.hdcp_workqueue)
7641 drm_connector_attach_content_protection_property(&aconnector->base, true);
7646 static int amdgpu_dm_i2c_xfer(struct i2c_adapter *i2c_adap,
7647 struct i2c_msg *msgs, int num)
7649 struct amdgpu_i2c_adapter *i2c = i2c_get_adapdata(i2c_adap);
7650 struct ddc_service *ddc_service = i2c->ddc_service;
7651 struct i2c_command cmd;
7655 cmd.payloads = kcalloc(num, sizeof(struct i2c_payload), GFP_KERNEL);
7660 cmd.number_of_payloads = num;
7661 cmd.engine = I2C_COMMAND_ENGINE_DEFAULT;
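	/*
	 * Translate each Linux i2c_msg into a DC i2c_payload: the direction
	 * comes from the I2C_M_RD flag; address, length and buffer map one
	 * to one.
	 */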
7664 for (i = 0; i < num; i++) {
7665 cmd.payloads[i].write = !(msgs[i].flags & I2C_M_RD);
7666 cmd.payloads[i].address = msgs[i].addr;
7667 cmd.payloads[i].length = msgs[i].len;
7668 cmd.payloads[i].data = msgs[i].buf;
7672 ddc_service->ctx->dc,
7673 ddc_service->ddc_pin->hw_info.ddc_channel,
7677 kfree(cmd.payloads);
7681 static u32 amdgpu_dm_i2c_func(struct i2c_adapter *adap)
7683 return I2C_FUNC_I2C | I2C_FUNC_SMBUS_EMUL;
7686 static const struct i2c_algorithm amdgpu_dm_i2c_algo = {
7687 .master_xfer = amdgpu_dm_i2c_xfer,
7688 .functionality = amdgpu_dm_i2c_func,
7691 static struct amdgpu_i2c_adapter *
7692 create_i2c(struct ddc_service *ddc_service,
7696 struct amdgpu_device *adev = ddc_service->ctx->driver_context;
7697 struct amdgpu_i2c_adapter *i2c;
7699 i2c = kzalloc(sizeof(struct amdgpu_i2c_adapter), GFP_KERNEL);
7702 i2c->base.owner = THIS_MODULE;
7703 i2c->base.class = I2C_CLASS_DDC;
7704 i2c->base.dev.parent = &adev->pdev->dev;
7705 i2c->base.algo = &amdgpu_dm_i2c_algo;
7706 snprintf(i2c->base.name, sizeof(i2c->base.name), "AMDGPU DM i2c hw bus %d", link_index);
7707 i2c_set_adapdata(&i2c->base, i2c);
7708 i2c->ddc_service = ddc_service;
7709 i2c->ddc_service->ddc_pin->hw_info.ddc_channel = link_index;
7716 * Note: this function assumes that dc_link_detect() was called for the
7717 * dc_link which will be represented by this aconnector.
7719 static int amdgpu_dm_connector_init(struct amdgpu_display_manager *dm,
7720 struct amdgpu_dm_connector *aconnector,
7721 uint32_t link_index,
7722 struct amdgpu_encoder *aencoder)
7726 struct dc *dc = dm->dc;
7727 struct dc_link *link = dc_get_link_at_index(dc, link_index);
7728 struct amdgpu_i2c_adapter *i2c;
7730 link->priv = aconnector;
7732 DRM_DEBUG_DRIVER("%s()\n", __func__);
7734 i2c = create_i2c(link->ddc, link->link_index, &res);
7736 DRM_ERROR("Failed to create i2c adapter data\n");
7740 aconnector->i2c = i2c;
7741 res = i2c_add_adapter(&i2c->base);
7744 DRM_ERROR("Failed to register hw i2c %d\n", link->link_index);
7748 connector_type = to_drm_connector_type(link->connector_signal);
7750 res = drm_connector_init_with_ddc(
7753 &amdgpu_dm_connector_funcs,
7758 DRM_ERROR("connector_init failed\n");
7759 aconnector->connector_id = -1;
7763 drm_connector_helper_add(
7765 &amdgpu_dm_connector_helper_funcs);
7767 amdgpu_dm_connector_init_helper(
7774 drm_connector_attach_encoder(
7775 &aconnector->base, &aencoder->base);
7777 if (connector_type == DRM_MODE_CONNECTOR_DisplayPort
7778 || connector_type == DRM_MODE_CONNECTOR_eDP)
7779 amdgpu_dm_initialize_dp_connector(dm, aconnector, link->link_index);
7784 aconnector->i2c = NULL;
7789 int amdgpu_dm_get_encoder_crtc_mask(struct amdgpu_device *adev)
7791 switch (adev->mode_info.num_crtc) {
7808 static int amdgpu_dm_encoder_init(struct drm_device *dev,
7809 struct amdgpu_encoder *aencoder,
7810 uint32_t link_index)
7812 struct amdgpu_device *adev = drm_to_adev(dev);
7814 int res = drm_encoder_init(dev,
7816 &amdgpu_dm_encoder_funcs,
7817 DRM_MODE_ENCODER_TMDS,
7820 aencoder->base.possible_crtcs = amdgpu_dm_get_encoder_crtc_mask(adev);
7823 aencoder->encoder_id = link_index;
7825 aencoder->encoder_id = -1;
7827 drm_encoder_helper_add(&aencoder->base, &amdgpu_dm_encoder_helper_funcs);
7832 static void manage_dm_interrupts(struct amdgpu_device *adev,
7833 struct amdgpu_crtc *acrtc,
7837 * We have no guarantee that the frontend index maps to the same
7838 * backend index - some even map to more than one.
7840 * TODO: Use a different interrupt or check DC itself for the mapping.
7843 amdgpu_display_crtc_idx_to_irq_type(
7848 drm_crtc_vblank_on(&acrtc->base);
7851 &adev->pageflip_irq,
7853 #if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
7860 #if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
7868 &adev->pageflip_irq,
7870 drm_crtc_vblank_off(&acrtc->base);
7874 static void dm_update_pflip_irq_state(struct amdgpu_device *adev,
7875 struct amdgpu_crtc *acrtc)
7878 amdgpu_display_crtc_idx_to_irq_type(adev, acrtc->crtc_id);
7881	 * This reads the current state for the IRQ and forcibly reapplies
7882 * the setting to hardware.
7884 amdgpu_irq_update(adev, &adev->pageflip_irq, irq_type);
7888 is_scaling_state_different(const struct dm_connector_state *dm_state,
7889 const struct dm_connector_state *old_dm_state)
7891 if (dm_state->scaling != old_dm_state->scaling)
7893 if (!dm_state->underscan_enable && old_dm_state->underscan_enable) {
7894 if (old_dm_state->underscan_hborder != 0 && old_dm_state->underscan_vborder != 0)
7896 } else if (dm_state->underscan_enable && !old_dm_state->underscan_enable) {
7897 if (dm_state->underscan_hborder != 0 && dm_state->underscan_vborder != 0)
7899 } else if (dm_state->underscan_hborder != old_dm_state->underscan_hborder ||
7900 dm_state->underscan_vborder != old_dm_state->underscan_vborder)
7905 #ifdef CONFIG_DRM_AMD_DC_HDCP
7906 static bool is_content_protection_different(struct drm_connector_state *state,
7907 const struct drm_connector_state *old_state,
7908 const struct drm_connector *connector, struct hdcp_workqueue *hdcp_w)
7910 struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
7911 struct dm_connector_state *dm_con_state = to_dm_connector_state(connector->state);
7913 /* Handle: Type0/1 change */
7914 if (old_state->hdcp_content_type != state->hdcp_content_type &&
7915 state->content_protection != DRM_MODE_CONTENT_PROTECTION_UNDESIRED) {
7916 state->content_protection = DRM_MODE_CONTENT_PROTECTION_DESIRED;
7920	/* CP is being re-enabled, ignore this
7922 * Handles: ENABLED -> DESIRED
7924 if (old_state->content_protection == DRM_MODE_CONTENT_PROTECTION_ENABLED &&
7925 state->content_protection == DRM_MODE_CONTENT_PROTECTION_DESIRED) {
7926 state->content_protection = DRM_MODE_CONTENT_PROTECTION_ENABLED;
7930 /* S3 resume case, since old state will always be 0 (UNDESIRED) and the restored state will be ENABLED
7932 * Handles: UNDESIRED -> ENABLED
7934 if (old_state->content_protection == DRM_MODE_CONTENT_PROTECTION_UNDESIRED &&
7935 state->content_protection == DRM_MODE_CONTENT_PROTECTION_ENABLED)
7936 state->content_protection = DRM_MODE_CONTENT_PROTECTION_DESIRED;
7938	/* Check if something is connected/enabled; otherwise we would start HDCP but nothing is connected/enabled:
7939	 * hot-plug, headless S3, DPMS
7941 * Handles: DESIRED -> DESIRED (Special case)
7943 if (dm_con_state->update_hdcp && state->content_protection == DRM_MODE_CONTENT_PROTECTION_DESIRED &&
7944 connector->dpms == DRM_MODE_DPMS_ON && aconnector->dc_sink != NULL) {
7945 dm_con_state->update_hdcp = false;
7950 * Handles: UNDESIRED -> UNDESIRED
7951 * DESIRED -> DESIRED
7952 * ENABLED -> ENABLED
7954 if (old_state->content_protection == state->content_protection)
7958 * Handles: UNDESIRED -> DESIRED
7959 * DESIRED -> UNDESIRED
7960 * ENABLED -> UNDESIRED
7962 if (state->content_protection != DRM_MODE_CONTENT_PROTECTION_ENABLED)
7966 * Handles: DESIRED -> ENABLED
7972 static void remove_stream(struct amdgpu_device *adev,
7973 struct amdgpu_crtc *acrtc,
7974 struct dc_stream_state *stream)
7976 /* this is the update mode case */
7978 acrtc->otg_inst = -1;
7979 acrtc->enabled = false;
7982 static int get_cursor_position(struct drm_plane *plane, struct drm_crtc *crtc,
7983 struct dc_cursor_position *position)
7985 struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
7987 int xorigin = 0, yorigin = 0;
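	/*
	 * Translate the DRM cursor plane position into a DC cursor
	 * position. If the cursor hangs off the top/left edge, the
	 * on-screen position is clamped and the hotspot shifted instead
	 * (translate_by_source), so the visible part stays aligned.
	 */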
7989 if (!crtc || !plane->state->fb)
7992 if ((plane->state->crtc_w > amdgpu_crtc->max_cursor_width) ||
7993 (plane->state->crtc_h > amdgpu_crtc->max_cursor_height)) {
7994 DRM_ERROR("%s: bad cursor width or height %d x %d\n",
7996 plane->state->crtc_w,
7997 plane->state->crtc_h);
8001 x = plane->state->crtc_x;
8002 y = plane->state->crtc_y;
8004 if (x <= -amdgpu_crtc->max_cursor_width ||
8005 y <= -amdgpu_crtc->max_cursor_height)
8009 xorigin = min(-x, amdgpu_crtc->max_cursor_width - 1);
8013 yorigin = min(-y, amdgpu_crtc->max_cursor_height - 1);
8016 position->enable = true;
8017 position->translate_by_source = true;
8020 position->x_hotspot = xorigin;
8021 position->y_hotspot = yorigin;
8026 static void handle_cursor_update(struct drm_plane *plane,
8027 struct drm_plane_state *old_plane_state)
8029 struct amdgpu_device *adev = drm_to_adev(plane->dev);
8030 struct amdgpu_framebuffer *afb = to_amdgpu_framebuffer(plane->state->fb);
8031 struct drm_crtc *crtc = afb ? plane->state->crtc : old_plane_state->crtc;
8032 struct dm_crtc_state *crtc_state = crtc ? to_dm_crtc_state(crtc->state) : NULL;
8033 struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
8034 uint64_t address = afb ? afb->address : 0;
8035 struct dc_cursor_position position = {0};
8036 struct dc_cursor_attributes attributes;
8039 if (!plane->state->fb && !old_plane_state->fb)
8042 DC_LOG_CURSOR("%s: crtc_id=%d with size %d to %d\n",
8044 amdgpu_crtc->crtc_id,
8045 plane->state->crtc_w,
8046 plane->state->crtc_h);
8048 ret = get_cursor_position(plane, crtc, &position);
8052 if (!position.enable) {
8053 /* turn off cursor */
8054 if (crtc_state && crtc_state->stream) {
8055 mutex_lock(&adev->dm.dc_lock);
8056 dc_stream_set_cursor_position(crtc_state->stream,
8058 mutex_unlock(&adev->dm.dc_lock);
8063 amdgpu_crtc->cursor_width = plane->state->crtc_w;
8064 amdgpu_crtc->cursor_height = plane->state->crtc_h;
8066 memset(&attributes, 0, sizeof(attributes));
8067 attributes.address.high_part = upper_32_bits(address);
8068 attributes.address.low_part = lower_32_bits(address);
8069 attributes.width = plane->state->crtc_w;
8070 attributes.height = plane->state->crtc_h;
8071 attributes.color_format = CURSOR_MODE_COLOR_PRE_MULTIPLIED_ALPHA;
8072 attributes.rotation_angle = 0;
8073 attributes.attribute_flags.value = 0;
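	/*
	 * pitches[0] is in bytes; DC expects the cursor pitch in pixels,
	 * hence the divide by bytes-per-pixel (cpp) below.
	 */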
8075 attributes.pitch = afb->base.pitches[0] / afb->base.format->cpp[0];
8077 if (crtc_state->stream) {
8078 mutex_lock(&adev->dm.dc_lock);
8079 if (!dc_stream_set_cursor_attributes(crtc_state->stream,
8081 DRM_ERROR("DC failed to set cursor attributes\n");
8083 if (!dc_stream_set_cursor_position(crtc_state->stream,
8085 DRM_ERROR("DC failed to set cursor position\n");
8086 mutex_unlock(&adev->dm.dc_lock);
8090 static void prepare_flip_isr(struct amdgpu_crtc *acrtc)
8093 assert_spin_locked(&acrtc->base.dev->event_lock);
8094 WARN_ON(acrtc->event);
8096 acrtc->event = acrtc->base.state->event;
8098 /* Set the flip status */
8099 acrtc->pflip_status = AMDGPU_FLIP_SUBMITTED;
8101 /* Mark this event as consumed */
8102 acrtc->base.state->event = NULL;
8104 DC_LOG_PFLIP("crtc:%d, pflip_stat:AMDGPU_FLIP_SUBMITTED\n",
8108 static void update_freesync_state_on_stream(
8109 struct amdgpu_display_manager *dm,
8110 struct dm_crtc_state *new_crtc_state,
8111 struct dc_stream_state *new_stream,
8112 struct dc_plane_state *surface,
8113 u32 flip_timestamp_in_us)
8115 struct mod_vrr_params vrr_params;
8116 struct dc_info_packet vrr_infopacket = {0};
8117 struct amdgpu_device *adev = dm->adev;
8118 struct amdgpu_crtc *acrtc = to_amdgpu_crtc(new_crtc_state->base.crtc);
8119 unsigned long flags;
8120 bool pack_sdp_v1_3 = false;
8126 * TODO: Determine why min/max totals and vrefresh can be 0 here.
8127 * For now it's sufficient to just guard against these conditions.
8130 if (!new_stream->timing.h_total || !new_stream->timing.v_total)
8133 spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);
8134 vrr_params = acrtc->dm_irq_params.vrr_params;
8137 mod_freesync_handle_preflip(
8138 dm->freesync_module,
8141 flip_timestamp_in_us,
8144 if (adev->family < AMDGPU_FAMILY_AI &&
8145 amdgpu_dm_vrr_active(new_crtc_state)) {
8146 mod_freesync_handle_v_update(dm->freesync_module,
8147 new_stream, &vrr_params);
8149 /* Need to call this before the frame ends. */
8150 dc_stream_adjust_vmin_vmax(dm->dc,
8151 new_crtc_state->stream,
8152 &vrr_params.adjust);
8156 mod_freesync_build_vrr_infopacket(
8157 dm->freesync_module,
8161 TRANSFER_FUNC_UNKNOWN,
8165 new_crtc_state->freesync_timing_changed |=
8166 (memcmp(&acrtc->dm_irq_params.vrr_params.adjust,
8168 sizeof(vrr_params.adjust)) != 0);
8170 new_crtc_state->freesync_vrr_info_changed |=
8171 (memcmp(&new_crtc_state->vrr_infopacket,
8173 sizeof(vrr_infopacket)) != 0);
8175 acrtc->dm_irq_params.vrr_params = vrr_params;
8176 new_crtc_state->vrr_infopacket = vrr_infopacket;
8178 new_stream->adjust = acrtc->dm_irq_params.vrr_params.adjust;
8179 new_stream->vrr_infopacket = vrr_infopacket;
8181 if (new_crtc_state->freesync_vrr_info_changed)
8182 DRM_DEBUG_KMS("VRR packet update: crtc=%u enabled=%d state=%d",
8183 new_crtc_state->base.crtc->base.id,
8184 (int)new_crtc_state->base.vrr_enabled,
8185 (int)vrr_params.state);
8187 spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
8190 static void update_stream_irq_parameters(
8191 struct amdgpu_display_manager *dm,
8192 struct dm_crtc_state *new_crtc_state)
8194 struct dc_stream_state *new_stream = new_crtc_state->stream;
8195 struct mod_vrr_params vrr_params;
8196 struct mod_freesync_config config = new_crtc_state->freesync_config;
8197 struct amdgpu_device *adev = dm->adev;
8198 struct amdgpu_crtc *acrtc = to_amdgpu_crtc(new_crtc_state->base.crtc);
8199 unsigned long flags;
8205 * TODO: Determine why min/max totals and vrefresh can be 0 here.
8206 * For now it's sufficient to just guard against these conditions.
8208 if (!new_stream->timing.h_total || !new_stream->timing.v_total)
8211 spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);
8212 vrr_params = acrtc->dm_irq_params.vrr_params;
8214 if (new_crtc_state->vrr_supported &&
8215 config.min_refresh_in_uhz &&
8216 config.max_refresh_in_uhz) {
8218 * if freesync compatible mode was set, config.state will be set
8221 if (config.state == VRR_STATE_ACTIVE_FIXED && config.fixed_refresh_in_uhz &&
8222 (!drm_atomic_crtc_needs_modeset(&new_crtc_state->base) ||
8223 new_crtc_state->freesync_config.state == VRR_STATE_ACTIVE_FIXED)) {
8224 vrr_params.max_refresh_in_uhz = config.max_refresh_in_uhz;
8225 vrr_params.min_refresh_in_uhz = config.min_refresh_in_uhz;
8226 vrr_params.fixed_refresh_in_uhz = config.fixed_refresh_in_uhz;
8227 vrr_params.state = VRR_STATE_ACTIVE_FIXED;
8229 config.state = new_crtc_state->base.vrr_enabled ?
8230 VRR_STATE_ACTIVE_VARIABLE :
8234 config.state = VRR_STATE_UNSUPPORTED;
8237 mod_freesync_build_vrr_params(dm->freesync_module,
8239 &config, &vrr_params);
8241 new_crtc_state->freesync_timing_changed |=
8242 (memcmp(&acrtc->dm_irq_params.vrr_params.adjust,
8243 &vrr_params.adjust, sizeof(vrr_params.adjust)) != 0);
8245 new_crtc_state->freesync_config = config;
8246 /* Copy state for access from DM IRQ handler */
8247 acrtc->dm_irq_params.freesync_config = config;
8248 acrtc->dm_irq_params.active_planes = new_crtc_state->active_planes;
8249 acrtc->dm_irq_params.vrr_params = vrr_params;
8250 spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
8253 static void amdgpu_dm_handle_vrr_transition(struct dm_crtc_state *old_state,
8254 struct dm_crtc_state *new_state)
8256 bool old_vrr_active = amdgpu_dm_vrr_active(old_state);
8257 bool new_vrr_active = amdgpu_dm_vrr_active(new_state);
8259 if (!old_vrr_active && new_vrr_active) {
8260 /* Transition VRR inactive -> active:
8261 * While VRR is active, we must not disable vblank irq, as a
8262 * reenable after disable would compute bogus vblank/pflip
8263 * timestamps if it likely happened inside display front-porch.
8265 * We also need vupdate irq for the actual core vblank handling
8268 dm_set_vupdate_irq(new_state->base.crtc, true);
8269 drm_crtc_vblank_get(new_state->base.crtc);
8270 DRM_DEBUG_DRIVER("%s: crtc=%u VRR off->on: Get vblank ref\n",
8271 __func__, new_state->base.crtc->base.id);
8272 } else if (old_vrr_active && !new_vrr_active) {
8273 /* Transition VRR active -> inactive:
8274 * Allow vblank irq disable again for fixed refresh rate.
8276 dm_set_vupdate_irq(new_state->base.crtc, false);
8277 drm_crtc_vblank_put(new_state->base.crtc);
8278 DRM_DEBUG_DRIVER("%s: crtc=%u VRR on->off: Drop vblank ref\n",
8279 __func__, new_state->base.crtc->base.id);
8283 static void amdgpu_dm_commit_cursors(struct drm_atomic_state *state)
8285 struct drm_plane *plane;
8286 struct drm_plane_state *old_plane_state;
8290 * TODO: Make this per-stream so we don't issue redundant updates for
8291 * commits with multiple streams.
8293 for_each_old_plane_in_state(state, plane, old_plane_state, i)
8294 if (plane->type == DRM_PLANE_TYPE_CURSOR)
8295 handle_cursor_update(plane, old_plane_state);
8298 static void amdgpu_dm_commit_planes(struct drm_atomic_state *state,
8299 struct dc_state *dc_state,
8300 struct drm_device *dev,
8301 struct amdgpu_display_manager *dm,
8302 struct drm_crtc *pcrtc,
8303 bool wait_for_vblank)
8306 uint64_t timestamp_ns;
8307 struct drm_plane *plane;
8308 struct drm_plane_state *old_plane_state, *new_plane_state;
8309 struct amdgpu_crtc *acrtc_attach = to_amdgpu_crtc(pcrtc);
8310 struct drm_crtc_state *new_pcrtc_state =
8311 drm_atomic_get_new_crtc_state(state, pcrtc);
8312 struct dm_crtc_state *acrtc_state = to_dm_crtc_state(new_pcrtc_state);
8313 struct dm_crtc_state *dm_old_crtc_state =
8314 to_dm_crtc_state(drm_atomic_get_old_crtc_state(state, pcrtc));
8315 int planes_count = 0, vpos, hpos;
8317 unsigned long flags;
8318 struct amdgpu_bo *abo;
8319 uint32_t target_vblank, last_flip_vblank;
8320 bool vrr_active = amdgpu_dm_vrr_active(acrtc_state);
8321 bool pflip_present = false;
8323 struct dc_surface_update surface_updates[MAX_SURFACES];
8324 struct dc_plane_info plane_infos[MAX_SURFACES];
8325 struct dc_scaling_info scaling_infos[MAX_SURFACES];
8326 struct dc_flip_addrs flip_addrs[MAX_SURFACES];
8327 struct dc_stream_update stream_update;
8330 bundle = kzalloc(sizeof(*bundle), GFP_KERNEL);
8333 dm_error("Failed to allocate update bundle\n");
8338 * Disable the cursor first if we're disabling all the planes.
8339 * It'll remain on the screen after the planes are re-enabled
8342 if (acrtc_state->active_planes == 0)
8343 amdgpu_dm_commit_cursors(state);
8345 /* update planes when needed */
8346 for_each_oldnew_plane_in_state(state, plane, old_plane_state, new_plane_state, i) {
8347 struct drm_crtc *crtc = new_plane_state->crtc;
8348 struct drm_crtc_state *new_crtc_state;
8349 struct drm_framebuffer *fb = new_plane_state->fb;
8350 struct amdgpu_framebuffer *afb = (struct amdgpu_framebuffer *)fb;
8351 bool plane_needs_flip;
8352 struct dc_plane_state *dc_plane;
8353 struct dm_plane_state *dm_new_plane_state = to_dm_plane_state(new_plane_state);
8355 /* Cursor plane is handled after stream updates */
8356 if (plane->type == DRM_PLANE_TYPE_CURSOR)
8359 if (!fb || !crtc || pcrtc != crtc)
8362 new_crtc_state = drm_atomic_get_new_crtc_state(state, crtc);
8363 if (!new_crtc_state->active)
8366 dc_plane = dm_new_plane_state->dc_state;
8368 bundle->surface_updates[planes_count].surface = dc_plane;
8369 if (new_pcrtc_state->color_mgmt_changed) {
8370 bundle->surface_updates[planes_count].gamma = dc_plane->gamma_correction;
8371 bundle->surface_updates[planes_count].in_transfer_func = dc_plane->in_transfer_func;
8372 bundle->surface_updates[planes_count].gamut_remap_matrix = &dc_plane->gamut_remap_matrix;
8375 fill_dc_scaling_info(new_plane_state,
8376 &bundle->scaling_infos[planes_count]);
8378 bundle->surface_updates[planes_count].scaling_info =
8379 &bundle->scaling_infos[planes_count];
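/*
* A page flip is only needed when the plane already had a framebuffer;
* a plane that is just being enabled has no previous surface to flip
* from, so no flip address is programmed for it below.
*/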
8381 plane_needs_flip = old_plane_state->fb && new_plane_state->fb;
8383 pflip_present = pflip_present || plane_needs_flip;
8385 if (!plane_needs_flip) {
8390 abo = gem_to_amdgpu_bo(fb->obj[0]);
8393 * Wait for all fences on this FB. Do limited wait to avoid
8394 * deadlock during GPU reset when this fence will not signal
8395 * but we hold reservation lock for the BO.
8397 r = dma_resv_wait_timeout_rcu(abo->tbo.base.resv, true,
8399 msecs_to_jiffies(5000));
8400 if (unlikely(r <= 0))
8401 DRM_ERROR("Waiting for fences timed out!");
8403 fill_dc_plane_info_and_addr(
8404 dm->adev, new_plane_state,
8406 &bundle->plane_infos[planes_count],
8407 &bundle->flip_addrs[planes_count].address,
8408 afb->tmz_surface, false);
8410 DRM_DEBUG_ATOMIC("plane: id=%d dcc_en=%d\n",
8411 new_plane_state->plane->index,
8412 bundle->plane_infos[planes_count].dcc.enable);
8414 bundle->surface_updates[planes_count].plane_info =
8415 &bundle->plane_infos[planes_count];
8418 * Only allow immediate flips for fast updates that don't
8419 * change FB pitch, DCC state, rotation or mirroring.
8421 bundle->flip_addrs[planes_count].flip_immediate =
8422 crtc->state->async_flip &&
8423 acrtc_state->update_type == UPDATE_TYPE_FAST;
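/* Record the flip request time; DC expects the timestamp in microseconds. */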
8425 timestamp_ns = ktime_get_ns();
8426 bundle->flip_addrs[planes_count].flip_timestamp_in_us = div_u64(timestamp_ns, 1000);
8427 bundle->surface_updates[planes_count].flip_addr = &bundle->flip_addrs[planes_count];
8428 bundle->surface_updates[planes_count].surface = dc_plane;
8430 if (!bundle->surface_updates[planes_count].surface) {
8431 DRM_ERROR("No surface for CRTC: id=%d\n",
8432 acrtc_attach->crtc_id);
8436 if (plane == pcrtc->primary)
8437 update_freesync_state_on_stream(
8440 acrtc_state->stream,
8442 bundle->flip_addrs[planes_count].flip_timestamp_in_us);
8444 DRM_DEBUG_ATOMIC("%s Flipping to hi: 0x%x, low: 0x%x\n",
8446 bundle->flip_addrs[planes_count].address.grph.addr.high_part,
8447 bundle->flip_addrs[planes_count].address.grph.addr.low_part);
8453 if (pflip_present) {
8455 /* Use old throttling in non-vrr fixed refresh rate mode
8456 * to keep flip scheduling based on target vblank counts
8457 * working in a backwards compatible way, e.g., for
8458 * clients using the GLX_OML_sync_control extension or
8459 * DRI3/Present extension with defined target_msc.
8461 last_flip_vblank = amdgpu_get_vblank_counter_kms(pcrtc);
8464 /* For variable refresh rate mode only:
8465 * Get vblank of last completed flip to avoid > 1 vrr
8466 * flips per video frame by use of throttling, but allow
8467 * flip programming anywhere in the possibly large
8468 * variable vrr vblank interval for fine-grained flip
8469 * timing control and more opportunity to avoid stutter
8470 * on late submission of flips.
8472 spin_lock_irqsave(&pcrtc->dev->event_lock, flags);
8473 last_flip_vblank = acrtc_attach->dm_irq_params.last_flip_vblank;
8474 spin_unlock_irqrestore(&pcrtc->dev->event_lock, flags);
8477 target_vblank = last_flip_vblank + wait_for_vblank;
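/*
* wait_for_vblank is 0 or 1 here, so the target is either the vblank
* of the last completed flip or the one right after it.
*/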
8480 * Wait until we're out of the vertical blank period before the one
8481 * targeted by the flip
8483 while ((acrtc_attach->enabled &&
8484 (amdgpu_display_get_crtc_scanoutpos(dm->ddev, acrtc_attach->crtc_id,
8485 0, &vpos, &hpos, NULL,
8486 NULL, &pcrtc->hwmode)
8487 & (DRM_SCANOUTPOS_VALID | DRM_SCANOUTPOS_IN_VBLANK)) ==
8488 (DRM_SCANOUTPOS_VALID | DRM_SCANOUTPOS_IN_VBLANK) &&
8489 (int)(target_vblank -
8490 amdgpu_get_vblank_counter_kms(pcrtc)) > 0)) {
8491 usleep_range(1000, 1100);
8495 * Prepare the flip event for the pageflip interrupt to handle.
8497 * This only works in the case where we've already turned on the
8498 * appropriate hardware blocks (e.g. HUBP), so in the transition case
8499 * from 0 -> n planes we have to skip a hardware generated event
8500 * and rely on sending it from software.
8502 if (acrtc_attach->base.state->event &&
8503 acrtc_state->active_planes > 0) {
8504 drm_crtc_vblank_get(pcrtc);
8506 spin_lock_irqsave(&pcrtc->dev->event_lock, flags);
8508 WARN_ON(acrtc_attach->pflip_status != AMDGPU_FLIP_NONE);
8509 prepare_flip_isr(acrtc_attach);
8511 spin_unlock_irqrestore(&pcrtc->dev->event_lock, flags);
8514 if (acrtc_state->stream) {
8515 if (acrtc_state->freesync_vrr_info_changed)
8516 bundle->stream_update.vrr_infopacket =
8517 &acrtc_state->stream->vrr_infopacket;
8521 /* Update the planes if changed or disable if we don't have any. */
8522 if ((planes_count || acrtc_state->active_planes == 0) &&
8523 acrtc_state->stream) {
8524 bundle->stream_update.stream = acrtc_state->stream;
8525 if (new_pcrtc_state->mode_changed) {
8526 bundle->stream_update.src = acrtc_state->stream->src;
8527 bundle->stream_update.dst = acrtc_state->stream->dst;
8530 if (new_pcrtc_state->color_mgmt_changed) {
8532 * TODO: This isn't fully correct since we've actually
8533 * already modified the stream in place.
8535 bundle->stream_update.gamut_remap =
8536 &acrtc_state->stream->gamut_remap_matrix;
8537 bundle->stream_update.output_csc_transform =
8538 &acrtc_state->stream->csc_color_matrix;
8539 bundle->stream_update.out_transfer_func =
8540 acrtc_state->stream->out_transfer_func;
8543 acrtc_state->stream->abm_level = acrtc_state->abm_level;
8544 if (acrtc_state->abm_level != dm_old_crtc_state->abm_level)
8545 bundle->stream_update.abm_level = &acrtc_state->abm_level;
8548 * If FreeSync state on the stream has changed then we need to
8549 * re-adjust the min/max bounds now that DC doesn't handle this
8550 * as part of commit.
8552 if (is_dc_timing_adjust_needed(dm_old_crtc_state, acrtc_state)) {
8553 spin_lock_irqsave(&pcrtc->dev->event_lock, flags);
8554 dc_stream_adjust_vmin_vmax(
8555 dm->dc, acrtc_state->stream,
8556 &acrtc_attach->dm_irq_params.vrr_params.adjust);
8557 spin_unlock_irqrestore(&pcrtc->dev->event_lock, flags);
8559 mutex_lock(&dm->dc_lock);
8560 if ((acrtc_state->update_type > UPDATE_TYPE_FAST) &&
8561 acrtc_state->stream->link->psr_settings.psr_allow_active)
8562 amdgpu_dm_psr_disable(acrtc_state->stream);
8564 dc_commit_updates_for_stream(dm->dc,
8565 bundle->surface_updates,
8567 acrtc_state->stream,
8568 &bundle->stream_update,
8572 * Enable or disable the interrupts on the backend.
8574 * Most pipes are put into power gating when unused.
8576 * When power gating is enabled on a pipe we lose the
8577 * interrupt enablement state by the time power gating is disabled again.
8579 * So we need to update the IRQ control state in hardware
8580 * whenever the pipe turns on (since it could be previously
8581 * power gated) or off (since some pipes can't be power gated
8584 if (dm_old_crtc_state->active_planes != acrtc_state->active_planes)
8585 dm_update_pflip_irq_state(drm_to_adev(dev),
8588 if ((acrtc_state->update_type > UPDATE_TYPE_FAST) &&
8589 acrtc_state->stream->link->psr_settings.psr_version != DC_PSR_VERSION_UNSUPPORTED &&
8590 !acrtc_state->stream->link->psr_settings.psr_feature_enabled)
8591 amdgpu_dm_link_setup_psr(acrtc_state->stream);
8592 else if ((acrtc_state->update_type == UPDATE_TYPE_FAST) &&
8593 acrtc_state->stream->link->psr_settings.psr_feature_enabled &&
8594 !acrtc_state->stream->link->psr_settings.psr_allow_active) {
8595 amdgpu_dm_psr_enable(acrtc_state->stream);
8598 mutex_unlock(&dm->dc_lock);
8602 * Update cursor state *after* programming all the planes.
8603 * This avoids redundant programming in the case where we're going
8604 * to be disabling a single plane - those pipes are being disabled.
8606 if (acrtc_state->active_planes)
8607 amdgpu_dm_commit_cursors(state);
8613 static void amdgpu_dm_commit_audio(struct drm_device *dev,
8614 struct drm_atomic_state *state)
8616 struct amdgpu_device *adev = drm_to_adev(dev);
8617 struct amdgpu_dm_connector *aconnector;
8618 struct drm_connector *connector;
8619 struct drm_connector_state *old_con_state, *new_con_state;
8620 struct drm_crtc_state *new_crtc_state;
8621 struct dm_crtc_state *new_dm_crtc_state;
8622 const struct dc_stream_status *status;
8625 /* Notify device removals. */
8626 for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) {
8627 if (old_con_state->crtc != new_con_state->crtc) {
8628 /* CRTC changes require notification. */
8632 if (!new_con_state->crtc)
8635 new_crtc_state = drm_atomic_get_new_crtc_state(
8636 state, new_con_state->crtc);
8638 if (!new_crtc_state)
8641 if (!drm_atomic_crtc_needs_modeset(new_crtc_state))
8645 aconnector = to_amdgpu_dm_connector(connector);
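/*
* Latch the old audio instance and mark the connector as having no
* audio under the lock, so the ELD notification below sees the
* device as removed.
*/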
8647 mutex_lock(&adev->dm.audio_lock);
8648 inst = aconnector->audio_inst;
8649 aconnector->audio_inst = -1;
8650 mutex_unlock(&adev->dm.audio_lock);
8652 amdgpu_dm_audio_eld_notify(adev, inst);
8655 /* Notify audio device additions. */
8656 for_each_new_connector_in_state(state, connector, new_con_state, i) {
8657 if (!new_con_state->crtc)
8660 new_crtc_state = drm_atomic_get_new_crtc_state(
8661 state, new_con_state->crtc);
8663 if (!new_crtc_state)
8666 if (!drm_atomic_crtc_needs_modeset(new_crtc_state))
8669 new_dm_crtc_state = to_dm_crtc_state(new_crtc_state);
8670 if (!new_dm_crtc_state->stream)
8673 status = dc_stream_get_status(new_dm_crtc_state->stream);
8677 aconnector = to_amdgpu_dm_connector(connector);
8679 mutex_lock(&adev->dm.audio_lock);
8680 inst = status->audio_inst;
8681 aconnector->audio_inst = inst;
8682 mutex_unlock(&adev->dm.audio_lock);
8684 amdgpu_dm_audio_eld_notify(adev, inst);
8689 * amdgpu_dm_crtc_copy_transient_flags - copy mirrored flags from DRM to DC
8690 * @crtc_state: the DRM CRTC state
8691 * @stream_state: the DC stream state.
8693 * Copy the mirrored transient state flags from DRM to DC. It is used to bring
8694 * a dc_stream_state's flags in sync with a drm_crtc_state's flags.
8696 static void amdgpu_dm_crtc_copy_transient_flags(struct drm_crtc_state *crtc_state,
8697 struct dc_stream_state *stream_state)
8699 stream_state->mode_changed = drm_atomic_crtc_needs_modeset(crtc_state);
8703 * amdgpu_dm_atomic_commit_tail() - AMDgpu DM's commit tail implementation.
8704 * @state: The atomic state to commit
8706 * This will tell DC to commit the constructed DC state from atomic_check,
8707 * programming the hardware. Any failure here implies a hardware failure, since
8708 * atomic check should have filtered anything non-kosher.
8710 static void amdgpu_dm_atomic_commit_tail(struct drm_atomic_state *state)
8712 struct drm_device *dev = state->dev;
8713 struct amdgpu_device *adev = drm_to_adev(dev);
8714 struct amdgpu_display_manager *dm = &adev->dm;
8715 struct dm_atomic_state *dm_state;
8716 struct dc_state *dc_state = NULL, *dc_state_temp = NULL;
8718 struct drm_crtc *crtc;
8719 struct drm_crtc_state *old_crtc_state, *new_crtc_state;
8720 unsigned long flags;
8721 bool wait_for_vblank = true;
8722 struct drm_connector *connector;
8723 struct drm_connector_state *old_con_state, *new_con_state;
8724 struct dm_crtc_state *dm_old_crtc_state, *dm_new_crtc_state;
8725 int crtc_disable_count = 0;
8726 bool mode_set_reset_required = false;
8728 trace_amdgpu_dm_atomic_commit_tail_begin(state);
8730 drm_atomic_helper_update_legacy_modeset_state(dev, state);
8732 dm_state = dm_atomic_get_new_state(state);
8733 if (dm_state && dm_state->context) {
8734 dc_state = dm_state->context;
8736 /* No state changes, retain current state. */
8737 dc_state_temp = dc_create_state(dm->dc);
8738 ASSERT(dc_state_temp);
8739 dc_state = dc_state_temp;
8740 dc_resource_state_copy_construct_current(dm->dc, dc_state);
8743 for_each_oldnew_crtc_in_state (state, crtc, old_crtc_state,
8744 new_crtc_state, i) {
8745 struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
8747 dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
8749 if (old_crtc_state->active &&
8750 (!new_crtc_state->active ||
8751 drm_atomic_crtc_needs_modeset(new_crtc_state))) {
8752 manage_dm_interrupts(adev, acrtc, false);
8753 dc_stream_release(dm_old_crtc_state->stream);
8757 drm_atomic_helper_calc_timestamping_constants(state);
8759 /* update changed items */
8760 for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
8761 struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
8763 dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
8764 dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
8767 "amdgpu_crtc id:%d crtc_state_flags: enable:%d, active:%d, "
8768 "planes_changed:%d, mode_changed:%d,active_changed:%d,"
8769 "connectors_changed:%d\n",
8771 new_crtc_state->enable,
8772 new_crtc_state->active,
8773 new_crtc_state->planes_changed,
8774 new_crtc_state->mode_changed,
8775 new_crtc_state->active_changed,
8776 new_crtc_state->connectors_changed);
8778 /* Disable cursor if disabling crtc */
8779 if (old_crtc_state->active && !new_crtc_state->active) {
8780 struct dc_cursor_position position;
8782 memset(&position, 0, sizeof(position));
8783 mutex_lock(&dm->dc_lock);
8784 dc_stream_set_cursor_position(dm_old_crtc_state->stream, &position);
8785 mutex_unlock(&dm->dc_lock);
8788 /* Copy all transient state flags into dc state */
8789 if (dm_new_crtc_state->stream) {
8790 amdgpu_dm_crtc_copy_transient_flags(&dm_new_crtc_state->base,
8791 dm_new_crtc_state->stream);
8794 /* handles headless hotplug case, updating new_state and
8795 * aconnector as needed
8798 if (modeset_required(new_crtc_state, dm_new_crtc_state->stream, dm_old_crtc_state->stream)) {
8800 DRM_DEBUG_ATOMIC("Atomic commit: SET crtc id %d: [%p]\n", acrtc->crtc_id, acrtc);
8802 if (!dm_new_crtc_state->stream) {
8804 * This can happen because of issues with the
8805 * delivery of userspace notifications:
8806 * in that case userspace tries to set a mode on a
8807 * display which is in fact already disconnected.
8808 * dc_sink is NULL on the aconnector in this case.
8809 * We expect a mode-reset to come soon.
8811 * This can also happen when an unplug occurs during
8812 * the resume sequence.
8814 * In either case, we want to pretend we still
8815 * have a sink to keep the pipe running, so that the
8816 * hw state stays consistent with the sw state
8818 DRM_DEBUG_DRIVER("%s: Failed to create new stream for crtc %d\n",
8819 __func__, acrtc->base.base.id);
8823 if (dm_old_crtc_state->stream)
8824 remove_stream(adev, acrtc, dm_old_crtc_state->stream);
8826 pm_runtime_get_noresume(dev->dev);
8828 acrtc->enabled = true;
8829 acrtc->hw_mode = new_crtc_state->mode;
8830 crtc->hwmode = new_crtc_state->mode;
8831 mode_set_reset_required = true;
8832 } else if (modereset_required(new_crtc_state)) {
8833 DRM_DEBUG_ATOMIC("Atomic commit: RESET. crtc id %d:[%p]\n", acrtc->crtc_id, acrtc);
8834 /* i.e. reset mode */
8835 if (dm_old_crtc_state->stream)
8836 remove_stream(adev, acrtc, dm_old_crtc_state->stream);
8838 mode_set_reset_required = true;
8840 } /* for_each_crtc_in_state() */
8843 /* If there was a mode set or reset, disable eDP PSR */
8844 if (mode_set_reset_required)
8845 amdgpu_dm_psr_disable_all(dm);
8847 dm_enable_per_frame_crtc_master_sync(dc_state);
8848 mutex_lock(&dm->dc_lock);
8849 WARN_ON(!dc_commit_state(dm->dc, dc_state));
8850 #if defined(CONFIG_DRM_AMD_DC_DCN)
8851 /* Allow idle optimization when vblank count is 0 for display off */
8852 if (dm->active_vblank_irq_count == 0)
8853 dc_allow_idle_optimizations(dm->dc, true);
8855 mutex_unlock(&dm->dc_lock);
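/* Record, for each CRTC with a stream, the OTG instance that now drives it. */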
8858 for_each_new_crtc_in_state(state, crtc, new_crtc_state, i) {
8859 struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
8861 dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
8863 if (dm_new_crtc_state->stream != NULL) {
8864 const struct dc_stream_status *status =
8865 dc_stream_get_status(dm_new_crtc_state->stream);
8868 status = dc_stream_get_status_from_state(dc_state,
8869 dm_new_crtc_state->stream);
8871 DC_ERR("got no status for stream %p on acrtc%p\n", dm_new_crtc_state->stream, acrtc);
8873 acrtc->otg_inst = status->primary_otg_inst;
8876 #ifdef CONFIG_DRM_AMD_DC_HDCP
8877 for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) {
8878 struct dm_connector_state *dm_new_con_state = to_dm_connector_state(new_con_state);
8879 struct amdgpu_crtc *acrtc = to_amdgpu_crtc(dm_new_con_state->base.crtc);
8880 struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
8882 new_crtc_state = NULL;
8885 new_crtc_state = drm_atomic_get_new_crtc_state(state, &acrtc->base);
8887 dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
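/*
* If the stream was removed while content protection was enabled,
* reset HDCP for this display and downgrade the request to DESIRED so
* it is re-enabled once a stream comes back.
*/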
8889 if (dm_new_crtc_state && dm_new_crtc_state->stream == NULL &&
8890 connector->state->content_protection == DRM_MODE_CONTENT_PROTECTION_ENABLED) {
8891 hdcp_reset_display(adev->dm.hdcp_workqueue, aconnector->dc_link->link_index);
8892 new_con_state->content_protection = DRM_MODE_CONTENT_PROTECTION_DESIRED;
8893 dm_new_con_state->update_hdcp = true;
8897 if (is_content_protection_different(new_con_state, old_con_state, connector, adev->dm.hdcp_workqueue))
8898 hdcp_update_display(
8899 adev->dm.hdcp_workqueue, aconnector->dc_link->link_index, aconnector,
8900 new_con_state->hdcp_content_type,
8901 new_con_state->content_protection == DRM_MODE_CONTENT_PROTECTION_DESIRED);
8905 /* Handle connector state changes */
8906 for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) {
8907 struct dm_connector_state *dm_new_con_state = to_dm_connector_state(new_con_state);
8908 struct dm_connector_state *dm_old_con_state = to_dm_connector_state(old_con_state);
8909 struct amdgpu_crtc *acrtc = to_amdgpu_crtc(dm_new_con_state->base.crtc);
8910 struct dc_surface_update dummy_updates[MAX_SURFACES];
8911 struct dc_stream_update stream_update;
8912 struct dc_info_packet hdr_packet;
8913 struct dc_stream_status *status = NULL;
8914 bool abm_changed, hdr_changed, scaling_changed;
8916 memset(&dummy_updates, 0, sizeof(dummy_updates));
8917 memset(&stream_update, 0, sizeof(stream_update));
8920 new_crtc_state = drm_atomic_get_new_crtc_state(state, &acrtc->base);
8921 old_crtc_state = drm_atomic_get_old_crtc_state(state, &acrtc->base);
8924 /* Skip any modesets/resets */
8925 if (!acrtc || drm_atomic_crtc_needs_modeset(new_crtc_state))
8928 dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
8929 dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
8931 scaling_changed = is_scaling_state_different(dm_new_con_state,
8934 abm_changed = dm_new_crtc_state->abm_level !=
8935 dm_old_crtc_state->abm_level;
8938 is_hdr_metadata_different(old_con_state, new_con_state);
8940 if (!scaling_changed && !abm_changed && !hdr_changed)
8943 stream_update.stream = dm_new_crtc_state->stream;
8944 if (scaling_changed) {
8945 update_stream_scaling_settings(&dm_new_con_state->base.crtc->mode,
8946 dm_new_con_state, dm_new_crtc_state->stream);
8948 stream_update.src = dm_new_crtc_state->stream->src;
8949 stream_update.dst = dm_new_crtc_state->stream->dst;
8953 dm_new_crtc_state->stream->abm_level = dm_new_crtc_state->abm_level;
8955 stream_update.abm_level = &dm_new_crtc_state->abm_level;
8959 fill_hdr_info_packet(new_con_state, &hdr_packet);
8960 stream_update.hdr_static_metadata = &hdr_packet;
8963 status = dc_stream_get_status(dm_new_crtc_state->stream);
8965 WARN_ON(!status->plane_count);
8968 * TODO: DC refuses to perform stream updates without a dc_surface_update.
8969 * Here we create an empty update on each plane.
8970 * To fix this, DC should permit updating only stream properties.
8972 for (j = 0; j < status->plane_count; j++)
8973 dummy_updates[j].surface = status->plane_states[0];
8976 mutex_lock(&dm->dc_lock);
8977 dc_commit_updates_for_stream(dm->dc,
8979 status->plane_count,
8980 dm_new_crtc_state->stream,
8983 mutex_unlock(&dm->dc_lock);
8986 /* Count number of newly disabled CRTCs for dropping PM refs later. */
8987 for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state,
8988 new_crtc_state, i) {
8989 if (old_crtc_state->active && !new_crtc_state->active)
8990 crtc_disable_count++;
8992 dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
8993 dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
8995 /* Update the freesync config on the crtc state and the params used by the irq handler */
8996 update_stream_irq_parameters(dm, dm_new_crtc_state);
8998 /* Handle vrr on->off / off->on transitions */
8999 amdgpu_dm_handle_vrr_transition(dm_old_crtc_state,
9004 * Enable interrupts for CRTCs that are newly enabled or went through
9005 * a modeset. It was intentionally deferred until after the front end
9006 * state was modified to wait until the OTG was on and so the IRQ
9007 * handlers didn't access stale or invalid state.
9009 for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
9010 struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
9011 #ifdef CONFIG_DEBUG_FS
9012 bool configure_crc = false;
9013 enum amdgpu_dm_pipe_crc_source cur_crc_src;
9014 #if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
9015 struct crc_rd_work *crc_rd_wrk = dm->crc_rd_wrk;
9017 spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);
9018 cur_crc_src = acrtc->dm_irq_params.crc_src;
9019 spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
9021 dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
9023 if (new_crtc_state->active &&
9024 (!old_crtc_state->active ||
9025 drm_atomic_crtc_needs_modeset(new_crtc_state))) {
9026 dc_stream_retain(dm_new_crtc_state->stream);
9027 acrtc->dm_irq_params.stream = dm_new_crtc_state->stream;
9028 manage_dm_interrupts(adev, acrtc, true);
9030 #ifdef CONFIG_DEBUG_FS
9032 * Frontend may have changed so reapply the CRC capture
9033 * settings for the stream.
9035 dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
9037 if (amdgpu_dm_is_valid_crc_source(cur_crc_src)) {
9038 configure_crc = true;
9039 #if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
9040 if (amdgpu_dm_crc_window_is_activated(crtc)) {
9041 spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);
9042 acrtc->dm_irq_params.crc_window.update_win = true;
9043 acrtc->dm_irq_params.crc_window.skip_frame_cnt = 2;
9044 spin_lock_irq(&crc_rd_wrk->crc_rd_work_lock);
9045 crc_rd_wrk->crtc = crtc;
9046 spin_unlock_irq(&crc_rd_wrk->crc_rd_work_lock);
9047 spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
9053 if (amdgpu_dm_crtc_configure_crc_source(
9054 crtc, dm_new_crtc_state, cur_crc_src))
9055 DRM_DEBUG_DRIVER("Failed to configure crc source");
9060 for_each_new_crtc_in_state(state, crtc, new_crtc_state, j)
9061 if (new_crtc_state->async_flip)
9062 wait_for_vblank = false;
9064 /* update planes when needed, per crtc */
9065 for_each_new_crtc_in_state(state, crtc, new_crtc_state, j) {
9066 dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
9068 if (dm_new_crtc_state->stream)
9069 amdgpu_dm_commit_planes(state, dc_state, dev,
9070 dm, crtc, wait_for_vblank);
9073 /* Update audio instances for each connector. */
9074 amdgpu_dm_commit_audio(dev, state);
9077 * send a vblank event for all events not handled in flip and
9078 * mark the event as consumed for drm_atomic_helper_commit_hw_done
9080 spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);
9081 for_each_new_crtc_in_state(state, crtc, new_crtc_state, i) {
9083 if (new_crtc_state->event)
9084 drm_send_event_locked(dev, &new_crtc_state->event->base);
9086 new_crtc_state->event = NULL;
9088 spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
9090 /* Signal HW programming completion */
9091 drm_atomic_helper_commit_hw_done(state);
9093 if (wait_for_vblank)
9094 drm_atomic_helper_wait_for_flip_done(dev, state);
9096 drm_atomic_helper_cleanup_planes(dev, state);
9098 /* return the stolen vga memory back to VRAM */
9099 if (!adev->mman.keep_stolen_vga_memory)
9100 amdgpu_bo_free_kernel(&adev->mman.stolen_vga_memory, NULL, NULL);
9101 amdgpu_bo_free_kernel(&adev->mman.stolen_extended_memory, NULL, NULL);
9104 * Finally, drop a runtime PM reference for each newly disabled CRTC,
9105 * so we can put the GPU into runtime suspend if we're not driving any
9108 for (i = 0; i < crtc_disable_count; i++)
9109 pm_runtime_put_autosuspend(dev->dev);
9110 pm_runtime_mark_last_busy(dev->dev);
9113 dc_release_state(dc_state_temp);
9117 static int dm_force_atomic_commit(struct drm_connector *connector)
9120 struct drm_device *ddev = connector->dev;
9121 struct drm_atomic_state *state = drm_atomic_state_alloc(ddev);
9122 struct amdgpu_crtc *disconnected_acrtc = to_amdgpu_crtc(connector->encoder->crtc);
9123 struct drm_plane *plane = disconnected_acrtc->base.primary;
9124 struct drm_connector_state *conn_state;
9125 struct drm_crtc_state *crtc_state;
9126 struct drm_plane_state *plane_state;
9131 state->acquire_ctx = ddev->mode_config.acquire_ctx;
9133 /* Construct an atomic state to restore previous display settings */
9136 * Attach connectors to drm_atomic_state
9138 conn_state = drm_atomic_get_connector_state(state, connector);
9140 ret = PTR_ERR_OR_ZERO(conn_state);
9144 /* Attach crtc to drm_atomic_state */
9145 crtc_state = drm_atomic_get_crtc_state(state, &disconnected_acrtc->base);
9147 ret = PTR_ERR_OR_ZERO(crtc_state);
9151 /* force a restore */
9152 crtc_state->mode_changed = true;
9154 /* Attach plane to drm_atomic_state */
9155 plane_state = drm_atomic_get_plane_state(state, plane);
9157 ret = PTR_ERR_OR_ZERO(plane_state);
9161 /* Call commit internally with the state we just constructed */
9162 ret = drm_atomic_commit(state);
9165 drm_atomic_state_put(state);
9167 DRM_ERROR("Restoring old state failed with %i\n", ret);
9173 * This function handles all cases when a set-mode request does not come upon hotplug.
9174 * This includes when a display is unplugged then plugged back into the
9175 * same port and when running without usermode desktop manager support
9177 void dm_restore_drm_connector_state(struct drm_device *dev,
9178 struct drm_connector *connector)
9180 struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
9181 struct amdgpu_crtc *disconnected_acrtc;
9182 struct dm_crtc_state *acrtc_state;
9184 if (!aconnector->dc_sink || !connector->state || !connector->encoder)
9187 disconnected_acrtc = to_amdgpu_crtc(connector->encoder->crtc);
9188 if (!disconnected_acrtc)
9191 acrtc_state = to_dm_crtc_state(disconnected_acrtc->base.state);
9192 if (!acrtc_state->stream)
9196 * If the previous sink is not released and different from the current,
9197 * we deduce we are in a state where we can not rely on usermode call
9198 * to turn on the display, so we do it here
9200 if (acrtc_state->stream->sink != aconnector->dc_sink)
9201 dm_force_atomic_commit(&aconnector->base);
9205 * Grabs all modesetting locks to serialize against any blocking commits,
9206 * and waits for completion of all non-blocking commits.
9208 static int do_aquire_global_lock(struct drm_device *dev,
9209 struct drm_atomic_state *state)
9211 struct drm_crtc *crtc;
9212 struct drm_crtc_commit *commit;
9216 * Adding all modeset locks to acquire_ctx will
9217 * ensure that when the framework releases it, the
9218 * extra locks we are locking here will get released too
9220 ret = drm_modeset_lock_all_ctx(dev, state->acquire_ctx);
9224 list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
9225 spin_lock(&crtc->commit_lock);
9226 commit = list_first_entry_or_null(&crtc->commit_list,
9227 struct drm_crtc_commit, commit_entry);
9229 drm_crtc_commit_get(commit);
9230 spin_unlock(&crtc->commit_lock);
9236 * Make sure all pending HW programming completed and
9239 ret = wait_for_completion_interruptible_timeout(&commit->hw_done, 10*HZ);
9242 ret = wait_for_completion_interruptible_timeout(
9243 &commit->flip_done, 10*HZ);
9246 DRM_ERROR("[CRTC:%d:%s] hw_done or flip_done "
9247 "timed out\n", crtc->base.id, crtc->name);
9249 drm_crtc_commit_put(commit);
9252 return ret < 0 ? ret : 0;
9255 static void get_freesync_config_for_crtc(
9256 struct dm_crtc_state *new_crtc_state,
9257 struct dm_connector_state *new_con_state)
9259 struct mod_freesync_config config = {0};
9260 struct amdgpu_dm_connector *aconnector =
9261 to_amdgpu_dm_connector(new_con_state->base.connector);
9262 struct drm_display_mode *mode = &new_crtc_state->base.mode;
9263 int vrefresh = drm_mode_vrefresh(mode);
9264 bool fs_vid_mode = false;
9266 new_crtc_state->vrr_supported = new_con_state->freesync_capable &&
9267 vrefresh >= aconnector->min_vfreq &&
9268 vrefresh <= aconnector->max_vfreq;
9270 if (new_crtc_state->vrr_supported) {
9271 new_crtc_state->stream->ignore_msa_timing_param = true;
9272 fs_vid_mode = new_crtc_state->freesync_config.state == VRR_STATE_ACTIVE_FIXED;
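/* min/max_vfreq are in Hz; the freesync module expects refresh rates in uHz. */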
9274 config.min_refresh_in_uhz = aconnector->min_vfreq * 1000000;
9275 config.max_refresh_in_uhz = aconnector->max_vfreq * 1000000;
9276 config.vsif_supported = true;
9280 config.state = VRR_STATE_ACTIVE_FIXED;
9281 config.fixed_refresh_in_uhz = new_crtc_state->freesync_config.fixed_refresh_in_uhz;
9283 } else if (new_crtc_state->base.vrr_enabled) {
9284 config.state = VRR_STATE_ACTIVE_VARIABLE;
9286 config.state = VRR_STATE_INACTIVE;
9290 new_crtc_state->freesync_config = config;
9293 static void reset_freesync_config_for_crtc(
9294 struct dm_crtc_state *new_crtc_state)
9296 new_crtc_state->vrr_supported = false;
9298 memset(&new_crtc_state->vrr_infopacket, 0,
9299 sizeof(new_crtc_state->vrr_infopacket));
9303 is_timing_unchanged_for_freesync(struct drm_crtc_state *old_crtc_state,
9304 struct drm_crtc_state *new_crtc_state)
9306 struct drm_display_mode old_mode, new_mode;
9308 if (!old_crtc_state || !new_crtc_state)
9311 old_mode = old_crtc_state->mode;
9312 new_mode = new_crtc_state->mode;
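/*
* The modes must be identical except for the vertical timing: the
* intentional != checks below require vtotal and the vsync position
* to differ while the vsync width stays the same, i.e. only the
* vertical front porch changed.
*/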
9314 if (old_mode.clock == new_mode.clock &&
9315 old_mode.hdisplay == new_mode.hdisplay &&
9316 old_mode.vdisplay == new_mode.vdisplay &&
9317 old_mode.htotal == new_mode.htotal &&
9318 old_mode.vtotal != new_mode.vtotal &&
9319 old_mode.hsync_start == new_mode.hsync_start &&
9320 old_mode.vsync_start != new_mode.vsync_start &&
9321 old_mode.hsync_end == new_mode.hsync_end &&
9322 old_mode.vsync_end != new_mode.vsync_end &&
9323 old_mode.hskew == new_mode.hskew &&
9324 old_mode.vscan == new_mode.vscan &&
9325 (old_mode.vsync_end - old_mode.vsync_start) ==
9326 (new_mode.vsync_end - new_mode.vsync_start))
9332 static void set_freesync_fixed_config(struct dm_crtc_state *dm_new_crtc_state) {
9333 uint64_t num, den, res;
9334 struct drm_crtc_state *new_crtc_state = &dm_new_crtc_state->base;
9336 dm_new_crtc_state->freesync_config.state = VRR_STATE_ACTIVE_FIXED;
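/*
* Nominal refresh rate in uHz: mode.clock is in kHz, so this is
* clock * 1000 * 1000000 / (htotal * vtotal). For example a 148500 kHz
* mode with 2200x1125 total pixels works out to 60000000 uHz (60 Hz).
*/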
9338 num = (unsigned long long)new_crtc_state->mode.clock * 1000 * 1000000;
9339 den = (unsigned long long)new_crtc_state->mode.htotal *
9340 (unsigned long long)new_crtc_state->mode.vtotal;
9342 res = div_u64(num, den);
9343 dm_new_crtc_state->freesync_config.fixed_refresh_in_uhz = res;
9346 static int dm_update_crtc_state(struct amdgpu_display_manager *dm,
9347 struct drm_atomic_state *state,
9348 struct drm_crtc *crtc,
9349 struct drm_crtc_state *old_crtc_state,
9350 struct drm_crtc_state *new_crtc_state,
9352 bool *lock_and_validation_needed)
9354 struct dm_atomic_state *dm_state = NULL;
9355 struct dm_crtc_state *dm_old_crtc_state, *dm_new_crtc_state;
9356 struct dc_stream_state *new_stream;
9360 * TODO Move this code into dm_crtc_atomic_check once we get rid of dc_validation_set
9361 * update changed items
9363 struct amdgpu_crtc *acrtc = NULL;
9364 struct amdgpu_dm_connector *aconnector = NULL;
9365 struct drm_connector_state *drm_new_conn_state = NULL, *drm_old_conn_state = NULL;
9366 struct dm_connector_state *dm_new_conn_state = NULL, *dm_old_conn_state = NULL;
9370 dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
9371 dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
9372 acrtc = to_amdgpu_crtc(crtc);
9373 aconnector = amdgpu_dm_find_first_crtc_matching_connector(state, crtc);
9375 /* TODO This hack should go away */
9376 if (aconnector && enable) {
9377 /* Make sure fake sink is created in plug-in scenario */
9378 drm_new_conn_state = drm_atomic_get_new_connector_state(state,
9380 drm_old_conn_state = drm_atomic_get_old_connector_state(state,
9383 if (IS_ERR(drm_new_conn_state)) {
9384 ret = PTR_ERR_OR_ZERO(drm_new_conn_state);
9388 dm_new_conn_state = to_dm_connector_state(drm_new_conn_state);
9389 dm_old_conn_state = to_dm_connector_state(drm_old_conn_state);
9391 if (!drm_atomic_crtc_needs_modeset(new_crtc_state))
9394 new_stream = create_validate_stream_for_sink(aconnector,
9395 &new_crtc_state->mode,
9397 dm_old_crtc_state->stream);
9400 * we can have no stream on ACTION_SET if a display
9401 * was disconnected during S3; in this case it is not an
9402 * error, the OS will be updated after detection and
9403 * will do the right thing on the next atomic commit
9407 DRM_DEBUG_DRIVER("%s: Failed to create new stream for crtc %d\n",
9408 __func__, acrtc->base.base.id);
9414 * TODO: Check VSDB bits to decide whether this should
9415 * be enabled or not.
9417 new_stream->triggered_crtc_reset.enabled =
9418 dm->force_timing_sync;
9420 dm_new_crtc_state->abm_level = dm_new_conn_state->abm_level;
9422 ret = fill_hdr_info_packet(drm_new_conn_state,
9423 &new_stream->hdr_static_metadata);
9428 * If we already removed the old stream from the context
9429 * (and set the new stream to NULL) then we can't reuse
9430 * the old stream even if the stream and scaling are unchanged.
9431 * We'll hit the BUG_ON and black screen.
9433 * TODO: Refactor this function to allow this check to work
9434 * in all conditions.
9436 if (amdgpu_freesync_vid_mode &&
9437 dm_new_crtc_state->stream &&
9438 is_timing_unchanged_for_freesync(new_crtc_state, old_crtc_state))
9441 if (dm_new_crtc_state->stream &&
9442 dc_is_stream_unchanged(new_stream, dm_old_crtc_state->stream) &&
9443 dc_is_stream_scaling_unchanged(new_stream, dm_old_crtc_state->stream)) {
9444 new_crtc_state->mode_changed = false;
9445 DRM_DEBUG_DRIVER("Mode change not required, setting mode_changed to %d",
9446 new_crtc_state->mode_changed);
9450 /* mode_changed flag may get updated above, need to check again */
9451 if (!drm_atomic_crtc_needs_modeset(new_crtc_state))
9455 "amdgpu_crtc id:%d crtc_state_flags: enable:%d, active:%d, "
9456 "planes_changed:%d, mode_changed:%d,active_changed:%d,"
9457 "connectors_changed:%d\n",
9459 new_crtc_state->enable,
9460 new_crtc_state->active,
9461 new_crtc_state->planes_changed,
9462 new_crtc_state->mode_changed,
9463 new_crtc_state->active_changed,
9464 new_crtc_state->connectors_changed);
9466 /* Remove stream for any changed/disabled CRTC */
9469 if (!dm_old_crtc_state->stream)
9472 if (amdgpu_freesync_vid_mode && dm_new_crtc_state->stream &&
9473 is_timing_unchanged_for_freesync(new_crtc_state,
9475 new_crtc_state->mode_changed = false;
9477 "Mode change not required for front porch change, "
9478 "setting mode_changed to %d",
9479 new_crtc_state->mode_changed);
9481 set_freesync_fixed_config(dm_new_crtc_state);
9484 } else if (amdgpu_freesync_vid_mode && aconnector &&
9485 is_freesync_video_mode(&new_crtc_state->mode,
9487 set_freesync_fixed_config(dm_new_crtc_state);
9490 ret = dm_atomic_get_state(state, &dm_state);
9494 DRM_DEBUG_DRIVER("Disabling DRM crtc: %d\n",
9497 /* i.e. reset mode */
9498 if (dc_remove_stream_from_ctx(
9501 dm_old_crtc_state->stream) != DC_OK) {
9506 dc_stream_release(dm_old_crtc_state->stream);
9507 dm_new_crtc_state->stream = NULL;
9509 reset_freesync_config_for_crtc(dm_new_crtc_state);
9511 *lock_and_validation_needed = true;
9513 } else {/* Add stream for any updated/enabled CRTC */
9515 * Quick fix to prevent a NULL pointer on new_stream when newly
9516 * added MST connectors are not found in the existing crtc_state in chained mode.
9517 * TODO: need to dig out the root cause of this
9519 if (!aconnector || (!aconnector->dc_sink && aconnector->mst_port))
9522 if (modereset_required(new_crtc_state))
9525 if (modeset_required(new_crtc_state, new_stream,
9526 dm_old_crtc_state->stream)) {
9528 WARN_ON(dm_new_crtc_state->stream);
9530 ret = dm_atomic_get_state(state, &dm_state);
9534 dm_new_crtc_state->stream = new_stream;
9536 dc_stream_retain(new_stream);
9538 DRM_DEBUG_ATOMIC("Enabling DRM crtc: %d\n",
9541 if (dc_add_stream_to_ctx(
9544 dm_new_crtc_state->stream) != DC_OK) {
9549 *lock_and_validation_needed = true;
9554 /* Release extra reference */
9556 dc_stream_release(new_stream);
9559 * We want to do dc stream updates that do not require a
9560 * full modeset below.
9562 if (!(enable && aconnector && new_crtc_state->active))
9565 * Given above conditions, the dc state cannot be NULL because:
9566 * 1. We're in the process of enabling CRTCs (just been added
9567 * to the dc context, or already is on the context)
9568 * 2. Has a valid connector attached, and
9569 * 3. Is currently active and enabled.
9570 * => The dc stream state currently exists.
9572 BUG_ON(dm_new_crtc_state->stream == NULL);
9574 /* Scaling or underscan settings */
9575 if (is_scaling_state_different(dm_old_conn_state, dm_new_conn_state))
9576 update_stream_scaling_settings(
9577 &new_crtc_state->mode, dm_new_conn_state, dm_new_crtc_state->stream);
9580 dm_new_crtc_state->abm_level = dm_new_conn_state->abm_level;
9583 * Color management settings. We also update color properties
9584 * when a modeset is needed, to ensure they get reprogrammed.
9586 if (dm_new_crtc_state->base.color_mgmt_changed ||
9587 drm_atomic_crtc_needs_modeset(new_crtc_state)) {
9588 ret = amdgpu_dm_update_crtc_color_mgmt(dm_new_crtc_state);
9593 /* Update Freesync settings. */
9594 get_freesync_config_for_crtc(dm_new_crtc_state,
9601 dc_stream_release(new_stream);
9605 static bool should_reset_plane(struct drm_atomic_state *state,
9606 struct drm_plane *plane,
9607 struct drm_plane_state *old_plane_state,
9608 struct drm_plane_state *new_plane_state)
9610 struct drm_plane *other;
9611 struct drm_plane_state *old_other_state, *new_other_state;
9612 struct drm_crtc_state *new_crtc_state;
9616 * TODO: Remove this hack once the checks below are sufficient
9617 * to determine when we need to reset all the planes on
9620 if (state->allow_modeset)
9623 /* Exit early if we know that we're adding or removing the plane. */
9624 if (old_plane_state->crtc != new_plane_state->crtc)
9627 /* old crtc == new_crtc == NULL, plane not in context. */
9628 if (!new_plane_state->crtc)
9632 drm_atomic_get_new_crtc_state(state, new_plane_state->crtc);
9634 if (!new_crtc_state)
9637 /* CRTC Degamma changes currently require us to recreate planes. */
9638 if (new_crtc_state->color_mgmt_changed)
9641 if (drm_atomic_crtc_needs_modeset(new_crtc_state))
9645 * If there are any new primary or overlay planes being added or
9646 * removed then the z-order can potentially change. To ensure
9647 * correct z-order and pipe acquisition the current DC architecture
9648 * requires us to remove and recreate all existing planes.
9650 * TODO: Come up with a more elegant solution for this.
9652 for_each_oldnew_plane_in_state(state, other, old_other_state, new_other_state, i) {
9653 struct amdgpu_framebuffer *old_afb, *new_afb;
9654 if (other->type == DRM_PLANE_TYPE_CURSOR)
9657 if (old_other_state->crtc != new_plane_state->crtc &&
9658 new_other_state->crtc != new_plane_state->crtc)
9661 if (old_other_state->crtc != new_other_state->crtc)
9664 /* Src/dst size and scaling updates. */
9665 if (old_other_state->src_w != new_other_state->src_w ||
9666 old_other_state->src_h != new_other_state->src_h ||
9667 old_other_state->crtc_w != new_other_state->crtc_w ||
9668 old_other_state->crtc_h != new_other_state->crtc_h)
9671 /* Rotation / mirroring updates. */
9672 if (old_other_state->rotation != new_other_state->rotation)
9675 /* Blending updates. */
9676 if (old_other_state->pixel_blend_mode !=
9677 new_other_state->pixel_blend_mode)
9680 /* Alpha updates. */
9681 if (old_other_state->alpha != new_other_state->alpha)
9684 /* Colorspace changes. */
9685 if (old_other_state->color_range != new_other_state->color_range ||
9686 old_other_state->color_encoding != new_other_state->color_encoding)
9689 /* Framebuffer checks fall at the end. */
9690 if (!old_other_state->fb || !new_other_state->fb)
9693 /* Pixel format changes can require bandwidth updates. */
9694 if (old_other_state->fb->format != new_other_state->fb->format)
9697 old_afb = (struct amdgpu_framebuffer *)old_other_state->fb;
9698 new_afb = (struct amdgpu_framebuffer *)new_other_state->fb;
9700 /* Tiling and DCC changes also require bandwidth updates. */
9701 if (old_afb->tiling_flags != new_afb->tiling_flags ||
9702 old_afb->base.modifier != new_afb->base.modifier)
9709 static int dm_check_cursor_fb(struct amdgpu_crtc *new_acrtc,
9710 struct drm_plane_state *new_plane_state,
9711 struct drm_framebuffer *fb)
9713 struct amdgpu_device *adev = drm_to_adev(new_acrtc->base.dev);
9714 struct amdgpu_framebuffer *afb = to_amdgpu_framebuffer(fb);
9718 if (fb->width > new_acrtc->max_cursor_width ||
9719 fb->height > new_acrtc->max_cursor_height) {
9720 DRM_DEBUG_ATOMIC("Bad cursor FB size %dx%d\n",
9721 new_plane_state->fb->width,
9722 new_plane_state->fb->height);
9725 if (new_plane_state->src_w != fb->width << 16 ||
9726 new_plane_state->src_h != fb->height << 16) {
9727 DRM_DEBUG_ATOMIC("Cropping not supported for cursor plane\n");
9731 /* Pitch in pixels */
9732 pitch = fb->pitches[0] / fb->format->cpp[0];
9734 if (fb->width != pitch) {
9735 DRM_DEBUG_ATOMIC("Cursor FB width %d doesn't match pitch %d",
9744 /* FB pitch is supported by cursor plane */
9747 DRM_DEBUG_ATOMIC("Bad cursor FB pitch %d px\n", pitch);
9751 /* Core DRM takes care of checking FB modifiers, so we only need to
9752 * check tiling flags when the FB doesn't have a modifier. */
9753 if (!(fb->flags & DRM_MODE_FB_MODIFIERS)) {
9754 if (adev->family < AMDGPU_FAMILY_AI) {
9755 linear = AMDGPU_TILING_GET(afb->tiling_flags, ARRAY_MODE) != DC_ARRAY_2D_TILED_THIN1 &&
9756 AMDGPU_TILING_GET(afb->tiling_flags, ARRAY_MODE) != DC_ARRAY_1D_TILED_THIN1 &&
9757 AMDGPU_TILING_GET(afb->tiling_flags, MICRO_TILE_MODE) == 0;
9759 linear = AMDGPU_TILING_GET(afb->tiling_flags, SWIZZLE_MODE) == 0;
9762 DRM_DEBUG_ATOMIC("Cursor FB not linear");
9770 static int dm_update_plane_state(struct dc *dc,
9771 struct drm_atomic_state *state,
9772 struct drm_plane *plane,
9773 struct drm_plane_state *old_plane_state,
9774 struct drm_plane_state *new_plane_state,
9776 bool *lock_and_validation_needed)
9779 struct dm_atomic_state *dm_state = NULL;
9780 struct drm_crtc *new_plane_crtc, *old_plane_crtc;
9781 struct drm_crtc_state *old_crtc_state, *new_crtc_state;
9782 struct dm_crtc_state *dm_new_crtc_state, *dm_old_crtc_state;
9783 struct dm_plane_state *dm_new_plane_state, *dm_old_plane_state;
9784 struct amdgpu_crtc *new_acrtc;
9789 new_plane_crtc = new_plane_state->crtc;
9790 old_plane_crtc = old_plane_state->crtc;
9791 dm_new_plane_state = to_dm_plane_state(new_plane_state);
9792 dm_old_plane_state = to_dm_plane_state(old_plane_state);
9794 if (plane->type == DRM_PLANE_TYPE_CURSOR) {
9795 if (!enable || !new_plane_crtc ||
9796 drm_atomic_plane_disabling(plane->state, new_plane_state))
9799 new_acrtc = to_amdgpu_crtc(new_plane_crtc);
9801 if (new_plane_state->src_x != 0 || new_plane_state->src_y != 0) {
9802 DRM_DEBUG_ATOMIC("Cropping not supported for cursor plane\n");
9806 if (new_plane_state->fb) {
9807 ret = dm_check_cursor_fb(new_acrtc, new_plane_state,
9808 new_plane_state->fb);
9816 needs_reset = should_reset_plane(state, plane, old_plane_state,
9819 /* Remove any changed/removed planes */
9824 if (!old_plane_crtc)
9827 old_crtc_state = drm_atomic_get_old_crtc_state(
9828 state, old_plane_crtc);
9829 dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
9831 if (!dm_old_crtc_state->stream)
9834 DRM_DEBUG_ATOMIC("Disabling DRM plane: %d on DRM crtc %d\n",
9835 plane->base.id, old_plane_crtc->base.id);
9837 ret = dm_atomic_get_state(state, &dm_state);
9841 if (!dc_remove_plane_from_context(
9843 dm_old_crtc_state->stream,
9844 dm_old_plane_state->dc_state,
9845 dm_state->context)) {
9851 dc_plane_state_release(dm_old_plane_state->dc_state);
9852 dm_new_plane_state->dc_state = NULL;
9854 *lock_and_validation_needed = true;
9856 } else { /* Add new planes */
9857 struct dc_plane_state *dc_new_plane_state;
9859 if (drm_atomic_plane_disabling(plane->state, new_plane_state))
9862 if (!new_plane_crtc)
9865 new_crtc_state = drm_atomic_get_new_crtc_state(state, new_plane_crtc);
9866 dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
9868 if (!dm_new_crtc_state->stream)
9874 ret = dm_plane_helper_check_state(new_plane_state, new_crtc_state);
9878 WARN_ON(dm_new_plane_state->dc_state);
9880 dc_new_plane_state = dc_create_plane_state(dc);
9881 if (!dc_new_plane_state)
9884 DRM_DEBUG_ATOMIC("Enabling DRM plane: %d on DRM crtc %d\n",
9885 plane->base.id, new_plane_crtc->base.id);
9887 ret = fill_dc_plane_attributes(
9888 drm_to_adev(new_plane_crtc->dev),
9893 dc_plane_state_release(dc_new_plane_state);
9897 ret = dm_atomic_get_state(state, &dm_state);
9899 dc_plane_state_release(dc_new_plane_state);
9904 * Any atomic check errors that occur after this will
9905 * not need a release. The plane state will be attached
9906 * to the stream, and therefore part of the atomic
9907 * state. It'll be released when the atomic state is
9910 if (!dc_add_plane_to_context(
9912 dm_new_crtc_state->stream,
9914 dm_state->context)) {
9916 dc_plane_state_release(dc_new_plane_state);
9920 dm_new_plane_state->dc_state = dc_new_plane_state;
9922 /* Tell DC to do a full surface update every time there
9923 * is a plane change. Inefficient, but works for now.
9925 dm_new_plane_state->dc_state->update_flags.bits.full_update = 1;
9927 *lock_and_validation_needed = true;
9934 static int dm_check_crtc_cursor(struct drm_atomic_state *state,
9935 struct drm_crtc *crtc,
9936 struct drm_crtc_state *new_crtc_state)
9938 struct drm_plane_state *new_cursor_state, *new_primary_state;
9939 int cursor_scale_w, cursor_scale_h, primary_scale_w, primary_scale_h;
9941 /* On DCE and DCN there is no dedicated hardware cursor plane. We get a
9942 * cursor per pipe but it's going to inherit the scaling and
9943 * positioning from the underlying pipe. Check that the cursor plane's
9944 * scaling matches the primary plane's. */
9946 new_cursor_state = drm_atomic_get_new_plane_state(state, crtc->cursor);
9947 new_primary_state = drm_atomic_get_new_plane_state(state, crtc->primary);
9948 if (!new_cursor_state || !new_primary_state ||
9949 !new_cursor_state->fb || !new_primary_state->fb) {
9953 cursor_scale_w = new_cursor_state->crtc_w * 1000 /
9954 (new_cursor_state->src_w >> 16);
9955 cursor_scale_h = new_cursor_state->crtc_h * 1000 /
9956 (new_cursor_state->src_h >> 16);
9958 primary_scale_w = new_primary_state->crtc_w * 1000 /
9959 (new_primary_state->src_w >> 16);
9960 primary_scale_h = new_primary_state->crtc_h * 1000 /
9961 (new_primary_state->src_h >> 16);
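/*
* The scale factors are in 0.001 units: on-screen size over the 16.16
* fixed-point source size. For example a 64x64 cursor shown at 64x64
* gives 1000, while a 1920x1080 source scaled to 3840x2160 gives 2000,
* and such a mismatch is rejected below.
*/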
9963 if (cursor_scale_w != primary_scale_w ||
9964 cursor_scale_h != primary_scale_h) {
9965 DRM_DEBUG_ATOMIC("Cursor plane scaling doesn't match primary plane\n");
9972 #if defined(CONFIG_DRM_AMD_DC_DCN)
9973 static int add_affected_mst_dsc_crtcs(struct drm_atomic_state *state, struct drm_crtc *crtc)
9975 struct drm_connector *connector;
9976 struct drm_connector_state *conn_state;
9977 struct amdgpu_dm_connector *aconnector = NULL;
9979 for_each_new_connector_in_state(state, connector, conn_state, i) {
9980 if (conn_state->crtc != crtc)
9983 aconnector = to_amdgpu_dm_connector(connector);
9984 if (!aconnector->port || !aconnector->mst_port)
9993 return drm_dp_mst_add_affected_dsc_crtcs(state, &aconnector->mst_port->mst_mgr);
9997 static int validate_overlay(struct drm_atomic_state *state)
10000 struct drm_plane *plane;
10001 struct drm_plane_state *old_plane_state, *new_plane_state;
10002 struct drm_plane_state *primary_state, *overlay_state = NULL;
10004 /* Check if primary plane is contained inside overlay */
10005 for_each_oldnew_plane_in_state_reverse(state, plane, old_plane_state, new_plane_state, i) {
10006 if (plane->type == DRM_PLANE_TYPE_OVERLAY) {
10007 if (drm_atomic_plane_disabling(plane->state, new_plane_state))
10010 overlay_state = new_plane_state;
10015 /* check if we're making changes to the overlay plane */
10016 if (!overlay_state)
10019 /* check if overlay plane is enabled */
10020 if (!overlay_state->crtc)
10023 /* find the primary plane for the CRTC that the overlay is enabled on */
10024 primary_state = drm_atomic_get_plane_state(state, overlay_state->crtc->primary);
10025 if (IS_ERR(primary_state))
10026 return PTR_ERR(primary_state);
10028 /* check if primary plane is enabled */
10029 if (!primary_state->crtc)
10032 /* Perform the bounds check to ensure the overlay plane covers the primary */
10033 if (primary_state->crtc_x < overlay_state->crtc_x ||
10034 primary_state->crtc_y < overlay_state->crtc_y ||
10035 primary_state->crtc_x + primary_state->crtc_w > overlay_state->crtc_x + overlay_state->crtc_w ||
10036 primary_state->crtc_y + primary_state->crtc_h > overlay_state->crtc_y + overlay_state->crtc_h) {
10037 DRM_DEBUG_ATOMIC("Overlay plane is enabled with hardware cursor but does not fully cover primary plane\n");
10045 * amdgpu_dm_atomic_check() - Atomic check implementation for AMDgpu DM.
10046 * @dev: The DRM device
10047 * @state: The atomic state to commit
10049 * Validate that the given atomic state is programmable by DC into hardware.
10050 * This involves constructing a &struct dc_state reflecting the new hardware
10051 * state we wish to commit, then querying DC to see if it is programmable. It's
10052 * important not to modify the existing DC state. Otherwise, atomic_check
10053 * may unexpectedly commit hardware changes.
10055 * When validating the DC state, it's important that the right locks are
10056 * acquired. For full updates case which removes/adds/updates streams on one
10057 * CRTC while flipping on another CRTC, acquiring global lock will guarantee
10058 * that any such full update commit will wait for completion of any outstanding
10059 * flip using DRMs synchronization events.
10061 * Note that DM adds the affected connectors for all CRTCs in state, even when that
10062 * might not seem necessary. This is because DC stream creation requires the
10063 * DC sink, which is tied to the DRM connector state. Cleaning this up should
10064 * be possible but non-trivial - a possible TODO item.
10066 * Return: -Error code if validation failed.
10068 static int amdgpu_dm_atomic_check(struct drm_device *dev,
10069 struct drm_atomic_state *state)
10071 struct amdgpu_device *adev = drm_to_adev(dev);
10072 struct dm_atomic_state *dm_state = NULL;
10073 struct dc *dc = adev->dm.dc;
10074 struct drm_connector *connector;
10075 struct drm_connector_state *old_con_state, *new_con_state;
10076 struct drm_crtc *crtc;
10077 struct drm_crtc_state *old_crtc_state, *new_crtc_state;
10078 struct drm_plane *plane;
10079 struct drm_plane_state *old_plane_state, *new_plane_state;
10080 enum dc_status status;
10082 bool lock_and_validation_needed = false;
10083 struct dm_crtc_state *dm_old_crtc_state;
10085 trace_amdgpu_dm_atomic_check_begin(state);
10087 ret = drm_atomic_helper_check_modeset(dev, state);
10091 /* Check connector changes */
10092 for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) {
10093 struct dm_connector_state *dm_old_con_state = to_dm_connector_state(old_con_state);
10094 struct dm_connector_state *dm_new_con_state = to_dm_connector_state(new_con_state);
10096 /* Skip connectors that are disabled or part of modeset already. */
10097 if (!old_con_state->crtc && !new_con_state->crtc)
10100 if (!new_con_state->crtc)
10103 new_crtc_state = drm_atomic_get_crtc_state(state, new_con_state->crtc);
10104 if (IS_ERR(new_crtc_state)) {
10105 ret = PTR_ERR(new_crtc_state);
10109 if (dm_old_con_state->abm_level !=
10110 dm_new_con_state->abm_level)
10111 new_crtc_state->connectors_changed = true;
10114 #if defined(CONFIG_DRM_AMD_DC_DCN)
10115 if (dc_resource_is_dsc_encoding_supported(dc)) {
10116 for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
10117 if (drm_atomic_crtc_needs_modeset(new_crtc_state)) {
10118 ret = add_affected_mst_dsc_crtcs(state, crtc);
10125 for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
10126 dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
10128 if (!drm_atomic_crtc_needs_modeset(new_crtc_state) &&
10129 !new_crtc_state->color_mgmt_changed &&
10130 old_crtc_state->vrr_enabled == new_crtc_state->vrr_enabled &&
10131 dm_old_crtc_state->dsc_force_changed == false)
10134 if (!new_crtc_state->enable)
10137 ret = drm_atomic_add_affected_connectors(state, crtc);
10141 ret = drm_atomic_add_affected_planes(state, crtc);
10145 if (dm_old_crtc_state->dsc_force_changed)
10146 new_crtc_state->mode_changed = true;
10150 * Add all primary and overlay planes on the CRTC to the state
10151 * whenever a plane is enabled to maintain correct z-ordering
10152 * and to enable fast surface updates.
10154 drm_for_each_crtc(crtc, dev) {
10155 bool modified = false;
10157 for_each_oldnew_plane_in_state(state, plane, old_plane_state, new_plane_state, i) {
10158 if (plane->type == DRM_PLANE_TYPE_CURSOR)
10161 if (new_plane_state->crtc == crtc ||
10162 old_plane_state->crtc == crtc) {
10171 drm_for_each_plane_mask(plane, state->dev, crtc->state->plane_mask) {
10172 if (plane->type == DRM_PLANE_TYPE_CURSOR)
10176 drm_atomic_get_plane_state(state, plane);
10178 if (IS_ERR(new_plane_state)) {
10179 ret = PTR_ERR(new_plane_state);
10185 /* Remove existing planes if they are modified */
10186 for_each_oldnew_plane_in_state_reverse(state, plane, old_plane_state, new_plane_state, i) {
10187 ret = dm_update_plane_state(dc, state, plane,
10191 &lock_and_validation_needed);
10196 /* Disable all crtcs which require disable */
10197 for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
10198 ret = dm_update_crtc_state(&adev->dm, state, crtc,
10202 &lock_and_validation_needed);
10207 /* Enable all crtcs which require enable */
10208 for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
10209 ret = dm_update_crtc_state(&adev->dm, state, crtc,
10213 &lock_and_validation_needed);
10218 ret = validate_overlay(state);
10222 /* Add new/modified planes */
10223 for_each_oldnew_plane_in_state_reverse(state, plane, old_plane_state, new_plane_state, i) {
10224 ret = dm_update_plane_state(dc, state, plane,
10228 &lock_and_validation_needed);
10233 /* Run this here since we want to validate the streams we created */
10234 ret = drm_atomic_helper_check_planes(dev, state);
10238 /* Check cursor plane scaling */
10239 for_each_new_crtc_in_state(state, crtc, new_crtc_state, i) {
10240 ret = dm_check_crtc_cursor(state, crtc, new_crtc_state);
10245 if (state->legacy_cursor_update) {
10247 * This is a fast cursor update coming from the plane update
10248 * helper; check if it can be done asynchronously for better performance.
10251 state->async_update =
10252 !drm_atomic_helper_async_check(dev, state);
10255 * Skip the remaining global validation if this is an async
10256 * update. Cursor updates can be done without affecting
10257 * state or bandwidth calcs and this avoids the performance
10258 * penalty of locking the private state object and
10259 * allocating a new dc_state.
10261 if (state->async_update)
10265 /* Check scaling and underscan changes */
10266 /* TODO: Scaling-change validation was removed due to the inability to
10267 * commit a new stream into the context w/o causing a full reset.
10268 * Need to decide how to handle this.
10270 for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) {
10271 struct dm_connector_state *dm_old_con_state = to_dm_connector_state(old_con_state);
10272 struct dm_connector_state *dm_new_con_state = to_dm_connector_state(new_con_state);
10273 struct amdgpu_crtc *acrtc = to_amdgpu_crtc(dm_new_con_state->base.crtc);
10275 /* Skip any modesets/resets */
10276 if (!acrtc || drm_atomic_crtc_needs_modeset(
10277 drm_atomic_get_new_crtc_state(state, &acrtc->base)))
10280 /* Skip anything that is not a scaling or underscan change */
10281 if (!is_scaling_state_different(dm_new_con_state, dm_old_con_state))
10284 lock_and_validation_needed = true;
10288 * Streams and planes are reset when there are changes that affect
10289 * bandwidth. Anything that affects bandwidth needs to go through
10290 * DC global validation to ensure that the configuration can be applied to hardware.
10293 * We have to currently stall out here in atomic_check for outstanding
10294 * commits to finish in this case because our IRQ handlers reference
10295 * DRM state directly - we can end up disabling interrupts too early if we don't.
10298 * TODO: Remove this stall and drop DM state private objects.
10300 if (lock_and_validation_needed) {
10301 ret = dm_atomic_get_state(state, &dm_state);
10305 ret = do_aquire_global_lock(dev, state);
10309 #if defined(CONFIG_DRM_AMD_DC_DCN)
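/*
 * Recompute the DSC configuration for every MST stream in the new
 * state and update the corresponding VCPI slot allocations; failure of
 * either step means the link cannot carry the requested configuration.
 */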
10310 if (!compute_mst_dsc_configs_for_state(state, dm_state->context))
10313 ret = dm_update_mst_vcpi_slots_for_dsc(state, dm_state->context);
10319 * Perform validation of MST topology in the state:
10320 * We need to perform MST atomic check before calling
10321 * dc_validate_global_state(), or there is a chance
10322 * to get stuck in an infinite loop and hang eventually.
10324 ret = drm_dp_mst_atomic_check(state);
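/*
 * Full DC validation: checks bandwidth, clocks and resource assignment
 * for the complete new context, not just the pieces touched by this
 * commit.
 */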
10327 status = dc_validate_global_state(dc, dm_state->context, false);
10328 if (status != DC_OK) {
10329 DC_LOG_WARNING("DC global validation failure: %s (%d)",
10330 dc_status_to_str(status), status);
10336 * The commit is a fast update. Fast updates shouldn't change
10337 * the DC context or affect global validation, and their commit
10338 * work can be done in parallel with other commits not touching
10339 * the same resource. If we have a new DC context as part of
10340 * the DM atomic state from validation we need to free it and
10341 * retain the existing one instead.
10343 * Furthermore, since the DM atomic state only contains the DC
10344 * context and can safely be annulled, we can free the state
10345 * and clear the associated private object now to free
10346 * some memory and avoid a possible use-after-free later.
10349 for (i = 0; i < state->num_private_objs; i++) {
10350 struct drm_private_obj *obj = state->private_objs[i].ptr;
10352 if (obj->funcs == adev->dm.atomic_obj.funcs) {
10353 int j = state->num_private_objs-1;
10355 dm_atomic_destroy_state(obj,
10356 state->private_objs[i].state);
10358 /* If i is not at the end of the array then the
10359 * last element needs to be moved to where i was
10360 * before the array can safely be truncated.
10363 state->private_objs[i] =
10364 state->private_objs[j];
10366 state->private_objs[j].ptr = NULL;
10367 state->private_objs[j].state = NULL;
10368 state->private_objs[j].old_state = NULL;
10369 state->private_objs[j].new_state = NULL;
10371 state->num_private_objs = j;
10377 /* Store the overall update type for use later in atomic check. */
10378 for_each_new_crtc_in_state (state, crtc, new_crtc_state, i) {
10379 struct dm_crtc_state *dm_new_crtc_state =
10380 to_dm_crtc_state(new_crtc_state);
10382 dm_new_crtc_state->update_type = lock_and_validation_needed ?
10387 /* Must be success */
10390 trace_amdgpu_dm_atomic_check_finish(state, ret);
10395 if (ret == -EDEADLK)
10396 DRM_DEBUG_DRIVER("Atomic check stopped to avoid deadlock.\n");
10397 else if (ret == -EINTR || ret == -EAGAIN || ret == -ERESTARTSYS)
10398 DRM_DEBUG_DRIVER("Atomic check stopped due to signal.\n");
10400 DRM_DEBUG_DRIVER("Atomic check failed with err: %d\n", ret);
10402 trace_amdgpu_dm_atomic_check_finish(state, ret);
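/*
 * Reads the sink's DP_DOWN_STREAM_PORT_COUNT DPCD register and reports
 * whether the MSA_TIMING_PAR_IGNORED bit is set, i.e. whether the sink
 * can operate without MSA timing parameters (a prerequisite for
 * variable refresh rate over DP).
 */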
10407 static bool is_dp_capable_without_timing_msa(struct dc *dc,
10408 struct amdgpu_dm_connector *amdgpu_dm_connector)
10411 bool capable = false;
10413 if (amdgpu_dm_connector->dc_link &&
10414 dm_helpers_dp_read_dpcd(
10416 amdgpu_dm_connector->dc_link,
10417 DP_DOWN_STREAM_PORT_COUNT,
10419 sizeof(dpcd_data))) {
10420 capable = (dpcd_data & DP_MSA_TIMING_PAR_IGNORED) ? true : false;
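/*
 * Feed a CEA extension block to the DC EDID parser (DMCU/DMUB firmware)
 * in 8-byte chunks and, once the whole block has been consumed, read
 * back any AMD vendor-specific data block describing the FreeSync
 * refresh-rate range.
 */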
10426 static bool parse_edid_cea(struct amdgpu_dm_connector *aconnector,
10427 uint8_t *edid_ext, int len,
10428 struct amdgpu_hdmi_vsdb_info *vsdb_info)
10431 struct amdgpu_device *adev = drm_to_adev(aconnector->base.dev);
10432 struct dc *dc = adev->dm.dc;
10434 /* send extension block to DMCU for parsing */
10435 for (i = 0; i < len; i += 8) {
10439 /* send 8 bytes at a time */
10440 if (!dc_edid_parser_send_cea(dc, i, len, &edid_ext[i], 8))
10444 /* EDID block send completed; expect the parse result */
10445 int version, min_rate, max_rate;
10447 res = dc_edid_parser_recv_amd_vsdb(dc, &version, &min_rate, &max_rate);
10449 /* amd vsdb found */
10450 vsdb_info->freesync_supported = 1;
10451 vsdb_info->amd_vsdb_version = version;
10452 vsdb_info->min_refresh_rate_hz = min_rate;
10453 vsdb_info->max_refresh_rate_hz = max_rate;
10461 res = dc_edid_parser_recv_cea_ack(dc, &offset);
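/*
 * Locate the CEA extension block in the EDID and hand it to
 * parse_edid_cea(); returns the extension index when a valid AMD VSDB
 * was found, -ENODEV otherwise.
 */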
10469 static int parse_hdmi_amd_vsdb(struct amdgpu_dm_connector *aconnector,
10470 struct edid *edid, struct amdgpu_hdmi_vsdb_info *vsdb_info)
10472 uint8_t *edid_ext = NULL;
10474 bool valid_vsdb_found = false;
10476 /*----- drm_find_cea_extension() -----*/
10477 /* No EDID or EDID extensions */
10478 if (edid == NULL || edid->extensions == 0)
10481 /* Find CEA extension */
10482 for (i = 0; i < edid->extensions; i++) {
10483 edid_ext = (uint8_t *)edid + EDID_LENGTH * (i + 1);
10484 if (edid_ext[0] == CEA_EXT)
10488 if (i == edid->extensions)
10491 /*----- cea_db_offsets() -----*/
10492 if (edid_ext[0] != CEA_EXT)
10495 valid_vsdb_found = parse_edid_cea(aconnector, edid_ext, EDID_LENGTH, vsdb_info);
10497 return valid_vsdb_found ? i : -ENODEV;
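/*
 * Update the connector's FreeSync/VRR capabilities from the EDID:
 * DP/eDP sinks are probed through the monitor range descriptor, HDMI
 * sinks through the AMD vendor-specific data block, and the result is
 * reflected in the connector's vrr_capable property.
 */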
10500 void amdgpu_dm_update_freesync_caps(struct drm_connector *connector,
10504 struct detailed_timing *timing;
10505 struct detailed_non_pixel *data;
10506 struct detailed_data_monitor_range *range;
10507 struct amdgpu_dm_connector *amdgpu_dm_connector =
10508 to_amdgpu_dm_connector(connector);
10509 struct dm_connector_state *dm_con_state = NULL;
10511 struct drm_device *dev = connector->dev;
10512 struct amdgpu_device *adev = drm_to_adev(dev);
10513 bool freesync_capable = false;
10514 struct amdgpu_hdmi_vsdb_info vsdb_info = {0};
10516 if (!connector->state) {
10517 DRM_ERROR("%s - Connector has no state", __func__);
10522 dm_con_state = to_dm_connector_state(connector->state);
10524 amdgpu_dm_connector->min_vfreq = 0;
10525 amdgpu_dm_connector->max_vfreq = 0;
10526 amdgpu_dm_connector->pixel_clock_mhz = 0;
10531 dm_con_state = to_dm_connector_state(connector->state);
10533 if (!amdgpu_dm_connector->dc_sink) {
10534 DRM_ERROR("dc_sink NULL, could not add FreeSync module.\n");
10537 if (!adev->dm.freesync_module)
10541 if (amdgpu_dm_connector->dc_sink->sink_signal == SIGNAL_TYPE_DISPLAY_PORT
10542 || amdgpu_dm_connector->dc_sink->sink_signal == SIGNAL_TYPE_EDP) {
10543 bool edid_check_required = false;
10546 edid_check_required = is_dp_capable_without_timing_msa(
10548 amdgpu_dm_connector);
10551 if (edid_check_required == true && (edid->version > 1 ||
10552 (edid->version == 1 && edid->revision > 1))) {
10553 for (i = 0; i < 4; i++) {
10555 timing = &edid->detailed_timings[i];
10556 data = &timing->data.other_data;
10557 range = &data->data.range;
10559 * Check if monitor has continuous frequency mode
10561 if (data->type != EDID_DETAIL_MONITOR_RANGE)
10564 * Check for flag range limits only. If flag == 1 then
10565 * no additional timing information provided.
10566 * Default GTF, GTF Secondary curve and CVT are not
10569 if (range->flags != 1)
10572 amdgpu_dm_connector->min_vfreq = range->min_vfreq;
10573 amdgpu_dm_connector->max_vfreq = range->max_vfreq;
10574 amdgpu_dm_connector->pixel_clock_mhz =
10575 range->pixel_clock_mhz * 10;
10577 connector->display_info.monitor_range.min_vfreq = range->min_vfreq;
10578 connector->display_info.monitor_range.max_vfreq = range->max_vfreq;
10583 if (amdgpu_dm_connector->max_vfreq -
10584 amdgpu_dm_connector->min_vfreq > 10) {
10586 freesync_capable = true;
10589 } else if (edid && amdgpu_dm_connector->dc_sink->sink_signal == SIGNAL_TYPE_HDMI_TYPE_A) {
10590 i = parse_hdmi_amd_vsdb(amdgpu_dm_connector, edid, &vsdb_info);
10591 if (i >= 0 && vsdb_info.freesync_supported) {
10592 timing = &edid->detailed_timings[i];
10593 data = &timing->data.other_data;
10595 amdgpu_dm_connector->min_vfreq = vsdb_info.min_refresh_rate_hz;
10596 amdgpu_dm_connector->max_vfreq = vsdb_info.max_refresh_rate_hz;
10597 if (amdgpu_dm_connector->max_vfreq - amdgpu_dm_connector->min_vfreq > 10)
10598 freesync_capable = true;
10600 connector->display_info.monitor_range.min_vfreq = vsdb_info.min_refresh_rate_hz;
10601 connector->display_info.monitor_range.max_vfreq = vsdb_info.max_refresh_rate_hz;
10607 dm_con_state->freesync_capable = freesync_capable;
10609 if (connector->vrr_capable_property)
10610 drm_connector_set_vrr_capable_property(connector,
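/*
 * Read the PSR capability (DP_PSR_SUPPORT) from an eDP sink's DPCD and
 * record which PSR version, if any, the link supports.
 */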
10614 static void amdgpu_dm_set_psr_caps(struct dc_link *link)
10616 uint8_t dpcd_data[EDP_PSR_RECEIVER_CAP_SIZE];
10618 if (!(link->connector_signal & SIGNAL_TYPE_EDP))
10620 if (link->type == dc_connection_none)
10622 if (dm_helpers_dp_read_dpcd(NULL, link, DP_PSR_SUPPORT,
10623 dpcd_data, sizeof(dpcd_data))) {
10624 link->dpcd_caps.psr_caps.psr_version = dpcd_data[0];
10626 if (dpcd_data[0] == 0) {
10627 link->psr_settings.psr_version = DC_PSR_VERSION_UNSUPPORTED;
10628 link->psr_settings.psr_feature_enabled = false;
10630 link->psr_settings.psr_version = DC_PSR_VERSION_1;
10631 link->psr_settings.psr_feature_enabled = true;
10634 DRM_INFO("PSR support: %d\n", link->psr_settings.psr_feature_enabled);
10639 * amdgpu_dm_link_setup_psr() - configure psr link
10640 * @stream: stream state
10642 * Return: true on success
10644 static bool amdgpu_dm_link_setup_psr(struct dc_stream_state *stream)
10646 struct dc_link *link = NULL;
10647 struct psr_config psr_config = {0};
10648 struct psr_context psr_context = {0};
10651 if (stream == NULL)
10654 link = stream->link;
10656 psr_config.psr_version = link->dpcd_caps.psr_caps.psr_version;
10658 if (psr_config.psr_version > 0) {
10659 psr_config.psr_exit_link_training_required = 0x1;
10660 psr_config.psr_frame_capture_indication_req = 0;
10661 psr_config.psr_rfb_setup_time = 0x37;
10662 psr_config.psr_sdp_transmit_line_num_deadline = 0x20;
10663 psr_config.allow_smu_optimizations = 0x0;
10665 ret = dc_link_setup_psr(link, stream, &psr_config, &psr_context);
10668 DRM_DEBUG_DRIVER("PSR link: %d\n", link->psr_settings.psr_feature_enabled);
10674 * amdgpu_dm_psr_enable() - enable psr f/w
10675 * @stream: stream state
10677 * Return: true on success
10679 bool amdgpu_dm_psr_enable(struct dc_stream_state *stream)
10681 struct dc_link *link = stream->link;
10682 unsigned int vsync_rate_hz = 0;
10683 struct dc_static_screen_params params = {0};
10684 /* Calculate number of static frames before generating interrupt to hardware. */
10687 // Init fail safe of 2 frames static
10688 unsigned int num_frames_static = 2;
10690 DRM_DEBUG_DRIVER("Enabling psr...\n");
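/*
 * Refresh rate in Hz: pixel clock divided by the total frame size
 * (h_total * v_total). pix_clk_100hz is expressed in units of 100 Hz,
 * hence the extra factor of 100.
 */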
10692 vsync_rate_hz = div64_u64(div64_u64((
10693 stream->timing.pix_clk_100hz * 100),
10694 stream->timing.v_total),
10695 stream->timing.h_total);
10698 * Calculate number of frames such that at least 30 ms of time has passed.
10701 if (vsync_rate_hz != 0) {
10702 unsigned int frame_time_microsec = 1000000 / vsync_rate_hz;
10703 num_frames_static = (30000 / frame_time_microsec) + 1;
10706 params.triggers.cursor_update = true;
10707 params.triggers.overlay_update = true;
10708 params.triggers.surface_update = true;
10709 params.num_frames = num_frames_static;
10711 dc_stream_set_static_screen_params(link->ctx->dc,
10715 return dc_link_set_psr_allow_active(link, true, false, false);
10719 * amdgpu_dm_psr_disable() - disable psr f/w
10720 * @stream: stream state
10722 * Return: true on success
10724 static bool amdgpu_dm_psr_disable(struct dc_stream_state *stream)
10727 DRM_DEBUG_DRIVER("Disabling psr...\n");
10729 return dc_link_set_psr_allow_active(stream->link, false, true, false);
10733 * amdgpu_dm_psr_disable_all() - disable psr f/w
10734 * if psr is enabled on any stream
10736 * Return: true on success
10738 static bool amdgpu_dm_psr_disable_all(struct amdgpu_display_manager *dm)
10740 DRM_DEBUG_DRIVER("Disabling psr if psr is enabled on any stream\n");
10741 return dc_set_psr_allow_active(dm->dc, false);
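/*
 * Propagate the current force_timing_sync setting to every active
 * stream and trigger a timing resynchronization across them.
 */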
10744 void amdgpu_dm_trigger_timing_sync(struct drm_device *dev)
10746 struct amdgpu_device *adev = drm_to_adev(dev);
10747 struct dc *dc = adev->dm.dc;
10750 mutex_lock(&adev->dm.dc_lock);
10751 if (dc->current_state) {
10752 for (i = 0; i < dc->current_state->stream_count; ++i)
10753 dc->current_state->streams[i]
10754 ->triggered_crtc_reset.enabled =
10755 adev->dm.force_timing_sync;
10757 dm_enable_per_frame_crtc_master_sync(dc->current_state);
10758 dc_trigger_sync(dc, dc->current_state);
10760 mutex_unlock(&adev->dm.dc_lock);
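/*
 * Register access helpers used by DC. They go through CGS and record
 * each access in the amdgpu_dc_wreg/amdgpu_dc_rreg tracepoints; with
 * DM_CHECK_ADDR_0 defined, accesses to register offset 0 are rejected.
 */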
10763 void dm_write_reg_func(const struct dc_context *ctx, uint32_t address,
10764 uint32_t value, const char *func_name)
10766 #ifdef DM_CHECK_ADDR_0
10767 if (address == 0) {
10768 DC_ERR("invalid register write; address = 0\n");
10772 cgs_write_register(ctx->cgs_device, address, value);
10773 trace_amdgpu_dc_wreg(&ctx->perf_trace->write_count, address, value);
10776 uint32_t dm_read_reg_func(const struct dc_context *ctx, uint32_t address,
10777 const char *func_name)
10780 #ifdef DM_CHECK_ADDR_0
10781 if (address == 0) {
10782 DC_ERR("invalid register read; address = 0\n");
10787 if (ctx->dmub_srv &&
10788 ctx->dmub_srv->reg_helper_offload.gather_in_progress &&
10789 !ctx->dmub_srv->reg_helper_offload.should_burst_write) {
10794 value = cgs_read_register(ctx->cgs_device, address);
10796 trace_amdgpu_dc_rreg(&ctx->perf_trace->read_count, address, value);
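/*
 * Synchronous wrapper around a DMUB-offloaded AUX transfer: kick off
 * the transaction, wait (up to 10 seconds) for the completion signalled
 * from the DMUB notification, then hand back the reply code, any read
 * data and the reply length.
 */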
10801 int amdgpu_dm_process_dmub_aux_transfer_sync(struct dc_context *ctx, unsigned int linkIndex,
10802 struct aux_payload *payload, enum aux_return_code_type *operation_result)
10804 struct amdgpu_device *adev = ctx->driver_context;
10807 dc_process_dmub_aux_transfer_async(ctx->dc, linkIndex, payload);
10808 ret = wait_for_completion_interruptible_timeout(&adev->dm.dmub_aux_transfer_done, 10*HZ);
10810 *operation_result = AUX_RET_ERROR_TIMEOUT;
10813 *operation_result = (enum aux_return_code_type)adev->dm.dmub_notify->result;
10815 if (adev->dm.dmub_notify->result == AUX_RET_SUCCESS) {
10816 (*payload->reply) = adev->dm.dmub_notify->aux_reply.command;
10818 // For the read case, copy the reply data into the payload
10819 if (!payload->write && adev->dm.dmub_notify->aux_reply.length &&
10820 (*payload->reply == AUX_TRANSACTION_REPLY_AUX_ACK))
10821 memcpy(payload->data, adev->dm.dmub_notify->aux_reply.data,
10822 adev->dm.dmub_notify->aux_reply.length);
10825 return adev->dm.dmub_notify->aux_reply.length;