2 * Copyright 2015 Advanced Micro Devices, Inc.
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
26 /* The caprices of the preprocessor require that this be declared right here */
27 #define CREATE_TRACE_POINTS
29 #include "dm_services_types.h"
31 #include "dc/inc/core_types.h"
32 #include "dal_asic_id.h"
33 #include "dmub/dmub_srv.h"
34 #include "dc/inc/hw/dmcu.h"
35 #include "dc/inc/hw/abm.h"
36 #include "dc/dc_dmub_srv.h"
37 #include "dc/dc_edid_parser.h"
38 #include "dc/dc_stat.h"
39 #include "amdgpu_dm_trace.h"
43 #include "amdgpu_display.h"
44 #include "amdgpu_ucode.h"
46 #include "amdgpu_dm.h"
47 #ifdef CONFIG_DRM_AMD_DC_HDCP
48 #include "amdgpu_dm_hdcp.h"
49 #include <drm/drm_hdcp.h>
51 #include "amdgpu_pm.h"
53 #include "amd_shared.h"
54 #include "amdgpu_dm_irq.h"
55 #include "dm_helpers.h"
56 #include "amdgpu_dm_mst_types.h"
57 #if defined(CONFIG_DEBUG_FS)
58 #include "amdgpu_dm_debugfs.h"
61 #include "ivsrcid/ivsrcid_vislands30.h"
63 #include "i2caux_interface.h"
64 #include <linux/module.h>
65 #include <linux/moduleparam.h>
66 #include <linux/types.h>
67 #include <linux/pm_runtime.h>
68 #include <linux/pci.h>
69 #include <linux/firmware.h>
70 #include <linux/component.h>
72 #include <drm/drm_atomic.h>
73 #include <drm/drm_atomic_uapi.h>
74 #include <drm/drm_atomic_helper.h>
75 #include <drm/drm_dp_mst_helper.h>
76 #include <drm/drm_fb_helper.h>
77 #include <drm/drm_fourcc.h>
78 #include <drm/drm_edid.h>
79 #include <drm/drm_vblank.h>
80 #include <drm/drm_audio_component.h>
82 #if defined(CONFIG_DRM_AMD_DC_DCN)
83 #include "ivsrcid/dcn/irqsrcs_dcn_1_0.h"
85 #include "dcn/dcn_1_0_offset.h"
86 #include "dcn/dcn_1_0_sh_mask.h"
87 #include "soc15_hw_ip.h"
88 #include "vega10_ip_offset.h"
90 #include "soc15_common.h"
93 #include "modules/inc/mod_freesync.h"
94 #include "modules/power/power_helpers.h"
95 #include "modules/inc/mod_info_packet.h"
97 #define FIRMWARE_RENOIR_DMUB "amdgpu/renoir_dmcub.bin"
98 MODULE_FIRMWARE(FIRMWARE_RENOIR_DMUB);
99 #define FIRMWARE_SIENNA_CICHLID_DMUB "amdgpu/sienna_cichlid_dmcub.bin"
100 MODULE_FIRMWARE(FIRMWARE_SIENNA_CICHLID_DMUB);
101 #define FIRMWARE_NAVY_FLOUNDER_DMUB "amdgpu/navy_flounder_dmcub.bin"
102 MODULE_FIRMWARE(FIRMWARE_NAVY_FLOUNDER_DMUB);
103 #define FIRMWARE_GREEN_SARDINE_DMUB "amdgpu/green_sardine_dmcub.bin"
104 MODULE_FIRMWARE(FIRMWARE_GREEN_SARDINE_DMUB);
105 #define FIRMWARE_VANGOGH_DMUB "amdgpu/vangogh_dmcub.bin"
106 MODULE_FIRMWARE(FIRMWARE_VANGOGH_DMUB);
107 #define FIRMWARE_DIMGREY_CAVEFISH_DMUB "amdgpu/dimgrey_cavefish_dmcub.bin"
108 MODULE_FIRMWARE(FIRMWARE_DIMGREY_CAVEFISH_DMUB);
109 #define FIRMWARE_BEIGE_GOBY_DMUB "amdgpu/beige_goby_dmcub.bin"
110 MODULE_FIRMWARE(FIRMWARE_BEIGE_GOBY_DMUB);
112 #define FIRMWARE_RAVEN_DMCU "amdgpu/raven_dmcu.bin"
113 MODULE_FIRMWARE(FIRMWARE_RAVEN_DMCU);
115 #define FIRMWARE_NAVI12_DMCU "amdgpu/navi12_dmcu.bin"
116 MODULE_FIRMWARE(FIRMWARE_NAVI12_DMCU);
118 /* Number of bytes in PSP header for firmware. */
119 #define PSP_HEADER_BYTES 0x100
121 /* Number of bytes in PSP footer for firmware. */
122 #define PSP_FOOTER_BYTES 0x100
127 * The AMDgpu display manager, **amdgpu_dm** (or even simpler,
128 * **dm**) sits between DRM and DC. It acts as a liaison, converting DRM
129 * requests into DC requests, and DC responses into DRM responses.
131 * The root control structure is &struct amdgpu_display_manager.
134 /* basic init/fini API */
135 static int amdgpu_dm_init(struct amdgpu_device *adev);
136 static void amdgpu_dm_fini(struct amdgpu_device *adev);
137 static bool is_freesync_video_mode(const struct drm_display_mode *mode, struct amdgpu_dm_connector *aconnector);
139 static enum drm_mode_subconnector get_subconnector_type(struct dc_link *link)
141 switch (link->dpcd_caps.dongle_type) {
142 case DISPLAY_DONGLE_NONE:
143 return DRM_MODE_SUBCONNECTOR_Native;
144 case DISPLAY_DONGLE_DP_VGA_CONVERTER:
145 return DRM_MODE_SUBCONNECTOR_VGA;
146 case DISPLAY_DONGLE_DP_DVI_CONVERTER:
147 case DISPLAY_DONGLE_DP_DVI_DONGLE:
148 return DRM_MODE_SUBCONNECTOR_DVID;
149 case DISPLAY_DONGLE_DP_HDMI_CONVERTER:
150 case DISPLAY_DONGLE_DP_HDMI_DONGLE:
151 return DRM_MODE_SUBCONNECTOR_HDMIA;
152 case DISPLAY_DONGLE_DP_HDMI_MISMATCHED_DONGLE:
154 return DRM_MODE_SUBCONNECTOR_Unknown;
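/*
 * Reflect the DP dongle type reported in the DPCD caps into the DRM
 * "subconnector" property. Only DisplayPort connectors carry this property;
 * without an attached sink the value falls back to "Unknown".
 */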
158 static void update_subconnector_property(struct amdgpu_dm_connector *aconnector)
160 struct dc_link *link = aconnector->dc_link;
161 struct drm_connector *connector = &aconnector->base;
162 enum drm_mode_subconnector subconnector = DRM_MODE_SUBCONNECTOR_Unknown;
164 if (connector->connector_type != DRM_MODE_CONNECTOR_DisplayPort)
167 if (aconnector->dc_sink)
168 subconnector = get_subconnector_type(link);
170 drm_object_property_set_value(&connector->base,
171 connector->dev->mode_config.dp_subconnector_property,
176 * initializes drm_device display related structures, based on the information
177 * provided by DAL. The DRM structures are: drm_crtc, drm_connector,
178 * drm_encoder, drm_mode_config
180 * Returns 0 on success
182 static int amdgpu_dm_initialize_drm_device(struct amdgpu_device *adev);
183 /* removes and deallocates the drm structures, created by the above function */
184 static void amdgpu_dm_destroy_drm_device(struct amdgpu_display_manager *dm);
186 static int amdgpu_dm_plane_init(struct amdgpu_display_manager *dm,
187 struct drm_plane *plane,
188 unsigned long possible_crtcs,
189 const struct dc_plane_cap *plane_cap);
190 static int amdgpu_dm_crtc_init(struct amdgpu_display_manager *dm,
191 struct drm_plane *plane,
192 uint32_t link_index);
193 static int amdgpu_dm_connector_init(struct amdgpu_display_manager *dm,
194 struct amdgpu_dm_connector *amdgpu_dm_connector,
196 struct amdgpu_encoder *amdgpu_encoder);
197 static int amdgpu_dm_encoder_init(struct drm_device *dev,
198 struct amdgpu_encoder *aencoder,
199 uint32_t link_index);
201 static int amdgpu_dm_connector_get_modes(struct drm_connector *connector);
203 static void amdgpu_dm_atomic_commit_tail(struct drm_atomic_state *state);
205 static int amdgpu_dm_atomic_check(struct drm_device *dev,
206 struct drm_atomic_state *state);
208 static void handle_cursor_update(struct drm_plane *plane,
209 struct drm_plane_state *old_plane_state);
211 static void amdgpu_dm_set_psr_caps(struct dc_link *link);
212 static bool amdgpu_dm_psr_enable(struct dc_stream_state *stream);
213 static bool amdgpu_dm_link_setup_psr(struct dc_stream_state *stream);
214 static bool amdgpu_dm_psr_disable(struct dc_stream_state *stream);
215 static bool amdgpu_dm_psr_disable_all(struct amdgpu_display_manager *dm);
217 static const struct drm_format_info *
218 amd_get_format_info(const struct drm_mode_fb_cmd2 *cmd);
221 is_timing_unchanged_for_freesync(struct drm_crtc_state *old_crtc_state,
222 struct drm_crtc_state *new_crtc_state);
224 * dm_vblank_get_counter
227 * Get counter for number of vertical blanks
230 * struct amdgpu_device *adev - [in] desired amdgpu device
231 * int crtc - [in] which CRTC to get the counter from
234 * Counter for vertical blanks
236 static u32 dm_vblank_get_counter(struct amdgpu_device *adev, int crtc)
238 if (crtc >= adev->mode_info.num_crtc)
241 struct amdgpu_crtc *acrtc = adev->mode_info.crtcs[crtc];
243 if (acrtc->dm_irq_params.stream == NULL) {
244 DRM_ERROR("dc_stream_state is NULL for crtc '%d'!\n",
249 return dc_stream_get_vblank_counter(acrtc->dm_irq_params.stream);
253 static int dm_crtc_get_scanoutpos(struct amdgpu_device *adev, int crtc,
254 u32 *vbl, u32 *position)
256 uint32_t v_blank_start, v_blank_end, h_position, v_position;
258 if ((crtc < 0) || (crtc >= adev->mode_info.num_crtc))
261 struct amdgpu_crtc *acrtc = adev->mode_info.crtcs[crtc];
263 if (acrtc->dm_irq_params.stream == NULL) {
264 DRM_ERROR("dc_stream_state is NULL for crtc '%d'!\n",
270 * TODO rework base driver to use values directly.
271 * for now parse it back into reg-format
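* The packed layout mirrors the legacy registers:
*   *position = v_position    | (h_position  << 16)
*   *vbl      = v_blank_start | (v_blank_end << 16)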
273 dc_stream_get_scanoutpos(acrtc->dm_irq_params.stream,
279 *position = v_position | (h_position << 16);
280 *vbl = v_blank_start | (v_blank_end << 16);
286 static bool dm_is_idle(void *handle)
292 static int dm_wait_for_idle(void *handle)
298 static bool dm_check_soft_reset(void *handle)
303 static int dm_soft_reset(void *handle)
309 static struct amdgpu_crtc *
310 get_crtc_by_otg_inst(struct amdgpu_device *adev,
313 struct drm_device *dev = adev_to_drm(adev);
314 struct drm_crtc *crtc;
315 struct amdgpu_crtc *amdgpu_crtc;
317 if (otg_inst == -1) {
319 return adev->mode_info.crtcs[0];
322 list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
323 amdgpu_crtc = to_amdgpu_crtc(crtc);
325 if (amdgpu_crtc->otg_inst == otg_inst)
332 static inline bool amdgpu_dm_vrr_active_irq(struct amdgpu_crtc *acrtc)
334 return acrtc->dm_irq_params.freesync_config.state ==
335 VRR_STATE_ACTIVE_VARIABLE ||
336 acrtc->dm_irq_params.freesync_config.state ==
337 VRR_STATE_ACTIVE_FIXED;
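/*
 * Variable-refresh helpers: the _irq variant above reads the freesync state
 * cached in dm_irq_params (safe to use from interrupt handlers), while
 * amdgpu_dm_vrr_active() below looks at the atomic dm_crtc_state instead.
 */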
340 static inline bool amdgpu_dm_vrr_active(struct dm_crtc_state *dm_state)
342 return dm_state->freesync_config.state == VRR_STATE_ACTIVE_VARIABLE ||
343 dm_state->freesync_config.state == VRR_STATE_ACTIVE_FIXED;
346 static inline bool is_dc_timing_adjust_needed(struct dm_crtc_state *old_state,
347 struct dm_crtc_state *new_state)
349 if (new_state->freesync_config.state == VRR_STATE_ACTIVE_FIXED)
351 else if (amdgpu_dm_vrr_active(old_state) != amdgpu_dm_vrr_active(new_state))
358 * dm_pflip_high_irq() - Handle pageflip interrupt
359 * @interrupt_params: ignored
361 * Handles the pageflip interrupt by notifying all interested parties
362 * that the pageflip has been completed.
364 static void dm_pflip_high_irq(void *interrupt_params)
366 struct amdgpu_crtc *amdgpu_crtc;
367 struct common_irq_params *irq_params = interrupt_params;
368 struct amdgpu_device *adev = irq_params->adev;
370 struct drm_pending_vblank_event *e;
371 uint32_t vpos, hpos, v_blank_start, v_blank_end;
374 amdgpu_crtc = get_crtc_by_otg_inst(adev, irq_params->irq_src - IRQ_TYPE_PFLIP);
376 /* IRQ could occur when in initial stage */
377 /* TODO work and BO cleanup */
378 if (amdgpu_crtc == NULL) {
379 DC_LOG_PFLIP("CRTC is null, returning.\n");
383 spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);
385 if (amdgpu_crtc->pflip_status != AMDGPU_FLIP_SUBMITTED) {
386 DC_LOG_PFLIP("amdgpu_crtc->pflip_status = %d != AMDGPU_FLIP_SUBMITTED(%d) on crtc:%d[%p]\n",
387 amdgpu_crtc->pflip_status,
388 AMDGPU_FLIP_SUBMITTED,
389 amdgpu_crtc->crtc_id,
391 spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
395 /* page flip completed. */
396 e = amdgpu_crtc->event;
397 amdgpu_crtc->event = NULL;
402 vrr_active = amdgpu_dm_vrr_active_irq(amdgpu_crtc);
404 /* Fixed refresh rate, or VRR scanout position outside front-porch? */
406 !dc_stream_get_scanoutpos(amdgpu_crtc->dm_irq_params.stream, &v_blank_start,
407 &v_blank_end, &hpos, &vpos) ||
408 (vpos < v_blank_start)) {
409 /* Update to correct count and vblank timestamp if racing with
410 * vblank irq. This also updates to the correct vblank timestamp
411 * even in VRR mode, as scanout is past the front-porch atm.
413 drm_crtc_accurate_vblank_count(&amdgpu_crtc->base);
415 /* Wake up userspace by sending the pageflip event with proper
416 * count and timestamp of vblank of flip completion.
419 drm_crtc_send_vblank_event(&amdgpu_crtc->base, e);
421 /* Event sent, so done with vblank for this flip */
422 drm_crtc_vblank_put(&amdgpu_crtc->base);
425 /* VRR active and inside front-porch: vblank count and
426 * timestamp for pageflip event will only be up to date after
427 * drm_crtc_handle_vblank() has been executed from late vblank
428 * irq handler after start of back-porch (vline 0). We queue the
429 * pageflip event for send-out by drm_crtc_handle_vblank() with
430 * updated timestamp and count, once it runs after us.
432 * We need to open-code this instead of using the helper
433 * drm_crtc_arm_vblank_event(), as that helper would
434 * call drm_crtc_accurate_vblank_count(), which we must
435 * not call in VRR mode while we are in front-porch!
438 /* sequence will be replaced by real count during send-out. */
439 e->sequence = drm_crtc_vblank_count(&amdgpu_crtc->base);
440 e->pipe = amdgpu_crtc->crtc_id;
442 list_add_tail(&e->base.link, &adev_to_drm(adev)->vblank_event_list);
446 /* Keep track of the vblank of this flip for flip throttling. We use the
447 * cooked hw counter, as that one is incremented at the start of the vblank
448 * of pageflip completion, so last_flip_vblank is the forbidden count
449 * for queueing new pageflips if vsync + VRR is enabled.
451 amdgpu_crtc->dm_irq_params.last_flip_vblank =
452 amdgpu_get_vblank_counter_kms(&amdgpu_crtc->base);
454 amdgpu_crtc->pflip_status = AMDGPU_FLIP_NONE;
455 spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
457 DC_LOG_PFLIP("crtc:%d[%p], pflip_stat:AMDGPU_FLIP_NONE, vrr[%d]-fp %d\n",
458 amdgpu_crtc->crtc_id, amdgpu_crtc,
459 vrr_active, (int) !e);
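/**
 * dm_vupdate_high_irq() - Handles the VUPDATE interrupt
 * @interrupt_params: the common IRQ parameters, used to look up the CRTC
 *
 * Fires after the end of front-porch. Tracks the measured refresh rate,
 * performs core vblank handling here when VRR is active (vblank timestamps
 * are only valid past the front-porch), and runs below-the-range/BTR
 * processing for pre-DCE12 (pre-AMDGPU_FAMILY_AI) ASICs.
 */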
462 static void dm_vupdate_high_irq(void *interrupt_params)
464 struct common_irq_params *irq_params = interrupt_params;
465 struct amdgpu_device *adev = irq_params->adev;
466 struct amdgpu_crtc *acrtc;
467 struct drm_device *drm_dev;
468 struct drm_vblank_crtc *vblank;
469 ktime_t frame_duration_ns, previous_timestamp;
473 acrtc = get_crtc_by_otg_inst(adev, irq_params->irq_src - IRQ_TYPE_VUPDATE);
476 vrr_active = amdgpu_dm_vrr_active_irq(acrtc);
477 drm_dev = acrtc->base.dev;
478 vblank = &drm_dev->vblank[acrtc->base.index];
479 previous_timestamp = atomic64_read(&irq_params->previous_timestamp);
480 frame_duration_ns = vblank->time - previous_timestamp;
482 if (frame_duration_ns > 0) {
483 trace_amdgpu_refresh_rate_track(acrtc->base.index,
485 ktime_divns(NSEC_PER_SEC, frame_duration_ns));
486 atomic64_set(&irq_params->previous_timestamp, vblank->time);
489 DC_LOG_VBLANK("crtc:%d, vupdate-vrr:%d\n",
493 /* Core vblank handling is done here after the end of front-porch in
494 * VRR mode, as vblank timestamping only gives valid results now that
495 * we are past the front-porch. This will also deliver the
496 * page-flip completion events that have been queued to us
497 * if a pageflip happened inside front-porch.
500 drm_crtc_handle_vblank(&acrtc->base);
502 /* BTR processing for pre-DCE12 ASICs */
503 if (acrtc->dm_irq_params.stream &&
504 adev->family < AMDGPU_FAMILY_AI) {
505 spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);
506 mod_freesync_handle_v_update(
507 adev->dm.freesync_module,
508 acrtc->dm_irq_params.stream,
509 &acrtc->dm_irq_params.vrr_params);
511 dc_stream_adjust_vmin_vmax(
513 acrtc->dm_irq_params.stream,
514 &acrtc->dm_irq_params.vrr_params.adjust);
515 spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
522 * dm_crtc_high_irq() - Handles CRTC interrupt
523 * @interrupt_params: used for determining the CRTC instance
525 * Handles the CRTC/VSYNC interrupt by notifying DRM's VBLANK
528 static void dm_crtc_high_irq(void *interrupt_params)
530 struct common_irq_params *irq_params = interrupt_params;
531 struct amdgpu_device *adev = irq_params->adev;
532 struct amdgpu_crtc *acrtc;
536 acrtc = get_crtc_by_otg_inst(adev, irq_params->irq_src - IRQ_TYPE_VBLANK);
540 vrr_active = amdgpu_dm_vrr_active_irq(acrtc);
542 DC_LOG_VBLANK("crtc:%d, vupdate-vrr:%d, planes:%d\n", acrtc->crtc_id,
543 vrr_active, acrtc->dm_irq_params.active_planes);
546 * Core vblank handling at start of front-porch is only possible
547 * in non-VRR mode, as only then does vblank timestamping give
548 * valid results while done in front-porch. Otherwise defer it
549 * to dm_vupdate_high_irq after end of front-porch.
552 drm_crtc_handle_vblank(&acrtc->base);
555 * Following stuff must happen at start of vblank, for crc
556 * computation and below-the-range btr support in vrr mode.
558 amdgpu_dm_crtc_handle_crc_irq(&acrtc->base);
560 /* BTR updates need to happen before VUPDATE on Vega and above. */
561 if (adev->family < AMDGPU_FAMILY_AI)
564 spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);
566 if (acrtc->dm_irq_params.stream &&
567 acrtc->dm_irq_params.vrr_params.supported &&
568 acrtc->dm_irq_params.freesync_config.state ==
569 VRR_STATE_ACTIVE_VARIABLE) {
570 mod_freesync_handle_v_update(adev->dm.freesync_module,
571 acrtc->dm_irq_params.stream,
572 &acrtc->dm_irq_params.vrr_params);
574 dc_stream_adjust_vmin_vmax(adev->dm.dc, acrtc->dm_irq_params.stream,
575 &acrtc->dm_irq_params.vrr_params.adjust);
579 * If there aren't any active_planes then DCH HUBP may be clock-gated.
580 * In that case, pageflip completion interrupts won't fire and pageflip
581 * completion events won't get delivered. Prevent this by sending
582 * pending pageflip events from here if a flip is still pending.
584 * If any planes are enabled, use dm_pflip_high_irq() instead, to
585 * avoid race conditions between flip programming and completion,
586 * which could cause too early flip completion events.
588 if (adev->family >= AMDGPU_FAMILY_RV &&
589 acrtc->pflip_status == AMDGPU_FLIP_SUBMITTED &&
590 acrtc->dm_irq_params.active_planes == 0) {
592 drm_crtc_send_vblank_event(&acrtc->base, acrtc->event);
594 drm_crtc_vblank_put(&acrtc->base);
596 acrtc->pflip_status = AMDGPU_FLIP_NONE;
599 spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
602 #if defined(CONFIG_DRM_AMD_DC_DCN)
604 * dm_dcn_vertical_interrupt0_high_irq() - Handles OTG Vertical interrupt0 for
605 * DCN generation ASICs
606 * @interrupt_params: interrupt parameters
608 * Used to set crc window/read out crc value at vertical line 0 position
610 #if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
611 static void dm_dcn_vertical_interrupt0_high_irq(void *interrupt_params)
613 struct common_irq_params *irq_params = interrupt_params;
614 struct amdgpu_device *adev = irq_params->adev;
615 struct amdgpu_crtc *acrtc;
617 acrtc = get_crtc_by_otg_inst(adev, irq_params->irq_src - IRQ_TYPE_VLINE0);
622 amdgpu_dm_crtc_handle_crc_window_irq(&acrtc->base);
627 * dm_dmub_outbox1_low_irq() - Handles Outbox interrupt
628 * @interrupt_params: used for determining the Outbox instance
630 * Handles the Outbox Interrupt
633 #define DMUB_TRACE_MAX_READ 64
634 static void dm_dmub_outbox1_low_irq(void *interrupt_params)
636 struct dmub_notification notify;
637 struct common_irq_params *irq_params = interrupt_params;
638 struct amdgpu_device *adev = irq_params->adev;
639 struct amdgpu_display_manager *dm = &adev->dm;
640 struct dmcub_trace_buf_entry entry = { 0 };
643 if (dc_enable_dmub_notifications(adev->dm.dc)) {
644 if (irq_params->irq_src == DC_IRQ_SOURCE_DMCUB_OUTBOX) {
646 dc_stat_get_dmub_notification(adev->dm.dc, ¬ify);
647 } while (notify.pending_notification);
649 if (adev->dm.dmub_notify)
650 memcpy(adev->dm.dmub_notify, ¬ify, sizeof(struct dmub_notification));
651 if (notify.type == DMUB_NOTIFICATION_AUX_REPLY)
652 complete(&adev->dm.dmub_aux_transfer_done);
653 /* TODO: HPD implementation */
656 DRM_ERROR("DM: Failed to receive correct outbox IRQ!");
662 if (dc_dmub_srv_get_dmub_outbox0_msg(dm->dc, &entry)) {
663 trace_amdgpu_dmub_trace_high_irq(entry.trace_code, entry.tick_count,
664 entry.param0, entry.param1);
666 DRM_DEBUG_DRIVER("trace_code:%u, tick_count:%u, param0:%u, param1:%u\n",
667 entry.trace_code, entry.tick_count, entry.param0, entry.param1);
673 } while (count <= DMUB_TRACE_MAX_READ);
675 ASSERT(count <= DMUB_TRACE_MAX_READ);
679 static int dm_set_clockgating_state(void *handle,
680 enum amd_clockgating_state state)
685 static int dm_set_powergating_state(void *handle,
686 enum amd_powergating_state state)
691 /* Prototypes of private functions */
692 static int dm_early_init(void* handle);
694 /* Allocate memory for FBC compressed data */
695 static void amdgpu_dm_fbc_init(struct drm_connector *connector)
697 struct drm_device *dev = connector->dev;
698 struct amdgpu_device *adev = drm_to_adev(dev);
699 struct dm_compressor_info *compressor = &adev->dm.compressor;
700 struct amdgpu_dm_connector *aconn = to_amdgpu_dm_connector(connector);
701 struct drm_display_mode *mode;
702 unsigned long max_size = 0;
704 if (adev->dm.dc->fbc_compressor == NULL)
707 if (aconn->dc_link->connector_signal != SIGNAL_TYPE_EDP)
710 if (compressor->bo_ptr)
714 list_for_each_entry(mode, &connector->modes, head) {
715 if (max_size < mode->htotal * mode->vtotal)
716 max_size = mode->htotal * mode->vtotal;
720 int r = amdgpu_bo_create_kernel(adev, max_size * 4, PAGE_SIZE,
721 AMDGPU_GEM_DOMAIN_GTT, &compressor->bo_ptr,
722 &compressor->gpu_addr, &compressor->cpu_addr);
725 DRM_ERROR("DM: Failed to initialize FBC\n");
727 adev->dm.dc->ctx->fbc_gpu_addr = compressor->gpu_addr;
728 DRM_INFO("DM: FBC alloc %lu\n", max_size * 4);
735 static int amdgpu_dm_audio_component_get_eld(struct device *kdev, int port,
736 int pipe, bool *enabled,
737 unsigned char *buf, int max_bytes)
739 struct drm_device *dev = dev_get_drvdata(kdev);
740 struct amdgpu_device *adev = drm_to_adev(dev);
741 struct drm_connector *connector;
742 struct drm_connector_list_iter conn_iter;
743 struct amdgpu_dm_connector *aconnector;
748 mutex_lock(&adev->dm.audio_lock);
750 drm_connector_list_iter_begin(dev, &conn_iter);
751 drm_for_each_connector_iter(connector, &conn_iter) {
752 aconnector = to_amdgpu_dm_connector(connector);
753 if (aconnector->audio_inst != port)
757 ret = drm_eld_size(connector->eld);
758 memcpy(buf, connector->eld, min(max_bytes, ret));
762 drm_connector_list_iter_end(&conn_iter);
764 mutex_unlock(&adev->dm.audio_lock);
766 DRM_DEBUG_KMS("Get ELD: idx=%d ret=%d en=%d\n", port, ret, *enabled);
771 static const struct drm_audio_component_ops amdgpu_dm_audio_component_ops = {
772 .get_eld = amdgpu_dm_audio_component_get_eld,
775 static int amdgpu_dm_audio_component_bind(struct device *kdev,
776 struct device *hda_kdev, void *data)
778 struct drm_device *dev = dev_get_drvdata(kdev);
779 struct amdgpu_device *adev = drm_to_adev(dev);
780 struct drm_audio_component *acomp = data;
782 acomp->ops = &amdgpu_dm_audio_component_ops;
784 adev->dm.audio_component = acomp;
789 static void amdgpu_dm_audio_component_unbind(struct device *kdev,
790 struct device *hda_kdev, void *data)
792 struct drm_device *dev = dev_get_drvdata(kdev);
793 struct amdgpu_device *adev = drm_to_adev(dev);
794 struct drm_audio_component *acomp = data;
798 adev->dm.audio_component = NULL;
801 static const struct component_ops amdgpu_dm_audio_component_bind_ops = {
802 .bind = amdgpu_dm_audio_component_bind,
803 .unbind = amdgpu_dm_audio_component_unbind,
806 static int amdgpu_dm_audio_init(struct amdgpu_device *adev)
813 adev->mode_info.audio.enabled = true;
815 adev->mode_info.audio.num_pins = adev->dm.dc->res_pool->audio_count;
817 for (i = 0; i < adev->mode_info.audio.num_pins; i++) {
818 adev->mode_info.audio.pin[i].channels = -1;
819 adev->mode_info.audio.pin[i].rate = -1;
820 adev->mode_info.audio.pin[i].bits_per_sample = -1;
821 adev->mode_info.audio.pin[i].status_bits = 0;
822 adev->mode_info.audio.pin[i].category_code = 0;
823 adev->mode_info.audio.pin[i].connected = false;
824 adev->mode_info.audio.pin[i].id =
825 adev->dm.dc->res_pool->audios[i]->inst;
826 adev->mode_info.audio.pin[i].offset = 0;
829 ret = component_add(adev->dev, &amdgpu_dm_audio_component_bind_ops);
833 adev->dm.audio_registered = true;
838 static void amdgpu_dm_audio_fini(struct amdgpu_device *adev)
843 if (!adev->mode_info.audio.enabled)
846 if (adev->dm.audio_registered) {
847 component_del(adev->dev, &amdgpu_dm_audio_component_bind_ops);
848 adev->dm.audio_registered = false;
851 /* TODO: Disable audio? */
853 adev->mode_info.audio.enabled = false;
856 static void amdgpu_dm_audio_eld_notify(struct amdgpu_device *adev, int pin)
858 struct drm_audio_component *acomp = adev->dm.audio_component;
860 if (acomp && acomp->audio_ops && acomp->audio_ops->pin_eld_notify) {
861 DRM_DEBUG_KMS("Notify ELD: %d\n", pin);
863 acomp->audio_ops->pin_eld_notify(acomp->audio_ops->audio_ptr,
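/*
 * Bring up the DMUB firmware on the hardware: check HW support, back-door
 * load the instruction constants when the PSP did not already do so, copy
 * the BSS/data and VBIOS regions into their framebuffer windows, clear the
 * mailbox, trace-buffer and firmware-state windows, program the hardware
 * parameters, wait for auto-load to finish, and create the DC DMUB server.
 */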
868 static int dm_dmub_hw_init(struct amdgpu_device *adev)
870 const struct dmcub_firmware_header_v1_0 *hdr;
871 struct dmub_srv *dmub_srv = adev->dm.dmub_srv;
872 struct dmub_srv_fb_info *fb_info = adev->dm.dmub_fb_info;
873 const struct firmware *dmub_fw = adev->dm.dmub_fw;
874 struct dmcu *dmcu = adev->dm.dc->res_pool->dmcu;
875 struct abm *abm = adev->dm.dc->res_pool->abm;
876 struct dmub_srv_hw_params hw_params;
877 enum dmub_status status;
878 const unsigned char *fw_inst_const, *fw_bss_data;
879 uint32_t i, fw_inst_const_size, fw_bss_data_size;
883 /* DMUB isn't supported on the ASIC. */
887 DRM_ERROR("No framebuffer info for DMUB service.\n");
892 /* Firmware required for DMUB support. */
893 DRM_ERROR("No firmware provided for DMUB.\n");
897 status = dmub_srv_has_hw_support(dmub_srv, &has_hw_support);
898 if (status != DMUB_STATUS_OK) {
899 DRM_ERROR("Error checking HW support for DMUB: %d\n", status);
903 if (!has_hw_support) {
904 DRM_INFO("DMUB unsupported on ASIC\n");
908 hdr = (const struct dmcub_firmware_header_v1_0 *)dmub_fw->data;
910 fw_inst_const = dmub_fw->data +
911 le32_to_cpu(hdr->header.ucode_array_offset_bytes) +
914 fw_bss_data = dmub_fw->data +
915 le32_to_cpu(hdr->header.ucode_array_offset_bytes) +
916 le32_to_cpu(hdr->inst_const_bytes);
918 /* Copy firmware and bios info into FB memory. */
919 fw_inst_const_size = le32_to_cpu(hdr->inst_const_bytes) -
920 PSP_HEADER_BYTES - PSP_FOOTER_BYTES;
922 fw_bss_data_size = le32_to_cpu(hdr->bss_data_bytes);
924 /* If adev->firmware.load_type == AMDGPU_FW_LOAD_PSP,
925 * amdgpu_ucode_init_single_fw will load the dmub firmware
926 * fw_inst_const part to cw0; otherwise, the firmware backdoor load
927 * is done here by dm_dmub_hw_init.
929 if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP) {
930 memcpy(fb_info->fb[DMUB_WINDOW_0_INST_CONST].cpu_addr, fw_inst_const,
934 if (fw_bss_data_size)
935 memcpy(fb_info->fb[DMUB_WINDOW_2_BSS_DATA].cpu_addr,
936 fw_bss_data, fw_bss_data_size);
938 /* Copy the VBIOS into FB memory. */
939 memcpy(fb_info->fb[DMUB_WINDOW_3_VBIOS].cpu_addr, adev->bios,
942 /* Reset regions that need to be reset. */
943 memset(fb_info->fb[DMUB_WINDOW_4_MAILBOX].cpu_addr, 0,
944 fb_info->fb[DMUB_WINDOW_4_MAILBOX].size);
946 memset(fb_info->fb[DMUB_WINDOW_5_TRACEBUFF].cpu_addr, 0,
947 fb_info->fb[DMUB_WINDOW_5_TRACEBUFF].size);
949 memset(fb_info->fb[DMUB_WINDOW_6_FW_STATE].cpu_addr, 0,
950 fb_info->fb[DMUB_WINDOW_6_FW_STATE].size);
952 /* Initialize hardware. */
953 memset(&hw_params, 0, sizeof(hw_params));
954 hw_params.fb_base = adev->gmc.fb_start;
955 hw_params.fb_offset = adev->gmc.aper_base;
957 /* backdoor load firmware and trigger dmub running */
958 if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP)
959 hw_params.load_inst_const = true;
962 hw_params.psp_version = dmcu->psp_version;
964 for (i = 0; i < fb_info->num_fb; ++i)
965 hw_params.fb[i] = &fb_info->fb[i];
967 status = dmub_srv_hw_init(dmub_srv, &hw_params);
968 if (status != DMUB_STATUS_OK) {
969 DRM_ERROR("Error initializing DMUB HW: %d\n", status);
973 /* Wait for firmware load to finish. */
974 status = dmub_srv_wait_for_auto_load(dmub_srv, 100000);
975 if (status != DMUB_STATUS_OK)
976 DRM_WARN("Wait for DMUB auto-load failed: %d\n", status);
978 /* Init DMCU and ABM if available. */
980 dmcu->funcs->dmcu_init(dmcu);
981 abm->dmcu_is_running = dmcu->funcs->is_dmcu_initialized(dmcu);
984 adev->dm.dc->ctx->dmub_srv = dc_dmub_srv_create(adev->dm.dc, dmub_srv);
985 if (!adev->dm.dc->ctx->dmub_srv) {
986 DRM_ERROR("Couldn't allocate DC DMUB server!\n");
990 DRM_INFO("DMUB hardware initialized: version=0x%08X\n",
991 adev->dm.dmcub_fw_version);
996 #if defined(CONFIG_DRM_AMD_DC_DCN)
997 static void mmhub_read_system_context(struct amdgpu_device *adev, struct dc_phy_addr_space_config *pa_config)
1000 uint32_t logical_addr_low;
1001 uint32_t logical_addr_high;
1002 uint32_t agp_base, agp_bot, agp_top;
1003 PHYSICAL_ADDRESS_LOC page_table_start, page_table_end, page_table_base;
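/*
 * A sketch of the address math, assuming the usual MMHUB register encodings:
 * the system aperture bounds are kept in 256 KiB units (>> 18), the AGP
 * aperture in 16 MiB units (>> 24), and the GART page table addresses in
 * 4 KiB pages (>> 12). The inverse shifts further down rebuild byte
 * addresses for dc_phy_addr_space_config.
 */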
1005 logical_addr_low = min(adev->gmc.fb_start, adev->gmc.agp_start) >> 18;
1006 pt_base = amdgpu_gmc_pd_addr(adev->gart.bo);
1008 if (adev->apu_flags & AMD_APU_IS_RAVEN2)
1010 * Raven2 has a HW issue where it cannot use VRAM that lies beyond
1011 * MC_VM_SYSTEM_APERTURE_HIGH_ADDR. As a workaround, increase the
1012 * system aperture high address (add 1) to avoid the VM fault and
1013 * hardware hang.
1015 logical_addr_high = max((adev->gmc.fb_end >> 18) + 0x1, adev->gmc.agp_end >> 18);
1017 logical_addr_high = max(adev->gmc.fb_end, adev->gmc.agp_end) >> 18;
1020 agp_bot = adev->gmc.agp_start >> 24;
1021 agp_top = adev->gmc.agp_end >> 24;
1024 page_table_start.high_part = (u32)(adev->gmc.gart_start >> 44) & 0xF;
1025 page_table_start.low_part = (u32)(adev->gmc.gart_start >> 12);
1026 page_table_end.high_part = (u32)(adev->gmc.gart_end >> 44) & 0xF;
1027 page_table_end.low_part = (u32)(adev->gmc.gart_end >> 12);
1028 page_table_base.high_part = upper_32_bits(pt_base) & 0xF;
1029 page_table_base.low_part = lower_32_bits(pt_base);
1031 pa_config->system_aperture.start_addr = (uint64_t)logical_addr_low << 18;
1032 pa_config->system_aperture.end_addr = (uint64_t)logical_addr_high << 18;
1034 pa_config->system_aperture.agp_base = (uint64_t)agp_base << 24;
1035 pa_config->system_aperture.agp_bot = (uint64_t)agp_bot << 24;
1036 pa_config->system_aperture.agp_top = (uint64_t)agp_top << 24;
1038 pa_config->system_aperture.fb_base = adev->gmc.fb_start;
1039 pa_config->system_aperture.fb_offset = adev->gmc.aper_base;
1040 pa_config->system_aperture.fb_top = adev->gmc.fb_end;
1042 pa_config->gart_config.page_table_start_addr = page_table_start.quad_part << 12;
1043 pa_config->gart_config.page_table_end_addr = page_table_end.quad_part << 12;
1044 pa_config->gart_config.page_table_base_addr = page_table_base.quad_part;
1046 pa_config->is_hvm_enabled = 0;
1050 #if defined(CONFIG_DRM_AMD_DC_DCN)
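/*
 * Work item queued from the CRTC vblank enable/disable path: it keeps a
 * count of CRTCs with vblank interrupts enabled and only lets DC apply
 * idle optimizations (MALL) when that count drops to zero.
 */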
1051 static void event_mall_stutter(struct work_struct *work)
1054 struct vblank_workqueue *vblank_work = container_of(work, struct vblank_workqueue, mall_work);
1055 struct amdgpu_display_manager *dm = vblank_work->dm;
1057 mutex_lock(&dm->dc_lock);
1059 if (vblank_work->enable)
1060 dm->active_vblank_irq_count++;
1061 else if (dm->active_vblank_irq_count)
1062 dm->active_vblank_irq_count--;
1064 dc_allow_idle_optimizations(dm->dc, dm->active_vblank_irq_count == 0);
1066 DRM_DEBUG_KMS("Allow idle optimizations (MALL): %d\n", dm->active_vblank_irq_count == 0);
1068 mutex_unlock(&dm->dc_lock);
1071 static struct vblank_workqueue *vblank_create_workqueue(struct amdgpu_device *adev, struct dc *dc)
1074 int max_caps = dc->caps.max_links;
1075 struct vblank_workqueue *vblank_work;
1078 vblank_work = kcalloc(max_caps, sizeof(*vblank_work), GFP_KERNEL);
1079 if (ZERO_OR_NULL_PTR(vblank_work)) {
1084 for (i = 0; i < max_caps; i++)
1085 INIT_WORK(&vblank_work[i].mall_work, event_mall_stutter);
1090 static int amdgpu_dm_init(struct amdgpu_device *adev)
1092 struct dc_init_data init_data;
1093 #ifdef CONFIG_DRM_AMD_DC_HDCP
1094 struct dc_callback_init init_params;
1098 adev->dm.ddev = adev_to_drm(adev);
1099 adev->dm.adev = adev;
1101 /* Zero all the fields */
1102 memset(&init_data, 0, sizeof(init_data));
1103 #ifdef CONFIG_DRM_AMD_DC_HDCP
1104 memset(&init_params, 0, sizeof(init_params));
1107 mutex_init(&adev->dm.dc_lock);
1108 mutex_init(&adev->dm.audio_lock);
1109 #if defined(CONFIG_DRM_AMD_DC_DCN)
1110 spin_lock_init(&adev->dm.vblank_lock);
1113 if (amdgpu_dm_irq_init(adev)) {
1114 DRM_ERROR("amdgpu: failed to initialize DM IRQ support.\n");
1118 init_data.asic_id.chip_family = adev->family;
1120 init_data.asic_id.pci_revision_id = adev->pdev->revision;
1121 init_data.asic_id.hw_internal_rev = adev->external_rev_id;
1123 init_data.asic_id.vram_width = adev->gmc.vram_width;
1124 /* TODO: initialize init_data.asic_id.vram_type here!!!! */
1125 init_data.asic_id.atombios_base_address =
1126 adev->mode_info.atom_context->bios;
1128 init_data.driver = adev;
1130 adev->dm.cgs_device = amdgpu_cgs_create_device(adev);
1132 if (!adev->dm.cgs_device) {
1133 DRM_ERROR("amdgpu: failed to create cgs device.\n");
1137 init_data.cgs_device = adev->dm.cgs_device;
1139 init_data.dce_environment = DCE_ENV_PRODUCTION_DRV;
1141 switch (adev->asic_type) {
1146 init_data.flags.gpu_vm_support = true;
1147 if (ASICREV_IS_GREEN_SARDINE(adev->external_rev_id))
1148 init_data.flags.disable_dmcu = true;
1150 #if defined(CONFIG_DRM_AMD_DC_DCN)
1152 init_data.flags.gpu_vm_support = true;
1159 if (amdgpu_dc_feature_mask & DC_FBC_MASK)
1160 init_data.flags.fbc_support = true;
1162 if (amdgpu_dc_feature_mask & DC_MULTI_MON_PP_MCLK_SWITCH_MASK)
1163 init_data.flags.multi_mon_pp_mclk_switch = true;
1165 if (amdgpu_dc_feature_mask & DC_DISABLE_FRACTIONAL_PWM_MASK)
1166 init_data.flags.disable_fractional_pwm = true;
1168 init_data.flags.power_down_display_on_boot = true;
1170 INIT_LIST_HEAD(&adev->dm.da_list);
1171 /* Display Core create. */
1172 adev->dm.dc = dc_create(&init_data);
1175 DRM_INFO("Display Core initialized with v%s!\n", DC_VER);
1177 DRM_INFO("Display Core failed to initialize with v%s!\n", DC_VER);
1181 if (amdgpu_dc_debug_mask & DC_DISABLE_PIPE_SPLIT) {
1182 adev->dm.dc->debug.force_single_disp_pipe_split = false;
1183 adev->dm.dc->debug.pipe_split_policy = MPC_SPLIT_AVOID;
1186 if (adev->asic_type != CHIP_CARRIZO && adev->asic_type != CHIP_STONEY)
1187 adev->dm.dc->debug.disable_stutter = amdgpu_pp_feature_mask & PP_STUTTER_MODE ? false : true;
1189 if (amdgpu_dc_debug_mask & DC_DISABLE_STUTTER)
1190 adev->dm.dc->debug.disable_stutter = true;
1192 if (amdgpu_dc_debug_mask & DC_DISABLE_DSC)
1193 adev->dm.dc->debug.disable_dsc = true;
1195 if (amdgpu_dc_debug_mask & DC_DISABLE_CLOCK_GATING)
1196 adev->dm.dc->debug.disable_clock_gate = true;
1198 r = dm_dmub_hw_init(adev);
1200 DRM_ERROR("DMUB interface failed to initialize: status=%d\n", r);
1204 dc_hardware_init(adev->dm.dc);
1206 #if defined(CONFIG_DRM_AMD_DC_DCN)
1207 if (adev->apu_flags) {
1208 struct dc_phy_addr_space_config pa_config;
1210 mmhub_read_system_context(adev, &pa_config);
1212 /* Call the DC init_memory func */
1213 dc_setup_system_context(adev->dm.dc, &pa_config);
1217 adev->dm.freesync_module = mod_freesync_create(adev->dm.dc);
1218 if (!adev->dm.freesync_module) {
1220 "amdgpu: failed to initialize freesync_module.\n");
1222 DRM_DEBUG_DRIVER("amdgpu: freesync_module init done %p.\n",
1223 adev->dm.freesync_module);
1225 amdgpu_dm_init_color_mod();
1227 #if defined(CONFIG_DRM_AMD_DC_DCN)
1228 if (adev->dm.dc->caps.max_links > 0) {
1229 adev->dm.vblank_workqueue = vblank_create_workqueue(adev, adev->dm.dc);
1231 if (!adev->dm.vblank_workqueue)
1232 DRM_ERROR("amdgpu: failed to initialize vblank_workqueue.\n");
1234 DRM_DEBUG_DRIVER("amdgpu: vblank_workqueue init done %p.\n", adev->dm.vblank_workqueue);
1238 #ifdef CONFIG_DRM_AMD_DC_HDCP
1239 if (adev->dm.dc->caps.max_links > 0 && adev->asic_type >= CHIP_RAVEN) {
1240 adev->dm.hdcp_workqueue = hdcp_create_workqueue(adev, &init_params.cp_psp, adev->dm.dc);
1242 if (!adev->dm.hdcp_workqueue)
1243 DRM_ERROR("amdgpu: failed to initialize hdcp_workqueue.\n");
1245 DRM_DEBUG_DRIVER("amdgpu: hdcp_workqueue init done %p.\n", adev->dm.hdcp_workqueue);
1247 dc_init_callbacks(adev->dm.dc, &init_params);
1250 #if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
1251 adev->dm.crc_rd_wrk = amdgpu_dm_crtc_secure_display_create_work();
1253 if (dc_enable_dmub_notifications(adev->dm.dc)) {
1254 init_completion(&adev->dm.dmub_aux_transfer_done);
1255 adev->dm.dmub_notify = kzalloc(sizeof(struct dmub_notification), GFP_KERNEL);
1256 if (!adev->dm.dmub_notify) {
1257 DRM_INFO("amdgpu: failed to allocate adev->dm.dmub_notify");
1260 amdgpu_dm_outbox_init(adev);
1263 if (amdgpu_dm_initialize_drm_device(adev)) {
1265 "amdgpu: failed to initialize sw for display support.\n");
1269 /* create fake encoders for MST */
1270 dm_dp_create_fake_mst_encoders(adev);
1272 /* TODO: Add_display_info? */
1274 /* TODO use dynamic cursor width */
1275 adev_to_drm(adev)->mode_config.cursor_width = adev->dm.dc->caps.max_cursor_size;
1276 adev_to_drm(adev)->mode_config.cursor_height = adev->dm.dc->caps.max_cursor_size;
1278 if (drm_vblank_init(adev_to_drm(adev), adev->dm.display_indexes_num)) {
1280 "amdgpu: failed to initialize sw for display support.\n");
1285 DRM_DEBUG_DRIVER("KMS initialized.\n");
1289 amdgpu_dm_fini(adev);
1294 static void amdgpu_dm_fini(struct amdgpu_device *adev)
1298 for (i = 0; i < adev->dm.display_indexes_num; i++) {
1299 drm_encoder_cleanup(&adev->dm.mst_encoders[i].base);
1302 amdgpu_dm_audio_fini(adev);
1304 amdgpu_dm_destroy_drm_device(&adev->dm);
1306 #if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
1307 if (adev->dm.crc_rd_wrk) {
1308 flush_work(&adev->dm.crc_rd_wrk->notify_ta_work);
1309 kfree(adev->dm.crc_rd_wrk);
1310 adev->dm.crc_rd_wrk = NULL;
1313 #ifdef CONFIG_DRM_AMD_DC_HDCP
1314 if (adev->dm.hdcp_workqueue) {
1315 hdcp_destroy(&adev->dev->kobj, adev->dm.hdcp_workqueue);
1316 adev->dm.hdcp_workqueue = NULL;
1320 dc_deinit_callbacks(adev->dm.dc);
1323 #if defined(CONFIG_DRM_AMD_DC_DCN)
1324 if (adev->dm.vblank_workqueue) {
1325 adev->dm.vblank_workqueue->dm = NULL;
1326 kfree(adev->dm.vblank_workqueue);
1327 adev->dm.vblank_workqueue = NULL;
1331 if (adev->dm.dc->ctx->dmub_srv) {
1332 dc_dmub_srv_destroy(&adev->dm.dc->ctx->dmub_srv);
1333 adev->dm.dc->ctx->dmub_srv = NULL;
1336 if (dc_enable_dmub_notifications(adev->dm.dc)) {
1337 kfree(adev->dm.dmub_notify);
1338 adev->dm.dmub_notify = NULL;
1341 if (adev->dm.dmub_bo)
1342 amdgpu_bo_free_kernel(&adev->dm.dmub_bo,
1343 &adev->dm.dmub_bo_gpu_addr,
1344 &adev->dm.dmub_bo_cpu_addr);
1346 /* DC Destroy TODO: Replace destroy DAL */
1348 dc_destroy(&adev->dm.dc);
1350 * TODO: pageflip, vblank interrupt
1352 * amdgpu_dm_irq_fini(adev);
1355 if (adev->dm.cgs_device) {
1356 amdgpu_cgs_destroy_device(adev->dm.cgs_device);
1357 adev->dm.cgs_device = NULL;
1359 if (adev->dm.freesync_module) {
1360 mod_freesync_destroy(adev->dm.freesync_module);
1361 adev->dm.freesync_module = NULL;
1364 mutex_destroy(&adev->dm.audio_lock);
1365 mutex_destroy(&adev->dm.dc_lock);
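/*
 * Pick and load the DMCU firmware for ASICs that need it. The firmware is
 * optional: when it is absent we simply continue without it, and when the
 * load type is not PSP the request is skipped entirely. On success the ERAM
 * and INTV sections are registered as separate ucode entries for the PSP.
 */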
1370 static int load_dmcu_fw(struct amdgpu_device *adev)
1372 const char *fw_name_dmcu = NULL;
1374 const struct dmcu_firmware_header_v1_0 *hdr;
1376 switch (adev->asic_type) {
1377 #if defined(CONFIG_DRM_AMD_DC_SI)
1392 case CHIP_POLARIS11:
1393 case CHIP_POLARIS10:
1394 case CHIP_POLARIS12:
1402 case CHIP_SIENNA_CICHLID:
1403 case CHIP_NAVY_FLOUNDER:
1404 case CHIP_DIMGREY_CAVEFISH:
1405 case CHIP_BEIGE_GOBY:
1409 fw_name_dmcu = FIRMWARE_NAVI12_DMCU;
1412 if (ASICREV_IS_PICASSO(adev->external_rev_id))
1413 fw_name_dmcu = FIRMWARE_RAVEN_DMCU;
1414 else if (ASICREV_IS_RAVEN2(adev->external_rev_id))
1415 fw_name_dmcu = FIRMWARE_RAVEN_DMCU;
1420 DRM_ERROR("Unsupported ASIC type: 0x%X\n", adev->asic_type);
1424 if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP) {
1425 DRM_DEBUG_KMS("dm: DMCU firmware not supported on direct or SMU loading\n");
1429 r = request_firmware_direct(&adev->dm.fw_dmcu, fw_name_dmcu, adev->dev);
1431 /* DMCU firmware is not necessary, so don't raise a fuss if it's missing */
1432 DRM_DEBUG_KMS("dm: DMCU firmware not found\n");
1433 adev->dm.fw_dmcu = NULL;
1437 dev_err(adev->dev, "amdgpu_dm: Can't load firmware \"%s\"\n",
1442 r = amdgpu_ucode_validate(adev->dm.fw_dmcu);
1444 dev_err(adev->dev, "amdgpu_dm: Can't validate firmware \"%s\"\n",
1446 release_firmware(adev->dm.fw_dmcu);
1447 adev->dm.fw_dmcu = NULL;
1451 hdr = (const struct dmcu_firmware_header_v1_0 *)adev->dm.fw_dmcu->data;
1452 adev->firmware.ucode[AMDGPU_UCODE_ID_DMCU_ERAM].ucode_id = AMDGPU_UCODE_ID_DMCU_ERAM;
1453 adev->firmware.ucode[AMDGPU_UCODE_ID_DMCU_ERAM].fw = adev->dm.fw_dmcu;
1454 adev->firmware.fw_size +=
1455 ALIGN(le32_to_cpu(hdr->header.ucode_size_bytes) - le32_to_cpu(hdr->intv_size_bytes), PAGE_SIZE);
1457 adev->firmware.ucode[AMDGPU_UCODE_ID_DMCU_INTV].ucode_id = AMDGPU_UCODE_ID_DMCU_INTV;
1458 adev->firmware.ucode[AMDGPU_UCODE_ID_DMCU_INTV].fw = adev->dm.fw_dmcu;
1459 adev->firmware.fw_size +=
1460 ALIGN(le32_to_cpu(hdr->intv_size_bytes), PAGE_SIZE);
1462 adev->dm.dmcu_fw_version = le32_to_cpu(hdr->header.ucode_version);
1464 DRM_DEBUG_KMS("PSP loading DMCU firmware\n");
1469 static uint32_t amdgpu_dm_dmub_reg_read(void *ctx, uint32_t address)
1471 struct amdgpu_device *adev = ctx;
1473 return dm_read_reg(adev->dm.dc->ctx, address);
1476 static void amdgpu_dm_dmub_reg_write(void *ctx, uint32_t address,
1479 struct amdgpu_device *adev = ctx;
1481 return dm_write_reg(adev->dm.dc->ctx, address, value);
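/*
 * Software-side DMUB setup: select the firmware and DMUB ASIC id for this
 * chip, request and validate the firmware, create the dmub_srv instance
 * with the register accessors above, size all of its regions, back them
 * with a VRAM buffer and compute the per-window fb_info that
 * dm_dmub_hw_init() consumes.
 */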
1484 static int dm_dmub_sw_init(struct amdgpu_device *adev)
1486 struct dmub_srv_create_params create_params;
1487 struct dmub_srv_region_params region_params;
1488 struct dmub_srv_region_info region_info;
1489 struct dmub_srv_fb_params fb_params;
1490 struct dmub_srv_fb_info *fb_info;
1491 struct dmub_srv *dmub_srv;
1492 const struct dmcub_firmware_header_v1_0 *hdr;
1493 const char *fw_name_dmub;
1494 enum dmub_asic dmub_asic;
1495 enum dmub_status status;
1498 switch (adev->asic_type) {
1500 dmub_asic = DMUB_ASIC_DCN21;
1501 fw_name_dmub = FIRMWARE_RENOIR_DMUB;
1502 if (ASICREV_IS_GREEN_SARDINE(adev->external_rev_id))
1503 fw_name_dmub = FIRMWARE_GREEN_SARDINE_DMUB;
1505 case CHIP_SIENNA_CICHLID:
1506 dmub_asic = DMUB_ASIC_DCN30;
1507 fw_name_dmub = FIRMWARE_SIENNA_CICHLID_DMUB;
1509 case CHIP_NAVY_FLOUNDER:
1510 dmub_asic = DMUB_ASIC_DCN30;
1511 fw_name_dmub = FIRMWARE_NAVY_FLOUNDER_DMUB;
1514 dmub_asic = DMUB_ASIC_DCN301;
1515 fw_name_dmub = FIRMWARE_VANGOGH_DMUB;
1517 case CHIP_DIMGREY_CAVEFISH:
1518 dmub_asic = DMUB_ASIC_DCN302;
1519 fw_name_dmub = FIRMWARE_DIMGREY_CAVEFISH_DMUB;
1521 case CHIP_BEIGE_GOBY:
1522 dmub_asic = DMUB_ASIC_DCN303;
1523 fw_name_dmub = FIRMWARE_BEIGE_GOBY_DMUB;
1527 /* ASIC doesn't support DMUB. */
1531 r = request_firmware_direct(&adev->dm.dmub_fw, fw_name_dmub, adev->dev);
1533 DRM_ERROR("DMUB firmware loading failed: %d\n", r);
1537 r = amdgpu_ucode_validate(adev->dm.dmub_fw);
1539 DRM_ERROR("Couldn't validate DMUB firmware: %d\n", r);
1543 hdr = (const struct dmcub_firmware_header_v1_0 *)adev->dm.dmub_fw->data;
1545 if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) {
1546 adev->firmware.ucode[AMDGPU_UCODE_ID_DMCUB].ucode_id =
1547 AMDGPU_UCODE_ID_DMCUB;
1548 adev->firmware.ucode[AMDGPU_UCODE_ID_DMCUB].fw =
1550 adev->firmware.fw_size +=
1551 ALIGN(le32_to_cpu(hdr->inst_const_bytes), PAGE_SIZE);
1553 DRM_INFO("Loading DMUB firmware via PSP: version=0x%08X\n",
1554 adev->dm.dmcub_fw_version);
1557 adev->dm.dmcub_fw_version = le32_to_cpu(hdr->header.ucode_version);
1559 adev->dm.dmub_srv = kzalloc(sizeof(*adev->dm.dmub_srv), GFP_KERNEL);
1560 dmub_srv = adev->dm.dmub_srv;
1563 DRM_ERROR("Failed to allocate DMUB service!\n");
1567 memset(&create_params, 0, sizeof(create_params));
1568 create_params.user_ctx = adev;
1569 create_params.funcs.reg_read = amdgpu_dm_dmub_reg_read;
1570 create_params.funcs.reg_write = amdgpu_dm_dmub_reg_write;
1571 create_params.asic = dmub_asic;
1573 /* Create the DMUB service. */
1574 status = dmub_srv_create(dmub_srv, &create_params);
1575 if (status != DMUB_STATUS_OK) {
1576 DRM_ERROR("Error creating DMUB service: %d\n", status);
1580 /* Calculate the size of all the regions for the DMUB service. */
1581 memset(®ion_params, 0, sizeof(region_params));
1583 region_params.inst_const_size = le32_to_cpu(hdr->inst_const_bytes) -
1584 PSP_HEADER_BYTES - PSP_FOOTER_BYTES;
1585 region_params.bss_data_size = le32_to_cpu(hdr->bss_data_bytes);
1586 region_params.vbios_size = adev->bios_size;
1587 region_params.fw_bss_data = region_params.bss_data_size ?
1588 adev->dm.dmub_fw->data +
1589 le32_to_cpu(hdr->header.ucode_array_offset_bytes) +
1590 le32_to_cpu(hdr->inst_const_bytes) : NULL;
1591 region_params.fw_inst_const =
1592 adev->dm.dmub_fw->data +
1593 le32_to_cpu(hdr->header.ucode_array_offset_bytes) +
1596 status = dmub_srv_calc_region_info(dmub_srv, ®ion_params,
1599 if (status != DMUB_STATUS_OK) {
1600 DRM_ERROR("Error calculating DMUB region info: %d\n", status);
1605 * Allocate a framebuffer based on the total size of all the regions.
1606 * TODO: Move this into GART.
1608 r = amdgpu_bo_create_kernel(adev, region_info.fb_size, PAGE_SIZE,
1609 AMDGPU_GEM_DOMAIN_VRAM, &adev->dm.dmub_bo,
1610 &adev->dm.dmub_bo_gpu_addr,
1611 &adev->dm.dmub_bo_cpu_addr);
1615 /* Rebase the regions on the framebuffer address. */
1616 memset(&fb_params, 0, sizeof(fb_params));
1617 fb_params.cpu_addr = adev->dm.dmub_bo_cpu_addr;
1618 fb_params.gpu_addr = adev->dm.dmub_bo_gpu_addr;
1619 fb_params.region_info = ®ion_info;
1621 adev->dm.dmub_fb_info =
1622 kzalloc(sizeof(*adev->dm.dmub_fb_info), GFP_KERNEL);
1623 fb_info = adev->dm.dmub_fb_info;
1627 "Failed to allocate framebuffer info for DMUB service!\n");
1631 status = dmub_srv_calc_fb_info(dmub_srv, &fb_params, fb_info);
1632 if (status != DMUB_STATUS_OK) {
1633 DRM_ERROR("Error calculating DMUB FB info: %d\n", status);
1640 static int dm_sw_init(void *handle)
1642 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1645 r = dm_dmub_sw_init(adev);
1649 return load_dmcu_fw(adev);
1652 static int dm_sw_fini(void *handle)
1654 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1656 kfree(adev->dm.dmub_fb_info);
1657 adev->dm.dmub_fb_info = NULL;
1659 if (adev->dm.dmub_srv) {
1660 dmub_srv_destroy(adev->dm.dmub_srv);
1661 adev->dm.dmub_srv = NULL;
1664 release_firmware(adev->dm.dmub_fw);
1665 adev->dm.dmub_fw = NULL;
1667 release_firmware(adev->dm.fw_dmcu);
1668 adev->dm.fw_dmcu = NULL;
1673 static int detect_mst_link_for_all_connectors(struct drm_device *dev)
1675 struct amdgpu_dm_connector *aconnector;
1676 struct drm_connector *connector;
1677 struct drm_connector_list_iter iter;
1680 drm_connector_list_iter_begin(dev, &iter);
1681 drm_for_each_connector_iter(connector, &iter) {
1682 aconnector = to_amdgpu_dm_connector(connector);
1683 if (aconnector->dc_link->type == dc_connection_mst_branch &&
1684 aconnector->mst_mgr.aux) {
1685 DRM_DEBUG_DRIVER("DM_MST: starting TM on aconnector: %p [id: %d]\n",
1687 aconnector->base.base.id);
1689 ret = drm_dp_mst_topology_mgr_set_mst(&aconnector->mst_mgr, true);
1691 DRM_ERROR("DM_MST: Failed to start MST\n");
1692 aconnector->dc_link->type =
1693 dc_connection_single;
1698 drm_connector_list_iter_end(&iter);
1703 static int dm_late_init(void *handle)
1705 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1707 struct dmcu_iram_parameters params;
1708 unsigned int linear_lut[16];
1710 struct dmcu *dmcu = NULL;
1713 dmcu = adev->dm.dc->res_pool->dmcu;
1715 for (i = 0; i < 16; i++)
1716 linear_lut[i] = 0xFFFF * i / 15;
1719 params.backlight_ramping_start = 0xCCCC;
1720 params.backlight_ramping_reduction = 0xCCCCCCCC;
1721 params.backlight_lut_array_size = 16;
1722 params.backlight_lut_array = linear_lut;
1724 /* Min backlight level after ABM reduction; don't allow below 1%:
1725 * 0xFFFF * 0.01 = 0x28F
1727 params.min_abm_backlight = 0x28F;
1729 /* In the case where ABM is implemented on dmcub, the
1730 * dmcu object will be NULL.
1731 * ABM 2.4 and up are implemented on dmcub.
1734 ret = dmcu_load_iram(dmcu, params);
1735 else if (adev->dm.dc->ctx->dmub_srv)
1736 ret = dmub_init_abm_config(adev->dm.dc->res_pool, params);
1741 return detect_mst_link_for_all_connectors(adev_to_drm(adev));
1744 static void s3_handle_mst(struct drm_device *dev, bool suspend)
1746 struct amdgpu_dm_connector *aconnector;
1747 struct drm_connector *connector;
1748 struct drm_connector_list_iter iter;
1749 struct drm_dp_mst_topology_mgr *mgr;
1751 bool need_hotplug = false;
1753 drm_connector_list_iter_begin(dev, &iter);
1754 drm_for_each_connector_iter(connector, &iter) {
1755 aconnector = to_amdgpu_dm_connector(connector);
1756 if (aconnector->dc_link->type != dc_connection_mst_branch ||
1757 aconnector->mst_port)
1760 mgr = &aconnector->mst_mgr;
1763 drm_dp_mst_topology_mgr_suspend(mgr);
1765 ret = drm_dp_mst_topology_mgr_resume(mgr, true);
1767 drm_dp_mst_topology_mgr_set_mst(mgr, false);
1768 need_hotplug = true;
1772 drm_connector_list_iter_end(&iter);
1775 drm_kms_helper_hotplug_event(dev);
1778 static int amdgpu_dm_smu_write_watermarks_table(struct amdgpu_device *adev)
1780 struct smu_context *smu = &adev->smu;
1783 if (!is_support_sw_smu(adev))
1786 /* This interface is for dGPU Navi1x. The Linux dc-pplib interface depends
1787 * on the Windows driver DC implementation.
1788 * For Navi1x, clock settings of dcn watermarks are fixed. the settings
1789 * should be passed to smu during boot up and resume from s3.
1790 * boot up: dc calculate dcn watermark clock settings within dc_create,
1791 * dcn20_resource_construct
1792 * then call pplib functions below to pass the settings to smu:
1793 * smu_set_watermarks_for_clock_ranges
1794 * smu_set_watermarks_table
1795 * navi10_set_watermarks_table
1796 * smu_write_watermarks_table
1798 * For Renoir, clock settings of dcn watermark are also fixed values.
1799 * dc has implemented different flow for window driver:
1800 * dc_hardware_init / dc_set_power_state
1805 * smu_set_watermarks_for_clock_ranges
1806 * renoir_set_watermarks_table
1807 * smu_write_watermarks_table
1810 * dc_hardware_init -> amdgpu_dm_init
1811 * dc_set_power_state --> dm_resume
1813 * therefore, this function applies to navi10/12/14 but not Renoir
1816 switch (adev->asic_type) {
1825 ret = smu_write_watermarks_table(smu);
1827 DRM_ERROR("Failed to update WMTABLE!\n");
1835 * dm_hw_init() - Initialize DC device
1836 * @handle: The base driver device containing the amdgpu_dm device.
1838 * Initialize the &struct amdgpu_display_manager device. This involves calling
1839 * the initializers of each DM component, then populating the struct with them.
1841 * Although the function implies hardware initialization, both hardware and
1842 * software are initialized here. Splitting them out to their relevant init
1843 * hooks is a future TODO item.
1845 * Some notable things that are initialized here:
1847 * - Display Core, both software and hardware
1848 * - DC modules that we need (freesync and color management)
1849 * - DRM software states
1850 * - Interrupt sources and handlers
1852 * - Debug FS entries, if enabled
1854 static int dm_hw_init(void *handle)
1856 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1857 /* Create DAL display manager */
1858 amdgpu_dm_init(adev);
1859 amdgpu_dm_hpd_init(adev);
1865 * dm_hw_fini() - Teardown DC device
1866 * @handle: The base driver device containing the amdgpu_dm device.
1868 * Teardown components within &struct amdgpu_display_manager that require
1869 * cleanup. This involves cleaning up the DRM device, DC, and any modules that
1870 * were loaded. Also flush IRQ workqueues and disable them.
1872 static int dm_hw_fini(void *handle)
1874 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1876 amdgpu_dm_hpd_fini(adev);
1878 amdgpu_dm_irq_fini(adev);
1879 amdgpu_dm_fini(adev);
1884 static int dm_enable_vblank(struct drm_crtc *crtc);
1885 static void dm_disable_vblank(struct drm_crtc *crtc);
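/*
 * Used around GPU reset: walk every stream in the given dc_state and, for
 * streams that still have planes, toggle the pageflip interrupt and the
 * vblank interrupt on the matching CRTC in one go.
 */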
1887 static void dm_gpureset_toggle_interrupts(struct amdgpu_device *adev,
1888 struct dc_state *state, bool enable)
1890 enum dc_irq_source irq_source;
1891 struct amdgpu_crtc *acrtc;
1895 for (i = 0; i < state->stream_count; i++) {
1896 acrtc = get_crtc_by_otg_inst(
1897 adev, state->stream_status[i].primary_otg_inst);
1899 if (acrtc && state->stream_status[i].plane_count != 0) {
1900 irq_source = IRQ_TYPE_PFLIP + acrtc->otg_inst;
1901 rc = dc_interrupt_set(adev->dm.dc, irq_source, enable) ? 0 : -EBUSY;
1902 DRM_DEBUG_VBL("crtc %d - vupdate irq %sabling: r=%d\n",
1903 acrtc->crtc_id, enable ? "en" : "dis", rc);
1905 DRM_WARN("Failed to %s pflip interrupts\n",
1906 enable ? "enable" : "disable");
1909 rc = dm_enable_vblank(&acrtc->base);
1911 DRM_WARN("Failed to enable vblank interrupts\n");
1913 dm_disable_vblank(&acrtc->base);
1921 static enum dc_status amdgpu_dm_commit_zero_streams(struct dc *dc)
1923 struct dc_state *context = NULL;
1924 enum dc_status res = DC_ERROR_UNEXPECTED;
1926 struct dc_stream_state *del_streams[MAX_PIPES];
1927 int del_streams_count = 0;
1929 memset(del_streams, 0, sizeof(del_streams));
1931 context = dc_create_state(dc);
1932 if (context == NULL)
1933 goto context_alloc_fail;
1935 dc_resource_state_copy_construct_current(dc, context);
1937 /* First remove from context all streams */
1938 for (i = 0; i < context->stream_count; i++) {
1939 struct dc_stream_state *stream = context->streams[i];
1941 del_streams[del_streams_count++] = stream;
1944 /* Remove all planes for removed streams and then remove the streams */
1945 for (i = 0; i < del_streams_count; i++) {
1946 if (!dc_rem_all_planes_for_stream(dc, del_streams[i], context)) {
1947 res = DC_FAIL_DETACH_SURFACES;
1951 res = dc_remove_stream_from_ctx(dc, context, del_streams[i]);
1957 res = dc_validate_global_state(dc, context, false);
1960 DRM_ERROR("%s: resource validation failed, dc_status: %d\n", __func__, res);
1964 res = dc_commit_state(dc, context);
1967 dc_release_state(context);
1973 static int dm_suspend(void *handle)
1975 struct amdgpu_device *adev = handle;
1976 struct amdgpu_display_manager *dm = &adev->dm;
1979 if (amdgpu_in_reset(adev)) {
1980 mutex_lock(&dm->dc_lock);
1982 #if defined(CONFIG_DRM_AMD_DC_DCN)
1983 dc_allow_idle_optimizations(adev->dm.dc, false);
1986 dm->cached_dc_state = dc_copy_state(dm->dc->current_state);
1988 dm_gpureset_toggle_interrupts(adev, dm->cached_dc_state, false);
1990 amdgpu_dm_commit_zero_streams(dm->dc);
1992 amdgpu_dm_irq_suspend(adev);
1997 WARN_ON(adev->dm.cached_state);
1998 adev->dm.cached_state = drm_atomic_helper_suspend(adev_to_drm(adev));
2000 s3_handle_mst(adev_to_drm(adev), true);
2002 amdgpu_dm_irq_suspend(adev);
2005 dc_set_power_state(dm->dc, DC_ACPI_CM_POWER_STATE_D3);
2010 static struct amdgpu_dm_connector *
2011 amdgpu_dm_find_first_crtc_matching_connector(struct drm_atomic_state *state,
2012 struct drm_crtc *crtc)
2015 struct drm_connector_state *new_con_state;
2016 struct drm_connector *connector;
2017 struct drm_crtc *crtc_from_state;
2019 for_each_new_connector_in_state(state, connector, new_con_state, i) {
2020 crtc_from_state = new_con_state->crtc;
2022 if (crtc_from_state == crtc)
2023 return to_amdgpu_dm_connector(connector);
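/*
 * Emulate a link detection for links where regular sink detection is not
 * used: drop the previous sink, derive sink capabilities from the connector
 * signal type, create a new sink and attempt to read the EDID through the
 * usual helpers.
 */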
2029 static void emulated_link_detect(struct dc_link *link)
2031 struct dc_sink_init_data sink_init_data = { 0 };
2032 struct display_sink_capability sink_caps = { 0 };
2033 enum dc_edid_status edid_status;
2034 struct dc_context *dc_ctx = link->ctx;
2035 struct dc_sink *sink = NULL;
2036 struct dc_sink *prev_sink = NULL;
2038 link->type = dc_connection_none;
2039 prev_sink = link->local_sink;
2042 dc_sink_release(prev_sink);
2044 switch (link->connector_signal) {
2045 case SIGNAL_TYPE_HDMI_TYPE_A: {
2046 sink_caps.transaction_type = DDC_TRANSACTION_TYPE_I2C;
2047 sink_caps.signal = SIGNAL_TYPE_HDMI_TYPE_A;
2051 case SIGNAL_TYPE_DVI_SINGLE_LINK: {
2052 sink_caps.transaction_type = DDC_TRANSACTION_TYPE_I2C;
2053 sink_caps.signal = SIGNAL_TYPE_DVI_SINGLE_LINK;
2057 case SIGNAL_TYPE_DVI_DUAL_LINK: {
2058 sink_caps.transaction_type = DDC_TRANSACTION_TYPE_I2C;
2059 sink_caps.signal = SIGNAL_TYPE_DVI_DUAL_LINK;
2063 case SIGNAL_TYPE_LVDS: {
2064 sink_caps.transaction_type = DDC_TRANSACTION_TYPE_I2C;
2065 sink_caps.signal = SIGNAL_TYPE_LVDS;
2069 case SIGNAL_TYPE_EDP: {
2070 sink_caps.transaction_type =
2071 DDC_TRANSACTION_TYPE_I2C_OVER_AUX;
2072 sink_caps.signal = SIGNAL_TYPE_EDP;
2076 case SIGNAL_TYPE_DISPLAY_PORT: {
2077 sink_caps.transaction_type =
2078 DDC_TRANSACTION_TYPE_I2C_OVER_AUX;
2079 sink_caps.signal = SIGNAL_TYPE_VIRTUAL;
2084 DC_ERROR("Invalid connector type! signal:%d\n",
2085 link->connector_signal);
2089 sink_init_data.link = link;
2090 sink_init_data.sink_signal = sink_caps.signal;
2092 sink = dc_sink_create(&sink_init_data);
2094 DC_ERROR("Failed to create sink!\n");
2098 /* dc_sink_create returns a new reference */
2099 link->local_sink = sink;
2101 edid_status = dm_helpers_read_local_edid(
2106 if (edid_status != EDID_OK)
2107 DC_ERROR("Failed to read EDID");
2111 static void dm_gpureset_commit_state(struct dc_state *dc_state,
2112 struct amdgpu_display_manager *dm)
2115 struct dc_surface_update surface_updates[MAX_SURFACES];
2116 struct dc_plane_info plane_infos[MAX_SURFACES];
2117 struct dc_scaling_info scaling_infos[MAX_SURFACES];
2118 struct dc_flip_addrs flip_addrs[MAX_SURFACES];
2119 struct dc_stream_update stream_update;
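/*
 * Note: the update bundle below groups the per-stream surface/plane update
 * arrays; it is allocated with kzalloc() rather than on the stack, most
 * likely because MAX_SURFACES copies of these structs would make the stack
 * frame too large.
 */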
2123 bundle = kzalloc(sizeof(*bundle), GFP_KERNEL);
2126 dm_error("Failed to allocate update bundle\n");
2130 for (k = 0; k < dc_state->stream_count; k++) {
2131 bundle->stream_update.stream = dc_state->streams[k];
2133 for (m = 0; m < dc_state->stream_status->plane_count; m++) {
2134 bundle->surface_updates[m].surface =
2135 dc_state->stream_status->plane_states[m];
2136 bundle->surface_updates[m].surface->force_full_update =
2139 dc_commit_updates_for_stream(
2140 dm->dc, bundle->surface_updates,
2141 dc_state->stream_status->plane_count,
2142 dc_state->streams[k], &bundle->stream_update, dc_state);
2151 static void dm_set_dpms_off(struct dc_link *link)
2153 struct dc_stream_state *stream_state;
2154 struct amdgpu_dm_connector *aconnector = link->priv;
2155 struct amdgpu_device *adev = drm_to_adev(aconnector->base.dev);
2156 struct dc_stream_update stream_update;
2157 bool dpms_off = true;
2159 memset(&stream_update, 0, sizeof(stream_update));
2160 stream_update.dpms_off = &dpms_off;
2162 mutex_lock(&adev->dm.dc_lock);
2163 stream_state = dc_stream_find_from_link(link);
2165 if (stream_state == NULL) {
2166 DRM_DEBUG_DRIVER("Error finding stream state associated with link!\n");
2167 mutex_unlock(&adev->dm.dc_lock);
2171 stream_update.stream = stream_state;
2172 dc_commit_updates_for_stream(stream_state->ctx->dc, NULL, 0,
2173 stream_state, &stream_update,
2174 stream_state->ctx->dc->current_state);
2175 mutex_unlock(&adev->dm.dc_lock);
2178 static int dm_resume(void *handle)
2180 struct amdgpu_device *adev = handle;
2181 struct drm_device *ddev = adev_to_drm(adev);
2182 struct amdgpu_display_manager *dm = &adev->dm;
2183 struct amdgpu_dm_connector *aconnector;
2184 struct drm_connector *connector;
2185 struct drm_connector_list_iter iter;
2186 struct drm_crtc *crtc;
2187 struct drm_crtc_state *new_crtc_state;
2188 struct dm_crtc_state *dm_new_crtc_state;
2189 struct drm_plane *plane;
2190 struct drm_plane_state *new_plane_state;
2191 struct dm_plane_state *dm_new_plane_state;
2192 struct dm_atomic_state *dm_state = to_dm_atomic_state(dm->atomic_obj.state);
2193 enum dc_connection_type new_connection_type = dc_connection_none;
2194 struct dc_state *dc_state;
2197 if (amdgpu_in_reset(adev)) {
2198 dc_state = dm->cached_dc_state;
2200 r = dm_dmub_hw_init(adev);
2202 DRM_ERROR("DMUB interface failed to initialize: status=%d\n", r);
2204 dc_set_power_state(dm->dc, DC_ACPI_CM_POWER_STATE_D0);
2207 amdgpu_dm_irq_resume_early(adev);
2209 for (i = 0; i < dc_state->stream_count; i++) {
2210 dc_state->streams[i]->mode_changed = true;
2211 for (j = 0; j < dc_state->stream_status->plane_count; j++) {
2212 dc_state->stream_status->plane_states[j]->update_flags.raw
2217 WARN_ON(!dc_commit_state(dm->dc, dc_state));
2219 dm_gpureset_commit_state(dm->cached_dc_state, dm);
2221 dm_gpureset_toggle_interrupts(adev, dm->cached_dc_state, true);
2223 dc_release_state(dm->cached_dc_state);
2224 dm->cached_dc_state = NULL;
2226 amdgpu_dm_irq_resume_late(adev);
2228 mutex_unlock(&dm->dc_lock);
2232 /* Recreate dc_state - DC invalidates it when setting power state to S3. */
2233 dc_release_state(dm_state->context);
2234 dm_state->context = dc_create_state(dm->dc);
2235 /* TODO: Remove dc_state->dccg, use dc->dccg directly. */
2236 dc_resource_state_construct(dm->dc, dm_state->context);
2238 /* Before powering on DC we need to re-initialize DMUB. */
2239 r = dm_dmub_hw_init(adev);
2241 DRM_ERROR("DMUB interface failed to initialize: status=%d\n", r);
2243 /* power on hardware */
2244 dc_set_power_state(dm->dc, DC_ACPI_CM_POWER_STATE_D0);
2246 /* program HPD filter */
2250 * Early enable HPD Rx IRQ; this should be done before setting the mode,
2251 * as short pulse interrupts are used for MST.
2253 amdgpu_dm_irq_resume_early(adev);
2255 /* On resume we need to rewrite the MSTM control bits to enable MST */
2256 s3_handle_mst(ddev, false);
2259 drm_connector_list_iter_begin(ddev, &iter);
2260 drm_for_each_connector_iter(connector, &iter) {
2261 aconnector = to_amdgpu_dm_connector(connector);
2264 * This is the case when traversing through already-created MST
2265 * connectors; they should be skipped.
2267 if (aconnector->mst_port)
2270 mutex_lock(&aconnector->hpd_lock);
2271 if (!dc_link_detect_sink(aconnector->dc_link, &new_connection_type))
2272 DRM_ERROR("KMS: Failed to detect connector\n");
2274 if (aconnector->base.force && new_connection_type == dc_connection_none)
2275 emulated_link_detect(aconnector->dc_link);
2277 dc_link_detect(aconnector->dc_link, DETECT_REASON_HPD);
2279 if (aconnector->fake_enable && aconnector->dc_link->local_sink)
2280 aconnector->fake_enable = false;
2282 if (aconnector->dc_sink)
2283 dc_sink_release(aconnector->dc_sink);
2284 aconnector->dc_sink = NULL;
2285 amdgpu_dm_update_connector_after_detect(aconnector);
2286 mutex_unlock(&aconnector->hpd_lock);
2288 drm_connector_list_iter_end(&iter);
2290 /* Force mode set in atomic commit */
2291 for_each_new_crtc_in_state(dm->cached_state, crtc, new_crtc_state, i)
2292 new_crtc_state->active_changed = true;
2295 * atomic_check is expected to create the dc states. We need to release
2296 * them here, since they were duplicated as part of the suspend
2297 * procedure.
2299 for_each_new_crtc_in_state(dm->cached_state, crtc, new_crtc_state, i) {
2300 dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
2301 if (dm_new_crtc_state->stream) {
2302 WARN_ON(kref_read(&dm_new_crtc_state->stream->refcount) > 1);
2303 dc_stream_release(dm_new_crtc_state->stream);
2304 dm_new_crtc_state->stream = NULL;
2308 for_each_new_plane_in_state(dm->cached_state, plane, new_plane_state, i) {
2309 dm_new_plane_state = to_dm_plane_state(new_plane_state);
2310 if (dm_new_plane_state->dc_state) {
2311 WARN_ON(kref_read(&dm_new_plane_state->dc_state->refcount) > 1);
2312 dc_plane_state_release(dm_new_plane_state->dc_state);
2313 dm_new_plane_state->dc_state = NULL;
2317 drm_atomic_helper_resume(ddev, dm->cached_state);
2319 dm->cached_state = NULL;
2321 amdgpu_dm_irq_resume_late(adev);
2323 amdgpu_dm_smu_write_watermarks_table(adev);
2331 * DM (and consequently DC) is registered in the amdgpu base driver as an IP
2332 * block. When CONFIG_DRM_AMD_DC is enabled, the DM device IP block is added to
2333 * the base driver's device list to be initialized and torn down accordingly.
2335 * The functions to do so are provided as hooks in &struct amd_ip_funcs.
2338 static const struct amd_ip_funcs amdgpu_dm_funcs = {
2340 .early_init = dm_early_init,
2341 .late_init = dm_late_init,
2342 .sw_init = dm_sw_init,
2343 .sw_fini = dm_sw_fini,
2344 .hw_init = dm_hw_init,
2345 .hw_fini = dm_hw_fini,
2346 .suspend = dm_suspend,
2347 .resume = dm_resume,
2348 .is_idle = dm_is_idle,
2349 .wait_for_idle = dm_wait_for_idle,
2350 .check_soft_reset = dm_check_soft_reset,
2351 .soft_reset = dm_soft_reset,
2352 .set_clockgating_state = dm_set_clockgating_state,
2353 .set_powergating_state = dm_set_powergating_state,
2356 const struct amdgpu_ip_block_version dm_ip_block =
2358 .type = AMD_IP_BLOCK_TYPE_DCE,
2362 .funcs = &amdgpu_dm_funcs,
2372 static const struct drm_mode_config_funcs amdgpu_dm_mode_funcs = {
2373 .fb_create = amdgpu_display_user_framebuffer_create,
2374 .get_format_info = amd_get_format_info,
2375 .output_poll_changed = drm_fb_helper_output_poll_changed,
2376 .atomic_check = amdgpu_dm_atomic_check,
2377 .atomic_commit = drm_atomic_helper_commit,
2380 static struct drm_mode_config_helper_funcs amdgpu_dm_mode_config_helperfuncs = {
2381 .atomic_commit_tail = amdgpu_dm_atomic_commit_tail
2384 static void update_connector_ext_caps(struct amdgpu_dm_connector *aconnector)
2386 u32 max_cll, min_cll, max, min, q, r;
2387 struct amdgpu_dm_backlight_caps *caps;
2388 struct amdgpu_display_manager *dm;
2389 struct drm_connector *conn_base;
2390 struct amdgpu_device *adev;
2391 struct dc_link *link = NULL;
2392 static const u8 pre_computed_values[] = {
2393 50, 51, 52, 53, 55, 56, 57, 58, 59, 61, 62, 63, 65, 66, 68, 69,
2394 71, 72, 74, 75, 77, 79, 81, 82, 84, 86, 88, 90, 92, 94, 96, 98};
2396 if (!aconnector || !aconnector->dc_link)
2399 link = aconnector->dc_link;
2400 if (link->connector_signal != SIGNAL_TYPE_EDP)
2403 conn_base = &aconnector->base;
2404 adev = drm_to_adev(conn_base->dev);
2406 caps = &dm->backlight_caps;
2407 caps->ext_caps = &aconnector->dc_link->dpcd_sink_ext_caps;
2408 caps->aux_support = false;
2409 max_cll = conn_base->hdr_sink_metadata.hdmi_type1.max_cll;
2410 min_cll = conn_base->hdr_sink_metadata.hdmi_type1.min_cll;
2412 if (caps->ext_caps->bits.oled == 1 ||
2413 caps->ext_caps->bits.sdr_aux_backlight_control == 1 ||
2414 caps->ext_caps->bits.hdr_aux_backlight_control == 1)
2415 caps->aux_support = true;
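/*
 * The amdgpu_backlight module parameter overrides the detection above:
 * 0 forces PWM backlight control, 1 forces AUX control, and any other
 * value (the default) keeps the capability detected from the sink.
 */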
2417 if (amdgpu_backlight == 0)
2418 caps->aux_support = false;
2419 else if (amdgpu_backlight == 1)
2420 caps->aux_support = true;
2422 /* From the specification (CTA-861-G), for calculating the maximum
2423 * luminance we need to use:
2424 * Luminance = 50*2**(CV/32)
2425 * where CV is a one-byte value.
2426 * Evaluating this expression directly would need floating-point
2427 * precision; to avoid that complexity, we take advantage of the fact
2428 * that CV is divided by a constant. From Euclid's division algorithm, we
2429 * know that CV can be written as: CV = 32*q + r. Next, we replace CV in
2430 * the Luminance expression and get 50*(2**q)*(2**(r/32)), hence we only
2431 * need to pre-compute the values of 50*(2**(r/32)). For pre-computing
2432 * them we used the following Ruby line:
2433 * (0...32).each {|cv| puts (50*2**(cv/32.0)).round}
2434 * The results of the above expression can be verified in
2435 * pre_computed_values.
2439 max = (1 << q) * pre_computed_values[r];
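/*
 * For example, max_cll = 64 gives q = 2 and r = 0, so
 * max = (1 << 2) * 50 = 200, matching 50 * 2**(64/32) = 200 nits.
 */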
2441 // min luminance: maxLum * (CV/255)^2 / 100
2442 q = DIV_ROUND_CLOSEST(min_cll, 255);
2443 min = max * DIV_ROUND_CLOSEST((q * q), 100);
2445 caps->aux_max_input_signal = max;
2446 caps->aux_min_input_signal = min;
2449 void amdgpu_dm_update_connector_after_detect(
2450 struct amdgpu_dm_connector *aconnector)
2452 struct drm_connector *connector = &aconnector->base;
2453 struct drm_device *dev = connector->dev;
2454 struct dc_sink *sink;
2456 /* MST handled by drm_mst framework */
2457 if (aconnector->mst_mgr.mst_state == true)
2460 sink = aconnector->dc_link->local_sink;
2462 dc_sink_retain(sink);
2465 * An EDID-managed connector gets its first update only in the mode_valid
2466 * hook, and then the connector sink is set to either a fake or a physical
2467 * sink depending on link status. Skip if already done during boot.
2469 if (aconnector->base.force != DRM_FORCE_UNSPECIFIED
2470 && aconnector->dc_em_sink) {
2473 * For S3 resume with headless, use dc_em_sink to fake the stream
2474 * because on resume connector->sink is set to NULL.
2476 mutex_lock(&dev->mode_config.mutex);
2479 if (aconnector->dc_sink) {
2480 amdgpu_dm_update_freesync_caps(connector, NULL);
2482 * The retain and release below are used to bump up the refcount for the
2483 * sink because the link doesn't point to it anymore after disconnect;
2484 * otherwise, on the next crtc-to-connector reshuffle by UMD we would run
2485 * into an unwanted dc_sink release.
2487 dc_sink_release(aconnector->dc_sink);
2489 aconnector->dc_sink = sink;
2490 dc_sink_retain(aconnector->dc_sink);
2491 amdgpu_dm_update_freesync_caps(connector,
2494 amdgpu_dm_update_freesync_caps(connector, NULL);
2495 if (!aconnector->dc_sink) {
2496 aconnector->dc_sink = aconnector->dc_em_sink;
2497 dc_sink_retain(aconnector->dc_sink);
2501 mutex_unlock(&dev->mode_config.mutex);
2504 dc_sink_release(sink);
2509 * TODO: temporary guard until a proper fix is found.
2510 * If this sink is an MST sink, we should not do anything.
2512 if (sink && sink->sink_signal == SIGNAL_TYPE_DISPLAY_PORT_MST) {
2513 dc_sink_release(sink);
2517 if (aconnector->dc_sink == sink) {
2519 * We got a DP short pulse (Link Loss, DP CTS, etc...).
2522 DRM_DEBUG_DRIVER("DCHPD: connector_id=%d: dc_sink didn't change.\n",
2523 aconnector->connector_id);
2525 dc_sink_release(sink);
2529 DRM_DEBUG_DRIVER("DCHPD: connector_id=%d: Old sink=%p New sink=%p\n",
2530 aconnector->connector_id, aconnector->dc_sink, sink);
2532 mutex_lock(&dev->mode_config.mutex);
2535 * 1. Update status of the drm connector
2536 * 2. Send an event and let userspace tell us what to do
2540 * TODO: check if we still need the S3 mode update workaround.
2541 * If yes, put it here.
2543 if (aconnector->dc_sink) {
2544 amdgpu_dm_update_freesync_caps(connector, NULL);
2545 dc_sink_release(aconnector->dc_sink);
2548 aconnector->dc_sink = sink;
2549 dc_sink_retain(aconnector->dc_sink);
2550 if (sink->dc_edid.length == 0) {
2551 aconnector->edid = NULL;
2552 if (aconnector->dc_link->aux_mode) {
2553 drm_dp_cec_unset_edid(
2554 &aconnector->dm_dp_aux.aux);
2558 (struct edid *)sink->dc_edid.raw_edid;
2560 drm_connector_update_edid_property(connector,
2562 if (aconnector->dc_link->aux_mode)
2563 drm_dp_cec_set_edid(&aconnector->dm_dp_aux.aux,
2567 amdgpu_dm_update_freesync_caps(connector, aconnector->edid);
2568 update_connector_ext_caps(aconnector);
2570 drm_dp_cec_unset_edid(&aconnector->dm_dp_aux.aux);
2571 amdgpu_dm_update_freesync_caps(connector, NULL);
2572 drm_connector_update_edid_property(connector, NULL);
2573 aconnector->num_modes = 0;
2574 dc_sink_release(aconnector->dc_sink);
2575 aconnector->dc_sink = NULL;
2576 aconnector->edid = NULL;
2577 #ifdef CONFIG_DRM_AMD_DC_HDCP
2578 /* Set CP to DESIRED if it was ENABLED, so we can re-enable it on hotplug */
2579 if (connector->state->content_protection == DRM_MODE_CONTENT_PROTECTION_ENABLED)
2580 connector->state->content_protection = DRM_MODE_CONTENT_PROTECTION_DESIRED;
2584 mutex_unlock(&dev->mode_config.mutex);
2586 update_subconnector_property(aconnector);
2589 dc_sink_release(sink);
2592 static void handle_hpd_irq(void *param)
2594 struct amdgpu_dm_connector *aconnector = (struct amdgpu_dm_connector *)param;
2595 struct drm_connector *connector = &aconnector->base;
2596 struct drm_device *dev = connector->dev;
2597 enum dc_connection_type new_connection_type = dc_connection_none;
2598 struct amdgpu_device *adev = drm_to_adev(dev);
2599 #ifdef CONFIG_DRM_AMD_DC_HDCP
2600 struct dm_connector_state *dm_con_state = to_dm_connector_state(connector->state);
2603 if (adev->dm.disable_hpd_irq)
2607 * In case of failure or MST there is no need to update the connector status
2608 * or notify the OS, since (in the MST case) MST handles this in its own context.
2610 mutex_lock(&aconnector->hpd_lock);
2612 #ifdef CONFIG_DRM_AMD_DC_HDCP
2613 if (adev->dm.hdcp_workqueue) {
2614 hdcp_reset_display(adev->dm.hdcp_workqueue, aconnector->dc_link->link_index);
2615 dm_con_state->update_hdcp = true;
2618 if (aconnector->fake_enable)
2619 aconnector->fake_enable = false;
2621 if (!dc_link_detect_sink(aconnector->dc_link, &new_connection_type))
2622 DRM_ERROR("KMS: Failed to detect connector\n");
2624 if (aconnector->base.force && new_connection_type == dc_connection_none) {
2625 emulated_link_detect(aconnector->dc_link);
2628 drm_modeset_lock_all(dev);
2629 dm_restore_drm_connector_state(dev, connector);
2630 drm_modeset_unlock_all(dev);
2632 if (aconnector->base.force == DRM_FORCE_UNSPECIFIED)
2633 drm_kms_helper_hotplug_event(dev);
2635 } else if (dc_link_detect(aconnector->dc_link, DETECT_REASON_HPD)) {
2636 if (new_connection_type == dc_connection_none &&
2637 aconnector->dc_link->type == dc_connection_none)
2638 dm_set_dpms_off(aconnector->dc_link);
2640 amdgpu_dm_update_connector_after_detect(aconnector);
2642 drm_modeset_lock_all(dev);
2643 dm_restore_drm_connector_state(dev, connector);
2644 drm_modeset_unlock_all(dev);
2646 if (aconnector->base.force == DRM_FORCE_UNSPECIFIED)
2647 drm_kms_helper_hotplug_event(dev);
2649 mutex_unlock(&aconnector->hpd_lock);
2653 static void dm_handle_hpd_rx_irq(struct amdgpu_dm_connector *aconnector)
2655 uint8_t esi[DP_PSR_ERROR_STATUS - DP_SINK_COUNT_ESI] = { 0 };
2657 bool new_irq_handled = false;
2659 int dpcd_bytes_to_read;
2661 const int max_process_count = 30;
2662 int process_count = 0;
2664 const struct dc_link_status *link_status = dc_link_get_status(aconnector->dc_link);
2666 if (link_status->dpcd_caps->dpcd_rev.raw < 0x12) {
2667 dpcd_bytes_to_read = DP_LANE0_1_STATUS - DP_SINK_COUNT;
2668 /* DPCD 0x200 - 0x201 for downstream IRQ */
2669 dpcd_addr = DP_SINK_COUNT;
2671 dpcd_bytes_to_read = DP_PSR_ERROR_STATUS - DP_SINK_COUNT_ESI;
2672 /* DPCD 0x2002 - 0x2005 for downstream IRQ */
2673 dpcd_addr = DP_SINK_COUNT_ESI;
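/*
 * In other words: pre-DP 1.2 sinks only expose the legacy status registers
 * at DPCD 0x200, while DP 1.2+ sinks provide the ESI block at 0x2002, which
 * is what the MST sideband handling below consumes.
 */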
2676 dret = drm_dp_dpcd_read(
2677 &aconnector->dm_dp_aux.aux,
2680 dpcd_bytes_to_read);
2682 while (dret == dpcd_bytes_to_read &&
2683 process_count < max_process_count) {
2689 DRM_DEBUG_DRIVER("ESI %02x %02x %02x\n", esi[0], esi[1], esi[2]);
2690 /* handle HPD short pulse irq */
2691 if (aconnector->mst_mgr.mst_state)
2693 &aconnector->mst_mgr,
2697 if (new_irq_handled) {
2698 /* ACK at DPCD to notify downstream */
2699 const int ack_dpcd_bytes_to_write =
2700 dpcd_bytes_to_read - 1;
2702 for (retry = 0; retry < 3; retry++) {
2705 wret = drm_dp_dpcd_write(
2706 &aconnector->dm_dp_aux.aux,
2709 ack_dpcd_bytes_to_write);
2710 if (wret == ack_dpcd_bytes_to_write)
2714 /* check if there is new irq to be handled */
2715 dret = drm_dp_dpcd_read(
2716 &aconnector->dm_dp_aux.aux,
2719 dpcd_bytes_to_read);
2721 new_irq_handled = false;
2727 if (process_count == max_process_count)
2728 DRM_DEBUG_DRIVER("Loop exceeded max iterations\n");
2731 static void handle_hpd_rx_irq(void *param)
2733 struct amdgpu_dm_connector *aconnector = (struct amdgpu_dm_connector *)param;
2734 struct drm_connector *connector = &aconnector->base;
2735 struct drm_device *dev = connector->dev;
2736 struct dc_link *dc_link = aconnector->dc_link;
2737 bool is_mst_root_connector = aconnector->mst_mgr.mst_state;
2738 bool result = false;
2739 enum dc_connection_type new_connection_type = dc_connection_none;
2740 struct amdgpu_device *adev = drm_to_adev(dev);
2741 union hpd_irq_data hpd_irq_data;
2743 memset(&hpd_irq_data, 0, sizeof(hpd_irq_data));
2745 if (adev->dm.disable_hpd_irq)
2750 * TODO: Temporarily add a mutex to protect the HPD interrupt from GPIO
2751 * conflicts; after the i2c helper is implemented, this mutex should be
2752 * retired.
2754 mutex_lock(&aconnector->hpd_lock);
2756 read_hpd_rx_irq_data(dc_link, &hpd_irq_data);
2758 if ((dc_link->cur_link_settings.lane_count != LANE_COUNT_UNKNOWN) ||
2759 (dc_link->type == dc_connection_mst_branch)) {
2760 if (hpd_irq_data.bytes.device_service_irq.bits.UP_REQ_MSG_RDY) {
2762 dm_handle_hpd_rx_irq(aconnector);
2764 } else if (hpd_irq_data.bytes.device_service_irq.bits.DOWN_REP_MSG_RDY) {
2766 dm_handle_hpd_rx_irq(aconnector);
2771 if (!amdgpu_in_reset(adev)) {
2772 mutex_lock(&adev->dm.dc_lock);
2773 #ifdef CONFIG_DRM_AMD_DC_HDCP
2774 result = dc_link_handle_hpd_rx_irq(dc_link, &hpd_irq_data, NULL);
2776 result = dc_link_handle_hpd_rx_irq(dc_link, NULL, NULL);
2778 mutex_unlock(&adev->dm.dc_lock);
2782 if (result && !is_mst_root_connector) {
2783 /* Downstream Port status changed. */
2784 if (!dc_link_detect_sink(dc_link, &new_connection_type))
2785 DRM_ERROR("KMS: Failed to detect connector\n");
2787 if (aconnector->base.force && new_connection_type == dc_connection_none) {
2788 emulated_link_detect(dc_link);
2790 if (aconnector->fake_enable)
2791 aconnector->fake_enable = false;
2793 amdgpu_dm_update_connector_after_detect(aconnector);
2796 drm_modeset_lock_all(dev);
2797 dm_restore_drm_connector_state(dev, connector);
2798 drm_modeset_unlock_all(dev);
2800 drm_kms_helper_hotplug_event(dev);
2801 } else if (dc_link_detect(dc_link, DETECT_REASON_HPDRX)) {
2803 if (aconnector->fake_enable)
2804 aconnector->fake_enable = false;
2806 amdgpu_dm_update_connector_after_detect(aconnector);
2809 drm_modeset_lock_all(dev);
2810 dm_restore_drm_connector_state(dev, connector);
2811 drm_modeset_unlock_all(dev);
2813 drm_kms_helper_hotplug_event(dev);
2816 #ifdef CONFIG_DRM_AMD_DC_HDCP
2817 if (hpd_irq_data.bytes.device_service_irq.bits.CP_IRQ) {
2818 if (adev->dm.hdcp_workqueue)
2819 hdcp_handle_cpirq(adev->dm.hdcp_workqueue, aconnector->base.index);
2823 if (dc_link->type != dc_connection_mst_branch)
2824 drm_dp_cec_irq(&aconnector->dm_dp_aux.aux);
2826 mutex_unlock(&aconnector->hpd_lock);
2829 static void register_hpd_handlers(struct amdgpu_device *adev)
2831 struct drm_device *dev = adev_to_drm(adev);
2832 struct drm_connector *connector;
2833 struct amdgpu_dm_connector *aconnector;
2834 const struct dc_link *dc_link;
2835 struct dc_interrupt_params int_params = {0};
2837 int_params.requested_polarity = INTERRUPT_POLARITY_DEFAULT;
2838 int_params.current_polarity = INTERRUPT_POLARITY_DEFAULT;
2840 list_for_each_entry(connector,
2841 &dev->mode_config.connector_list, head) {
2843 aconnector = to_amdgpu_dm_connector(connector);
2844 dc_link = aconnector->dc_link;
2846 if (DC_IRQ_SOURCE_INVALID != dc_link->irq_source_hpd) {
2847 int_params.int_context = INTERRUPT_LOW_IRQ_CONTEXT;
2848 int_params.irq_source = dc_link->irq_source_hpd;
2850 amdgpu_dm_irq_register_interrupt(adev, &int_params,
2852 (void *) aconnector);
2855 if (DC_IRQ_SOURCE_INVALID != dc_link->irq_source_hpd_rx) {
2857 /* Also register for DP short pulse (hpd_rx). */
2858 int_params.int_context = INTERRUPT_LOW_IRQ_CONTEXT;
2859 int_params.irq_source = dc_link->irq_source_hpd_rx;
2861 amdgpu_dm_irq_register_interrupt(adev, &int_params,
2863 (void *) aconnector);
2868 #if defined(CONFIG_DRM_AMD_DC_SI)
2869 /* Register IRQ sources and initialize IRQ callbacks */
2870 static int dce60_register_irq_handlers(struct amdgpu_device *adev)
2872 struct dc *dc = adev->dm.dc;
2873 struct common_irq_params *c_irq_params;
2874 struct dc_interrupt_params int_params = {0};
2877 unsigned client_id = AMDGPU_IRQ_CLIENTID_LEGACY;
2879 int_params.requested_polarity = INTERRUPT_POLARITY_DEFAULT;
2880 int_params.current_polarity = INTERRUPT_POLARITY_DEFAULT;
2883 * Actions of amdgpu_irq_add_id():
2884 * 1. Register a set() function with base driver.
2885 * Base driver will call set() function to enable/disable an
2886 * interrupt in DC hardware.
2887 * 2. Register amdgpu_dm_irq_handler().
2888 * Base driver will call amdgpu_dm_irq_handler() for ALL interrupts
2889 * coming from DC hardware.
2890 * amdgpu_dm_irq_handler() will re-direct the interrupt to DC
2891 * for acknowledging and handling. */
2893 /* Use VBLANK interrupt */
2894 for (i = 0; i < adev->mode_info.num_crtc; i++) {
2895 r = amdgpu_irq_add_id(adev, client_id, i + 1, &adev->crtc_irq);
2897 DRM_ERROR("Failed to add crtc irq id!\n");
2901 int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
2902 int_params.irq_source =
2903 dc_interrupt_to_irq_source(dc, i + 1, 0);
2905 c_irq_params = &adev->dm.vblank_params[int_params.irq_source - DC_IRQ_SOURCE_VBLANK1];
2907 c_irq_params->adev = adev;
2908 c_irq_params->irq_src = int_params.irq_source;
2910 amdgpu_dm_irq_register_interrupt(adev, &int_params,
2911 dm_crtc_high_irq, c_irq_params);
2914 /* Use GRPH_PFLIP interrupt */
2915 for (i = VISLANDS30_IV_SRCID_D1_GRPH_PFLIP;
2916 i <= VISLANDS30_IV_SRCID_D6_GRPH_PFLIP; i += 2) {
2917 r = amdgpu_irq_add_id(adev, client_id, i, &adev->pageflip_irq);
2919 DRM_ERROR("Failed to add page flip irq id!\n");
2923 int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
2924 int_params.irq_source =
2925 dc_interrupt_to_irq_source(dc, i, 0);
2927 c_irq_params = &adev->dm.pflip_params[int_params.irq_source - DC_IRQ_SOURCE_PFLIP_FIRST];
2929 c_irq_params->adev = adev;
2930 c_irq_params->irq_src = int_params.irq_source;
2932 amdgpu_dm_irq_register_interrupt(adev, &int_params,
2933 dm_pflip_high_irq, c_irq_params);
2938 r = amdgpu_irq_add_id(adev, client_id,
2939 VISLANDS30_IV_SRCID_HOTPLUG_DETECT_A, &adev->hpd_irq);
2941 DRM_ERROR("Failed to add hpd irq id!\n");
2945 register_hpd_handlers(adev);
2951 /* Register IRQ sources and initialize IRQ callbacks */
2952 static int dce110_register_irq_handlers(struct amdgpu_device *adev)
2954 struct dc *dc = adev->dm.dc;
2955 struct common_irq_params *c_irq_params;
2956 struct dc_interrupt_params int_params = {0};
2959 unsigned client_id = AMDGPU_IRQ_CLIENTID_LEGACY;
2961 if (adev->asic_type >= CHIP_VEGA10)
2962 client_id = SOC15_IH_CLIENTID_DCE;
2964 int_params.requested_polarity = INTERRUPT_POLARITY_DEFAULT;
2965 int_params.current_polarity = INTERRUPT_POLARITY_DEFAULT;
2968 * Actions of amdgpu_irq_add_id():
2969 * 1. Register a set() function with base driver.
2970 * Base driver will call set() function to enable/disable an
2971 * interrupt in DC hardware.
2972 * 2. Register amdgpu_dm_irq_handler().
2973 * Base driver will call amdgpu_dm_irq_handler() for ALL interrupts
2974 * coming from DC hardware.
2975 * amdgpu_dm_irq_handler() will re-direct the interrupt to DC
2976 * for acknowledging and handling. */
2978 /* Use VBLANK interrupt */
2979 for (i = VISLANDS30_IV_SRCID_D1_VERTICAL_INTERRUPT0; i <= VISLANDS30_IV_SRCID_D6_VERTICAL_INTERRUPT0; i++) {
2980 r = amdgpu_irq_add_id(adev, client_id, i, &adev->crtc_irq);
2982 DRM_ERROR("Failed to add crtc irq id!\n");
2986 int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
2987 int_params.irq_source =
2988 dc_interrupt_to_irq_source(dc, i, 0);
2990 c_irq_params = &adev->dm.vblank_params[int_params.irq_source - DC_IRQ_SOURCE_VBLANK1];
2992 c_irq_params->adev = adev;
2993 c_irq_params->irq_src = int_params.irq_source;
2995 amdgpu_dm_irq_register_interrupt(adev, &int_params,
2996 dm_crtc_high_irq, c_irq_params);
2999 /* Use VUPDATE interrupt */
3000 for (i = VISLANDS30_IV_SRCID_D1_V_UPDATE_INT; i <= VISLANDS30_IV_SRCID_D6_V_UPDATE_INT; i += 2) {
3001 r = amdgpu_irq_add_id(adev, client_id, i, &adev->vupdate_irq);
3003 DRM_ERROR("Failed to add vupdate irq id!\n");
3007 int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
3008 int_params.irq_source =
3009 dc_interrupt_to_irq_source(dc, i, 0);
3011 c_irq_params = &adev->dm.vupdate_params[int_params.irq_source - DC_IRQ_SOURCE_VUPDATE1];
3013 c_irq_params->adev = adev;
3014 c_irq_params->irq_src = int_params.irq_source;
3016 amdgpu_dm_irq_register_interrupt(adev, &int_params,
3017 dm_vupdate_high_irq, c_irq_params);
3020 /* Use GRPH_PFLIP interrupt */
3021 for (i = VISLANDS30_IV_SRCID_D1_GRPH_PFLIP;
3022 i <= VISLANDS30_IV_SRCID_D6_GRPH_PFLIP; i += 2) {
3023 r = amdgpu_irq_add_id(adev, client_id, i, &adev->pageflip_irq);
3025 DRM_ERROR("Failed to add page flip irq id!\n");
3029 int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
3030 int_params.irq_source =
3031 dc_interrupt_to_irq_source(dc, i, 0);
3033 c_irq_params = &adev->dm.pflip_params[int_params.irq_source - DC_IRQ_SOURCE_PFLIP_FIRST];
3035 c_irq_params->adev = adev;
3036 c_irq_params->irq_src = int_params.irq_source;
3038 amdgpu_dm_irq_register_interrupt(adev, &int_params,
3039 dm_pflip_high_irq, c_irq_params);
3044 r = amdgpu_irq_add_id(adev, client_id,
3045 VISLANDS30_IV_SRCID_HOTPLUG_DETECT_A, &adev->hpd_irq);
3047 DRM_ERROR("Failed to add hpd irq id!\n");
3051 register_hpd_handlers(adev);
3056 #if defined(CONFIG_DRM_AMD_DC_DCN)
3057 /* Register IRQ sources and initialize IRQ callbacks */
3058 static int dcn10_register_irq_handlers(struct amdgpu_device *adev)
3060 struct dc *dc = adev->dm.dc;
3061 struct common_irq_params *c_irq_params;
3062 struct dc_interrupt_params int_params = {0};
3065 #if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
3066 static const unsigned int vrtl_int_srcid[] = {
3067 DCN_1_0__SRCID__OTG1_VERTICAL_INTERRUPT0_CONTROL,
3068 DCN_1_0__SRCID__OTG2_VERTICAL_INTERRUPT0_CONTROL,
3069 DCN_1_0__SRCID__OTG3_VERTICAL_INTERRUPT0_CONTROL,
3070 DCN_1_0__SRCID__OTG4_VERTICAL_INTERRUPT0_CONTROL,
3071 DCN_1_0__SRCID__OTG5_VERTICAL_INTERRUPT0_CONTROL,
3072 DCN_1_0__SRCID__OTG6_VERTICAL_INTERRUPT0_CONTROL
3076 int_params.requested_polarity = INTERRUPT_POLARITY_DEFAULT;
3077 int_params.current_polarity = INTERRUPT_POLARITY_DEFAULT;
3080 * Actions of amdgpu_irq_add_id():
3081 * 1. Register a set() function with base driver.
3082 * Base driver will call set() function to enable/disable an
3083 * interrupt in DC hardware.
3084 * 2. Register amdgpu_dm_irq_handler().
3085 * Base driver will call amdgpu_dm_irq_handler() for ALL interrupts
3086 * coming from DC hardware.
3087 * amdgpu_dm_irq_handler() will re-direct the interrupt to DC
3088 * for acknowledging and handling.
3091 /* Use VSTARTUP interrupt */
3092 for (i = DCN_1_0__SRCID__DC_D1_OTG_VSTARTUP;
3093 i <= DCN_1_0__SRCID__DC_D1_OTG_VSTARTUP + adev->mode_info.num_crtc - 1;
3095 r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_DCE, i, &adev->crtc_irq);
3098 DRM_ERROR("Failed to add crtc irq id!\n");
3102 int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
3103 int_params.irq_source =
3104 dc_interrupt_to_irq_source(dc, i, 0);
3106 c_irq_params = &adev->dm.vblank_params[int_params.irq_source - DC_IRQ_SOURCE_VBLANK1];
3108 c_irq_params->adev = adev;
3109 c_irq_params->irq_src = int_params.irq_source;
3111 amdgpu_dm_irq_register_interrupt(
3112 adev, &int_params, dm_crtc_high_irq, c_irq_params);
3115 /* Use otg vertical line interrupt */
3116 #if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
3117 for (i = 0; i <= adev->mode_info.num_crtc - 1; i++) {
3118 r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_DCE,
3119 vrtl_int_srcid[i], &adev->vline0_irq);
3122 DRM_ERROR("Failed to add vline0 irq id!\n");
3126 int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
3127 int_params.irq_source =
3128 dc_interrupt_to_irq_source(dc, vrtl_int_srcid[i], 0);
3130 if (int_params.irq_source == DC_IRQ_SOURCE_INVALID) {
3131 DRM_ERROR("Failed to register vline0 irq %d!\n", vrtl_int_srcid[i]);
3135 c_irq_params = &adev->dm.vline0_params[int_params.irq_source
3136 - DC_IRQ_SOURCE_DC1_VLINE0];
3138 c_irq_params->adev = adev;
3139 c_irq_params->irq_src = int_params.irq_source;
3141 amdgpu_dm_irq_register_interrupt(adev, &int_params,
3142 dm_dcn_vertical_interrupt0_high_irq, c_irq_params);
3146 /* Use VUPDATE_NO_LOCK interrupt on DCN, which seems to correspond to
3147 * the regular VUPDATE interrupt on DCE. We want DC_IRQ_SOURCE_VUPDATEx
3148 * to trigger at end of each vblank, regardless of state of the lock,
3149 * matching DCE behaviour.
3151 for (i = DCN_1_0__SRCID__OTG0_IHC_V_UPDATE_NO_LOCK_INTERRUPT;
3152 i <= DCN_1_0__SRCID__OTG0_IHC_V_UPDATE_NO_LOCK_INTERRUPT + adev->mode_info.num_crtc - 1;
3154 r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_DCE, i, &adev->vupdate_irq);
3157 DRM_ERROR("Failed to add vupdate irq id!\n");
3161 int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
3162 int_params.irq_source =
3163 dc_interrupt_to_irq_source(dc, i, 0);
3165 c_irq_params = &adev->dm.vupdate_params[int_params.irq_source - DC_IRQ_SOURCE_VUPDATE1];
3167 c_irq_params->adev = adev;
3168 c_irq_params->irq_src = int_params.irq_source;
3170 amdgpu_dm_irq_register_interrupt(adev, &int_params,
3171 dm_vupdate_high_irq, c_irq_params);
3174 /* Use GRPH_PFLIP interrupt */
3175 for (i = DCN_1_0__SRCID__HUBP0_FLIP_INTERRUPT;
3176 i <= DCN_1_0__SRCID__HUBP0_FLIP_INTERRUPT + adev->mode_info.num_crtc - 1;
3178 r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_DCE, i, &adev->pageflip_irq);
3180 DRM_ERROR("Failed to add page flip irq id!\n");
3184 int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
3185 int_params.irq_source =
3186 dc_interrupt_to_irq_source(dc, i, 0);
3188 c_irq_params = &adev->dm.pflip_params[int_params.irq_source - DC_IRQ_SOURCE_PFLIP_FIRST];
3190 c_irq_params->adev = adev;
3191 c_irq_params->irq_src = int_params.irq_source;
3193 amdgpu_dm_irq_register_interrupt(adev, &int_params,
3194 dm_pflip_high_irq, c_irq_params);
3199 r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_DCE, DCN_1_0__SRCID__DC_HPD1_INT,
3202 DRM_ERROR("Failed to add hpd irq id!\n");
3206 register_hpd_handlers(adev);
3210 /* Register Outbox IRQ sources and initialize IRQ callbacks */
3211 static int register_outbox_irq_handlers(struct amdgpu_device *adev)
3213 struct dc *dc = adev->dm.dc;
3214 struct common_irq_params *c_irq_params;
3215 struct dc_interrupt_params int_params = {0};
3218 int_params.requested_polarity = INTERRUPT_POLARITY_DEFAULT;
3219 int_params.current_polarity = INTERRUPT_POLARITY_DEFAULT;
3221 r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_DCE, DCN_1_0__SRCID__DMCUB_OUTBOX_LOW_PRIORITY_READY_INT,
3222 &adev->dmub_outbox_irq);
3224 DRM_ERROR("Failed to add outbox irq id!\n");
3228 if (dc->ctx->dmub_srv) {
3229 i = DCN_1_0__SRCID__DMCUB_OUTBOX_LOW_PRIORITY_READY_INT;
3230 int_params.int_context = INTERRUPT_LOW_IRQ_CONTEXT;
3231 int_params.irq_source =
3232 dc_interrupt_to_irq_source(dc, i, 0);
3234 c_irq_params = &adev->dm.dmub_outbox_params[0];
3236 c_irq_params->adev = adev;
3237 c_irq_params->irq_src = int_params.irq_source;
3239 amdgpu_dm_irq_register_interrupt(adev, &int_params,
3240 dm_dmub_outbox1_low_irq, c_irq_params);
3248 * Acquires the lock for the atomic state object and returns
3249 * the new atomic state.
3251 * This should only be called during atomic check.
3253 static int dm_atomic_get_state(struct drm_atomic_state *state,
3254 struct dm_atomic_state **dm_state)
3256 struct drm_device *dev = state->dev;
3257 struct amdgpu_device *adev = drm_to_adev(dev);
3258 struct amdgpu_display_manager *dm = &adev->dm;
3259 struct drm_private_state *priv_state;
3264 priv_state = drm_atomic_get_private_obj_state(state, &dm->atomic_obj);
3265 if (IS_ERR(priv_state))
3266 return PTR_ERR(priv_state);
3268 *dm_state = to_dm_atomic_state(priv_state);
3273 static struct dm_atomic_state *
3274 dm_atomic_get_new_state(struct drm_atomic_state *state)
3276 struct drm_device *dev = state->dev;
3277 struct amdgpu_device *adev = drm_to_adev(dev);
3278 struct amdgpu_display_manager *dm = &adev->dm;
3279 struct drm_private_obj *obj;
3280 struct drm_private_state *new_obj_state;
3283 for_each_new_private_obj_in_state(state, obj, new_obj_state, i) {
3284 if (obj->funcs == dm->atomic_obj.funcs)
3285 return to_dm_atomic_state(new_obj_state);
3291 static struct drm_private_state *
3292 dm_atomic_duplicate_state(struct drm_private_obj *obj)
3294 struct dm_atomic_state *old_state, *new_state;
3296 new_state = kzalloc(sizeof(*new_state), GFP_KERNEL);
3300 __drm_atomic_helper_private_obj_duplicate_state(obj, &new_state->base);
3302 old_state = to_dm_atomic_state(obj->state);
3304 if (old_state && old_state->context)
3305 new_state->context = dc_copy_state(old_state->context);
3307 if (!new_state->context) {
3312 return &new_state->base;
3315 static void dm_atomic_destroy_state(struct drm_private_obj *obj,
3316 struct drm_private_state *state)
3318 struct dm_atomic_state *dm_state = to_dm_atomic_state(state);
3320 if (dm_state && dm_state->context)
3321 dc_release_state(dm_state->context);
3326 static struct drm_private_state_funcs dm_atomic_state_funcs = {
3327 .atomic_duplicate_state = dm_atomic_duplicate_state,
3328 .atomic_destroy_state = dm_atomic_destroy_state,
3331 static int amdgpu_dm_mode_config_init(struct amdgpu_device *adev)
3333 struct dm_atomic_state *state;
3336 adev->mode_info.mode_config_initialized = true;
3338 adev_to_drm(adev)->mode_config.funcs = (void *)&amdgpu_dm_mode_funcs;
3339 adev_to_drm(adev)->mode_config.helper_private = &amdgpu_dm_mode_config_helperfuncs;
3341 adev_to_drm(adev)->mode_config.max_width = 16384;
3342 adev_to_drm(adev)->mode_config.max_height = 16384;
3344 adev_to_drm(adev)->mode_config.preferred_depth = 24;
3345 adev_to_drm(adev)->mode_config.prefer_shadow = 1;
3346 /* indicates support for immediate flip */
3347 adev_to_drm(adev)->mode_config.async_page_flip = true;
3349 adev_to_drm(adev)->mode_config.fb_base = adev->gmc.aper_base;
3351 state = kzalloc(sizeof(*state), GFP_KERNEL);
3355 state->context = dc_create_state(adev->dm.dc);
3356 if (!state->context) {
3361 dc_resource_state_copy_construct_current(adev->dm.dc, state->context);
3363 drm_atomic_private_obj_init(adev_to_drm(adev),
3364 &adev->dm.atomic_obj,
3366 &dm_atomic_state_funcs);
3368 r = amdgpu_display_modeset_create_props(adev);
3370 dc_release_state(state->context);
3375 r = amdgpu_dm_audio_init(adev);
3377 dc_release_state(state->context);
3385 #define AMDGPU_DM_DEFAULT_MIN_BACKLIGHT 12
3386 #define AMDGPU_DM_DEFAULT_MAX_BACKLIGHT 255
3387 #define AUX_BL_DEFAULT_TRANSITION_TIME_MS 50
3389 #if defined(CONFIG_BACKLIGHT_CLASS_DEVICE) ||\
3390 defined(CONFIG_BACKLIGHT_CLASS_DEVICE_MODULE)
3392 static void amdgpu_dm_update_backlight_caps(struct amdgpu_display_manager *dm)
3394 #if defined(CONFIG_ACPI)
3395 struct amdgpu_dm_backlight_caps caps;
3397 memset(&caps, 0, sizeof(caps));
3399 if (dm->backlight_caps.caps_valid)
3402 amdgpu_acpi_get_backlight_caps(dm->adev, &caps);
3403 if (caps.caps_valid) {
3404 dm->backlight_caps.caps_valid = true;
3405 if (caps.aux_support)
3407 dm->backlight_caps.min_input_signal = caps.min_input_signal;
3408 dm->backlight_caps.max_input_signal = caps.max_input_signal;
3410 dm->backlight_caps.min_input_signal =
3411 AMDGPU_DM_DEFAULT_MIN_BACKLIGHT;
3412 dm->backlight_caps.max_input_signal =
3413 AMDGPU_DM_DEFAULT_MAX_BACKLIGHT;
3416 if (dm->backlight_caps.aux_support)
3419 dm->backlight_caps.min_input_signal = AMDGPU_DM_DEFAULT_MIN_BACKLIGHT;
3420 dm->backlight_caps.max_input_signal = AMDGPU_DM_DEFAULT_MAX_BACKLIGHT;
3424 static int get_brightness_range(const struct amdgpu_dm_backlight_caps *caps,
3425 unsigned *min, unsigned *max)
3430 if (caps->aux_support) {
3431 // Firmware limits are in nits, DC API wants millinits.
3432 *max = 1000 * caps->aux_max_input_signal;
3433 *min = 1000 * caps->aux_min_input_signal;
3435 // Firmware limits are 8-bit, PWM control is 16-bit.
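// Multiplying by 0x101 maps the 8-bit range onto the full 16-bit range,
// since 255 * 0x101 = 0xFFFF.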
3436 *max = 0x101 * caps->max_input_signal;
3437 *min = 0x101 * caps->min_input_signal;
3442 static u32 convert_brightness_from_user(const struct amdgpu_dm_backlight_caps *caps,
3443 uint32_t brightness)
3447 if (!get_brightness_range(caps, &min, &max))
3450 // Rescale 0..255 to min..max
3451 return min + DIV_ROUND_CLOSEST((max - min) * brightness,
3452 AMDGPU_MAX_BL_LEVEL);
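/*
 * Example, assuming the default PWM caps (min_input_signal = 12,
 * max_input_signal = 255): min = 12 * 0x101 = 3084 and max = 65535, so a
 * user brightness of 0 maps to 3084 and 255 maps to 65535.
 */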
3455 static u32 convert_brightness_to_user(const struct amdgpu_dm_backlight_caps *caps,
3456 uint32_t brightness)
3460 if (!get_brightness_range(caps, &min, &max))
3463 if (brightness < min)
3465 // Rescale min..max to 0..255
3466 return DIV_ROUND_CLOSEST(AMDGPU_MAX_BL_LEVEL * (brightness - min),
3470 static int amdgpu_dm_backlight_set_level(struct amdgpu_display_manager *dm,
3471 u32 user_brightness)
3473 struct amdgpu_dm_backlight_caps caps;
3474 struct dc_link *link[AMDGPU_DM_MAX_NUM_EDP];
3475 u32 brightness[AMDGPU_DM_MAX_NUM_EDP];
3479 amdgpu_dm_update_backlight_caps(dm);
3480 caps = dm->backlight_caps;
3482 for (i = 0; i < dm->num_of_edps; i++) {
3483 dm->brightness[i] = user_brightness;
3484 brightness[i] = convert_brightness_from_user(&caps, dm->brightness[i]);
3485 link[i] = (struct dc_link *)dm->backlight_link[i];
3488 /* Change brightness based on AUX property */
3489 if (caps.aux_support) {
3490 for (i = 0; i < dm->num_of_edps; i++) {
3491 rc = dc_link_set_backlight_level_nits(link[i], true, brightness[i],
3492 AUX_BL_DEFAULT_TRANSITION_TIME_MS);
3494 DRM_ERROR("DM: Failed to update backlight via AUX on eDP[%d]\n", i);
3499 for (i = 0; i < dm->num_of_edps; i++) {
3500 rc = dc_link_set_backlight_level(dm->backlight_link[i], brightness[i], 0);
3502 DRM_ERROR("DM: Failed to update backlight on eDP[%d]\n", i);
3511 static int amdgpu_dm_backlight_update_status(struct backlight_device *bd)
3513 struct amdgpu_display_manager *dm = bl_get_data(bd);
3515 amdgpu_dm_backlight_set_level(dm, bd->props.brightness);
3520 static u32 amdgpu_dm_backlight_get_level(struct amdgpu_display_manager *dm)
3522 struct amdgpu_dm_backlight_caps caps;
3524 amdgpu_dm_update_backlight_caps(dm);
3525 caps = dm->backlight_caps;
3527 if (caps.aux_support) {
3528 struct dc_link *link = (struct dc_link *)dm->backlight_link[0];
3532 rc = dc_link_get_backlight_level_nits(link, &avg, &peak);
3534 return dm->brightness[0];
3535 return convert_brightness_to_user(&caps, avg);
3537 int ret = dc_link_get_backlight_level(dm->backlight_link[0]);
3539 if (ret == DC_ERROR_UNEXPECTED)
3540 return dm->brightness[0];
3541 return convert_brightness_to_user(&caps, ret);
3545 static int amdgpu_dm_backlight_get_brightness(struct backlight_device *bd)
3547 struct amdgpu_display_manager *dm = bl_get_data(bd);
3549 return amdgpu_dm_backlight_get_level(dm);
3552 static const struct backlight_ops amdgpu_dm_backlight_ops = {
3553 .options = BL_CORE_SUSPENDRESUME,
3554 .get_brightness = amdgpu_dm_backlight_get_brightness,
3555 .update_status = amdgpu_dm_backlight_update_status,
3559 amdgpu_dm_register_backlight_device(struct amdgpu_display_manager *dm)
3562 struct backlight_properties props = { 0 };
3565 amdgpu_dm_update_backlight_caps(dm);
3566 for (i = 0; i < dm->num_of_edps; i++)
3567 dm->brightness[i] = AMDGPU_MAX_BL_LEVEL;
3569 props.max_brightness = AMDGPU_MAX_BL_LEVEL;
3570 props.brightness = AMDGPU_MAX_BL_LEVEL;
3571 props.type = BACKLIGHT_RAW;
3573 snprintf(bl_name, sizeof(bl_name), "amdgpu_bl%d",
3574 adev_to_drm(dm->adev)->primary->index);
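/* The device is named after the DRM primary node index, e.g. "amdgpu_bl0". */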
3576 dm->backlight_dev = backlight_device_register(bl_name,
3577 adev_to_drm(dm->adev)->dev,
3579 &amdgpu_dm_backlight_ops,
3582 if (IS_ERR(dm->backlight_dev))
3583 DRM_ERROR("DM: Backlight registration failed!\n");
3585 DRM_DEBUG_DRIVER("DM: Registered Backlight device: %s\n", bl_name);
3590 static int initialize_plane(struct amdgpu_display_manager *dm,
3591 struct amdgpu_mode_info *mode_info, int plane_id,
3592 enum drm_plane_type plane_type,
3593 const struct dc_plane_cap *plane_cap)
3595 struct drm_plane *plane;
3596 unsigned long possible_crtcs;
3599 plane = kzalloc(sizeof(struct drm_plane), GFP_KERNEL);
3601 DRM_ERROR("KMS: Failed to allocate plane\n");
3604 plane->type = plane_type;
3607 * HACK: IGT tests expect that the primary plane for a CRTC
3608 * can only have one possible CRTC. Only expose support for
3609 * any CRTC if the plane is not going to be used as a primary
3610 * plane for a CRTC - like overlay or underlay planes.
3612 possible_crtcs = 1 << plane_id;
3613 if (plane_id >= dm->dc->caps.max_streams)
3614 possible_crtcs = 0xff;
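/*
 * For example, with max_streams = 4 the primary planes 0..3 get
 * possible_crtcs 0x1, 0x2, 0x4 and 0x8 respectively, while planes with
 * plane_id >= 4 (overlays) may be placed on any CRTC (0xff).
 */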
3616 ret = amdgpu_dm_plane_init(dm, plane, possible_crtcs, plane_cap);
3619 DRM_ERROR("KMS: Failed to initialize plane\n");
3625 mode_info->planes[plane_id] = plane;
3631 static void register_backlight_device(struct amdgpu_display_manager *dm,
3632 struct dc_link *link)
3634 #if defined(CONFIG_BACKLIGHT_CLASS_DEVICE) ||\
3635 defined(CONFIG_BACKLIGHT_CLASS_DEVICE_MODULE)
3637 if ((link->connector_signal & (SIGNAL_TYPE_EDP | SIGNAL_TYPE_LVDS)) &&
3638 link->type != dc_connection_none) {
3640 * Even if registration fails, we should continue with
3641 * DM initialization, because not having backlight control
3642 * is better than a black screen.
3644 if (!dm->backlight_dev)
3645 amdgpu_dm_register_backlight_device(dm);
3647 if (dm->backlight_dev) {
3648 dm->backlight_link[dm->num_of_edps] = link;
3657 * In this architecture, the association
3658 * connector -> encoder -> crtc
3659 * is not really required. The crtc and connector will hold the
3660 * display_index as an abstraction to use with the DAL component.
3662 * Returns 0 on success
3664 static int amdgpu_dm_initialize_drm_device(struct amdgpu_device *adev)
3666 struct amdgpu_display_manager *dm = &adev->dm;
3668 struct amdgpu_dm_connector *aconnector = NULL;
3669 struct amdgpu_encoder *aencoder = NULL;
3670 struct amdgpu_mode_info *mode_info = &adev->mode_info;
3672 int32_t primary_planes;
3673 enum dc_connection_type new_connection_type = dc_connection_none;
3674 const struct dc_plane_cap *plane;
3676 dm->display_indexes_num = dm->dc->caps.max_streams;
3677 /* Update the actual number of CRTCs in use */
3678 adev->mode_info.num_crtc = adev->dm.display_indexes_num;
3680 link_cnt = dm->dc->caps.max_links;
3681 if (amdgpu_dm_mode_config_init(dm->adev)) {
3682 DRM_ERROR("DM: Failed to initialize mode config\n");
3686 /* There is one primary plane per CRTC */
3687 primary_planes = dm->dc->caps.max_streams;
3688 ASSERT(primary_planes <= AMDGPU_MAX_PLANES);
3691 * Initialize primary planes, implicit planes for legacy IOCTLs.
3692 * Order is reversed to match iteration order in atomic check.
3694 for (i = (primary_planes - 1); i >= 0; i--) {
3695 plane = &dm->dc->caps.planes[i];
3697 if (initialize_plane(dm, mode_info, i,
3698 DRM_PLANE_TYPE_PRIMARY, plane)) {
3699 DRM_ERROR("KMS: Failed to initialize primary plane\n");
3705 * Initialize overlay planes, index starting after primary planes.
3706 * These planes have a higher DRM index than the primary planes since
3707 * they should be considered as having a higher z-order.
3708 * Order is reversed to match iteration order in atomic check.
3710 * Only support DCN for now, and only expose one so we don't encourage
3711 * userspace to use up all the pipes.
3713 for (i = 0; i < dm->dc->caps.max_planes; ++i) {
3714 struct dc_plane_cap *plane = &dm->dc->caps.planes[i];
3716 if (plane->type != DC_PLANE_TYPE_DCN_UNIVERSAL)
3719 if (!plane->blends_with_above || !plane->blends_with_below)
3722 if (!plane->pixel_format_support.argb8888)
3725 if (initialize_plane(dm, NULL, primary_planes + i,
3726 DRM_PLANE_TYPE_OVERLAY, plane)) {
3727 DRM_ERROR("KMS: Failed to initialize overlay plane\n");
3731 /* Only create one overlay plane. */
3735 for (i = 0; i < dm->dc->caps.max_streams; i++)
3736 if (amdgpu_dm_crtc_init(dm, mode_info->planes[i], i)) {
3737 DRM_ERROR("KMS: Failed to initialize crtc\n");
3741 #if defined(CONFIG_DRM_AMD_DC_DCN)
3742 /* Use Outbox interrupt */
3743 switch (adev->asic_type) {
3744 case CHIP_SIENNA_CICHLID:
3745 case CHIP_NAVY_FLOUNDER:
3747 if (register_outbox_irq_handlers(dm->adev)) {
3748 DRM_ERROR("DM: Failed to initialize IRQ\n");
3753 DRM_DEBUG_KMS("Unsupported ASIC type for outbox: 0x%X\n", adev->asic_type);
3757 /* Loop over all connectors on the board */
3758 for (i = 0; i < link_cnt; i++) {
3759 struct dc_link *link = NULL;
3761 if (i > AMDGPU_DM_MAX_DISPLAY_INDEX) {
3763 "KMS: Cannot support more than %d display indexes\n",
3764 AMDGPU_DM_MAX_DISPLAY_INDEX);
3768 aconnector = kzalloc(sizeof(*aconnector), GFP_KERNEL);
3772 aencoder = kzalloc(sizeof(*aencoder), GFP_KERNEL);
3776 if (amdgpu_dm_encoder_init(dm->ddev, aencoder, i)) {
3777 DRM_ERROR("KMS: Failed to initialize encoder\n");
3781 if (amdgpu_dm_connector_init(dm, aconnector, i, aencoder)) {
3782 DRM_ERROR("KMS: Failed to initialize connector\n");
3786 link = dc_get_link_at_index(dm->dc, i);
3788 if (!dc_link_detect_sink(link, &new_connection_type))
3789 DRM_ERROR("KMS: Failed to detect connector\n");
3791 if (aconnector->base.force && new_connection_type == dc_connection_none) {
3792 emulated_link_detect(link);
3793 amdgpu_dm_update_connector_after_detect(aconnector);
3795 } else if (dc_link_detect(link, DETECT_REASON_BOOT)) {
3796 amdgpu_dm_update_connector_after_detect(aconnector);
3797 register_backlight_device(dm, link);
3798 if (amdgpu_dc_feature_mask & DC_PSR_MASK)
3799 amdgpu_dm_set_psr_caps(link);
3805 /* Software is initialized. Now we can register interrupt handlers. */
3806 switch (adev->asic_type) {
3807 #if defined(CONFIG_DRM_AMD_DC_SI)
3812 if (dce60_register_irq_handlers(dm->adev)) {
3813 DRM_ERROR("DM: Failed to initialize IRQ\n");
3827 case CHIP_POLARIS11:
3828 case CHIP_POLARIS10:
3829 case CHIP_POLARIS12:
3834 if (dce110_register_irq_handlers(dm->adev)) {
3835 DRM_ERROR("DM: Failed to initialize IRQ\n");
3839 #if defined(CONFIG_DRM_AMD_DC_DCN)
3845 case CHIP_SIENNA_CICHLID:
3846 case CHIP_NAVY_FLOUNDER:
3847 case CHIP_DIMGREY_CAVEFISH:
3848 case CHIP_BEIGE_GOBY:
3850 if (dcn10_register_irq_handlers(dm->adev)) {
3851 DRM_ERROR("DM: Failed to initialize IRQ\n");
3857 DRM_ERROR("Unsupported ASIC type: 0x%X\n", adev->asic_type);
3869 static void amdgpu_dm_destroy_drm_device(struct amdgpu_display_manager *dm)
3871 drm_mode_config_cleanup(dm->ddev);
3872 drm_atomic_private_obj_fini(&dm->atomic_obj);
3876 /******************************************************************************
3877 * amdgpu_display_funcs functions
3878 *****************************************************************************/
3881 * dm_bandwidth_update - program display watermarks
3883 * @adev: amdgpu_device pointer
3885 * Calculate and program the display watermarks and line buffer allocation.
3887 static void dm_bandwidth_update(struct amdgpu_device *adev)
3889 /* TODO: implement later */
3892 static const struct amdgpu_display_funcs dm_display_funcs = {
3893 .bandwidth_update = dm_bandwidth_update, /* called unconditionally */
3894 .vblank_get_counter = dm_vblank_get_counter,/* called unconditionally */
3895 .backlight_set_level = NULL, /* never called for DC */
3896 .backlight_get_level = NULL, /* never called for DC */
3897 .hpd_sense = NULL,/* called unconditionally */
3898 .hpd_set_polarity = NULL, /* called unconditionally */
3899 .hpd_get_gpio_reg = NULL, /* VBIOS parsing. DAL does it. */
3900 .page_flip_get_scanoutpos =
3901 dm_crtc_get_scanoutpos,/* called unconditionally */
3902 .add_encoder = NULL, /* VBIOS parsing. DAL does it. */
3903 .add_connector = NULL, /* VBIOS parsing. DAL does it. */
3906 #if defined(CONFIG_DEBUG_KERNEL_DC)
3908 static ssize_t s3_debug_store(struct device *device,
3909 struct device_attribute *attr,
3915 struct drm_device *drm_dev = dev_get_drvdata(device);
3916 struct amdgpu_device *adev = drm_to_adev(drm_dev);
3918 ret = kstrtoint(buf, 0, &s3_state);
3923 drm_kms_helper_hotplug_event(adev_to_drm(adev));
3928 return ret == 0 ? count : 0;
3931 DEVICE_ATTR_WO(s3_debug);
3935 static int dm_early_init(void *handle)
3937 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
3939 switch (adev->asic_type) {
3940 #if defined(CONFIG_DRM_AMD_DC_SI)
3944 adev->mode_info.num_crtc = 6;
3945 adev->mode_info.num_hpd = 6;
3946 adev->mode_info.num_dig = 6;
3949 adev->mode_info.num_crtc = 2;
3950 adev->mode_info.num_hpd = 2;
3951 adev->mode_info.num_dig = 2;
3956 adev->mode_info.num_crtc = 6;
3957 adev->mode_info.num_hpd = 6;
3958 adev->mode_info.num_dig = 6;
3961 adev->mode_info.num_crtc = 4;
3962 adev->mode_info.num_hpd = 6;
3963 adev->mode_info.num_dig = 7;
3967 adev->mode_info.num_crtc = 2;
3968 adev->mode_info.num_hpd = 6;
3969 adev->mode_info.num_dig = 6;
3973 adev->mode_info.num_crtc = 6;
3974 adev->mode_info.num_hpd = 6;
3975 adev->mode_info.num_dig = 7;
3978 adev->mode_info.num_crtc = 3;
3979 adev->mode_info.num_hpd = 6;
3980 adev->mode_info.num_dig = 9;
3983 adev->mode_info.num_crtc = 2;
3984 adev->mode_info.num_hpd = 6;
3985 adev->mode_info.num_dig = 9;
3987 case CHIP_POLARIS11:
3988 case CHIP_POLARIS12:
3989 adev->mode_info.num_crtc = 5;
3990 adev->mode_info.num_hpd = 5;
3991 adev->mode_info.num_dig = 5;
3993 case CHIP_POLARIS10:
3995 adev->mode_info.num_crtc = 6;
3996 adev->mode_info.num_hpd = 6;
3997 adev->mode_info.num_dig = 6;
4002 adev->mode_info.num_crtc = 6;
4003 adev->mode_info.num_hpd = 6;
4004 adev->mode_info.num_dig = 6;
4006 #if defined(CONFIG_DRM_AMD_DC_DCN)
4010 adev->mode_info.num_crtc = 4;
4011 adev->mode_info.num_hpd = 4;
4012 adev->mode_info.num_dig = 4;
4016 case CHIP_SIENNA_CICHLID:
4017 case CHIP_NAVY_FLOUNDER:
4018 adev->mode_info.num_crtc = 6;
4019 adev->mode_info.num_hpd = 6;
4020 adev->mode_info.num_dig = 6;
4023 case CHIP_DIMGREY_CAVEFISH:
4024 adev->mode_info.num_crtc = 5;
4025 adev->mode_info.num_hpd = 5;
4026 adev->mode_info.num_dig = 5;
4028 case CHIP_BEIGE_GOBY:
4029 adev->mode_info.num_crtc = 2;
4030 adev->mode_info.num_hpd = 2;
4031 adev->mode_info.num_dig = 2;
4035 DRM_ERROR("Unsupported ASIC type: 0x%X\n", adev->asic_type);
4039 amdgpu_dm_set_irq_funcs(adev);
4041 if (adev->mode_info.funcs == NULL)
4042 adev->mode_info.funcs = &dm_display_funcs;
4045 * Note: Do NOT change adev->audio_endpt_rreg and
4046 * adev->audio_endpt_wreg because they are initialised in
4047 * amdgpu_device_init()
4049 #if defined(CONFIG_DEBUG_KERNEL_DC)
4051 adev_to_drm(adev)->dev,
4052 &dev_attr_s3_debug);
4058 static bool modeset_required(struct drm_crtc_state *crtc_state,
4059 struct dc_stream_state *new_stream,
4060 struct dc_stream_state *old_stream)
4062 return crtc_state->active && drm_atomic_crtc_needs_modeset(crtc_state);
4065 static bool modereset_required(struct drm_crtc_state *crtc_state)
4067 return !crtc_state->active && drm_atomic_crtc_needs_modeset(crtc_state);
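/*
 * In short: modeset_required() is true when a full modeset is needed and the
 * CRTC ends up active; modereset_required() is true when a modeset is needed
 * and the CRTC ends up disabled.
 */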
4070 static void amdgpu_dm_encoder_destroy(struct drm_encoder *encoder)
4072 drm_encoder_cleanup(encoder);
4076 static const struct drm_encoder_funcs amdgpu_dm_encoder_funcs = {
4077 .destroy = amdgpu_dm_encoder_destroy,
4081 static void get_min_max_dc_plane_scaling(struct drm_device *dev,
4082 struct drm_framebuffer *fb,
4083 int *min_downscale, int *max_upscale)
4085 struct amdgpu_device *adev = drm_to_adev(dev);
4086 struct dc *dc = adev->dm.dc;
4087 /* Caps for all supported planes are the same on DCE and DCN 1 - 3 */
4088 struct dc_plane_cap *plane_cap = &dc->caps.planes[0];
4090 switch (fb->format->format) {
4091 case DRM_FORMAT_P010:
4092 case DRM_FORMAT_NV12:
4093 case DRM_FORMAT_NV21:
4094 *max_upscale = plane_cap->max_upscale_factor.nv12;
4095 *min_downscale = plane_cap->max_downscale_factor.nv12;
4098 case DRM_FORMAT_XRGB16161616F:
4099 case DRM_FORMAT_ARGB16161616F:
4100 case DRM_FORMAT_XBGR16161616F:
4101 case DRM_FORMAT_ABGR16161616F:
4102 *max_upscale = plane_cap->max_upscale_factor.fp16;
4103 *min_downscale = plane_cap->max_downscale_factor.fp16;
4107 *max_upscale = plane_cap->max_upscale_factor.argb8888;
4108 *min_downscale = plane_cap->max_downscale_factor.argb8888;
4113 * A factor of 1 in the plane_cap means scaling is not allowed, i.e. use a
4114 * scaling factor of 1.0 == 1000 units.
4116 if (*max_upscale == 1)
4117 *max_upscale = 1000;
4119 if (*min_downscale == 1)
4120 *min_downscale = 1000;
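/*
 * The limits are expressed in units of 1/1000: e.g. a min_downscale of 250
 * allows shrinking to 1/4 of the source size and a max_upscale of 16000
 * allows enlarging up to 16x.
 */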
4124 static int fill_dc_scaling_info(const struct drm_plane_state *state,
4125 struct dc_scaling_info *scaling_info)
4127 int scale_w, scale_h, min_downscale, max_upscale;
4129 memset(scaling_info, 0, sizeof(*scaling_info));
4131 /* Source is fixed 16.16 but we ignore mantissa for now... */
4132 scaling_info->src_rect.x = state->src_x >> 16;
4133 scaling_info->src_rect.y = state->src_y >> 16;
4136 * For reasons we don't (yet) fully understand, a non-zero
4137 * src_y coordinate into an NV12 buffer can cause a
4138 * system hang. To avoid hangs (and maybe be overly cautious)
4139 * let's reject both non-zero src_x and src_y.
4141 * We currently know of only one use-case to reproduce a
4142 * scenario with non-zero src_x and src_y for NV12, which
4143 * is to gesture the YouTube Android app into full screen
4147 state->fb->format->format == DRM_FORMAT_NV12 &&
4148 (scaling_info->src_rect.x != 0 ||
4149 scaling_info->src_rect.y != 0))
4152 scaling_info->src_rect.width = state->src_w >> 16;
4153 if (scaling_info->src_rect.width == 0)
4156 scaling_info->src_rect.height = state->src_h >> 16;
4157 if (scaling_info->src_rect.height == 0)
4160 scaling_info->dst_rect.x = state->crtc_x;
4161 scaling_info->dst_rect.y = state->crtc_y;
4163 if (state->crtc_w == 0)
4166 scaling_info->dst_rect.width = state->crtc_w;
4168 if (state->crtc_h == 0)
4171 scaling_info->dst_rect.height = state->crtc_h;
4173 /* DRM doesn't specify clipping on destination output. */
4174 scaling_info->clip_rect = scaling_info->dst_rect;
4176 /* Validate scaling per-format with DC plane caps */
4177 if (state->plane && state->plane->dev && state->fb) {
4178 get_min_max_dc_plane_scaling(state->plane->dev, state->fb,
4179 &min_downscale, &max_upscale);
4181 min_downscale = 250;
4182 max_upscale = 16000;
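/* i.e. fall back to allowing up to 4x downscale and 16x upscale when there
 * is no plane/fb to query per-format caps for.
 */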
4185 scale_w = scaling_info->dst_rect.width * 1000 /
4186 scaling_info->src_rect.width;
4188 if (scale_w < min_downscale || scale_w > max_upscale)
4191 scale_h = scaling_info->dst_rect.height * 1000 /
4192 scaling_info->src_rect.height;
4194 if (scale_h < min_downscale || scale_h > max_upscale)
4198 * The "scaling_quality" can be ignored for now; quality = 0 makes DC
4199 * assume reasonable defaults based on the format.
4206 fill_gfx8_tiling_info_from_flags(union dc_tiling_info *tiling_info,
4207 uint64_t tiling_flags)
4209 /* Fill GFX8 params */
4210 if (AMDGPU_TILING_GET(tiling_flags, ARRAY_MODE) == DC_ARRAY_2D_TILED_THIN1) {
4211 unsigned int bankw, bankh, mtaspect, tile_split, num_banks;
4213 bankw = AMDGPU_TILING_GET(tiling_flags, BANK_WIDTH);
4214 bankh = AMDGPU_TILING_GET(tiling_flags, BANK_HEIGHT);
4215 mtaspect = AMDGPU_TILING_GET(tiling_flags, MACRO_TILE_ASPECT);
4216 tile_split = AMDGPU_TILING_GET(tiling_flags, TILE_SPLIT);
4217 num_banks = AMDGPU_TILING_GET(tiling_flags, NUM_BANKS);
4219 /* XXX fix me for VI */
4220 tiling_info->gfx8.num_banks = num_banks;
4221 tiling_info->gfx8.array_mode =
4222 DC_ARRAY_2D_TILED_THIN1;
4223 tiling_info->gfx8.tile_split = tile_split;
4224 tiling_info->gfx8.bank_width = bankw;
4225 tiling_info->gfx8.bank_height = bankh;
4226 tiling_info->gfx8.tile_aspect = mtaspect;
4227 tiling_info->gfx8.tile_mode =
4228 DC_ADDR_SURF_MICRO_TILING_DISPLAY;
4229 } else if (AMDGPU_TILING_GET(tiling_flags, ARRAY_MODE)
4230 == DC_ARRAY_1D_TILED_THIN1) {
4231 tiling_info->gfx8.array_mode = DC_ARRAY_1D_TILED_THIN1;
4234 tiling_info->gfx8.pipe_config =
4235 AMDGPU_TILING_GET(tiling_flags, PIPE_CONFIG);
4239 fill_gfx9_tiling_info_from_device(const struct amdgpu_device *adev,
4240 union dc_tiling_info *tiling_info)
4242 tiling_info->gfx9.num_pipes =
4243 adev->gfx.config.gb_addr_config_fields.num_pipes;
4244 tiling_info->gfx9.num_banks =
4245 adev->gfx.config.gb_addr_config_fields.num_banks;
4246 tiling_info->gfx9.pipe_interleave =
4247 adev->gfx.config.gb_addr_config_fields.pipe_interleave_size;
4248 tiling_info->gfx9.num_shader_engines =
4249 adev->gfx.config.gb_addr_config_fields.num_se;
4250 tiling_info->gfx9.max_compressed_frags =
4251 adev->gfx.config.gb_addr_config_fields.max_compress_frags;
4252 tiling_info->gfx9.num_rb_per_se =
4253 adev->gfx.config.gb_addr_config_fields.num_rb_per_se;
4254 tiling_info->gfx9.shaderEnable = 1;
4255 if (adev->asic_type == CHIP_SIENNA_CICHLID ||
4256 adev->asic_type == CHIP_NAVY_FLOUNDER ||
4257 adev->asic_type == CHIP_DIMGREY_CAVEFISH ||
4258 adev->asic_type == CHIP_BEIGE_GOBY ||
4259 adev->asic_type == CHIP_VANGOGH)
4260 tiling_info->gfx9.num_pkrs = adev->gfx.config.gb_addr_config_fields.num_pkrs;
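/*
 * Ask DC whether the requested DCC parameters are acceptable for this
 * surface: build a dc_dcc_surface_param from the format, surface size,
 * swizzle mode and scan direction, then query
 * dc->cap_funcs.get_dcc_compression_cap().
 */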
4264 validate_dcc(struct amdgpu_device *adev,
4265 const enum surface_pixel_format format,
4266 const enum dc_rotation_angle rotation,
4267 const union dc_tiling_info *tiling_info,
4268 const struct dc_plane_dcc_param *dcc,
4269 const struct dc_plane_address *address,
4270 const struct plane_size *plane_size)
4272 struct dc *dc = adev->dm.dc;
4273 struct dc_dcc_surface_param input;
4274 struct dc_surface_dcc_cap output;
4276 memset(&input, 0, sizeof(input));
4277 memset(&output, 0, sizeof(output));
4282 if (format >= SURFACE_PIXEL_FORMAT_VIDEO_BEGIN ||
4283 !dc->cap_funcs.get_dcc_compression_cap)
4286 input.format = format;
4287 input.surface_size.width = plane_size->surface_size.width;
4288 input.surface_size.height = plane_size->surface_size.height;
4289 input.swizzle_mode = tiling_info->gfx9.swizzle;
4291 if (rotation == ROTATION_ANGLE_0 || rotation == ROTATION_ANGLE_180)
4292 input.scan = SCAN_DIRECTION_HORIZONTAL;
4293 else if (rotation == ROTATION_ANGLE_90 || rotation == ROTATION_ANGLE_270)
4294 input.scan = SCAN_DIRECTION_VERTICAL;
4296 if (!dc->cap_funcs.get_dcc_compression_cap(dc, &input, &output))
4299 if (!output.capable)
4302 if (dcc->independent_64b_blks == 0 &&
4303 output.grph.rgb.independent_64b_blks != 0)
4310 modifier_has_dcc(uint64_t modifier)
4312 return IS_AMD_FMT_MOD(modifier) && AMD_FMT_MOD_GET(DCC, modifier);
4316 modifier_gfx9_swizzle_mode(uint64_t modifier)
4318 if (modifier == DRM_FORMAT_MOD_LINEAR)
4321 return AMD_FMT_MOD_GET(TILE, modifier);
4324 static const struct drm_format_info *
4325 amd_get_format_info(const struct drm_mode_fb_cmd2 *cmd)
4327 return amdgpu_lookup_format_info(cmd->pixel_format, cmd->modifier[0]);
4331 fill_gfx9_tiling_info_from_modifier(const struct amdgpu_device *adev,
4332 union dc_tiling_info *tiling_info,
4335 unsigned int mod_bank_xor_bits = AMD_FMT_MOD_GET(BANK_XOR_BITS, modifier);
4336 unsigned int mod_pipe_xor_bits = AMD_FMT_MOD_GET(PIPE_XOR_BITS, modifier);
4337 unsigned int pkrs_log2 = AMD_FMT_MOD_GET(PACKERS, modifier);
4338 unsigned int pipes_log2 = min(4u, mod_pipe_xor_bits);
4340 fill_gfx9_tiling_info_from_device(adev, tiling_info);
4342 if (!IS_AMD_FMT_MOD(modifier))
4345 tiling_info->gfx9.num_pipes = 1u << pipes_log2;
4346 tiling_info->gfx9.num_shader_engines = 1u << (mod_pipe_xor_bits - pipes_log2);
4348 if (adev->family >= AMDGPU_FAMILY_NV) {
4349 tiling_info->gfx9.num_pkrs = 1u << pkrs_log2;
4351 tiling_info->gfx9.num_banks = 1u << mod_bank_xor_bits;
4353 /* for DCC we know it isn't rb aligned, so rb_per_se doesn't matter. */
4357 enum dm_micro_swizzle {
4358 MICRO_SWIZZLE_Z = 0,
4359 MICRO_SWIZZLE_S = 1,
4360 MICRO_SWIZZLE_D = 2,
4364 static bool dm_plane_format_mod_supported(struct drm_plane *plane,
4368 struct amdgpu_device *adev = drm_to_adev(plane->dev);
4369 const struct drm_format_info *info = drm_format_info(format);
4372 enum dm_micro_swizzle microtile = modifier_gfx9_swizzle_mode(modifier) & 3;
4378 * We always have to allow these modifiers:
4379 * 1. Core DRM checks for LINEAR support if userspace does not provide modifiers.
4380 * 2. Not passing any modifiers is the same as explicitly passing INVALID.
4382 if (modifier == DRM_FORMAT_MOD_LINEAR ||
4383 modifier == DRM_FORMAT_MOD_INVALID) {
4387 /* Check that the modifier is on the list of the plane's supported modifiers. */
4388 for (i = 0; i < plane->modifier_count; i++) {
4389 if (modifier == plane->modifiers[i])
4392 if (i == plane->modifier_count)
4396 * For D swizzle the canonical modifier depends on the bpp, so check it here.
4399 if (AMD_FMT_MOD_GET(TILE_VERSION, modifier) == AMD_FMT_MOD_TILE_VER_GFX9 &&
4400 adev->family >= AMDGPU_FAMILY_NV) {
4401 if (microtile == MICRO_SWIZZLE_D && info->cpp[0] == 4)
4405 if (adev->family >= AMDGPU_FAMILY_RV && microtile == MICRO_SWIZZLE_D &&
4409 if (modifier_has_dcc(modifier)) {
4410 /* Per radeonsi comments 16/64 bpp are more complicated. */
4411 if (info->cpp[0] != 4)
4413 /* We support multi-planar formats, but not when combined with
4414 * additional DCC metadata planes. */
4415 if (info->num_planes > 1)
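/*
 * Append one format modifier to a heap-allocated array, doubling the
 * capacity with kmalloc + memcpy whenever the array is full.
 */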
4423 add_modifier(uint64_t **mods, uint64_t *size, uint64_t *cap, uint64_t mod)
4428 if (*cap - *size < 1) {
4429 uint64_t new_cap = *cap * 2;
4430 uint64_t *new_mods = kmalloc(new_cap * sizeof(uint64_t), GFP_KERNEL);
4438 memcpy(new_mods, *mods, sizeof(uint64_t) * *size);
4444 (*mods)[*size] = mod;
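/*
 * Build the GFX9 (Vega/Raven family) modifier list: DCC-capable 64K _S_X
 * swizzles first (with and without constant encoding and retiling, where
 * the ASIC supports it), then the plain _D_X/_S_X XOR variants and the
 * non-XOR 64K fallbacks.
 */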
4449 add_gfx9_modifiers(const struct amdgpu_device *adev,
4450 uint64_t **mods, uint64_t *size, uint64_t *capacity)
4452 int pipes = ilog2(adev->gfx.config.gb_addr_config_fields.num_pipes);
4453 int pipe_xor_bits = min(8, pipes +
4454 ilog2(adev->gfx.config.gb_addr_config_fields.num_se));
4455 int bank_xor_bits = min(8 - pipe_xor_bits,
4456 ilog2(adev->gfx.config.gb_addr_config_fields.num_banks));
4457 int rb = ilog2(adev->gfx.config.gb_addr_config_fields.num_se) +
4458 ilog2(adev->gfx.config.gb_addr_config_fields.num_rb_per_se);
4461 if (adev->family == AMDGPU_FAMILY_RV) {
4462 /* Raven2 and later */
4463 bool has_constant_encode = adev->asic_type > CHIP_RAVEN || adev->external_rev_id >= 0x81;
4466 * No _D DCC swizzles yet because we only allow 32bpp, which
4467 * doesn't support _D on DCN
4470 if (has_constant_encode) {
4471 add_modifier(mods, size, capacity, AMD_FMT_MOD |
4472 AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S_X) |
4473 AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9) |
4474 AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
4475 AMD_FMT_MOD_SET(BANK_XOR_BITS, bank_xor_bits) |
4476 AMD_FMT_MOD_SET(DCC, 1) |
4477 AMD_FMT_MOD_SET(DCC_INDEPENDENT_64B, 1) |
4478 AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_64B) |
4479 AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 1));
4482 add_modifier(mods, size, capacity, AMD_FMT_MOD |
4483 AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S_X) |
4484 AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9) |
4485 AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
4486 AMD_FMT_MOD_SET(BANK_XOR_BITS, bank_xor_bits) |
4487 AMD_FMT_MOD_SET(DCC, 1) |
4488 AMD_FMT_MOD_SET(DCC_INDEPENDENT_64B, 1) |
4489 AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_64B) |
4490 AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 0));
4492 if (has_constant_encode) {
4493 add_modifier(mods, size, capacity, AMD_FMT_MOD |
4494 AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S_X) |
4495 AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9) |
4496 AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
4497 AMD_FMT_MOD_SET(BANK_XOR_BITS, bank_xor_bits) |
4498 AMD_FMT_MOD_SET(DCC, 1) |
4499 AMD_FMT_MOD_SET(DCC_RETILE, 1) |
4500 AMD_FMT_MOD_SET(DCC_INDEPENDENT_64B, 1) |
4501 AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_64B) |
4503 AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 1) |
4504 AMD_FMT_MOD_SET(RB, rb) |
4505 AMD_FMT_MOD_SET(PIPE, pipes));
4508 add_modifier(mods, size, capacity, AMD_FMT_MOD |
4509 AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S_X) |
4510 AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9) |
4511 AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
4512 AMD_FMT_MOD_SET(BANK_XOR_BITS, bank_xor_bits) |
4513 AMD_FMT_MOD_SET(DCC, 1) |
4514 AMD_FMT_MOD_SET(DCC_RETILE, 1) |
4515 AMD_FMT_MOD_SET(DCC_INDEPENDENT_64B, 1) |
4516 AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_64B) |
4517 AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 0) |
4518 AMD_FMT_MOD_SET(RB, rb) |
4519 AMD_FMT_MOD_SET(PIPE, pipes));
4523 * Only supported for 64bpp on Raven, will be filtered on format in
4524 * dm_plane_format_mod_supported.
4526 add_modifier(mods, size, capacity, AMD_FMT_MOD |
4527 AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_D_X) |
4528 AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9) |
4529 AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
4530 AMD_FMT_MOD_SET(BANK_XOR_BITS, bank_xor_bits));
4532 if (adev->family == AMDGPU_FAMILY_RV) {
4533 add_modifier(mods, size, capacity, AMD_FMT_MOD |
4534 AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S_X) |
4535 AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9) |
4536 AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
4537 AMD_FMT_MOD_SET(BANK_XOR_BITS, bank_xor_bits));
4541 * Only supported for 64bpp on Raven, will be filtered on format in
4542 * dm_plane_format_mod_supported.
4544 add_modifier(mods, size, capacity, AMD_FMT_MOD |
4545 AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_D) |
4546 AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9));
4548 if (adev->family == AMDGPU_FAMILY_RV) {
4549 add_modifier(mods, size, capacity, AMD_FMT_MOD |
4550 AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S) |
4551 AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9));
4556 add_gfx10_1_modifiers(const struct amdgpu_device *adev,
4557 uint64_t **mods, uint64_t *size, uint64_t *capacity)
4559 int pipe_xor_bits = ilog2(adev->gfx.config.gb_addr_config_fields.num_pipes);
4561 add_modifier(mods, size, capacity, AMD_FMT_MOD |
4562 AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_R_X) |
4563 AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10) |
4564 AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
4565 AMD_FMT_MOD_SET(DCC, 1) |
4566 AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 1) |
4567 AMD_FMT_MOD_SET(DCC_INDEPENDENT_64B, 1) |
4568 AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_64B));
4570 add_modifier(mods, size, capacity, AMD_FMT_MOD |
4571 AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_R_X) |
4572 AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10) |
4573 AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
4574 AMD_FMT_MOD_SET(DCC, 1) |
4575 AMD_FMT_MOD_SET(DCC_RETILE, 1) |
4576 AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 1) |
4577 AMD_FMT_MOD_SET(DCC_INDEPENDENT_64B, 1) |
4578 AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_64B));
4580 add_modifier(mods, size, capacity, AMD_FMT_MOD |
4581 AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_R_X) |
4582 AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10) |
4583 AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits));
4585 add_modifier(mods, size, capacity, AMD_FMT_MOD |
4586 AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S_X) |
4587 AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10) |
4588 AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits));
4591 /* Only supported for 64bpp, will be filtered in dm_plane_format_mod_supported */
4592 add_modifier(mods, size, capacity, AMD_FMT_MOD |
4593 AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_D) |
4594 AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9));
4596 add_modifier(mods, size, capacity, AMD_FMT_MOD |
4597 AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S) |
4598 AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9));
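/*
 * Build the GFX10.3 (Sienna Cichlid and newer) modifier list. These parts
 * use the GFX10_RBPLUS tile version and encode the packer count, and their
 * DCC modifiers require both independent 64B and 128B blocks.
 */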
4602 add_gfx10_3_modifiers(const struct amdgpu_device *adev,
4603 uint64_t **mods, uint64_t *size, uint64_t *capacity)
4605 int pipe_xor_bits = ilog2(adev->gfx.config.gb_addr_config_fields.num_pipes);
4606 int pkrs = ilog2(adev->gfx.config.gb_addr_config_fields.num_pkrs);
4608 add_modifier(mods, size, capacity, AMD_FMT_MOD |
4609 AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_R_X) |
4610 AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10_RBPLUS) |
4611 AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
4612 AMD_FMT_MOD_SET(PACKERS, pkrs) |
4613 AMD_FMT_MOD_SET(DCC, 1) |
4614 AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 1) |
4615 AMD_FMT_MOD_SET(DCC_INDEPENDENT_64B, 1) |
4616 AMD_FMT_MOD_SET(DCC_INDEPENDENT_128B, 1) |
4617 AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_64B));
4619 add_modifier(mods, size, capacity, AMD_FMT_MOD |
4620 AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_R_X) |
4621 AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10_RBPLUS) |
4622 AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
4623 AMD_FMT_MOD_SET(PACKERS, pkrs) |
4624 AMD_FMT_MOD_SET(DCC, 1) |
4625 AMD_FMT_MOD_SET(DCC_RETILE, 1) |
4626 AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 1) |
4627 AMD_FMT_MOD_SET(DCC_INDEPENDENT_64B, 1) |
4628 AMD_FMT_MOD_SET(DCC_INDEPENDENT_128B, 1) |
4629 AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_64B));
4631 add_modifier(mods, size, capacity, AMD_FMT_MOD |
4632 AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_R_X) |
4633 AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10_RBPLUS) |
4634 AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
4635 AMD_FMT_MOD_SET(PACKERS, pkrs));
4637 add_modifier(mods, size, capacity, AMD_FMT_MOD |
4638 AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S_X) |
4639 AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10_RBPLUS) |
4640 AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
4641 AMD_FMT_MOD_SET(PACKERS, pkrs));
4643 /* Only supported for 64bpp, will be filtered in dm_plane_format_mod_supported */
4644 add_modifier(mods, size, capacity, AMD_FMT_MOD |
4645 AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_D) |
4646 AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9));
4648 add_modifier(mods, size, capacity, AMD_FMT_MOD |
4649 AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S) |
4650 AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9));
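/*
 * Allocate and fill the modifier array advertised for a plane. Cursor
 * planes only get LINEAR; other planes get the per-family list built by
 * the add_gfx*_modifiers() helpers plus LINEAR, terminated by
 * DRM_FORMAT_MOD_INVALID.
 */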
4654 get_plane_modifiers(const struct amdgpu_device *adev, unsigned int plane_type, uint64_t **mods)
4656 uint64_t size = 0, capacity = 128;
4659 /* We have not hooked up any pre-GFX9 modifiers. */
4660 if (adev->family < AMDGPU_FAMILY_AI)
4663 *mods = kmalloc(capacity * sizeof(uint64_t), GFP_KERNEL);
4665 if (plane_type == DRM_PLANE_TYPE_CURSOR) {
4666 add_modifier(mods, &size, &capacity, DRM_FORMAT_MOD_LINEAR);
4667 add_modifier(mods, &size, &capacity, DRM_FORMAT_MOD_INVALID);
4668 return *mods ? 0 : -ENOMEM;
4671 switch (adev->family) {
4672 case AMDGPU_FAMILY_AI:
4673 case AMDGPU_FAMILY_RV:
4674 add_gfx9_modifiers(adev, mods, &size, &capacity);
4676 case AMDGPU_FAMILY_NV:
4677 case AMDGPU_FAMILY_VGH:
4678 if (adev->asic_type >= CHIP_SIENNA_CICHLID)
4679 add_gfx10_3_modifiers(adev, mods, &size, &capacity);
4681 add_gfx10_1_modifiers(adev, mods, &size, &capacity);
4685 add_modifier(mods, &size, &capacity, DRM_FORMAT_MOD_LINEAR);
4687 /* INVALID marks the end of the list. */
4688 add_modifier(mods, &size, &capacity, DRM_FORMAT_MOD_INVALID);
4697 fill_gfx9_plane_attributes_from_modifiers(struct amdgpu_device *adev,
4698 const struct amdgpu_framebuffer *afb,
4699 const enum surface_pixel_format format,
4700 const enum dc_rotation_angle rotation,
4701 const struct plane_size *plane_size,
4702 union dc_tiling_info *tiling_info,
4703 struct dc_plane_dcc_param *dcc,
4704 struct dc_plane_address *address,
4705 const bool force_disable_dcc)
4707 const uint64_t modifier = afb->base.modifier;
4710 fill_gfx9_tiling_info_from_modifier(adev, tiling_info, modifier);
4711 tiling_info->gfx9.swizzle = modifier_gfx9_swizzle_mode(modifier);
4713 if (modifier_has_dcc(modifier) && !force_disable_dcc) {
4714 uint64_t dcc_address = afb->address + afb->base.offsets[1];
4717 dcc->meta_pitch = afb->base.pitches[1];
4718 dcc->independent_64b_blks = AMD_FMT_MOD_GET(DCC_INDEPENDENT_64B, modifier);
4720 address->grph.meta_addr.low_part = lower_32_bits(dcc_address);
4721 address->grph.meta_addr.high_part = upper_32_bits(dcc_address);
4724 ret = validate_dcc(adev, format, rotation, tiling_info, dcc, address, plane_size);
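/*
 * Fill the DC buffer descriptors (tiling, plane size, DCC and addresses)
 * for a framebuffer: the graphics or luma/chroma addresses come from the
 * amdgpu_framebuffer, and tiling comes from modifiers on GFX9+ or from the
 * legacy tiling flags on older families.
 */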
4732 fill_plane_buffer_attributes(struct amdgpu_device *adev,
4733 const struct amdgpu_framebuffer *afb,
4734 const enum surface_pixel_format format,
4735 const enum dc_rotation_angle rotation,
4736 const uint64_t tiling_flags,
4737 union dc_tiling_info *tiling_info,
4738 struct plane_size *plane_size,
4739 struct dc_plane_dcc_param *dcc,
4740 struct dc_plane_address *address,
4742 bool force_disable_dcc)
4744 const struct drm_framebuffer *fb = &afb->base;
4747 memset(tiling_info, 0, sizeof(*tiling_info));
4748 memset(plane_size, 0, sizeof(*plane_size));
4749 memset(dcc, 0, sizeof(*dcc));
4750 memset(address, 0, sizeof(*address));
4752 address->tmz_surface = tmz_surface;
4754 if (format < SURFACE_PIXEL_FORMAT_VIDEO_BEGIN) {
4755 uint64_t addr = afb->address + fb->offsets[0];
4757 plane_size->surface_size.x = 0;
4758 plane_size->surface_size.y = 0;
4759 plane_size->surface_size.width = fb->width;
4760 plane_size->surface_size.height = fb->height;
4761 plane_size->surface_pitch =
4762 fb->pitches[0] / fb->format->cpp[0];
4764 address->type = PLN_ADDR_TYPE_GRAPHICS;
4765 address->grph.addr.low_part = lower_32_bits(addr);
4766 address->grph.addr.high_part = upper_32_bits(addr);
4767 } else if (format < SURFACE_PIXEL_FORMAT_INVALID) {
4768 uint64_t luma_addr = afb->address + fb->offsets[0];
4769 uint64_t chroma_addr = afb->address + fb->offsets[1];
4771 plane_size->surface_size.x = 0;
4772 plane_size->surface_size.y = 0;
4773 plane_size->surface_size.width = fb->width;
4774 plane_size->surface_size.height = fb->height;
4775 plane_size->surface_pitch =
4776 fb->pitches[0] / fb->format->cpp[0];
4778 plane_size->chroma_size.x = 0;
4779 plane_size->chroma_size.y = 0;
4780 /* TODO: set these based on surface format */
4781 plane_size->chroma_size.width = fb->width / 2;
4782 plane_size->chroma_size.height = fb->height / 2;
4784 plane_size->chroma_pitch =
4785 fb->pitches[1] / fb->format->cpp[1];
4787 address->type = PLN_ADDR_TYPE_VIDEO_PROGRESSIVE;
4788 address->video_progressive.luma_addr.low_part =
4789 lower_32_bits(luma_addr);
4790 address->video_progressive.luma_addr.high_part =
4791 upper_32_bits(luma_addr);
4792 address->video_progressive.chroma_addr.low_part =
4793 lower_32_bits(chroma_addr);
4794 address->video_progressive.chroma_addr.high_part =
4795 upper_32_bits(chroma_addr);
4798 if (adev->family >= AMDGPU_FAMILY_AI) {
4799 ret = fill_gfx9_plane_attributes_from_modifiers(adev, afb, format,
4800 rotation, plane_size,
4807 fill_gfx8_tiling_info_from_flags(tiling_info, tiling_flags);
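/*
 * Derive blending settings from the DRM plane state. Only overlay planes
 * participate; per-pixel alpha is enabled for premultiplied
 * ARGB/RGBA/ABGR8888 formats, and global alpha for any plane alpha below
 * 0xffff.
 */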
4814 fill_blending_from_plane_state(const struct drm_plane_state *plane_state,
4815 bool *per_pixel_alpha, bool *global_alpha,
4816 int *global_alpha_value)
4818 *per_pixel_alpha = false;
4819 *global_alpha = false;
4820 *global_alpha_value = 0xff;
4822 if (plane_state->plane->type != DRM_PLANE_TYPE_OVERLAY)
4825 if (plane_state->pixel_blend_mode == DRM_MODE_BLEND_PREMULTI) {
4826 static const uint32_t alpha_formats[] = {
4827 DRM_FORMAT_ARGB8888,
4828 DRM_FORMAT_RGBA8888,
4829 DRM_FORMAT_ABGR8888,
4831 uint32_t format = plane_state->fb->format->format;
4834 for (i = 0; i < ARRAY_SIZE(alpha_formats); ++i) {
4835 if (format == alpha_formats[i]) {
4836 *per_pixel_alpha = true;
4842 if (plane_state->alpha < 0xffff) {
4843 *global_alpha = true;
4844 *global_alpha_value = plane_state->alpha >> 8;
4849 fill_plane_color_attributes(const struct drm_plane_state *plane_state,
4850 const enum surface_pixel_format format,
4851 enum dc_color_space *color_space)
4855 *color_space = COLOR_SPACE_SRGB;
4857 /* DRM color properties only affect non-RGB formats. */
4858 if (format < SURFACE_PIXEL_FORMAT_VIDEO_BEGIN)
4861 full_range = (plane_state->color_range == DRM_COLOR_YCBCR_FULL_RANGE);
4863 switch (plane_state->color_encoding) {
4864 case DRM_COLOR_YCBCR_BT601:
4866 *color_space = COLOR_SPACE_YCBCR601;
4868 *color_space = COLOR_SPACE_YCBCR601_LIMITED;
4871 case DRM_COLOR_YCBCR_BT709:
4873 *color_space = COLOR_SPACE_YCBCR709;
4875 *color_space = COLOR_SPACE_YCBCR709_LIMITED;
4878 case DRM_COLOR_YCBCR_BT2020:
4880 *color_space = COLOR_SPACE_2020_YCBCR;
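/*
 * Populate a dc_plane_info (format, rotation, color space, tiling, DCC,
 * blending) and the plane address from a DRM plane state, reusing the
 * buffer, color and blending helpers above.
 */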
4893 fill_dc_plane_info_and_addr(struct amdgpu_device *adev,
4894 const struct drm_plane_state *plane_state,
4895 const uint64_t tiling_flags,
4896 struct dc_plane_info *plane_info,
4897 struct dc_plane_address *address,
4899 bool force_disable_dcc)
4901 const struct drm_framebuffer *fb = plane_state->fb;
4902 const struct amdgpu_framebuffer *afb =
4903 to_amdgpu_framebuffer(plane_state->fb);
4906 memset(plane_info, 0, sizeof(*plane_info));
4908 switch (fb->format->format) {
4910 plane_info->format =
4911 SURFACE_PIXEL_FORMAT_GRPH_PALETA_256_COLORS;
4913 case DRM_FORMAT_RGB565:
4914 plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_RGB565;
4916 case DRM_FORMAT_XRGB8888:
4917 case DRM_FORMAT_ARGB8888:
4918 plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ARGB8888;
4920 case DRM_FORMAT_XRGB2101010:
4921 case DRM_FORMAT_ARGB2101010:
4922 plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ARGB2101010;
4924 case DRM_FORMAT_XBGR2101010:
4925 case DRM_FORMAT_ABGR2101010:
4926 plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ABGR2101010;
4928 case DRM_FORMAT_XBGR8888:
4929 case DRM_FORMAT_ABGR8888:
4930 plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ABGR8888;
4932 case DRM_FORMAT_NV21:
4933 plane_info->format = SURFACE_PIXEL_FORMAT_VIDEO_420_YCbCr;
4935 case DRM_FORMAT_NV12:
4936 plane_info->format = SURFACE_PIXEL_FORMAT_VIDEO_420_YCrCb;
4938 case DRM_FORMAT_P010:
4939 plane_info->format = SURFACE_PIXEL_FORMAT_VIDEO_420_10bpc_YCrCb;
4941 case DRM_FORMAT_XRGB16161616F:
4942 case DRM_FORMAT_ARGB16161616F:
4943 plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ARGB16161616F;
4945 case DRM_FORMAT_XBGR16161616F:
4946 case DRM_FORMAT_ABGR16161616F:
4947 plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ABGR16161616F;
4951 "Unsupported screen format %p4cc\n",
4952 &fb->format->format);
4956 switch (plane_state->rotation & DRM_MODE_ROTATE_MASK) {
4957 case DRM_MODE_ROTATE_0:
4958 plane_info->rotation = ROTATION_ANGLE_0;
4960 case DRM_MODE_ROTATE_90:
4961 plane_info->rotation = ROTATION_ANGLE_90;
4963 case DRM_MODE_ROTATE_180:
4964 plane_info->rotation = ROTATION_ANGLE_180;
4966 case DRM_MODE_ROTATE_270:
4967 plane_info->rotation = ROTATION_ANGLE_270;
4970 plane_info->rotation = ROTATION_ANGLE_0;
4974 plane_info->visible = true;
4975 plane_info->stereo_format = PLANE_STEREO_FORMAT_NONE;
4977 plane_info->layer_index = 0;
4979 ret = fill_plane_color_attributes(plane_state, plane_info->format,
4980 &plane_info->color_space);
4984 ret = fill_plane_buffer_attributes(adev, afb, plane_info->format,
4985 plane_info->rotation, tiling_flags,
4986 &plane_info->tiling_info,
4987 &plane_info->plane_size,
4988 &plane_info->dcc, address, tmz_surface,
4993 fill_blending_from_plane_state(
4994 plane_state, &plane_info->per_pixel_alpha,
4995 &plane_info->global_alpha, &plane_info->global_alpha_value);
5000 static int fill_dc_plane_attributes(struct amdgpu_device *adev,
5001 struct dc_plane_state *dc_plane_state,
5002 struct drm_plane_state *plane_state,
5003 struct drm_crtc_state *crtc_state)
5005 struct dm_crtc_state *dm_crtc_state = to_dm_crtc_state(crtc_state);
5006 struct amdgpu_framebuffer *afb = (struct amdgpu_framebuffer *)plane_state->fb;
5007 struct dc_scaling_info scaling_info;
5008 struct dc_plane_info plane_info;
5010 bool force_disable_dcc = false;
5012 ret = fill_dc_scaling_info(plane_state, &scaling_info);
5016 dc_plane_state->src_rect = scaling_info.src_rect;
5017 dc_plane_state->dst_rect = scaling_info.dst_rect;
5018 dc_plane_state->clip_rect = scaling_info.clip_rect;
5019 dc_plane_state->scaling_quality = scaling_info.scaling_quality;
5021 force_disable_dcc = adev->asic_type == CHIP_RAVEN && adev->in_suspend;
5022 ret = fill_dc_plane_info_and_addr(adev, plane_state,
5025 &dc_plane_state->address,
5031 dc_plane_state->format = plane_info.format;
5032 dc_plane_state->color_space = plane_info.color_space;
5033 dc_plane_state->format = plane_info.format;
5034 dc_plane_state->plane_size = plane_info.plane_size;
5035 dc_plane_state->rotation = plane_info.rotation;
5036 dc_plane_state->horizontal_mirror = plane_info.horizontal_mirror;
5037 dc_plane_state->stereo_format = plane_info.stereo_format;
5038 dc_plane_state->tiling_info = plane_info.tiling_info;
5039 dc_plane_state->visible = plane_info.visible;
5040 dc_plane_state->per_pixel_alpha = plane_info.per_pixel_alpha;
5041 dc_plane_state->global_alpha = plane_info.global_alpha;
5042 dc_plane_state->global_alpha_value = plane_info.global_alpha_value;
5043 dc_plane_state->dcc = plane_info.dcc;
5044 dc_plane_state->layer_index = plane_info.layer_index; // Always returns 0
5045 dc_plane_state->flip_int_enabled = true;
5048 * Always set input transfer function, since plane state is refreshed every time.
5051 ret = amdgpu_dm_update_plane_color_mgmt(dm_crtc_state, dc_plane_state);
5058 static void update_stream_scaling_settings(const struct drm_display_mode *mode,
5059 const struct dm_connector_state *dm_state,
5060 struct dc_stream_state *stream)
5062 enum amdgpu_rmx_type rmx_type;
5064 struct rect src = { 0 }; /* viewport in composition space */
5065 struct rect dst = { 0 }; /* stream addressable area */
5067 /* no mode. nothing to be done */
5071 /* Full screen scaling by default */
5072 src.width = mode->hdisplay;
5073 src.height = mode->vdisplay;
5074 dst.width = stream->timing.h_addressable;
5075 dst.height = stream->timing.v_addressable;
5078 rmx_type = dm_state->scaling;
5079 if (rmx_type == RMX_ASPECT || rmx_type == RMX_OFF) {
5080 if (src.width * dst.height <
5081 src.height * dst.width) {
5082 /* height needs less upscaling/more downscaling */
5083 dst.width = src.width *
5084 dst.height / src.height;
5086 /* width needs less upscaling/more downscaling */
5087 dst.height = src.height *
5088 dst.width / src.width;
5090 } else if (rmx_type == RMX_CENTER) {
5094 dst.x = (stream->timing.h_addressable - dst.width) / 2;
5095 dst.y = (stream->timing.v_addressable - dst.height) / 2;
5097 if (dm_state->underscan_enable) {
5098 dst.x += dm_state->underscan_hborder / 2;
5099 dst.y += dm_state->underscan_vborder / 2;
5100 dst.width -= dm_state->underscan_hborder;
5101 dst.height -= dm_state->underscan_vborder;
5108 DRM_DEBUG_KMS("Destination Rectangle x:%d y:%d width:%d height:%d\n",
5109 dst.x, dst.y, dst.width, dst.height);
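/*
 * Pick the stream color depth from the connector's display info, capping
 * it by the sink's YCbCr 4:2:0 deep-color support (for is_y420 modes) and
 * by the bpc requested via max_bpc, then rounding down to an even value.
 */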
5113 static enum dc_color_depth
5114 convert_color_depth_from_display_info(const struct drm_connector *connector,
5115 bool is_y420, int requested_bpc)
5122 /* Cap display bpc based on HDMI 2.0 HF-VSDB */
5123 if (connector->display_info.hdmi.y420_dc_modes & DRM_EDID_YCBCR420_DC_48)
5125 else if (connector->display_info.hdmi.y420_dc_modes & DRM_EDID_YCBCR420_DC_36)
5127 else if (connector->display_info.hdmi.y420_dc_modes & DRM_EDID_YCBCR420_DC_30)
5130 bpc = (uint8_t)connector->display_info.bpc;
5131 /* Assume 8 bpc by default if no bpc is specified. */
5132 bpc = bpc ? bpc : 8;
5135 if (requested_bpc > 0) {
5137 * Cap display bpc based on the user requested value.
5139 * The value for state->max_bpc may not be correctly updated
5140 * depending on when the connector gets added to the state
5141 * or if this was called outside of atomic check, so it
5142 * can't be used directly.
5144 bpc = min_t(u8, bpc, requested_bpc);
5146 /* Round down to the nearest even number. */
5147 bpc = bpc - (bpc & 1);
5153 * Temporary workaround: DRM doesn't parse color depth for
5154 * EDID revisions before 1.4.
5155 * TODO: Fix EDID parsing
5157 return COLOR_DEPTH_888;
5159 return COLOR_DEPTH_666;
5161 return COLOR_DEPTH_888;
5163 return COLOR_DEPTH_101010;
5165 return COLOR_DEPTH_121212;
5167 return COLOR_DEPTH_141414;
5169 return COLOR_DEPTH_161616;
5171 return COLOR_DEPTH_UNDEFINED;
5175 static enum dc_aspect_ratio
5176 get_aspect_ratio(const struct drm_display_mode *mode_in)
5178 /* 1-1 mapping, since both enums follow the HDMI spec. */
5179 return (enum dc_aspect_ratio) mode_in->picture_aspect_ratio;
5182 static enum dc_color_space
5183 get_output_color_space(const struct dc_crtc_timing *dc_crtc_timing)
5185 enum dc_color_space color_space = COLOR_SPACE_SRGB;
5187 switch (dc_crtc_timing->pixel_encoding) {
5188 case PIXEL_ENCODING_YCBCR422:
5189 case PIXEL_ENCODING_YCBCR444:
5190 case PIXEL_ENCODING_YCBCR420:
5193 * 27030 kHz is the separation point between HDTV and SDTV;
5194 * according to the HDMI spec, we use YCbCr709 and YCbCr601 respectively.
5197 if (dc_crtc_timing->pix_clk_100hz > 270300) {
5198 if (dc_crtc_timing->flags.Y_ONLY)
5200 COLOR_SPACE_YCBCR709_LIMITED;
5202 color_space = COLOR_SPACE_YCBCR709;
5204 if (dc_crtc_timing->flags.Y_ONLY)
5206 COLOR_SPACE_YCBCR601_LIMITED;
5208 color_space = COLOR_SPACE_YCBCR601;
5213 case PIXEL_ENCODING_RGB:
5214 color_space = COLOR_SPACE_SRGB;
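/*
 * Walk the colour depth down until the pixel clock, scaled per the HDMI
 * deep-colour ratios below, fits within the sink's max TMDS clock.
 */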
5225 static bool adjust_colour_depth_from_display_info(
5226 struct dc_crtc_timing *timing_out,
5227 const struct drm_display_info *info)
5229 enum dc_color_depth depth = timing_out->display_color_depth;
5232 normalized_clk = timing_out->pix_clk_100hz / 10;
5233 /* YCbCr 4:2:0 requires additional adjustment of 1/2 */
5234 if (timing_out->pixel_encoding == PIXEL_ENCODING_YCBCR420)
5235 normalized_clk /= 2;
5236 /* Adjust the pixel clock per the HDMI spec based on the colour depth */
5238 case COLOR_DEPTH_888:
5240 case COLOR_DEPTH_101010:
5241 normalized_clk = (normalized_clk * 30) / 24;
5243 case COLOR_DEPTH_121212:
5244 normalized_clk = (normalized_clk * 36) / 24;
5246 case COLOR_DEPTH_161616:
5247 normalized_clk = (normalized_clk * 48) / 24;
5250 /* The above depths are the only ones valid for HDMI. */
5253 if (normalized_clk <= info->max_tmds_clock) {
5254 timing_out->display_color_depth = depth;
5257 } while (--depth > COLOR_DEPTH_666);
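/*
 * Translate a drm_display_mode (plus connector info) into DC stream
 * timing: pixel encoding, colour depth, VIC/HDMI VIC, sync polarities and
 * the addressable/total/porch values, optionally inheriting the VIC and
 * polarities from an old stream when only scaling changed.
 */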
5261 static void fill_stream_properties_from_drm_display_mode(
5262 struct dc_stream_state *stream,
5263 const struct drm_display_mode *mode_in,
5264 const struct drm_connector *connector,
5265 const struct drm_connector_state *connector_state,
5266 const struct dc_stream_state *old_stream,
5269 struct dc_crtc_timing *timing_out = &stream->timing;
5270 const struct drm_display_info *info = &connector->display_info;
5271 struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
5272 struct hdmi_vendor_infoframe hv_frame;
5273 struct hdmi_avi_infoframe avi_frame;
5275 memset(&hv_frame, 0, sizeof(hv_frame));
5276 memset(&avi_frame, 0, sizeof(avi_frame));
5278 timing_out->h_border_left = 0;
5279 timing_out->h_border_right = 0;
5280 timing_out->v_border_top = 0;
5281 timing_out->v_border_bottom = 0;
5282 /* TODO: un-hardcode */
5283 if (drm_mode_is_420_only(info, mode_in)
5284 && stream->signal == SIGNAL_TYPE_HDMI_TYPE_A)
5285 timing_out->pixel_encoding = PIXEL_ENCODING_YCBCR420;
5286 else if (drm_mode_is_420_also(info, mode_in)
5287 && aconnector->force_yuv420_output)
5288 timing_out->pixel_encoding = PIXEL_ENCODING_YCBCR420;
5289 else if ((connector->display_info.color_formats & DRM_COLOR_FORMAT_YCRCB444)
5290 && stream->signal == SIGNAL_TYPE_HDMI_TYPE_A)
5291 timing_out->pixel_encoding = PIXEL_ENCODING_YCBCR444;
5293 timing_out->pixel_encoding = PIXEL_ENCODING_RGB;
5295 timing_out->timing_3d_format = TIMING_3D_FORMAT_NONE;
5296 timing_out->display_color_depth = convert_color_depth_from_display_info(
5298 (timing_out->pixel_encoding == PIXEL_ENCODING_YCBCR420),
5300 timing_out->scan_type = SCANNING_TYPE_NODATA;
5301 timing_out->hdmi_vic = 0;
5304 timing_out->vic = old_stream->timing.vic;
5305 timing_out->flags.HSYNC_POSITIVE_POLARITY = old_stream->timing.flags.HSYNC_POSITIVE_POLARITY;
5306 timing_out->flags.VSYNC_POSITIVE_POLARITY = old_stream->timing.flags.VSYNC_POSITIVE_POLARITY;
5308 timing_out->vic = drm_match_cea_mode(mode_in);
5309 if (mode_in->flags & DRM_MODE_FLAG_PHSYNC)
5310 timing_out->flags.HSYNC_POSITIVE_POLARITY = 1;
5311 if (mode_in->flags & DRM_MODE_FLAG_PVSYNC)
5312 timing_out->flags.VSYNC_POSITIVE_POLARITY = 1;
5315 if (stream->signal == SIGNAL_TYPE_HDMI_TYPE_A) {
5316 drm_hdmi_avi_infoframe_from_display_mode(&avi_frame, (struct drm_connector *)connector, mode_in);
5317 timing_out->vic = avi_frame.video_code;
5318 drm_hdmi_vendor_infoframe_from_display_mode(&hv_frame, (struct drm_connector *)connector, mode_in);
5319 timing_out->hdmi_vic = hv_frame.vic;
5322 if (is_freesync_video_mode(mode_in, aconnector)) {
5323 timing_out->h_addressable = mode_in->hdisplay;
5324 timing_out->h_total = mode_in->htotal;
5325 timing_out->h_sync_width = mode_in->hsync_end - mode_in->hsync_start;
5326 timing_out->h_front_porch = mode_in->hsync_start - mode_in->hdisplay;
5327 timing_out->v_total = mode_in->vtotal;
5328 timing_out->v_addressable = mode_in->vdisplay;
5329 timing_out->v_front_porch = mode_in->vsync_start - mode_in->vdisplay;
5330 timing_out->v_sync_width = mode_in->vsync_end - mode_in->vsync_start;
5331 timing_out->pix_clk_100hz = mode_in->clock * 10;
5333 timing_out->h_addressable = mode_in->crtc_hdisplay;
5334 timing_out->h_total = mode_in->crtc_htotal;
5335 timing_out->h_sync_width = mode_in->crtc_hsync_end - mode_in->crtc_hsync_start;
5336 timing_out->h_front_porch = mode_in->crtc_hsync_start - mode_in->crtc_hdisplay;
5337 timing_out->v_total = mode_in->crtc_vtotal;
5338 timing_out->v_addressable = mode_in->crtc_vdisplay;
5339 timing_out->v_front_porch = mode_in->crtc_vsync_start - mode_in->crtc_vdisplay;
5340 timing_out->v_sync_width = mode_in->crtc_vsync_end - mode_in->crtc_vsync_start;
5341 timing_out->pix_clk_100hz = mode_in->crtc_clock * 10;
5344 timing_out->aspect_ratio = get_aspect_ratio(mode_in);
5346 stream->output_color_space = get_output_color_space(timing_out);
5348 stream->out_transfer_func->type = TF_TYPE_PREDEFINED;
5349 stream->out_transfer_func->tf = TRANSFER_FUNCTION_SRGB;
5350 if (stream->signal == SIGNAL_TYPE_HDMI_TYPE_A) {
5351 if (!adjust_colour_depth_from_display_info(timing_out, info) &&
5352 drm_mode_is_420_also(info, mode_in) &&
5353 timing_out->pixel_encoding != PIXEL_ENCODING_YCBCR420) {
5354 timing_out->pixel_encoding = PIXEL_ENCODING_YCBCR420;
5355 adjust_colour_depth_from_display_info(timing_out, info);
5360 static void fill_audio_info(struct audio_info *audio_info,
5361 const struct drm_connector *drm_connector,
5362 const struct dc_sink *dc_sink)
5365 int cea_revision = 0;
5366 const struct dc_edid_caps *edid_caps = &dc_sink->edid_caps;
5368 audio_info->manufacture_id = edid_caps->manufacturer_id;
5369 audio_info->product_id = edid_caps->product_id;
5371 cea_revision = drm_connector->display_info.cea_rev;
5373 strscpy(audio_info->display_name,
5374 edid_caps->display_name,
5375 AUDIO_INFO_DISPLAY_NAME_SIZE_IN_CHARS);
5377 if (cea_revision >= 3) {
5378 audio_info->mode_count = edid_caps->audio_mode_count;
5380 for (i = 0; i < audio_info->mode_count; ++i) {
5381 audio_info->modes[i].format_code =
5382 (enum audio_format_code)
5383 (edid_caps->audio_modes[i].format_code);
5384 audio_info->modes[i].channel_count =
5385 edid_caps->audio_modes[i].channel_count;
5386 audio_info->modes[i].sample_rates.all =
5387 edid_caps->audio_modes[i].sample_rate;
5388 audio_info->modes[i].sample_size =
5389 edid_caps->audio_modes[i].sample_size;
5393 audio_info->flags.all = edid_caps->speaker_flags;
5395 /* TODO: We only check for the progressive mode, check for interlace mode too */
5396 if (drm_connector->latency_present[0]) {
5397 audio_info->video_latency = drm_connector->video_latency[0];
5398 audio_info->audio_latency = drm_connector->audio_latency[0];
5401 /* TODO: For DP, video and audio latency should be calculated from DPCD caps */
5406 copy_crtc_timing_for_drm_display_mode(const struct drm_display_mode *src_mode,
5407 struct drm_display_mode *dst_mode)
5409 dst_mode->crtc_hdisplay = src_mode->crtc_hdisplay;
5410 dst_mode->crtc_vdisplay = src_mode->crtc_vdisplay;
5411 dst_mode->crtc_clock = src_mode->crtc_clock;
5412 dst_mode->crtc_hblank_start = src_mode->crtc_hblank_start;
5413 dst_mode->crtc_hblank_end = src_mode->crtc_hblank_end;
5414 dst_mode->crtc_hsync_start = src_mode->crtc_hsync_start;
5415 dst_mode->crtc_hsync_end = src_mode->crtc_hsync_end;
5416 dst_mode->crtc_htotal = src_mode->crtc_htotal;
5417 dst_mode->crtc_hskew = src_mode->crtc_hskew;
5418 dst_mode->crtc_vblank_start = src_mode->crtc_vblank_start;
5419 dst_mode->crtc_vblank_end = src_mode->crtc_vblank_end;
5420 dst_mode->crtc_vsync_start = src_mode->crtc_vsync_start;
5421 dst_mode->crtc_vsync_end = src_mode->crtc_vsync_end;
5422 dst_mode->crtc_vtotal = src_mode->crtc_vtotal;
5426 decide_crtc_timing_for_drm_display_mode(struct drm_display_mode *drm_mode,
5427 const struct drm_display_mode *native_mode,
5430 if (scale_enabled) {
5431 copy_crtc_timing_for_drm_display_mode(native_mode, drm_mode);
5432 } else if (native_mode->clock == drm_mode->clock &&
5433 native_mode->htotal == drm_mode->htotal &&
5434 native_mode->vtotal == drm_mode->vtotal) {
5435 copy_crtc_timing_for_drm_display_mode(native_mode, drm_mode);
5437 /* no scaling and no amdgpu-inserted mode, no need to patch */
5441 static struct dc_sink *
5442 create_fake_sink(struct amdgpu_dm_connector *aconnector)
5444 struct dc_sink_init_data sink_init_data = { 0 };
5445 struct dc_sink *sink = NULL;
5446 sink_init_data.link = aconnector->dc_link;
5447 sink_init_data.sink_signal = aconnector->dc_link->connector_signal;
5449 sink = dc_sink_create(&sink_init_data);
5451 DRM_ERROR("Failed to create sink!\n");
5454 sink->sink_signal = SIGNAL_TYPE_VIRTUAL;
5459 static void set_multisync_trigger_params(
5460 struct dc_stream_state *stream)
5462 struct dc_stream_state *master = NULL;
5464 if (stream->triggered_crtc_reset.enabled) {
5465 master = stream->triggered_crtc_reset.event_source;
5466 stream->triggered_crtc_reset.event =
5467 master->timing.flags.VSYNC_POSITIVE_POLARITY ?
5468 CRTC_EVENT_VSYNC_RISING : CRTC_EVENT_VSYNC_FALLING;
5469 stream->triggered_crtc_reset.delay = TRIGGER_DELAY_NEXT_PIXEL;
5473 static void set_master_stream(struct dc_stream_state *stream_set[],
5476 int j, highest_rfr = 0, master_stream = 0;
5478 for (j = 0; j < stream_count; j++) {
5479 if (stream_set[j] && stream_set[j]->triggered_crtc_reset.enabled) {
5480 int refresh_rate = 0;
5482 refresh_rate = (stream_set[j]->timing.pix_clk_100hz*100)/
5483 (stream_set[j]->timing.h_total*stream_set[j]->timing.v_total);
5484 if (refresh_rate > highest_rfr) {
5485 highest_rfr = refresh_rate;
5490 for (j = 0; j < stream_count; j++) {
5492 stream_set[j]->triggered_crtc_reset.event_source = stream_set[master_stream];
5496 static void dm_enable_per_frame_crtc_master_sync(struct dc_state *context)
5499 struct dc_stream_state *stream;
5501 if (context->stream_count < 2)
5503 for (i = 0; i < context->stream_count ; i++) {
5504 if (!context->streams[i])
5507 * TODO: add a function to read AMD VSDB bits and set
5508 * crtc_sync_master.multi_sync_enabled flag
5509 * For now it's set to false
5513 set_master_stream(context->streams, context->stream_count);
5515 for (i = 0; i < context->stream_count ; i++) {
5516 stream = context->streams[i];
5521 set_multisync_trigger_params(stream);
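/*
 * Return the mode used as the freesync video base: the connector's
 * preferred (or first) mode, upgraded to the highest refresh rate
 * available at the same resolution. The result is cached in
 * aconnector->freesync_vid_base.
 */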
5525 static struct drm_display_mode *
5526 get_highest_refresh_rate_mode(struct amdgpu_dm_connector *aconnector,
5527 bool use_probed_modes)
5529 struct drm_display_mode *m, *m_pref = NULL;
5530 u16 current_refresh, highest_refresh;
5531 struct list_head *list_head = use_probed_modes ?
5532 &aconnector->base.probed_modes :
5533 &aconnector->base.modes;
5535 if (aconnector->freesync_vid_base.clock != 0)
5536 return &aconnector->freesync_vid_base;
5538 /* Find the preferred mode */
5539 list_for_each_entry (m, list_head, head) {
5540 if (m->type & DRM_MODE_TYPE_PREFERRED) {
5547 /* Probably an EDID with no preferred mode. Fall back to the first entry */
5548 m_pref = list_first_entry_or_null(
5549 &aconnector->base.modes, struct drm_display_mode, head);
5551 DRM_DEBUG_DRIVER("No preferred mode found in EDID\n");
5556 highest_refresh = drm_mode_vrefresh(m_pref);
5559 * Find the mode with highest refresh rate with same resolution.
5560 * For some monitors, preferred mode is not the mode with highest
5561 * supported refresh rate.
5563 list_for_each_entry (m, list_head, head) {
5564 current_refresh = drm_mode_vrefresh(m);
5566 if (m->hdisplay == m_pref->hdisplay &&
5567 m->vdisplay == m_pref->vdisplay &&
5568 highest_refresh < current_refresh) {
5569 highest_refresh = current_refresh;
5574 aconnector->freesync_vid_base = *m_pref;
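/*
 * A mode is treated as a freesync video mode when it matches the cached
 * base mode in everything except the vertical blanking period, i.e. only
 * vtotal and the vsync positions differ, and by a constant offset.
 */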
5578 static bool is_freesync_video_mode(const struct drm_display_mode *mode,
5579 struct amdgpu_dm_connector *aconnector)
5581 struct drm_display_mode *high_mode;
5584 high_mode = get_highest_refresh_rate_mode(aconnector, false);
5585 if (!high_mode || !mode)
5588 timing_diff = high_mode->vtotal - mode->vtotal;
5590 if (high_mode->clock == 0 || high_mode->clock != mode->clock ||
5591 high_mode->hdisplay != mode->hdisplay ||
5592 high_mode->vdisplay != mode->vdisplay ||
5593 high_mode->hsync_start != mode->hsync_start ||
5594 high_mode->hsync_end != mode->hsync_end ||
5595 high_mode->htotal != mode->htotal ||
5596 high_mode->hskew != mode->hskew ||
5597 high_mode->vscan != mode->vscan ||
5598 high_mode->vsync_start - mode->vsync_start != timing_diff ||
5599 high_mode->vsync_end - mode->vsync_end != timing_diff)
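/*
 * Build a dc_stream_state for a connector and mode: create (or retain) the
 * sink, fill the stream timing and scaling, configure DSC from the DPCD
 * caps and debugfs overrides, and attach the audio and PSR/VSC info
 * packets.
 */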
5605 static struct dc_stream_state *
5606 create_stream_for_sink(struct amdgpu_dm_connector *aconnector,
5607 const struct drm_display_mode *drm_mode,
5608 const struct dm_connector_state *dm_state,
5609 const struct dc_stream_state *old_stream,
5612 struct drm_display_mode *preferred_mode = NULL;
5613 struct drm_connector *drm_connector;
5614 const struct drm_connector_state *con_state =
5615 dm_state ? &dm_state->base : NULL;
5616 struct dc_stream_state *stream = NULL;
5617 struct drm_display_mode mode = *drm_mode;
5618 struct drm_display_mode saved_mode;
5619 struct drm_display_mode *freesync_mode = NULL;
5620 bool native_mode_found = false;
5621 bool recalculate_timing = dm_state ? (dm_state->scaling != RMX_OFF) : false;
5623 int preferred_refresh = 0;
5624 #if defined(CONFIG_DRM_AMD_DC_DCN)
5625 struct dsc_dec_dpcd_caps dsc_caps;
5626 uint32_t link_bandwidth_kbps;
5628 struct dc_sink *sink = NULL;
5630 memset(&saved_mode, 0, sizeof(saved_mode));
5632 if (aconnector == NULL) {
5633 DRM_ERROR("aconnector is NULL!\n");
5637 drm_connector = &aconnector->base;
5639 if (!aconnector->dc_sink) {
5640 sink = create_fake_sink(aconnector);
5644 sink = aconnector->dc_sink;
5645 dc_sink_retain(sink);
5648 stream = dc_create_stream_for_sink(sink);
5650 if (stream == NULL) {
5651 DRM_ERROR("Failed to create stream for sink!\n");
5655 stream->dm_stream_context = aconnector;
5657 stream->timing.flags.LTE_340MCSC_SCRAMBLE =
5658 drm_connector->display_info.hdmi.scdc.scrambling.low_rates;
5660 list_for_each_entry(preferred_mode, &aconnector->base.modes, head) {
5661 /* Search for preferred mode */
5662 if (preferred_mode->type & DRM_MODE_TYPE_PREFERRED) {
5663 native_mode_found = true;
5667 if (!native_mode_found)
5668 preferred_mode = list_first_entry_or_null(
5669 &aconnector->base.modes,
5670 struct drm_display_mode,
5673 mode_refresh = drm_mode_vrefresh(&mode);
5675 if (preferred_mode == NULL) {
5677 * This may not be an error: the use case is when we have no
5678 * usermode calls to reset and set mode upon hotplug. In this
5679 * case, we call set mode ourselves to restore the previous mode,
5680 * and the modelist may not be filled in yet.
5682 DRM_DEBUG_DRIVER("No preferred mode found\n");
5684 recalculate_timing |= amdgpu_freesync_vid_mode &&
5685 is_freesync_video_mode(&mode, aconnector);
5686 if (recalculate_timing) {
5687 freesync_mode = get_highest_refresh_rate_mode(aconnector, false);
5689 mode = *freesync_mode;
5691 decide_crtc_timing_for_drm_display_mode(
5692 &mode, preferred_mode,
5693 dm_state ? (dm_state->scaling != RMX_OFF) : false);
5696 preferred_refresh = drm_mode_vrefresh(preferred_mode);
5699 if (recalculate_timing)
5700 drm_mode_set_crtcinfo(&saved_mode, 0);
5702 drm_mode_set_crtcinfo(&mode, 0);
5705 * If scaling is enabled and the refresh rate didn't change,
5706 * we copy the VIC and polarities of the old timings.
5708 if (!recalculate_timing || mode_refresh != preferred_refresh)
5709 fill_stream_properties_from_drm_display_mode(
5710 stream, &mode, &aconnector->base, con_state, NULL,
5713 fill_stream_properties_from_drm_display_mode(
5714 stream, &mode, &aconnector->base, con_state, old_stream,
5717 stream->timing.flags.DSC = 0;
5719 if (aconnector->dc_link && sink->sink_signal == SIGNAL_TYPE_DISPLAY_PORT) {
5720 #if defined(CONFIG_DRM_AMD_DC_DCN)
5721 dc_dsc_parse_dsc_dpcd(aconnector->dc_link->ctx->dc,
5722 aconnector->dc_link->dpcd_caps.dsc_caps.dsc_basic_caps.raw,
5723 aconnector->dc_link->dpcd_caps.dsc_caps.dsc_branch_decoder_caps.raw,
5725 link_bandwidth_kbps = dc_link_bandwidth_kbps(aconnector->dc_link,
5726 dc_link_get_link_cap(aconnector->dc_link));
5728 if (aconnector->dsc_settings.dsc_force_enable != DSC_CLK_FORCE_DISABLE && dsc_caps.is_dsc_supported) {
5729 /* Set DSC policy according to dsc_clock_en */
5730 dc_dsc_policy_set_enable_dsc_when_not_needed(
5731 aconnector->dsc_settings.dsc_force_enable == DSC_CLK_FORCE_ENABLE);
5733 if (dc_dsc_compute_config(aconnector->dc_link->ctx->dc->res_pool->dscs[0],
5735 aconnector->dc_link->ctx->dc->debug.dsc_min_slice_height_override,
5737 link_bandwidth_kbps,
5739 &stream->timing.dsc_cfg))
5740 stream->timing.flags.DSC = 1;
5741 /* Overwrite the stream flag if DSC is enabled through debugfs */
5742 if (aconnector->dsc_settings.dsc_force_enable == DSC_CLK_FORCE_ENABLE)
5743 stream->timing.flags.DSC = 1;
5745 if (stream->timing.flags.DSC && aconnector->dsc_settings.dsc_num_slices_h)
5746 stream->timing.dsc_cfg.num_slices_h = aconnector->dsc_settings.dsc_num_slices_h;
5748 if (stream->timing.flags.DSC && aconnector->dsc_settings.dsc_num_slices_v)
5749 stream->timing.dsc_cfg.num_slices_v = aconnector->dsc_settings.dsc_num_slices_v;
5751 if (stream->timing.flags.DSC && aconnector->dsc_settings.dsc_bits_per_pixel)
5752 stream->timing.dsc_cfg.bits_per_pixel = aconnector->dsc_settings.dsc_bits_per_pixel;
5757 update_stream_scaling_settings(&mode, dm_state, stream);
5760 &stream->audio_info,
5764 update_stream_signal(stream, sink);
5766 if (stream->signal == SIGNAL_TYPE_HDMI_TYPE_A)
5767 mod_build_hf_vsif_infopacket(stream, &stream->vsp_infopacket);
5769 if (stream->link->psr_settings.psr_feature_enabled) {
5771 // Decide whether the stream supports VSC SDP colorimetry
5772 // before building the VSC info packet.
5774 stream->use_vsc_sdp_for_colorimetry = false;
5775 if (aconnector->dc_sink->sink_signal == SIGNAL_TYPE_DISPLAY_PORT_MST) {
5776 stream->use_vsc_sdp_for_colorimetry =
5777 aconnector->dc_sink->is_vsc_sdp_colorimetry_supported;
5779 if (stream->link->dpcd_caps.dprx_feature.bits.VSC_SDP_COLORIMETRY_SUPPORTED)
5780 stream->use_vsc_sdp_for_colorimetry = true;
5782 mod_build_vsc_infopacket(stream, &stream->vsc_infopacket);
5785 dc_sink_release(sink);
5790 static void amdgpu_dm_crtc_destroy(struct drm_crtc *crtc)
5792 drm_crtc_cleanup(crtc);
5796 static void dm_crtc_destroy_state(struct drm_crtc *crtc,
5797 struct drm_crtc_state *state)
5799 struct dm_crtc_state *cur = to_dm_crtc_state(state);
5801 /* TODO: Destroy dc_stream objects once the stream object is flattened */
5803 dc_stream_release(cur->stream);
5806 __drm_atomic_helper_crtc_destroy_state(state);
5812 static void dm_crtc_reset_state(struct drm_crtc *crtc)
5814 struct dm_crtc_state *state;
5817 dm_crtc_destroy_state(crtc, crtc->state);
5819 state = kzalloc(sizeof(*state), GFP_KERNEL);
5820 if (WARN_ON(!state))
5823 __drm_atomic_helper_crtc_reset(crtc, &state->base);
5826 static struct drm_crtc_state *
5827 dm_crtc_duplicate_state(struct drm_crtc *crtc)
5829 struct dm_crtc_state *state, *cur;
5831 cur = to_dm_crtc_state(crtc->state);
5833 if (WARN_ON(!crtc->state))
5836 state = kzalloc(sizeof(*state), GFP_KERNEL);
5840 __drm_atomic_helper_crtc_duplicate_state(crtc, &state->base);
5843 state->stream = cur->stream;
5844 dc_stream_retain(state->stream);
5847 state->active_planes = cur->active_planes;
5848 state->vrr_infopacket = cur->vrr_infopacket;
5849 state->abm_level = cur->abm_level;
5850 state->vrr_supported = cur->vrr_supported;
5851 state->freesync_config = cur->freesync_config;
5852 state->cm_has_degamma = cur->cm_has_degamma;
5853 state->cm_is_degamma_srgb = cur->cm_is_degamma_srgb;
5854 /* TODO: Duplicate dc_stream once the stream object is flattened */
5856 return &state->base;
5859 #ifdef CONFIG_DRM_AMD_SECURE_DISPLAY
5860 static int amdgpu_dm_crtc_late_register(struct drm_crtc *crtc)
5862 crtc_debugfs_init(crtc);
5868 static inline int dm_set_vupdate_irq(struct drm_crtc *crtc, bool enable)
5870 enum dc_irq_source irq_source;
5871 struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
5872 struct amdgpu_device *adev = drm_to_adev(crtc->dev);
5875 irq_source = IRQ_TYPE_VUPDATE + acrtc->otg_inst;
5877 rc = dc_interrupt_set(adev->dm.dc, irq_source, enable) ? 0 : -EBUSY;
5879 DRM_DEBUG_VBL("crtc %d - vupdate irq %sabling: r=%d\n",
5880 acrtc->crtc_id, enable ? "en" : "dis", rc);
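/*
 * Enable or disable the vblank interrupt for a CRTC. In VRR mode the
 * vupdate interrupt is toggled alongside it, and on DCN (outside of GPU
 * reset) the change is also handed to the vblank workqueue's mall_work
 * item.
 */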
5884 static inline int dm_set_vblank(struct drm_crtc *crtc, bool enable)
5886 enum dc_irq_source irq_source;
5887 struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
5888 struct amdgpu_device *adev = drm_to_adev(crtc->dev);
5889 struct dm_crtc_state *acrtc_state = to_dm_crtc_state(crtc->state);
5890 #if defined(CONFIG_DRM_AMD_DC_DCN)
5891 struct amdgpu_display_manager *dm = &adev->dm;
5892 unsigned long flags;
5897 /* vblank irq on -> Only need vupdate irq in vrr mode */
5898 if (amdgpu_dm_vrr_active(acrtc_state))
5899 rc = dm_set_vupdate_irq(crtc, true);
5901 /* vblank irq off -> vupdate irq off */
5902 rc = dm_set_vupdate_irq(crtc, false);
5908 irq_source = IRQ_TYPE_VBLANK + acrtc->otg_inst;
5910 if (!dc_interrupt_set(adev->dm.dc, irq_source, enable))
5913 if (amdgpu_in_reset(adev))
5916 #if defined(CONFIG_DRM_AMD_DC_DCN)
5917 spin_lock_irqsave(&dm->vblank_lock, flags);
5918 dm->vblank_workqueue->dm = dm;
5919 dm->vblank_workqueue->otg_inst = acrtc->otg_inst;
5920 dm->vblank_workqueue->enable = enable;
5921 spin_unlock_irqrestore(&dm->vblank_lock, flags);
5922 schedule_work(&dm->vblank_workqueue->mall_work);
5928 static int dm_enable_vblank(struct drm_crtc *crtc)
5930 return dm_set_vblank(crtc, true);
5933 static void dm_disable_vblank(struct drm_crtc *crtc)
5935 dm_set_vblank(crtc, false);
5938 /* Only the options currently available to the driver are implemented */
5939 static const struct drm_crtc_funcs amdgpu_dm_crtc_funcs = {
5940 .reset = dm_crtc_reset_state,
5941 .destroy = amdgpu_dm_crtc_destroy,
5942 .set_config = drm_atomic_helper_set_config,
5943 .page_flip = drm_atomic_helper_page_flip,
5944 .atomic_duplicate_state = dm_crtc_duplicate_state,
5945 .atomic_destroy_state = dm_crtc_destroy_state,
5946 .set_crc_source = amdgpu_dm_crtc_set_crc_source,
5947 .verify_crc_source = amdgpu_dm_crtc_verify_crc_source,
5948 .get_crc_sources = amdgpu_dm_crtc_get_crc_sources,
5949 .get_vblank_counter = amdgpu_get_vblank_counter_kms,
5950 .enable_vblank = dm_enable_vblank,
5951 .disable_vblank = dm_disable_vblank,
5952 .get_vblank_timestamp = drm_crtc_vblank_helper_get_vblank_timestamp,
5953 #if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
5954 .late_register = amdgpu_dm_crtc_late_register,
5958 static enum drm_connector_status
5959 amdgpu_dm_connector_detect(struct drm_connector *connector, bool force)
5962 struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
5966 * 1. This interface is NOT called in context of HPD irq.
5967 * 2. This interface *is called* in the context of a user-mode ioctl, which
5968 * makes it a bad place for *any* MST-related activity.
5971 if (aconnector->base.force == DRM_FORCE_UNSPECIFIED &&
5972 !aconnector->fake_enable)
5973 connected = (aconnector->dc_sink != NULL);
5975 connected = (aconnector->base.force == DRM_FORCE_ON);
5977 update_subconnector_property(aconnector);
5979 return (connected ? connector_status_connected :
5980 connector_status_disconnected);
5983 int amdgpu_dm_connector_atomic_set_property(struct drm_connector *connector,
5984 struct drm_connector_state *connector_state,
5985 struct drm_property *property,
5988 struct drm_device *dev = connector->dev;
5989 struct amdgpu_device *adev = drm_to_adev(dev);
5990 struct dm_connector_state *dm_old_state =
5991 to_dm_connector_state(connector->state);
5992 struct dm_connector_state *dm_new_state =
5993 to_dm_connector_state(connector_state);
5997 if (property == dev->mode_config.scaling_mode_property) {
5998 enum amdgpu_rmx_type rmx_type;
6001 case DRM_MODE_SCALE_CENTER:
6002 rmx_type = RMX_CENTER;
6004 case DRM_MODE_SCALE_ASPECT:
6005 rmx_type = RMX_ASPECT;
6007 case DRM_MODE_SCALE_FULLSCREEN:
6008 rmx_type = RMX_FULL;
6010 case DRM_MODE_SCALE_NONE:
6016 if (dm_old_state->scaling == rmx_type)
6019 dm_new_state->scaling = rmx_type;
6021 } else if (property == adev->mode_info.underscan_hborder_property) {
6022 dm_new_state->underscan_hborder = val;
6024 } else if (property == adev->mode_info.underscan_vborder_property) {
6025 dm_new_state->underscan_vborder = val;
6027 } else if (property == adev->mode_info.underscan_property) {
6028 dm_new_state->underscan_enable = val;
6030 } else if (property == adev->mode_info.abm_level_property) {
6031 dm_new_state->abm_level = val;
6038 int amdgpu_dm_connector_atomic_get_property(struct drm_connector *connector,
6039 const struct drm_connector_state *state,
6040 struct drm_property *property,
6043 struct drm_device *dev = connector->dev;
6044 struct amdgpu_device *adev = drm_to_adev(dev);
6045 struct dm_connector_state *dm_state =
6046 to_dm_connector_state(state);
6049 if (property == dev->mode_config.scaling_mode_property) {
6050 switch (dm_state->scaling) {
6052 *val = DRM_MODE_SCALE_CENTER;
6055 *val = DRM_MODE_SCALE_ASPECT;
6058 *val = DRM_MODE_SCALE_FULLSCREEN;
6062 *val = DRM_MODE_SCALE_NONE;
6066 } else if (property == adev->mode_info.underscan_hborder_property) {
6067 *val = dm_state->underscan_hborder;
6069 } else if (property == adev->mode_info.underscan_vborder_property) {
6070 *val = dm_state->underscan_vborder;
6072 } else if (property == adev->mode_info.underscan_property) {
6073 *val = dm_state->underscan_enable;
6075 } else if (property == adev->mode_info.abm_level_property) {
6076 *val = dm_state->abm_level;
6083 static void amdgpu_dm_connector_unregister(struct drm_connector *connector)
6085 struct amdgpu_dm_connector *amdgpu_dm_connector = to_amdgpu_dm_connector(connector);
6087 drm_dp_aux_unregister(&amdgpu_dm_connector->dm_dp_aux.aux);
6090 static void amdgpu_dm_connector_destroy(struct drm_connector *connector)
6092 struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
6093 const struct dc_link *link = aconnector->dc_link;
6094 struct amdgpu_device *adev = drm_to_adev(connector->dev);
6095 struct amdgpu_display_manager *dm = &adev->dm;
6098 * Call only if mst_mgr was initialized before, since it's not done
6099 * for all connector types.
6101 if (aconnector->mst_mgr.dev)
6102 drm_dp_mst_topology_mgr_destroy(&aconnector->mst_mgr);
6104 #if defined(CONFIG_BACKLIGHT_CLASS_DEVICE) ||\
6105 defined(CONFIG_BACKLIGHT_CLASS_DEVICE_MODULE)
6107 if ((link->connector_signal & (SIGNAL_TYPE_EDP | SIGNAL_TYPE_LVDS)) &&
6108 link->type != dc_connection_none &&
6109 dm->backlight_dev) {
6110 backlight_device_unregister(dm->backlight_dev);
6111 dm->backlight_dev = NULL;
6115 if (aconnector->dc_em_sink)
6116 dc_sink_release(aconnector->dc_em_sink);
6117 aconnector->dc_em_sink = NULL;
6118 if (aconnector->dc_sink)
6119 dc_sink_release(aconnector->dc_sink);
6120 aconnector->dc_sink = NULL;
6122 drm_dp_cec_unregister_connector(&aconnector->dm_dp_aux.aux);
6123 drm_connector_unregister(connector);
6124 drm_connector_cleanup(connector);
6125 if (aconnector->i2c) {
6126 i2c_del_adapter(&aconnector->i2c->base);
6127 kfree(aconnector->i2c);
6129 kfree(aconnector->dm_dp_aux.aux.name);
6134 void amdgpu_dm_connector_funcs_reset(struct drm_connector *connector)
6136 struct dm_connector_state *state =
6137 to_dm_connector_state(connector->state);
6139 if (connector->state)
6140 __drm_atomic_helper_connector_destroy_state(connector->state);
6144 state = kzalloc(sizeof(*state), GFP_KERNEL);
6147 state->scaling = RMX_OFF;
6148 state->underscan_enable = false;
6149 state->underscan_hborder = 0;
6150 state->underscan_vborder = 0;
6151 state->base.max_requested_bpc = 8;
6152 state->vcpi_slots = 0;
6154 if (connector->connector_type == DRM_MODE_CONNECTOR_eDP)
6155 state->abm_level = amdgpu_dm_abm_level;
6157 __drm_atomic_helper_connector_reset(connector, &state->base);
6161 struct drm_connector_state *
6162 amdgpu_dm_connector_atomic_duplicate_state(struct drm_connector *connector)
6164 struct dm_connector_state *state =
6165 to_dm_connector_state(connector->state);
6167 struct dm_connector_state *new_state =
6168 kmemdup(state, sizeof(*state), GFP_KERNEL);
6173 __drm_atomic_helper_connector_duplicate_state(connector, &new_state->base);
6175 new_state->freesync_capable = state->freesync_capable;
6176 new_state->abm_level = state->abm_level;
6177 new_state->scaling = state->scaling;
6178 new_state->underscan_enable = state->underscan_enable;
6179 new_state->underscan_hborder = state->underscan_hborder;
6180 new_state->underscan_vborder = state->underscan_vborder;
6181 new_state->vcpi_slots = state->vcpi_slots;
6182 new_state->pbn = state->pbn;
6183 return &new_state->base;
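/*
 * A note on the duplication pattern above (describing what the code does,
 * not prescribing it): kmemdup() clones the whole dm_connector_state,
 * __drm_atomic_helper_connector_duplicate_state() then re-initializes the
 * embedded drm core state (reference counts, object pointers), and the
 * explicit assignments re-copy the driver-private fields so they are
 * preserved regardless of what the helper touched.
 */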
6187 amdgpu_dm_connector_late_register(struct drm_connector *connector)
6189 struct amdgpu_dm_connector *amdgpu_dm_connector =
6190 to_amdgpu_dm_connector(connector);
6193 if ((connector->connector_type == DRM_MODE_CONNECTOR_DisplayPort) ||
6194 (connector->connector_type == DRM_MODE_CONNECTOR_eDP)) {
6195 amdgpu_dm_connector->dm_dp_aux.aux.dev = connector->kdev;
6196 r = drm_dp_aux_register(&amdgpu_dm_connector->dm_dp_aux.aux);
6201 #if defined(CONFIG_DEBUG_FS)
6202 connector_debugfs_init(amdgpu_dm_connector);
6208 static const struct drm_connector_funcs amdgpu_dm_connector_funcs = {
6209 .reset = amdgpu_dm_connector_funcs_reset,
6210 .detect = amdgpu_dm_connector_detect,
6211 .fill_modes = drm_helper_probe_single_connector_modes,
6212 .destroy = amdgpu_dm_connector_destroy,
6213 .atomic_duplicate_state = amdgpu_dm_connector_atomic_duplicate_state,
6214 .atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
6215 .atomic_set_property = amdgpu_dm_connector_atomic_set_property,
6216 .atomic_get_property = amdgpu_dm_connector_atomic_get_property,
6217 .late_register = amdgpu_dm_connector_late_register,
6218 .early_unregister = amdgpu_dm_connector_unregister
6221 static int get_modes(struct drm_connector *connector)
6223 return amdgpu_dm_connector_get_modes(connector);
6226 static void create_eml_sink(struct amdgpu_dm_connector *aconnector)
6228 struct dc_sink_init_data init_params = {
6229 .link = aconnector->dc_link,
6230 .sink_signal = SIGNAL_TYPE_VIRTUAL
6234 if (!aconnector->base.edid_blob_ptr) {
6235		DRM_ERROR("No EDID firmware found on connector: %s, forcing to OFF!\n",
6236 aconnector->base.name);
6238 aconnector->base.force = DRM_FORCE_OFF;
6239 aconnector->base.override_edid = false;
6243 edid = (struct edid *) aconnector->base.edid_blob_ptr->data;
6245 aconnector->edid = edid;
6247 aconnector->dc_em_sink = dc_link_add_remote_sink(
6248 aconnector->dc_link,
6250 (edid->extensions + 1) * EDID_LENGTH,
6253 if (aconnector->base.force == DRM_FORCE_ON) {
6254 aconnector->dc_sink = aconnector->dc_link->local_sink ?
6255 aconnector->dc_link->local_sink :
6256 aconnector->dc_em_sink;
6257 dc_sink_retain(aconnector->dc_sink);
6261 static void handle_edid_mgmt(struct amdgpu_dm_connector *aconnector)
6263 struct dc_link *link = (struct dc_link *)aconnector->dc_link;
6266	 * In case of a headless boot with force on for a DP managed connector,
6267	 * those settings have to be != 0 to get an initial modeset.
6269 if (link->connector_signal == SIGNAL_TYPE_DISPLAY_PORT) {
6270 link->verified_link_cap.lane_count = LANE_COUNT_FOUR;
6271 link->verified_link_cap.link_rate = LINK_RATE_HIGH2;
6275 aconnector->base.override_edid = true;
6276 create_eml_sink(aconnector);
6279 static struct dc_stream_state *
6280 create_validate_stream_for_sink(struct amdgpu_dm_connector *aconnector,
6281 const struct drm_display_mode *drm_mode,
6282 const struct dm_connector_state *dm_state,
6283 const struct dc_stream_state *old_stream)
6285 struct drm_connector *connector = &aconnector->base;
6286 struct amdgpu_device *adev = drm_to_adev(connector->dev);
6287 struct dc_stream_state *stream;
6288 const struct drm_connector_state *drm_state = dm_state ? &dm_state->base : NULL;
6289 int requested_bpc = drm_state ? drm_state->max_requested_bpc : 8;
6290 enum dc_status dc_result = DC_OK;
6293 stream = create_stream_for_sink(aconnector, drm_mode,
6294 dm_state, old_stream,
6296 if (stream == NULL) {
6297 DRM_ERROR("Failed to create stream for sink!\n");
6301 dc_result = dc_validate_stream(adev->dm.dc, stream);
6303 if (dc_result != DC_OK) {
6304 DRM_DEBUG_KMS("Mode %dx%d (clk %d) failed DC validation with error %d (%s)\n",
6309 dc_status_to_str(dc_result));
6311 dc_stream_release(stream);
6313 requested_bpc -= 2; /* lower bpc to retry validation */
6316 } while (stream == NULL && requested_bpc >= 6);
6318 if (dc_result == DC_FAIL_ENC_VALIDATE && !aconnector->force_yuv420_output) {
6319 DRM_DEBUG_KMS("Retry forcing YCbCr420 encoding\n");
6321 aconnector->force_yuv420_output = true;
6322 stream = create_validate_stream_for_sink(aconnector, drm_mode,
6323 dm_state, old_stream);
6324 aconnector->force_yuv420_output = false;
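/*
 * A short summary of the retry strategy above (a sketch of the observable
 * behaviour, not a specification): the stream is rebuilt and re-validated
 * with the requested bpc lowered in steps of 2 (e.g. 12 -> 10 -> 8 -> 6)
 * until dc_validate_stream() accepts it, and if the failure was
 * DC_FAIL_ENC_VALIDATE the whole sequence is retried once more with
 * YCbCr 4:2:0 output forced, which lowers the required link bandwidth
 * for the same mode.
 */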
6330 enum drm_mode_status amdgpu_dm_connector_mode_valid(struct drm_connector *connector,
6331 struct drm_display_mode *mode)
6333 int result = MODE_ERROR;
6334 struct dc_sink *dc_sink;
6335 /* TODO: Unhardcode stream count */
6336 struct dc_stream_state *stream;
6337 struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
6339 if ((mode->flags & DRM_MODE_FLAG_INTERLACE) ||
6340 (mode->flags & DRM_MODE_FLAG_DBLSCAN))
6344	 * Only run this the first time mode_valid is called to initialize
6347 if (aconnector->base.force != DRM_FORCE_UNSPECIFIED &&
6348 !aconnector->dc_em_sink)
6349 handle_edid_mgmt(aconnector);
6351 dc_sink = to_amdgpu_dm_connector(connector)->dc_sink;
6353 if (dc_sink == NULL && aconnector->base.force != DRM_FORCE_ON_DIGITAL &&
6354 aconnector->base.force != DRM_FORCE_ON) {
6355 DRM_ERROR("dc_sink is NULL!\n");
6359 stream = create_validate_stream_for_sink(aconnector, mode, NULL, NULL);
6361 dc_stream_release(stream);
6366	/* TODO: error handling */
6370 static int fill_hdr_info_packet(const struct drm_connector_state *state,
6371 struct dc_info_packet *out)
6373 struct hdmi_drm_infoframe frame;
6374 unsigned char buf[30]; /* 26 + 4 */
6378 memset(out, 0, sizeof(*out));
6380 if (!state->hdr_output_metadata)
6383 ret = drm_hdmi_infoframe_set_hdr_metadata(&frame, state);
6387 len = hdmi_drm_infoframe_pack_only(&frame, buf, sizeof(buf));
6391 /* Static metadata is a fixed 26 bytes + 4 byte header. */
6395 /* Prepare the infopacket for DC. */
6396 switch (state->connector->connector_type) {
6397 case DRM_MODE_CONNECTOR_HDMIA:
6398 out->hb0 = 0x87; /* type */
6399 out->hb1 = 0x01; /* version */
6400 out->hb2 = 0x1A; /* length */
6401 out->sb[0] = buf[3]; /* checksum */
6405 case DRM_MODE_CONNECTOR_DisplayPort:
6406 case DRM_MODE_CONNECTOR_eDP:
6407 out->hb0 = 0x00; /* sdp id, zero */
6408 out->hb1 = 0x87; /* type */
6409 out->hb2 = 0x1D; /* payload len - 1 */
6410 out->hb3 = (0x13 << 2); /* sdp version */
6411 out->sb[0] = 0x01; /* version */
6412 out->sb[1] = 0x1A; /* length */
6420 memcpy(&out->sb[i], &buf[4], 26);
6423 print_hex_dump(KERN_DEBUG, "HDR SB:", DUMP_PREFIX_NONE, 16, 1, out->sb,
6424 sizeof(out->sb), false);
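/*
 * Layout reminder for the packing above (derived from the code; double
 * check against the CTA-861/DP specs before relying on it): buf[] holds
 * the infoframe as packed by hdmi_drm_infoframe_pack_only(), a 4 byte
 * header (type 0x87, version, length 26, checksum) followed by the
 * 26 byte Dynamic Range and Mastering payload in buf[4..29]. For HDMI
 * the DC packet reuses that header and carries the checksum in sb[0];
 * for DP the same payload is wrapped in an HDR SDP whose header bytes
 * carry the SDP id, packet type 0x87, payload size and SDP version.
 */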
6430 is_hdr_metadata_different(const struct drm_connector_state *old_state,
6431 const struct drm_connector_state *new_state)
6433 struct drm_property_blob *old_blob = old_state->hdr_output_metadata;
6434 struct drm_property_blob *new_blob = new_state->hdr_output_metadata;
6436 if (old_blob != new_blob) {
6437 if (old_blob && new_blob &&
6438 old_blob->length == new_blob->length)
6439 return memcmp(old_blob->data, new_blob->data,
6449 amdgpu_dm_connector_atomic_check(struct drm_connector *conn,
6450 struct drm_atomic_state *state)
6452 struct drm_connector_state *new_con_state =
6453 drm_atomic_get_new_connector_state(state, conn);
6454 struct drm_connector_state *old_con_state =
6455 drm_atomic_get_old_connector_state(state, conn);
6456 struct drm_crtc *crtc = new_con_state->crtc;
6457 struct drm_crtc_state *new_crtc_state;
6460 trace_amdgpu_dm_connector_atomic_check(new_con_state);
6465 if (is_hdr_metadata_different(old_con_state, new_con_state)) {
6466 struct dc_info_packet hdr_infopacket;
6468 ret = fill_hdr_info_packet(new_con_state, &hdr_infopacket);
6472 new_crtc_state = drm_atomic_get_crtc_state(state, crtc);
6473 if (IS_ERR(new_crtc_state))
6474 return PTR_ERR(new_crtc_state);
6477 * DC considers the stream backends changed if the
6478 * static metadata changes. Forcing the modeset also
6479 * gives a simple way for userspace to switch from
6480		 * 8bpc to 10bpc when setting the metadata to enter or exit HDR.
6483 * Changing the static metadata after it's been
6484 * set is permissible, however. So only force a
6485 * modeset if we're entering or exiting HDR.
6487 new_crtc_state->mode_changed =
6488 !old_con_state->hdr_output_metadata ||
6489 !new_con_state->hdr_output_metadata;
6495 static const struct drm_connector_helper_funcs
6496 amdgpu_dm_connector_helper_funcs = {
6498	 * If a second, larger display is hotplugged in fbcon mode, its higher
6499	 * resolution modes will be filtered out by drm_mode_validate_size() and
6500	 * will be missing after the user starts lightdm. So we need to rebuild
6501	 * the mode list in the get_modes callback, not just return the mode count.
6503 .get_modes = get_modes,
6504 .mode_valid = amdgpu_dm_connector_mode_valid,
6505 .atomic_check = amdgpu_dm_connector_atomic_check,
6508 static void dm_crtc_helper_disable(struct drm_crtc *crtc)
6512 static int count_crtc_active_planes(struct drm_crtc_state *new_crtc_state)
6514 struct drm_atomic_state *state = new_crtc_state->state;
6515 struct drm_plane *plane;
6518 drm_for_each_plane_mask(plane, state->dev, new_crtc_state->plane_mask) {
6519 struct drm_plane_state *new_plane_state;
6521 /* Cursor planes are "fake". */
6522 if (plane->type == DRM_PLANE_TYPE_CURSOR)
6525 new_plane_state = drm_atomic_get_new_plane_state(state, plane);
6527 if (!new_plane_state) {
6529			 * The plane is enabled on the CRTC and hasn't changed
6530 * state. This means that it previously passed
6531 * validation and is therefore enabled.
6537 /* We need a framebuffer to be considered enabled. */
6538 num_active += (new_plane_state->fb != NULL);
6544 static void dm_update_crtc_active_planes(struct drm_crtc *crtc,
6545 struct drm_crtc_state *new_crtc_state)
6547 struct dm_crtc_state *dm_new_crtc_state =
6548 to_dm_crtc_state(new_crtc_state);
6550 dm_new_crtc_state->active_planes = 0;
6552 if (!dm_new_crtc_state->stream)
6555 dm_new_crtc_state->active_planes =
6556 count_crtc_active_planes(new_crtc_state);
6559 static int dm_crtc_helper_atomic_check(struct drm_crtc *crtc,
6560 struct drm_atomic_state *state)
6562 struct drm_crtc_state *crtc_state = drm_atomic_get_new_crtc_state(state,
6564 struct amdgpu_device *adev = drm_to_adev(crtc->dev);
6565 struct dc *dc = adev->dm.dc;
6566 struct dm_crtc_state *dm_crtc_state = to_dm_crtc_state(crtc_state);
6569 trace_amdgpu_dm_crtc_atomic_check(crtc_state);
6571 dm_update_crtc_active_planes(crtc, crtc_state);
6573 if (unlikely(!dm_crtc_state->stream &&
6574 modeset_required(crtc_state, NULL, dm_crtc_state->stream))) {
6580 * We require the primary plane to be enabled whenever the CRTC is, otherwise
6581 * drm_mode_cursor_universal may end up trying to enable the cursor plane while all other
6582 * planes are disabled, which is not supported by the hardware. And there is legacy
6583 * userspace which stops using the HW cursor altogether in response to the resulting EINVAL.
6585 if (crtc_state->enable &&
6586 !(crtc_state->plane_mask & drm_plane_mask(crtc->primary))) {
6587 DRM_DEBUG_ATOMIC("Can't enable a CRTC without enabling the primary plane\n");
6591 /* In some use cases, like reset, no stream is attached */
6592 if (!dm_crtc_state->stream)
6595 if (dc_validate_stream(dc, dm_crtc_state->stream) == DC_OK)
6598 DRM_DEBUG_ATOMIC("Failed DC stream validation\n");
6602 static bool dm_crtc_helper_mode_fixup(struct drm_crtc *crtc,
6603 const struct drm_display_mode *mode,
6604 struct drm_display_mode *adjusted_mode)
6609 static const struct drm_crtc_helper_funcs amdgpu_dm_crtc_helper_funcs = {
6610 .disable = dm_crtc_helper_disable,
6611 .atomic_check = dm_crtc_helper_atomic_check,
6612 .mode_fixup = dm_crtc_helper_mode_fixup,
6613 .get_scanout_position = amdgpu_crtc_get_scanout_position,
6616 static void dm_encoder_helper_disable(struct drm_encoder *encoder)
6621 static int convert_dc_color_depth_into_bpc (enum dc_color_depth display_color_depth)
6623 switch (display_color_depth) {
6624 case COLOR_DEPTH_666:
6626 case COLOR_DEPTH_888:
6628 case COLOR_DEPTH_101010:
6630 case COLOR_DEPTH_121212:
6632 case COLOR_DEPTH_141414:
6634 case COLOR_DEPTH_161616:
6642 static int dm_encoder_helper_atomic_check(struct drm_encoder *encoder,
6643 struct drm_crtc_state *crtc_state,
6644 struct drm_connector_state *conn_state)
6646 struct drm_atomic_state *state = crtc_state->state;
6647 struct drm_connector *connector = conn_state->connector;
6648 struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
6649 struct dm_connector_state *dm_new_connector_state = to_dm_connector_state(conn_state);
6650 const struct drm_display_mode *adjusted_mode = &crtc_state->adjusted_mode;
6651 struct drm_dp_mst_topology_mgr *mst_mgr;
6652 struct drm_dp_mst_port *mst_port;
6653 enum dc_color_depth color_depth;
6655 bool is_y420 = false;
6657 if (!aconnector->port || !aconnector->dc_sink)
6660 mst_port = aconnector->port;
6661 mst_mgr = &aconnector->mst_port->mst_mgr;
6663 if (!crtc_state->connectors_changed && !crtc_state->mode_changed)
6666 if (!state->duplicated) {
6667 int max_bpc = conn_state->max_requested_bpc;
6668 is_y420 = drm_mode_is_420_also(&connector->display_info, adjusted_mode) &&
6669 aconnector->force_yuv420_output;
6670 color_depth = convert_color_depth_from_display_info(connector,
6673 bpp = convert_dc_color_depth_into_bpc(color_depth) * 3;
6674 clock = adjusted_mode->clock;
6675 dm_new_connector_state->pbn = drm_dp_calc_pbn_mode(clock, bpp, false);
6677 dm_new_connector_state->vcpi_slots = drm_dp_atomic_find_vcpi_slots(state,
6680 dm_new_connector_state->pbn,
6681 dm_mst_get_pbn_divider(aconnector->dc_link));
6682 if (dm_new_connector_state->vcpi_slots < 0) {
6683 DRM_DEBUG_ATOMIC("failed finding vcpi slots: %d\n", (int)dm_new_connector_state->vcpi_slots);
6684 return dm_new_connector_state->vcpi_slots;
6689 const struct drm_encoder_helper_funcs amdgpu_dm_encoder_helper_funcs = {
6690 .disable = dm_encoder_helper_disable,
6691 .atomic_check = dm_encoder_helper_atomic_check
6694 #if defined(CONFIG_DRM_AMD_DC_DCN)
6695 static int dm_update_mst_vcpi_slots_for_dsc(struct drm_atomic_state *state,
6696 struct dc_state *dc_state)
6698 struct dc_stream_state *stream = NULL;
6699 struct drm_connector *connector;
6700 struct drm_connector_state *new_con_state;
6701 struct amdgpu_dm_connector *aconnector;
6702 struct dm_connector_state *dm_conn_state;
6703 int i, j, clock, bpp;
6704 int vcpi, pbn_div, pbn = 0;
6706 for_each_new_connector_in_state(state, connector, new_con_state, i) {
6708 aconnector = to_amdgpu_dm_connector(connector);
6710 if (!aconnector->port)
6713 if (!new_con_state || !new_con_state->crtc)
6716 dm_conn_state = to_dm_connector_state(new_con_state);
6718 for (j = 0; j < dc_state->stream_count; j++) {
6719 stream = dc_state->streams[j];
6723 if ((struct amdgpu_dm_connector*)stream->dm_stream_context == aconnector)
6732 if (stream->timing.flags.DSC != 1) {
6733 drm_dp_mst_atomic_enable_dsc(state,
6741 pbn_div = dm_mst_get_pbn_divider(stream->link);
6742 bpp = stream->timing.dsc_cfg.bits_per_pixel;
6743 clock = stream->timing.pix_clk_100hz / 10;
6744 pbn = drm_dp_calc_pbn_mode(clock, bpp, true);
6745 vcpi = drm_dp_mst_atomic_enable_dsc(state,
6752 dm_conn_state->pbn = pbn;
6753 dm_conn_state->vcpi_slots = vcpi;
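/*
 * Units used in the PBN computation above (worth noting; please verify
 * against drm_dp_calc_pbn_mode() if it matters): clock is the pixel
 * clock in kHz (pix_clk_100hz / 10) and bits_per_pixel comes from the
 * DSC config, which stores the target rate in 1/16th-of-a-bit units.
 * Passing true as the third argument tells the helper to interpret bpp
 * that way when converting the stream bandwidth into PBN.
 */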
6759 static void dm_drm_plane_reset(struct drm_plane *plane)
6761 struct dm_plane_state *amdgpu_state = NULL;
6764 plane->funcs->atomic_destroy_state(plane, plane->state);
6766 amdgpu_state = kzalloc(sizeof(*amdgpu_state), GFP_KERNEL);
6767 WARN_ON(amdgpu_state == NULL);
6770 __drm_atomic_helper_plane_reset(plane, &amdgpu_state->base);
6773 static struct drm_plane_state *
6774 dm_drm_plane_duplicate_state(struct drm_plane *plane)
6776 struct dm_plane_state *dm_plane_state, *old_dm_plane_state;
6778 old_dm_plane_state = to_dm_plane_state(plane->state);
6779 dm_plane_state = kzalloc(sizeof(*dm_plane_state), GFP_KERNEL);
6780 if (!dm_plane_state)
6783 __drm_atomic_helper_plane_duplicate_state(plane, &dm_plane_state->base);
6785 if (old_dm_plane_state->dc_state) {
6786 dm_plane_state->dc_state = old_dm_plane_state->dc_state;
6787 dc_plane_state_retain(dm_plane_state->dc_state);
6790 return &dm_plane_state->base;
6793 static void dm_drm_plane_destroy_state(struct drm_plane *plane,
6794 struct drm_plane_state *state)
6796 struct dm_plane_state *dm_plane_state = to_dm_plane_state(state);
6798 if (dm_plane_state->dc_state)
6799 dc_plane_state_release(dm_plane_state->dc_state);
6801 drm_atomic_helper_plane_destroy_state(plane, state);
6804 static const struct drm_plane_funcs dm_plane_funcs = {
6805 .update_plane = drm_atomic_helper_update_plane,
6806 .disable_plane = drm_atomic_helper_disable_plane,
6807 .destroy = drm_primary_helper_destroy,
6808 .reset = dm_drm_plane_reset,
6809 .atomic_duplicate_state = dm_drm_plane_duplicate_state,
6810 .atomic_destroy_state = dm_drm_plane_destroy_state,
6811 .format_mod_supported = dm_plane_format_mod_supported,
6814 static int dm_plane_helper_prepare_fb(struct drm_plane *plane,
6815 struct drm_plane_state *new_state)
6817 struct amdgpu_framebuffer *afb;
6818 struct drm_gem_object *obj;
6819 struct amdgpu_device *adev;
6820 struct amdgpu_bo *rbo;
6821 struct dm_plane_state *dm_plane_state_new, *dm_plane_state_old;
6822 struct list_head list;
6823 struct ttm_validate_buffer tv;
6824 struct ww_acquire_ctx ticket;
6828 if (!new_state->fb) {
6829 DRM_DEBUG_KMS("No FB bound\n");
6833 afb = to_amdgpu_framebuffer(new_state->fb);
6834 obj = new_state->fb->obj[0];
6835 rbo = gem_to_amdgpu_bo(obj);
6836 adev = amdgpu_ttm_adev(rbo->tbo.bdev);
6837 INIT_LIST_HEAD(&list);
6841 list_add(&tv.head, &list);
6843 r = ttm_eu_reserve_buffers(&ticket, &list, false, NULL);
6845		dev_err(adev->dev, "failed to reserve bo (%d)\n", r);
6849 if (plane->type != DRM_PLANE_TYPE_CURSOR)
6850 domain = amdgpu_display_supported_domains(adev, rbo->flags);
6852 domain = AMDGPU_GEM_DOMAIN_VRAM;
6854 r = amdgpu_bo_pin(rbo, domain);
6855 if (unlikely(r != 0)) {
6856 if (r != -ERESTARTSYS)
6857 DRM_ERROR("Failed to pin framebuffer with error %d\n", r);
6858 ttm_eu_backoff_reservation(&ticket, &list);
6862 r = amdgpu_ttm_alloc_gart(&rbo->tbo);
6863 if (unlikely(r != 0)) {
6864 amdgpu_bo_unpin(rbo);
6865 ttm_eu_backoff_reservation(&ticket, &list);
6866 DRM_ERROR("%p bind failed\n", rbo);
6870 ttm_eu_backoff_reservation(&ticket, &list);
6872 afb->address = amdgpu_bo_gpu_offset(rbo);
6877 * We don't do surface updates on planes that have been newly created,
6878 * but we also don't have the afb->address during atomic check.
6880 * Fill in buffer attributes depending on the address here, but only on
6881 * newly created planes since they're not being used by DC yet and this
6882 * won't modify global state.
6884 dm_plane_state_old = to_dm_plane_state(plane->state);
6885 dm_plane_state_new = to_dm_plane_state(new_state);
6887 if (dm_plane_state_new->dc_state &&
6888 dm_plane_state_old->dc_state != dm_plane_state_new->dc_state) {
6889 struct dc_plane_state *plane_state =
6890 dm_plane_state_new->dc_state;
6891 bool force_disable_dcc = !plane_state->dcc.enable;
6893 fill_plane_buffer_attributes(
6894 adev, afb, plane_state->format, plane_state->rotation,
6896 &plane_state->tiling_info, &plane_state->plane_size,
6897 &plane_state->dcc, &plane_state->address,
6898 afb->tmz_surface, force_disable_dcc);
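/*
 * prepare_fb flow in short, as implemented above: reserve the BO, pin it
 * in a scanout-capable domain (display-supported domains for regular
 * planes, VRAM for cursors), make sure a GART mapping exists, drop the
 * reservation and record the resulting GPU address in afb->address. The
 * DC buffer attributes are only filled for freshly created plane states,
 * since those are not yet referenced by DC and updating them cannot race
 * with an ongoing commit.
 */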
6904 static void dm_plane_helper_cleanup_fb(struct drm_plane *plane,
6905 struct drm_plane_state *old_state)
6907 struct amdgpu_bo *rbo;
6913 rbo = gem_to_amdgpu_bo(old_state->fb->obj[0]);
6914 r = amdgpu_bo_reserve(rbo, false);
6916 DRM_ERROR("failed to reserve rbo before unpin\n");
6920 amdgpu_bo_unpin(rbo);
6921 amdgpu_bo_unreserve(rbo);
6922 amdgpu_bo_unref(&rbo);
6925 static int dm_plane_helper_check_state(struct drm_plane_state *state,
6926 struct drm_crtc_state *new_crtc_state)
6928 struct drm_framebuffer *fb = state->fb;
6929 int min_downscale, max_upscale;
6931 int max_scale = INT_MAX;
6933 /* Plane enabled? Validate viewport and get scaling factors from plane caps. */
6934 if (fb && state->crtc) {
6935 /* Validate viewport to cover the case when only the position changes */
6936 if (state->plane->type != DRM_PLANE_TYPE_CURSOR) {
6937 int viewport_width = state->crtc_w;
6938 int viewport_height = state->crtc_h;
6940 if (state->crtc_x < 0)
6941 viewport_width += state->crtc_x;
6942 else if (state->crtc_x + state->crtc_w > new_crtc_state->mode.crtc_hdisplay)
6943 viewport_width = new_crtc_state->mode.crtc_hdisplay - state->crtc_x;
6945 if (state->crtc_y < 0)
6946 viewport_height += state->crtc_y;
6947 else if (state->crtc_y + state->crtc_h > new_crtc_state->mode.crtc_vdisplay)
6948 viewport_height = new_crtc_state->mode.crtc_vdisplay - state->crtc_y;
6950 if (viewport_width < 0 || viewport_height < 0) {
6951 DRM_DEBUG_ATOMIC("Plane completely outside of screen\n");
6953 } else if (viewport_width < MIN_VIEWPORT_SIZE*2) { /* x2 for width is because of pipe-split. */
6954 DRM_DEBUG_ATOMIC("Viewport width %d smaller than %d\n", viewport_width, MIN_VIEWPORT_SIZE*2);
6956 } else if (viewport_height < MIN_VIEWPORT_SIZE) {
6957 DRM_DEBUG_ATOMIC("Viewport height %d smaller than %d\n", viewport_height, MIN_VIEWPORT_SIZE);
6963 /* Get min/max allowed scaling factors from plane caps. */
6964 get_min_max_dc_plane_scaling(state->crtc->dev, fb,
6965 &min_downscale, &max_upscale);
6967 * Convert to drm convention: 16.16 fixed point, instead of dc's
6968 * 1.0 == 1000. Also drm scaling is src/dst instead of dc's
6969 * dst/src, so min_scale = 1.0 / max_upscale, etc.
6971 min_scale = (1000 << 16) / max_upscale;
6972 max_scale = (1000 << 16) / min_downscale;
6975 return drm_atomic_helper_check_plane_state(
6976 state, new_crtc_state, min_scale, max_scale, true, true);
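/*
 * A worked example of the fixed point conversion above (illustrative
 * numbers only, not actual plane caps): if a plane reported
 * max_upscale = 16000 (16x in DC's 1.0 == 1000 convention), then
 * min_scale = (1000 << 16) / 16000 = 4096, i.e. 1/16 in drm's 16.16
 * src/dst format. Likewise min_downscale = 250 (down to 0.25x) gives
 * max_scale = 262144 = 4.0, meaning the source may be at most four
 * times larger than the destination.
 */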
6979 static int dm_plane_atomic_check(struct drm_plane *plane,
6980 struct drm_atomic_state *state)
6982 struct drm_plane_state *new_plane_state = drm_atomic_get_new_plane_state(state,
6984 struct amdgpu_device *adev = drm_to_adev(plane->dev);
6985 struct dc *dc = adev->dm.dc;
6986 struct dm_plane_state *dm_plane_state;
6987 struct dc_scaling_info scaling_info;
6988 struct drm_crtc_state *new_crtc_state;
6991 trace_amdgpu_dm_plane_atomic_check(new_plane_state);
6993 dm_plane_state = to_dm_plane_state(new_plane_state);
6995 if (!dm_plane_state->dc_state)
6999 drm_atomic_get_new_crtc_state(state,
7000 new_plane_state->crtc);
7001 if (!new_crtc_state)
7004 ret = dm_plane_helper_check_state(new_plane_state, new_crtc_state);
7008 ret = fill_dc_scaling_info(new_plane_state, &scaling_info);
7012 if (dc_validate_plane(dc, dm_plane_state->dc_state) == DC_OK)
7018 static int dm_plane_atomic_async_check(struct drm_plane *plane,
7019 struct drm_atomic_state *state)
7021 /* Only support async updates on cursor planes. */
7022 if (plane->type != DRM_PLANE_TYPE_CURSOR)
7028 static void dm_plane_atomic_async_update(struct drm_plane *plane,
7029 struct drm_atomic_state *state)
7031 struct drm_plane_state *new_state = drm_atomic_get_new_plane_state(state,
7033 struct drm_plane_state *old_state =
7034 drm_atomic_get_old_plane_state(state, plane);
7036 trace_amdgpu_dm_atomic_update_cursor(new_state);
7038 swap(plane->state->fb, new_state->fb);
7040 plane->state->src_x = new_state->src_x;
7041 plane->state->src_y = new_state->src_y;
7042 plane->state->src_w = new_state->src_w;
7043 plane->state->src_h = new_state->src_h;
7044 plane->state->crtc_x = new_state->crtc_x;
7045 plane->state->crtc_y = new_state->crtc_y;
7046 plane->state->crtc_w = new_state->crtc_w;
7047 plane->state->crtc_h = new_state->crtc_h;
7049 handle_cursor_update(plane, old_state);
7052 static const struct drm_plane_helper_funcs dm_plane_helper_funcs = {
7053 .prepare_fb = dm_plane_helper_prepare_fb,
7054 .cleanup_fb = dm_plane_helper_cleanup_fb,
7055 .atomic_check = dm_plane_atomic_check,
7056 .atomic_async_check = dm_plane_atomic_async_check,
7057 .atomic_async_update = dm_plane_atomic_async_update
7061 * TODO: these are currently initialized to RGB formats only.
7062 * For future use cases we should either initialize them dynamically based on
7063 * plane capabilities, or initialize this array to all formats, so the internal
7064 * drm check will succeed, and let DC implement the proper check.
7066 static const uint32_t rgb_formats[] = {
7067 DRM_FORMAT_XRGB8888,
7068 DRM_FORMAT_ARGB8888,
7069 DRM_FORMAT_RGBA8888,
7070 DRM_FORMAT_XRGB2101010,
7071 DRM_FORMAT_XBGR2101010,
7072 DRM_FORMAT_ARGB2101010,
7073 DRM_FORMAT_ABGR2101010,
7074 DRM_FORMAT_XBGR8888,
7075 DRM_FORMAT_ABGR8888,
7079 static const uint32_t overlay_formats[] = {
7080 DRM_FORMAT_XRGB8888,
7081 DRM_FORMAT_ARGB8888,
7082 DRM_FORMAT_RGBA8888,
7083 DRM_FORMAT_XBGR8888,
7084 DRM_FORMAT_ABGR8888,
7088 static const u32 cursor_formats[] = {
7092 static int get_plane_formats(const struct drm_plane *plane,
7093 const struct dc_plane_cap *plane_cap,
7094 uint32_t *formats, int max_formats)
7096 int i, num_formats = 0;
7099 * TODO: Query support for each group of formats directly from
7100	 * DC plane caps. This will require adding more formats to the caps list.
7104 switch (plane->type) {
7105 case DRM_PLANE_TYPE_PRIMARY:
7106 for (i = 0; i < ARRAY_SIZE(rgb_formats); ++i) {
7107 if (num_formats >= max_formats)
7110 formats[num_formats++] = rgb_formats[i];
7113 if (plane_cap && plane_cap->pixel_format_support.nv12)
7114 formats[num_formats++] = DRM_FORMAT_NV12;
7115 if (plane_cap && plane_cap->pixel_format_support.p010)
7116 formats[num_formats++] = DRM_FORMAT_P010;
7117 if (plane_cap && plane_cap->pixel_format_support.fp16) {
7118 formats[num_formats++] = DRM_FORMAT_XRGB16161616F;
7119 formats[num_formats++] = DRM_FORMAT_ARGB16161616F;
7120 formats[num_formats++] = DRM_FORMAT_XBGR16161616F;
7121 formats[num_formats++] = DRM_FORMAT_ABGR16161616F;
7125 case DRM_PLANE_TYPE_OVERLAY:
7126 for (i = 0; i < ARRAY_SIZE(overlay_formats); ++i) {
7127 if (num_formats >= max_formats)
7130 formats[num_formats++] = overlay_formats[i];
7134 case DRM_PLANE_TYPE_CURSOR:
7135 for (i = 0; i < ARRAY_SIZE(cursor_formats); ++i) {
7136 if (num_formats >= max_formats)
7139 formats[num_formats++] = cursor_formats[i];
7147 static int amdgpu_dm_plane_init(struct amdgpu_display_manager *dm,
7148 struct drm_plane *plane,
7149 unsigned long possible_crtcs,
7150 const struct dc_plane_cap *plane_cap)
7152 uint32_t formats[32];
7155 unsigned int supported_rotations;
7156 uint64_t *modifiers = NULL;
7158 num_formats = get_plane_formats(plane, plane_cap, formats,
7159 ARRAY_SIZE(formats));
7161 res = get_plane_modifiers(dm->adev, plane->type, &modifiers);
7165 res = drm_universal_plane_init(adev_to_drm(dm->adev), plane, possible_crtcs,
7166 &dm_plane_funcs, formats, num_formats,
7167 modifiers, plane->type, NULL);
7172 if (plane->type == DRM_PLANE_TYPE_OVERLAY &&
7173 plane_cap && plane_cap->per_pixel_alpha) {
7174 unsigned int blend_caps = BIT(DRM_MODE_BLEND_PIXEL_NONE) |
7175 BIT(DRM_MODE_BLEND_PREMULTI);
7177 drm_plane_create_alpha_property(plane);
7178 drm_plane_create_blend_mode_property(plane, blend_caps);
7181 if (plane->type == DRM_PLANE_TYPE_PRIMARY &&
7183 (plane_cap->pixel_format_support.nv12 ||
7184 plane_cap->pixel_format_support.p010)) {
7185 /* This only affects YUV formats. */
7186 drm_plane_create_color_properties(
7188 BIT(DRM_COLOR_YCBCR_BT601) |
7189 BIT(DRM_COLOR_YCBCR_BT709) |
7190 BIT(DRM_COLOR_YCBCR_BT2020),
7191 BIT(DRM_COLOR_YCBCR_LIMITED_RANGE) |
7192 BIT(DRM_COLOR_YCBCR_FULL_RANGE),
7193 DRM_COLOR_YCBCR_BT709, DRM_COLOR_YCBCR_LIMITED_RANGE);
7196 supported_rotations =
7197 DRM_MODE_ROTATE_0 | DRM_MODE_ROTATE_90 |
7198 DRM_MODE_ROTATE_180 | DRM_MODE_ROTATE_270;
7200 if (dm->adev->asic_type >= CHIP_BONAIRE &&
7201 plane->type != DRM_PLANE_TYPE_CURSOR)
7202 drm_plane_create_rotation_property(plane, DRM_MODE_ROTATE_0,
7203 supported_rotations);
7205 drm_plane_helper_add(plane, &dm_plane_helper_funcs);
7207 /* Create (reset) the plane state */
7208 if (plane->funcs->reset)
7209 plane->funcs->reset(plane);
7214 static int amdgpu_dm_crtc_init(struct amdgpu_display_manager *dm,
7215 struct drm_plane *plane,
7216 uint32_t crtc_index)
7218 struct amdgpu_crtc *acrtc = NULL;
7219 struct drm_plane *cursor_plane;
7223 cursor_plane = kzalloc(sizeof(*cursor_plane), GFP_KERNEL);
7227 cursor_plane->type = DRM_PLANE_TYPE_CURSOR;
7228 res = amdgpu_dm_plane_init(dm, cursor_plane, 0, NULL);
7230 acrtc = kzalloc(sizeof(struct amdgpu_crtc), GFP_KERNEL);
7234 res = drm_crtc_init_with_planes(
7239 &amdgpu_dm_crtc_funcs, NULL);
7244 drm_crtc_helper_add(&acrtc->base, &amdgpu_dm_crtc_helper_funcs);
7246 /* Create (reset) the plane state */
7247 if (acrtc->base.funcs->reset)
7248 acrtc->base.funcs->reset(&acrtc->base);
7250 acrtc->max_cursor_width = dm->adev->dm.dc->caps.max_cursor_size;
7251 acrtc->max_cursor_height = dm->adev->dm.dc->caps.max_cursor_size;
7253 acrtc->crtc_id = crtc_index;
7254 acrtc->base.enabled = false;
7255 acrtc->otg_inst = -1;
7257 dm->adev->mode_info.crtcs[crtc_index] = acrtc;
7258 drm_crtc_enable_color_mgmt(&acrtc->base, MAX_COLOR_LUT_ENTRIES,
7259 true, MAX_COLOR_LUT_ENTRIES);
7260 drm_mode_crtc_set_gamma_size(&acrtc->base, MAX_COLOR_LEGACY_LUT_ENTRIES);
7266 kfree(cursor_plane);
7271 static int to_drm_connector_type(enum signal_type st)
7274 case SIGNAL_TYPE_HDMI_TYPE_A:
7275 return DRM_MODE_CONNECTOR_HDMIA;
7276 case SIGNAL_TYPE_EDP:
7277 return DRM_MODE_CONNECTOR_eDP;
7278 case SIGNAL_TYPE_LVDS:
7279 return DRM_MODE_CONNECTOR_LVDS;
7280 case SIGNAL_TYPE_RGB:
7281 return DRM_MODE_CONNECTOR_VGA;
7282 case SIGNAL_TYPE_DISPLAY_PORT:
7283 case SIGNAL_TYPE_DISPLAY_PORT_MST:
7284 return DRM_MODE_CONNECTOR_DisplayPort;
7285 case SIGNAL_TYPE_DVI_DUAL_LINK:
7286 case SIGNAL_TYPE_DVI_SINGLE_LINK:
7287 return DRM_MODE_CONNECTOR_DVID;
7288 case SIGNAL_TYPE_VIRTUAL:
7289 return DRM_MODE_CONNECTOR_VIRTUAL;
7292 return DRM_MODE_CONNECTOR_Unknown;
7296 static struct drm_encoder *amdgpu_dm_connector_to_encoder(struct drm_connector *connector)
7298 struct drm_encoder *encoder;
7300 /* There is only one encoder per connector */
7301 drm_connector_for_each_possible_encoder(connector, encoder)
7307 static void amdgpu_dm_get_native_mode(struct drm_connector *connector)
7309 struct drm_encoder *encoder;
7310 struct amdgpu_encoder *amdgpu_encoder;
7312 encoder = amdgpu_dm_connector_to_encoder(connector);
7314 if (encoder == NULL)
7317 amdgpu_encoder = to_amdgpu_encoder(encoder);
7319 amdgpu_encoder->native_mode.clock = 0;
7321 if (!list_empty(&connector->probed_modes)) {
7322 struct drm_display_mode *preferred_mode = NULL;
7324 list_for_each_entry(preferred_mode,
7325 &connector->probed_modes,
7327 if (preferred_mode->type & DRM_MODE_TYPE_PREFERRED)
7328 amdgpu_encoder->native_mode = *preferred_mode;
7336 static struct drm_display_mode *
7337 amdgpu_dm_create_common_mode(struct drm_encoder *encoder,
7339 int hdisplay, int vdisplay)
7341 struct drm_device *dev = encoder->dev;
7342 struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
7343 struct drm_display_mode *mode = NULL;
7344 struct drm_display_mode *native_mode = &amdgpu_encoder->native_mode;
7346 mode = drm_mode_duplicate(dev, native_mode);
7351 mode->hdisplay = hdisplay;
7352 mode->vdisplay = vdisplay;
7353 mode->type &= ~DRM_MODE_TYPE_PREFERRED;
7354 strscpy(mode->name, name, DRM_DISPLAY_MODE_LEN);
7360 static void amdgpu_dm_connector_add_common_modes(struct drm_encoder *encoder,
7361 struct drm_connector *connector)
7363 struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
7364 struct drm_display_mode *mode = NULL;
7365 struct drm_display_mode *native_mode = &amdgpu_encoder->native_mode;
7366 struct amdgpu_dm_connector *amdgpu_dm_connector =
7367 to_amdgpu_dm_connector(connector);
7371 char name[DRM_DISPLAY_MODE_LEN];
7374 } common_modes[] = {
7375 { "640x480", 640, 480},
7376 { "800x600", 800, 600},
7377 { "1024x768", 1024, 768},
7378 { "1280x720", 1280, 720},
7379 { "1280x800", 1280, 800},
7380 {"1280x1024", 1280, 1024},
7381 { "1440x900", 1440, 900},
7382 {"1680x1050", 1680, 1050},
7383 {"1600x1200", 1600, 1200},
7384 {"1920x1080", 1920, 1080},
7385 {"1920x1200", 1920, 1200}
7388 n = ARRAY_SIZE(common_modes);
7390 for (i = 0; i < n; i++) {
7391 struct drm_display_mode *curmode = NULL;
7392 bool mode_existed = false;
7394 if (common_modes[i].w > native_mode->hdisplay ||
7395 common_modes[i].h > native_mode->vdisplay ||
7396 (common_modes[i].w == native_mode->hdisplay &&
7397 common_modes[i].h == native_mode->vdisplay))
7400 list_for_each_entry(curmode, &connector->probed_modes, head) {
7401 if (common_modes[i].w == curmode->hdisplay &&
7402 common_modes[i].h == curmode->vdisplay) {
7403 mode_existed = true;
7411 mode = amdgpu_dm_create_common_mode(encoder,
7412 common_modes[i].name, common_modes[i].w,
7414 drm_mode_probed_add(connector, mode);
7415 amdgpu_dm_connector->num_modes++;
7419 static void amdgpu_dm_connector_ddc_get_modes(struct drm_connector *connector,
7422 struct amdgpu_dm_connector *amdgpu_dm_connector =
7423 to_amdgpu_dm_connector(connector);
7426 /* empty probed_modes */
7427 INIT_LIST_HEAD(&connector->probed_modes);
7428 amdgpu_dm_connector->num_modes =
7429 drm_add_edid_modes(connector, edid);
7431	/* Sort the probed modes before calling
7432	 * amdgpu_dm_get_native_mode(), since the EDID can have
7433	 * more than one preferred mode. Modes that appear
7434	 * later in the probed mode list can have a higher
7435	 * preferred resolution. For example, a 3840x2160
7436	 * preferred timing in the base EDID and a 4096x2160
7437	 * preferred resolution in a DID extension block later.
7440 amdgpu_dm_get_native_mode(connector);
7442 /* Freesync capabilities are reset by calling
7443	 * drm_add_edid_modes() and need to be restored here.
7446 amdgpu_dm_update_freesync_caps(connector, edid);
7448 amdgpu_dm_connector->num_modes = 0;
7452 static bool is_duplicate_mode(struct amdgpu_dm_connector *aconnector,
7453 struct drm_display_mode *mode)
7455 struct drm_display_mode *m;
7457 list_for_each_entry (m, &aconnector->base.probed_modes, head) {
7458 if (drm_mode_equal(m, mode))
7465 static uint add_fs_modes(struct amdgpu_dm_connector *aconnector)
7467 const struct drm_display_mode *m;
7468 struct drm_display_mode *new_mode;
7470 uint32_t new_modes_count = 0;
7472 /* Standard FPS values
7481 * 60 - Commonly used
7482	 * 48, 72, 96 - Multiples of 24
7484 const uint32_t common_rates[] = { 23976, 24000, 25000, 29970, 30000,
7485 48000, 50000, 60000, 72000, 96000 };
7488 * Find mode with highest refresh rate with the same resolution
7489	 * as the preferred mode. Some monitors report a preferred mode
7490	 * whose refresh rate is lower than the highest one they support.
7493 m = get_highest_refresh_rate_mode(aconnector, true);
7497 for (i = 0; i < ARRAY_SIZE(common_rates); i++) {
7498 uint64_t target_vtotal, target_vtotal_diff;
7501 if (drm_mode_vrefresh(m) * 1000 < common_rates[i])
7504 if (common_rates[i] < aconnector->min_vfreq * 1000 ||
7505 common_rates[i] > aconnector->max_vfreq * 1000)
7508 num = (unsigned long long)m->clock * 1000 * 1000;
7509 den = common_rates[i] * (unsigned long long)m->htotal;
7510 target_vtotal = div_u64(num, den);
7511 target_vtotal_diff = target_vtotal - m->vtotal;
7513 /* Check for illegal modes */
7514 if (m->vsync_start + target_vtotal_diff < m->vdisplay ||
7515 m->vsync_end + target_vtotal_diff < m->vsync_start ||
7516 m->vtotal + target_vtotal_diff < m->vsync_end)
7519 new_mode = drm_mode_duplicate(aconnector->base.dev, m);
7523 new_mode->vtotal += (u16)target_vtotal_diff;
7524 new_mode->vsync_start += (u16)target_vtotal_diff;
7525 new_mode->vsync_end += (u16)target_vtotal_diff;
7526 new_mode->type &= ~DRM_MODE_TYPE_PREFERRED;
7527 new_mode->type |= DRM_MODE_TYPE_DRIVER;
7529 if (!is_duplicate_mode(aconnector, new_mode)) {
7530 drm_mode_probed_add(&aconnector->base, new_mode);
7531 new_modes_count += 1;
7533 drm_mode_destroy(aconnector->base.dev, new_mode);
7536 return new_modes_count;
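/*
 * The synthesized modes keep the pixel clock and horizontal timing of the
 * base mode and only stretch the vertical total, so
 * refresh = clock * 1000 / (htotal * vtotal). As an illustration with a
 * common (hypothetical for this connector) 1920x1080@60 timing of
 * 148500 kHz, htotal 2200, vtotal 1125: for the 48 Hz entry,
 * target_vtotal = 148500 * 1000 * 1000 / (48000 * 2200) = 1406, so vtotal
 * (and vsync_start/end) grow by 281 lines and the resulting mode runs at
 * roughly 148500000 / (2200 * 1406) ~= 48.01 Hz.
 */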
7539 static void amdgpu_dm_connector_add_freesync_modes(struct drm_connector *connector,
7542 struct amdgpu_dm_connector *amdgpu_dm_connector =
7543 to_amdgpu_dm_connector(connector);
7545 if (!(amdgpu_freesync_vid_mode && edid))
7548 if (amdgpu_dm_connector->max_vfreq - amdgpu_dm_connector->min_vfreq > 10)
7549 amdgpu_dm_connector->num_modes +=
7550 add_fs_modes(amdgpu_dm_connector);
7553 static int amdgpu_dm_connector_get_modes(struct drm_connector *connector)
7555 struct amdgpu_dm_connector *amdgpu_dm_connector =
7556 to_amdgpu_dm_connector(connector);
7557 struct drm_encoder *encoder;
7558 struct edid *edid = amdgpu_dm_connector->edid;
7560 encoder = amdgpu_dm_connector_to_encoder(connector);
7562 if (!drm_edid_is_valid(edid)) {
7563 amdgpu_dm_connector->num_modes =
7564 drm_add_modes_noedid(connector, 640, 480);
7566 amdgpu_dm_connector_ddc_get_modes(connector, edid);
7567 amdgpu_dm_connector_add_common_modes(encoder, connector);
7568 amdgpu_dm_connector_add_freesync_modes(connector, edid);
7570 amdgpu_dm_fbc_init(connector);
7572 return amdgpu_dm_connector->num_modes;
7575 void amdgpu_dm_connector_init_helper(struct amdgpu_display_manager *dm,
7576 struct amdgpu_dm_connector *aconnector,
7578 struct dc_link *link,
7581 struct amdgpu_device *adev = drm_to_adev(dm->ddev);
7584 * Some of the properties below require access to state, like bpc.
7585 * Allocate some default initial connector state with our reset helper.
7587 if (aconnector->base.funcs->reset)
7588 aconnector->base.funcs->reset(&aconnector->base);
7590 aconnector->connector_id = link_index;
7591 aconnector->dc_link = link;
7592 aconnector->base.interlace_allowed = false;
7593 aconnector->base.doublescan_allowed = false;
7594 aconnector->base.stereo_allowed = false;
7595 aconnector->base.dpms = DRM_MODE_DPMS_OFF;
7596 aconnector->hpd.hpd = AMDGPU_HPD_NONE; /* not used */
7597 aconnector->audio_inst = -1;
7598 mutex_init(&aconnector->hpd_lock);
7601	 * Configure HPD hot plug support: connector->polled defaults to 0,
7602	 * which means HPD hot plug is not supported.
7604 switch (connector_type) {
7605 case DRM_MODE_CONNECTOR_HDMIA:
7606 aconnector->base.polled = DRM_CONNECTOR_POLL_HPD;
7607 aconnector->base.ycbcr_420_allowed =
7608 link->link_enc->features.hdmi_ycbcr420_supported ? true : false;
7610 case DRM_MODE_CONNECTOR_DisplayPort:
7611 aconnector->base.polled = DRM_CONNECTOR_POLL_HPD;
7612 aconnector->base.ycbcr_420_allowed =
7613 link->link_enc->features.dp_ycbcr420_supported ? true : false;
7615 case DRM_MODE_CONNECTOR_DVID:
7616 aconnector->base.polled = DRM_CONNECTOR_POLL_HPD;
7622 drm_object_attach_property(&aconnector->base.base,
7623 dm->ddev->mode_config.scaling_mode_property,
7624 DRM_MODE_SCALE_NONE);
7626 drm_object_attach_property(&aconnector->base.base,
7627 adev->mode_info.underscan_property,
7629 drm_object_attach_property(&aconnector->base.base,
7630 adev->mode_info.underscan_hborder_property,
7632 drm_object_attach_property(&aconnector->base.base,
7633 adev->mode_info.underscan_vborder_property,
7636 if (!aconnector->mst_port)
7637 drm_connector_attach_max_bpc_property(&aconnector->base, 8, 16);
7639 /* This defaults to the max in the range, but we want 8bpc for non-edp. */
7640 aconnector->base.state->max_bpc = (connector_type == DRM_MODE_CONNECTOR_eDP) ? 16 : 8;
7641 aconnector->base.state->max_requested_bpc = aconnector->base.state->max_bpc;
7643 if (connector_type == DRM_MODE_CONNECTOR_eDP &&
7644 (dc_is_dmcu_initialized(adev->dm.dc) || adev->dm.dc->ctx->dmub_srv)) {
7645 drm_object_attach_property(&aconnector->base.base,
7646 adev->mode_info.abm_level_property, 0);
7649 if (connector_type == DRM_MODE_CONNECTOR_HDMIA ||
7650 connector_type == DRM_MODE_CONNECTOR_DisplayPort ||
7651 connector_type == DRM_MODE_CONNECTOR_eDP) {
7652 drm_object_attach_property(
7653 &aconnector->base.base,
7654 dm->ddev->mode_config.hdr_output_metadata_property, 0);
7656 if (!aconnector->mst_port)
7657 drm_connector_attach_vrr_capable_property(&aconnector->base);
7659 #ifdef CONFIG_DRM_AMD_DC_HDCP
7660 if (adev->dm.hdcp_workqueue)
7661 drm_connector_attach_content_protection_property(&aconnector->base, true);
7666 static int amdgpu_dm_i2c_xfer(struct i2c_adapter *i2c_adap,
7667 struct i2c_msg *msgs, int num)
7669 struct amdgpu_i2c_adapter *i2c = i2c_get_adapdata(i2c_adap);
7670 struct ddc_service *ddc_service = i2c->ddc_service;
7671 struct i2c_command cmd;
7675 cmd.payloads = kcalloc(num, sizeof(struct i2c_payload), GFP_KERNEL);
7680 cmd.number_of_payloads = num;
7681 cmd.engine = I2C_COMMAND_ENGINE_DEFAULT;
7684 for (i = 0; i < num; i++) {
7685 cmd.payloads[i].write = !(msgs[i].flags & I2C_M_RD);
7686 cmd.payloads[i].address = msgs[i].addr;
7687 cmd.payloads[i].length = msgs[i].len;
7688 cmd.payloads[i].data = msgs[i].buf;
7692 ddc_service->ctx->dc,
7693 ddc_service->ddc_pin->hw_info.ddc_channel,
7697 kfree(cmd.payloads);
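/*
 * The transfer above is a straight translation: every struct i2c_msg
 * becomes one dc i2c_payload (write = the message does not have I2C_M_RD
 * set, same address, length and buffer), and the whole array is handed to
 * DC as a single command on this link's DDC channel, so a combined
 * write-then-read EDID transaction maps to two payloads in one DC
 * submission.
 */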
7701 static u32 amdgpu_dm_i2c_func(struct i2c_adapter *adap)
7703 return I2C_FUNC_I2C | I2C_FUNC_SMBUS_EMUL;
7706 static const struct i2c_algorithm amdgpu_dm_i2c_algo = {
7707 .master_xfer = amdgpu_dm_i2c_xfer,
7708 .functionality = amdgpu_dm_i2c_func,
7711 static struct amdgpu_i2c_adapter *
7712 create_i2c(struct ddc_service *ddc_service,
7716 struct amdgpu_device *adev = ddc_service->ctx->driver_context;
7717 struct amdgpu_i2c_adapter *i2c;
7719 i2c = kzalloc(sizeof(struct amdgpu_i2c_adapter), GFP_KERNEL);
7722 i2c->base.owner = THIS_MODULE;
7723 i2c->base.class = I2C_CLASS_DDC;
7724 i2c->base.dev.parent = &adev->pdev->dev;
7725 i2c->base.algo = &amdgpu_dm_i2c_algo;
7726 snprintf(i2c->base.name, sizeof(i2c->base.name), "AMDGPU DM i2c hw bus %d", link_index);
7727 i2c_set_adapdata(&i2c->base, i2c);
7728 i2c->ddc_service = ddc_service;
7729 i2c->ddc_service->ddc_pin->hw_info.ddc_channel = link_index;
7736 * Note: this function assumes that dc_link_detect() was called for the
7737 * dc_link which will be represented by this aconnector.
7739 static int amdgpu_dm_connector_init(struct amdgpu_display_manager *dm,
7740 struct amdgpu_dm_connector *aconnector,
7741 uint32_t link_index,
7742 struct amdgpu_encoder *aencoder)
7746 struct dc *dc = dm->dc;
7747 struct dc_link *link = dc_get_link_at_index(dc, link_index);
7748 struct amdgpu_i2c_adapter *i2c;
7750 link->priv = aconnector;
7752 DRM_DEBUG_DRIVER("%s()\n", __func__);
7754 i2c = create_i2c(link->ddc, link->link_index, &res);
7756 DRM_ERROR("Failed to create i2c adapter data\n");
7760 aconnector->i2c = i2c;
7761 res = i2c_add_adapter(&i2c->base);
7764 DRM_ERROR("Failed to register hw i2c %d\n", link->link_index);
7768 connector_type = to_drm_connector_type(link->connector_signal);
7770 res = drm_connector_init_with_ddc(
7773 &amdgpu_dm_connector_funcs,
7778 DRM_ERROR("connector_init failed\n");
7779 aconnector->connector_id = -1;
7783 drm_connector_helper_add(
7785 &amdgpu_dm_connector_helper_funcs);
7787 amdgpu_dm_connector_init_helper(
7794 drm_connector_attach_encoder(
7795 &aconnector->base, &aencoder->base);
7797 if (connector_type == DRM_MODE_CONNECTOR_DisplayPort
7798 || connector_type == DRM_MODE_CONNECTOR_eDP)
7799 amdgpu_dm_initialize_dp_connector(dm, aconnector, link->link_index);
7804 aconnector->i2c = NULL;
7809 int amdgpu_dm_get_encoder_crtc_mask(struct amdgpu_device *adev)
7811 switch (adev->mode_info.num_crtc) {
7828 static int amdgpu_dm_encoder_init(struct drm_device *dev,
7829 struct amdgpu_encoder *aencoder,
7830 uint32_t link_index)
7832 struct amdgpu_device *adev = drm_to_adev(dev);
7834 int res = drm_encoder_init(dev,
7836 &amdgpu_dm_encoder_funcs,
7837 DRM_MODE_ENCODER_TMDS,
7840 aencoder->base.possible_crtcs = amdgpu_dm_get_encoder_crtc_mask(adev);
7843 aencoder->encoder_id = link_index;
7845 aencoder->encoder_id = -1;
7847 drm_encoder_helper_add(&aencoder->base, &amdgpu_dm_encoder_helper_funcs);
7852 static void manage_dm_interrupts(struct amdgpu_device *adev,
7853 struct amdgpu_crtc *acrtc,
7857 * We have no guarantee that the frontend index maps to the same
7858 * backend index - some even map to more than one.
7860 * TODO: Use a different interrupt or check DC itself for the mapping.
7863 amdgpu_display_crtc_idx_to_irq_type(
7868 drm_crtc_vblank_on(&acrtc->base);
7871 &adev->pageflip_irq,
7873 #if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
7880 #if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
7888 &adev->pageflip_irq,
7890 drm_crtc_vblank_off(&acrtc->base);
7894 static void dm_update_pflip_irq_state(struct amdgpu_device *adev,
7895 struct amdgpu_crtc *acrtc)
7898 amdgpu_display_crtc_idx_to_irq_type(adev, acrtc->crtc_id);
7901	 * This reads the current state for the IRQ and forcibly reapplies
7902 * the setting to hardware.
7904 amdgpu_irq_update(adev, &adev->pageflip_irq, irq_type);
7908 is_scaling_state_different(const struct dm_connector_state *dm_state,
7909 const struct dm_connector_state *old_dm_state)
7911 if (dm_state->scaling != old_dm_state->scaling)
7913 if (!dm_state->underscan_enable && old_dm_state->underscan_enable) {
7914 if (old_dm_state->underscan_hborder != 0 && old_dm_state->underscan_vborder != 0)
7916 } else if (dm_state->underscan_enable && !old_dm_state->underscan_enable) {
7917 if (dm_state->underscan_hborder != 0 && dm_state->underscan_vborder != 0)
7919 } else if (dm_state->underscan_hborder != old_dm_state->underscan_hborder ||
7920 dm_state->underscan_vborder != old_dm_state->underscan_vborder)
7925 #ifdef CONFIG_DRM_AMD_DC_HDCP
7926 static bool is_content_protection_different(struct drm_connector_state *state,
7927 const struct drm_connector_state *old_state,
7928 const struct drm_connector *connector, struct hdcp_workqueue *hdcp_w)
7930 struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
7931 struct dm_connector_state *dm_con_state = to_dm_connector_state(connector->state);
7933 /* Handle: Type0/1 change */
7934 if (old_state->hdcp_content_type != state->hdcp_content_type &&
7935 state->content_protection != DRM_MODE_CONTENT_PROTECTION_UNDESIRED) {
7936 state->content_protection = DRM_MODE_CONTENT_PROTECTION_DESIRED;
7940	/* CP is being re-enabled, ignore this.
7942 * Handles: ENABLED -> DESIRED
7944 if (old_state->content_protection == DRM_MODE_CONTENT_PROTECTION_ENABLED &&
7945 state->content_protection == DRM_MODE_CONTENT_PROTECTION_DESIRED) {
7946 state->content_protection = DRM_MODE_CONTENT_PROTECTION_ENABLED;
7950 /* S3 resume case, since old state will always be 0 (UNDESIRED) and the restored state will be ENABLED
7952 * Handles: UNDESIRED -> ENABLED
7954 if (old_state->content_protection == DRM_MODE_CONTENT_PROTECTION_UNDESIRED &&
7955 state->content_protection == DRM_MODE_CONTENT_PROTECTION_ENABLED)
7956 state->content_protection = DRM_MODE_CONTENT_PROTECTION_DESIRED;
7958	/* Check if something is connected/enabled; otherwise we would start HDCP while nothing
7959	 * is connected/enabled (hot-plug, headless S3, DPMS).
7961 * Handles: DESIRED -> DESIRED (Special case)
7963 if (dm_con_state->update_hdcp && state->content_protection == DRM_MODE_CONTENT_PROTECTION_DESIRED &&
7964 connector->dpms == DRM_MODE_DPMS_ON && aconnector->dc_sink != NULL) {
7965 dm_con_state->update_hdcp = false;
7970 * Handles: UNDESIRED -> UNDESIRED
7971 * DESIRED -> DESIRED
7972 * ENABLED -> ENABLED
7974 if (old_state->content_protection == state->content_protection)
7978 * Handles: UNDESIRED -> DESIRED
7979 * DESIRED -> UNDESIRED
7980 * ENABLED -> UNDESIRED
7982 if (state->content_protection != DRM_MODE_CONTENT_PROTECTION_ENABLED)
7986 * Handles: DESIRED -> ENABLED
7992 static void remove_stream(struct amdgpu_device *adev,
7993 struct amdgpu_crtc *acrtc,
7994 struct dc_stream_state *stream)
7996 /* this is the update mode case */
7998 acrtc->otg_inst = -1;
7999 acrtc->enabled = false;
8002 static int get_cursor_position(struct drm_plane *plane, struct drm_crtc *crtc,
8003 struct dc_cursor_position *position)
8005 struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
8007 int xorigin = 0, yorigin = 0;
8009 if (!crtc || !plane->state->fb)
8012 if ((plane->state->crtc_w > amdgpu_crtc->max_cursor_width) ||
8013 (plane->state->crtc_h > amdgpu_crtc->max_cursor_height)) {
8014 DRM_ERROR("%s: bad cursor width or height %d x %d\n",
8016 plane->state->crtc_w,
8017 plane->state->crtc_h);
8021 x = plane->state->crtc_x;
8022 y = plane->state->crtc_y;
8024 if (x <= -amdgpu_crtc->max_cursor_width ||
8025 y <= -amdgpu_crtc->max_cursor_height)
8029 xorigin = min(-x, amdgpu_crtc->max_cursor_width - 1);
8033 yorigin = min(-y, amdgpu_crtc->max_cursor_height - 1);
8036 position->enable = true;
8037 position->translate_by_source = true;
8040 position->x_hotspot = xorigin;
8041 position->y_hotspot = yorigin;
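/*
 * Hotspot handling above, in words (a description of the visible clamping,
 * with the surrounding branches elided here): for a cursor that hangs off
 * the top/left edge, the clipped amount becomes the hotspot, capped at
 * max_cursor_width/height - 1, and translate_by_source tells DC to offset
 * the cursor surface by that hotspot so the visible portion stays aligned
 * with the framebuffer contents.
 */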
8046 static void handle_cursor_update(struct drm_plane *plane,
8047 struct drm_plane_state *old_plane_state)
8049 struct amdgpu_device *adev = drm_to_adev(plane->dev);
8050 struct amdgpu_framebuffer *afb = to_amdgpu_framebuffer(plane->state->fb);
8051 struct drm_crtc *crtc = afb ? plane->state->crtc : old_plane_state->crtc;
8052 struct dm_crtc_state *crtc_state = crtc ? to_dm_crtc_state(crtc->state) : NULL;
8053 struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
8054 uint64_t address = afb ? afb->address : 0;
8055 struct dc_cursor_position position = {0};
8056 struct dc_cursor_attributes attributes;
8059 if (!plane->state->fb && !old_plane_state->fb)
8062 DC_LOG_CURSOR("%s: crtc_id=%d with size %d to %d\n",
8064 amdgpu_crtc->crtc_id,
8065 plane->state->crtc_w,
8066 plane->state->crtc_h);
8068 ret = get_cursor_position(plane, crtc, &position);
8072 if (!position.enable) {
8073 /* turn off cursor */
8074 if (crtc_state && crtc_state->stream) {
8075 mutex_lock(&adev->dm.dc_lock);
8076 dc_stream_set_cursor_position(crtc_state->stream,
8078 mutex_unlock(&adev->dm.dc_lock);
8083 amdgpu_crtc->cursor_width = plane->state->crtc_w;
8084 amdgpu_crtc->cursor_height = plane->state->crtc_h;
8086 memset(&attributes, 0, sizeof(attributes));
8087 attributes.address.high_part = upper_32_bits(address);
8088 attributes.address.low_part = lower_32_bits(address);
8089 attributes.width = plane->state->crtc_w;
8090 attributes.height = plane->state->crtc_h;
8091 attributes.color_format = CURSOR_MODE_COLOR_PRE_MULTIPLIED_ALPHA;
8092 attributes.rotation_angle = 0;
8093 attributes.attribute_flags.value = 0;
8095 attributes.pitch = afb->base.pitches[0] / afb->base.format->cpp[0];
8097 if (crtc_state->stream) {
8098 mutex_lock(&adev->dm.dc_lock);
8099 if (!dc_stream_set_cursor_attributes(crtc_state->stream,
8101 DRM_ERROR("DC failed to set cursor attributes\n");
8103 if (!dc_stream_set_cursor_position(crtc_state->stream,
8105 DRM_ERROR("DC failed to set cursor position\n");
8106 mutex_unlock(&adev->dm.dc_lock);
8110 static void prepare_flip_isr(struct amdgpu_crtc *acrtc)
8113 assert_spin_locked(&acrtc->base.dev->event_lock);
8114 WARN_ON(acrtc->event);
8116 acrtc->event = acrtc->base.state->event;
8118 /* Set the flip status */
8119 acrtc->pflip_status = AMDGPU_FLIP_SUBMITTED;
8121 /* Mark this event as consumed */
8122 acrtc->base.state->event = NULL;
8124 DC_LOG_PFLIP("crtc:%d, pflip_stat:AMDGPU_FLIP_SUBMITTED\n",
8128 static void update_freesync_state_on_stream(
8129 struct amdgpu_display_manager *dm,
8130 struct dm_crtc_state *new_crtc_state,
8131 struct dc_stream_state *new_stream,
8132 struct dc_plane_state *surface,
8133 u32 flip_timestamp_in_us)
8135 struct mod_vrr_params vrr_params;
8136 struct dc_info_packet vrr_infopacket = {0};
8137 struct amdgpu_device *adev = dm->adev;
8138 struct amdgpu_crtc *acrtc = to_amdgpu_crtc(new_crtc_state->base.crtc);
8139 unsigned long flags;
8140 bool pack_sdp_v1_3 = false;
8146 * TODO: Determine why min/max totals and vrefresh can be 0 here.
8147 * For now it's sufficient to just guard against these conditions.
8150 if (!new_stream->timing.h_total || !new_stream->timing.v_total)
8153 spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);
8154 vrr_params = acrtc->dm_irq_params.vrr_params;
8157 mod_freesync_handle_preflip(
8158 dm->freesync_module,
8161 flip_timestamp_in_us,
8164 if (adev->family < AMDGPU_FAMILY_AI &&
8165 amdgpu_dm_vrr_active(new_crtc_state)) {
8166 mod_freesync_handle_v_update(dm->freesync_module,
8167 new_stream, &vrr_params);
8169 /* Need to call this before the frame ends. */
8170 dc_stream_adjust_vmin_vmax(dm->dc,
8171 new_crtc_state->stream,
8172 &vrr_params.adjust);
8176 mod_freesync_build_vrr_infopacket(
8177 dm->freesync_module,
8181 TRANSFER_FUNC_UNKNOWN,
8185 new_crtc_state->freesync_timing_changed |=
8186 (memcmp(&acrtc->dm_irq_params.vrr_params.adjust,
8188 sizeof(vrr_params.adjust)) != 0);
8190 new_crtc_state->freesync_vrr_info_changed |=
8191 (memcmp(&new_crtc_state->vrr_infopacket,
8193 sizeof(vrr_infopacket)) != 0);
8195 acrtc->dm_irq_params.vrr_params = vrr_params;
8196 new_crtc_state->vrr_infopacket = vrr_infopacket;
8198 new_stream->adjust = acrtc->dm_irq_params.vrr_params.adjust;
8199 new_stream->vrr_infopacket = vrr_infopacket;
8201 if (new_crtc_state->freesync_vrr_info_changed)
8202 DRM_DEBUG_KMS("VRR packet update: crtc=%u enabled=%d state=%d",
8203 new_crtc_state->base.crtc->base.id,
8204 (int)new_crtc_state->base.vrr_enabled,
8205 (int)vrr_params.state);
8207 spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
8210 static void update_stream_irq_parameters(
8211 struct amdgpu_display_manager *dm,
8212 struct dm_crtc_state *new_crtc_state)
8214 struct dc_stream_state *new_stream = new_crtc_state->stream;
8215 struct mod_vrr_params vrr_params;
8216 struct mod_freesync_config config = new_crtc_state->freesync_config;
8217 struct amdgpu_device *adev = dm->adev;
8218 struct amdgpu_crtc *acrtc = to_amdgpu_crtc(new_crtc_state->base.crtc);
8219 unsigned long flags;
8225 * TODO: Determine why min/max totals and vrefresh can be 0 here.
8226 * For now it's sufficient to just guard against these conditions.
8228 if (!new_stream->timing.h_total || !new_stream->timing.v_total)
8231 spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);
8232 vrr_params = acrtc->dm_irq_params.vrr_params;
8234 if (new_crtc_state->vrr_supported &&
8235 config.min_refresh_in_uhz &&
8236 config.max_refresh_in_uhz) {
8238		 * if freesync compatible mode was set, config.state will be set in atomic check.
8241 if (config.state == VRR_STATE_ACTIVE_FIXED && config.fixed_refresh_in_uhz &&
8242 (!drm_atomic_crtc_needs_modeset(&new_crtc_state->base) ||
8243 new_crtc_state->freesync_config.state == VRR_STATE_ACTIVE_FIXED)) {
8244 vrr_params.max_refresh_in_uhz = config.max_refresh_in_uhz;
8245 vrr_params.min_refresh_in_uhz = config.min_refresh_in_uhz;
8246 vrr_params.fixed_refresh_in_uhz = config.fixed_refresh_in_uhz;
8247 vrr_params.state = VRR_STATE_ACTIVE_FIXED;
8249 config.state = new_crtc_state->base.vrr_enabled ?
8250 VRR_STATE_ACTIVE_VARIABLE :
8254 config.state = VRR_STATE_UNSUPPORTED;
8257 mod_freesync_build_vrr_params(dm->freesync_module,
8259 &config, &vrr_params);
8261 new_crtc_state->freesync_timing_changed |=
8262 (memcmp(&acrtc->dm_irq_params.vrr_params.adjust,
8263 &vrr_params.adjust, sizeof(vrr_params.adjust)) != 0);
8265 new_crtc_state->freesync_config = config;
8266 /* Copy state for access from DM IRQ handler */
8267 acrtc->dm_irq_params.freesync_config = config;
8268 acrtc->dm_irq_params.active_planes = new_crtc_state->active_planes;
8269 acrtc->dm_irq_params.vrr_params = vrr_params;
8270 spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
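/*
 * The event_lock spinlock above is not about drm atomic state as such:
 * vrr_params, freesync_config and active_planes are mirrored into
 * acrtc->dm_irq_params so the vblank/vupdate interrupt handlers can read
 * them without touching atomic state, and taking the lock here keeps
 * those reads consistent with this update.
 */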
8273 static void amdgpu_dm_handle_vrr_transition(struct dm_crtc_state *old_state,
8274 struct dm_crtc_state *new_state)
8276 bool old_vrr_active = amdgpu_dm_vrr_active(old_state);
8277 bool new_vrr_active = amdgpu_dm_vrr_active(new_state);
8279 if (!old_vrr_active && new_vrr_active) {
8280 /* Transition VRR inactive -> active:
8281 * While VRR is active, we must not disable vblank irq, as a
8282		 * re-enable after disable would compute bogus vblank/pflip
8283 * timestamps if it likely happened inside display front-porch.
8285		 * We also need the vupdate irq for the actual core vblank handling at the end of vblank.
8288 dm_set_vupdate_irq(new_state->base.crtc, true);
8289 drm_crtc_vblank_get(new_state->base.crtc);
8290 DRM_DEBUG_DRIVER("%s: crtc=%u VRR off->on: Get vblank ref\n",
8291 __func__, new_state->base.crtc->base.id);
8292 } else if (old_vrr_active && !new_vrr_active) {
8293 /* Transition VRR active -> inactive:
8294 * Allow vblank irq disable again for fixed refresh rate.
8296 dm_set_vupdate_irq(new_state->base.crtc, false);
8297 drm_crtc_vblank_put(new_state->base.crtc);
8298 DRM_DEBUG_DRIVER("%s: crtc=%u VRR on->off: Drop vblank ref\n",
8299 __func__, new_state->base.crtc->base.id);
8303 static void amdgpu_dm_commit_cursors(struct drm_atomic_state *state)
8305 struct drm_plane *plane;
8306 struct drm_plane_state *old_plane_state;
8310 * TODO: Make this per-stream so we don't issue redundant updates for
8311 * commits with multiple streams.
8313 for_each_old_plane_in_state(state, plane, old_plane_state, i)
8314 if (plane->type == DRM_PLANE_TYPE_CURSOR)
8315 handle_cursor_update(plane, old_plane_state);
8318 static void amdgpu_dm_commit_planes(struct drm_atomic_state *state,
8319 struct dc_state *dc_state,
8320 struct drm_device *dev,
8321 struct amdgpu_display_manager *dm,
8322 struct drm_crtc *pcrtc,
8323 bool wait_for_vblank)
8326 uint64_t timestamp_ns;
8327 struct drm_plane *plane;
8328 struct drm_plane_state *old_plane_state, *new_plane_state;
8329 struct amdgpu_crtc *acrtc_attach = to_amdgpu_crtc(pcrtc);
8330 struct drm_crtc_state *new_pcrtc_state =
8331 drm_atomic_get_new_crtc_state(state, pcrtc);
8332 struct dm_crtc_state *acrtc_state = to_dm_crtc_state(new_pcrtc_state);
8333 struct dm_crtc_state *dm_old_crtc_state =
8334 to_dm_crtc_state(drm_atomic_get_old_crtc_state(state, pcrtc));
8335 int planes_count = 0, vpos, hpos;
8337 unsigned long flags;
8338 struct amdgpu_bo *abo;
8339 uint32_t target_vblank, last_flip_vblank;
8340 bool vrr_active = amdgpu_dm_vrr_active(acrtc_state);
8341 bool pflip_present = false;
8343 struct dc_surface_update surface_updates[MAX_SURFACES];
8344 struct dc_plane_info plane_infos[MAX_SURFACES];
8345 struct dc_scaling_info scaling_infos[MAX_SURFACES];
8346 struct dc_flip_addrs flip_addrs[MAX_SURFACES];
8347 struct dc_stream_update stream_update;
8350 bundle = kzalloc(sizeof(*bundle), GFP_KERNEL);
8353 dm_error("Failed to allocate update bundle\n");
8358 * Disable the cursor first if we're disabling all the planes.
8359 * It'll remain on the screen after the planes are re-enabled if we don't.
8362 if (acrtc_state->active_planes == 0)
8363 amdgpu_dm_commit_cursors(state);
8365 /* update planes when needed */
8366 for_each_oldnew_plane_in_state(state, plane, old_plane_state, new_plane_state, i) {
8367 struct drm_crtc *crtc = new_plane_state->crtc;
8368 struct drm_crtc_state *new_crtc_state;
8369 struct drm_framebuffer *fb = new_plane_state->fb;
8370 struct amdgpu_framebuffer *afb = (struct amdgpu_framebuffer *)fb;
8371 bool plane_needs_flip;
8372 struct dc_plane_state *dc_plane;
8373 struct dm_plane_state *dm_new_plane_state = to_dm_plane_state(new_plane_state);
8375 /* Cursor plane is handled after stream updates */
8376 if (plane->type == DRM_PLANE_TYPE_CURSOR)
8379 if (!fb || !crtc || pcrtc != crtc)
8382 new_crtc_state = drm_atomic_get_new_crtc_state(state, crtc);
8383 if (!new_crtc_state->active)
8386 dc_plane = dm_new_plane_state->dc_state;
8388 bundle->surface_updates[planes_count].surface = dc_plane;
8389 if (new_pcrtc_state->color_mgmt_changed) {
8390 bundle->surface_updates[planes_count].gamma = dc_plane->gamma_correction;
8391 bundle->surface_updates[planes_count].in_transfer_func = dc_plane->in_transfer_func;
8392 bundle->surface_updates[planes_count].gamut_remap_matrix = &dc_plane->gamut_remap_matrix;
8395 fill_dc_scaling_info(new_plane_state,
8396 &bundle->scaling_infos[planes_count]);
8398 bundle->surface_updates[planes_count].scaling_info =
8399 &bundle->scaling_infos[planes_count];
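/*
 * A flip-address update is only prepared when the plane had a framebuffer
 * both before and after this commit; planes that are only being enabled
 * or disabled skip the fence wait and address programming below.
 */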
8401 plane_needs_flip = old_plane_state->fb && new_plane_state->fb;
8403 pflip_present = pflip_present || plane_needs_flip;
8405 if (!plane_needs_flip) {
8410 abo = gem_to_amdgpu_bo(fb->obj[0]);
8413 * Wait for all fences on this FB. Do limited wait to avoid
8414 * deadlock during GPU reset when this fence will not signal
8415 * but we hold the reservation lock for the BO.
8417 r = dma_resv_wait_timeout_rcu(abo->tbo.base.resv, true,
8419 msecs_to_jiffies(5000));
8420 if (unlikely(r <= 0))
8421 DRM_ERROR("Waiting for fences timed out!");
8423 fill_dc_plane_info_and_addr(
8424 dm->adev, new_plane_state,
8426 &bundle->plane_infos[planes_count],
8427 &bundle->flip_addrs[planes_count].address,
8428 afb->tmz_surface, false);
8430 DRM_DEBUG_ATOMIC("plane: id=%d dcc_en=%d\n",
8431 new_plane_state->plane->index,
8432 bundle->plane_infos[planes_count].dcc.enable);
8434 bundle->surface_updates[planes_count].plane_info =
8435 &bundle->plane_infos[planes_count];
8438 * Only allow immediate flips for fast updates that don't
8439 * change FB pitch, DCC state, rotation or mirroring.
8441 bundle->flip_addrs[planes_count].flip_immediate =
8442 crtc->state->async_flip &&
8443 acrtc_state->update_type == UPDATE_TYPE_FAST;
8445 timestamp_ns = ktime_get_ns();
8446 bundle->flip_addrs[planes_count].flip_timestamp_in_us = div_u64(timestamp_ns, 1000);
8447 bundle->surface_updates[planes_count].flip_addr = &bundle->flip_addrs[planes_count];
8448 bundle->surface_updates[planes_count].surface = dc_plane;
8450 if (!bundle->surface_updates[planes_count].surface) {
8451 DRM_ERROR("No surface for CRTC: id=%d\n",
8452 acrtc_attach->crtc_id);
8456 if (plane == pcrtc->primary)
8457 update_freesync_state_on_stream(
8460 acrtc_state->stream,
8462 bundle->flip_addrs[planes_count].flip_timestamp_in_us);
8464 DRM_DEBUG_ATOMIC("%s Flipping to hi: 0x%x, low: 0x%x\n",
8466 bundle->flip_addrs[planes_count].address.grph.addr.high_part,
8467 bundle->flip_addrs[planes_count].address.grph.addr.low_part);
8473 if (pflip_present) {
8475 /* Use old throttling in non-vrr fixed refresh rate mode
8476 * to keep flip scheduling based on target vblank counts
8477 * working in a backwards compatible way, e.g., for
8478 * clients using the GLX_OML_sync_control extension or
8479 * DRI3/Present extension with defined target_msc.
8481 last_flip_vblank = amdgpu_get_vblank_counter_kms(pcrtc);
8484 /* For variable refresh rate mode only:
8485 * Get vblank of last completed flip to avoid > 1 vrr
8486 * flips per video frame by use of throttling, but allow
8487 * flip programming anywhere in the possibly large
8488 * variable vrr vblank interval for fine-grained flip
8489 * timing control and more opportunity to avoid stutter
8490 * on late submission of flips.
8492 spin_lock_irqsave(&pcrtc->dev->event_lock, flags);
8493 last_flip_vblank = acrtc_attach->dm_irq_params.last_flip_vblank;
8494 spin_unlock_irqrestore(&pcrtc->dev->event_lock, flags);
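/*
 * wait_for_vblank is a bool, so the target computed below is either the
 * vblank of the last completed flip itself (async flips) or the one
 * immediately following it.
 */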
8497 target_vblank = last_flip_vblank + wait_for_vblank;
8500 * Wait until we're out of the vertical blank period before the one
8501 * targeted by the flip
8503 while ((acrtc_attach->enabled &&
8504 (amdgpu_display_get_crtc_scanoutpos(dm->ddev, acrtc_attach->crtc_id,
8505 0, &vpos, &hpos, NULL,
8506 NULL, &pcrtc->hwmode)
8507 & (DRM_SCANOUTPOS_VALID | DRM_SCANOUTPOS_IN_VBLANK)) ==
8508 (DRM_SCANOUTPOS_VALID | DRM_SCANOUTPOS_IN_VBLANK) &&
8509 (int)(target_vblank -
8510 amdgpu_get_vblank_counter_kms(pcrtc)) > 0)) {
8511 usleep_range(1000, 1100);
8515 * Prepare the flip event for the pageflip interrupt to handle.
8517 * This only works in the case where we've already turned on the
8518 * appropriate hardware blocks (e.g. HUBP) so in the transition case
8519 * from 0 -> n planes we have to skip a hardware generated event
8520 * and rely on sending it from software.
8522 if (acrtc_attach->base.state->event &&
8523 acrtc_state->active_planes > 0) {
8524 drm_crtc_vblank_get(pcrtc);
8526 spin_lock_irqsave(&pcrtc->dev->event_lock, flags);
8528 WARN_ON(acrtc_attach->pflip_status != AMDGPU_FLIP_NONE);
8529 prepare_flip_isr(acrtc_attach);
8531 spin_unlock_irqrestore(&pcrtc->dev->event_lock, flags);
8534 if (acrtc_state->stream) {
8535 if (acrtc_state->freesync_vrr_info_changed)
8536 bundle->stream_update.vrr_infopacket =
8537 &acrtc_state->stream->vrr_infopacket;
8541 /* Update the planes if changed or disable if we don't have any. */
8542 if ((planes_count || acrtc_state->active_planes == 0) &&
8543 acrtc_state->stream) {
8544 bundle->stream_update.stream = acrtc_state->stream;
8545 if (new_pcrtc_state->mode_changed) {
8546 bundle->stream_update.src = acrtc_state->stream->src;
8547 bundle->stream_update.dst = acrtc_state->stream->dst;
8550 if (new_pcrtc_state->color_mgmt_changed) {
8552 * TODO: This isn't fully correct since we've actually
8553 * already modified the stream in place.
8555 bundle->stream_update.gamut_remap =
8556 &acrtc_state->stream->gamut_remap_matrix;
8557 bundle->stream_update.output_csc_transform =
8558 &acrtc_state->stream->csc_color_matrix;
8559 bundle->stream_update.out_transfer_func =
8560 acrtc_state->stream->out_transfer_func;
8563 acrtc_state->stream->abm_level = acrtc_state->abm_level;
8564 if (acrtc_state->abm_level != dm_old_crtc_state->abm_level)
8565 bundle->stream_update.abm_level = &acrtc_state->abm_level;
8568 * If FreeSync state on the stream has changed then we need to
8569 * re-adjust the min/max bounds now that DC doesn't handle this
8570 * as part of commit.
8572 if (is_dc_timing_adjust_needed(dm_old_crtc_state, acrtc_state)) {
8573 spin_lock_irqsave(&pcrtc->dev->event_lock, flags);
8574 dc_stream_adjust_vmin_vmax(
8575 dm->dc, acrtc_state->stream,
8576 &acrtc_attach->dm_irq_params.vrr_params.adjust);
8577 spin_unlock_irqrestore(&pcrtc->dev->event_lock, flags);
8579 mutex_lock(&dm->dc_lock);
8580 if ((acrtc_state->update_type > UPDATE_TYPE_FAST) &&
8581 acrtc_state->stream->link->psr_settings.psr_allow_active)
8582 amdgpu_dm_psr_disable(acrtc_state->stream);
8584 dc_commit_updates_for_stream(dm->dc,
8585 bundle->surface_updates,
8587 acrtc_state->stream,
8588 &bundle->stream_update,
8592 * Enable or disable the interrupts on the backend.
8594 * Most pipes are put into power gating when unused.
8596 * When power gating is enabled on a pipe we lose the
8597 * interrupt enablement state when power gating is disabled.
8599 * So we need to update the IRQ control state in hardware
8600 * whenever the pipe turns on (since it could be previously
8601 * power gated) or off (since some pipes can't be power gated on some ASICs).
8604 if (dm_old_crtc_state->active_planes != acrtc_state->active_planes)
8605 dm_update_pflip_irq_state(drm_to_adev(dev),
8608 if ((acrtc_state->update_type > UPDATE_TYPE_FAST) &&
8609 acrtc_state->stream->link->psr_settings.psr_version != DC_PSR_VERSION_UNSUPPORTED &&
8610 !acrtc_state->stream->link->psr_settings.psr_feature_enabled)
8611 amdgpu_dm_link_setup_psr(acrtc_state->stream);
8612 else if ((acrtc_state->update_type == UPDATE_TYPE_FAST) &&
8613 acrtc_state->stream->link->psr_settings.psr_feature_enabled &&
8614 !acrtc_state->stream->link->psr_settings.psr_allow_active) {
8615 amdgpu_dm_psr_enable(acrtc_state->stream);
8618 mutex_unlock(&dm->dc_lock);
8622 * Update cursor state *after* programming all the planes.
8623 * This avoids redundant programming in the case where we're going
8624 * to be disabling a single plane - those pipes are being disabled.
8626 if (acrtc_state->active_planes)
8627 amdgpu_dm_commit_cursors(state);
8633 static void amdgpu_dm_commit_audio(struct drm_device *dev,
8634 struct drm_atomic_state *state)
8636 struct amdgpu_device *adev = drm_to_adev(dev);
8637 struct amdgpu_dm_connector *aconnector;
8638 struct drm_connector *connector;
8639 struct drm_connector_state *old_con_state, *new_con_state;
8640 struct drm_crtc_state *new_crtc_state;
8641 struct dm_crtc_state *new_dm_crtc_state;
8642 const struct dc_stream_status *status;
8645 /* Notify device removals. */
8646 for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) {
8647 if (old_con_state->crtc != new_con_state->crtc) {
8648 /* CRTC changes require notification. */
8652 if (!new_con_state->crtc)
8655 new_crtc_state = drm_atomic_get_new_crtc_state(
8656 state, new_con_state->crtc);
8658 if (!new_crtc_state)
8661 if (!drm_atomic_crtc_needs_modeset(new_crtc_state))
8665 aconnector = to_amdgpu_dm_connector(connector);
8667 mutex_lock(&adev->dm.audio_lock);
8668 inst = aconnector->audio_inst;
8669 aconnector->audio_inst = -1;
8670 mutex_unlock(&adev->dm.audio_lock);
8672 amdgpu_dm_audio_eld_notify(adev, inst);
8675 /* Notify audio device additions. */
8676 for_each_new_connector_in_state(state, connector, new_con_state, i) {
8677 if (!new_con_state->crtc)
8680 new_crtc_state = drm_atomic_get_new_crtc_state(
8681 state, new_con_state->crtc);
8683 if (!new_crtc_state)
8686 if (!drm_atomic_crtc_needs_modeset(new_crtc_state))
8689 new_dm_crtc_state = to_dm_crtc_state(new_crtc_state);
8690 if (!new_dm_crtc_state->stream)
8693 status = dc_stream_get_status(new_dm_crtc_state->stream);
8697 aconnector = to_amdgpu_dm_connector(connector);
8699 mutex_lock(&adev->dm.audio_lock);
8700 inst = status->audio_inst;
8701 aconnector->audio_inst = inst;
8702 mutex_unlock(&adev->dm.audio_lock);
8704 amdgpu_dm_audio_eld_notify(adev, inst);
8709 * amdgpu_dm_crtc_copy_transient_flags - copy mirrored flags from DRM to DC
8710 * @crtc_state: the DRM CRTC state
8711 * @stream_state: the DC stream state.
8713 * Copy the mirrored transient state flags from DRM to DC. It is used to bring
8714 * a dc_stream_state's flags in sync with a drm_crtc_state's flags.
8716 static void amdgpu_dm_crtc_copy_transient_flags(struct drm_crtc_state *crtc_state,
8717 struct dc_stream_state *stream_state)
8719 stream_state->mode_changed = drm_atomic_crtc_needs_modeset(crtc_state);
8723 * amdgpu_dm_atomic_commit_tail() - AMDgpu DM's commit tail implementation.
8724 * @state: The atomic state to commit
8726 * This will tell DC to commit the constructed DC state from atomic_check,
8727 * programming the hardware. Any failure here implies a hardware failure, since
8728 * atomic check should have filtered anything non-kosher.
8730 static void amdgpu_dm_atomic_commit_tail(struct drm_atomic_state *state)
8732 struct drm_device *dev = state->dev;
8733 struct amdgpu_device *adev = drm_to_adev(dev);
8734 struct amdgpu_display_manager *dm = &adev->dm;
8735 struct dm_atomic_state *dm_state;
8736 struct dc_state *dc_state = NULL, *dc_state_temp = NULL;
8738 struct drm_crtc *crtc;
8739 struct drm_crtc_state *old_crtc_state, *new_crtc_state;
8740 unsigned long flags;
8741 bool wait_for_vblank = true;
8742 struct drm_connector *connector;
8743 struct drm_connector_state *old_con_state, *new_con_state;
8744 struct dm_crtc_state *dm_old_crtc_state, *dm_new_crtc_state;
8745 int crtc_disable_count = 0;
8746 bool mode_set_reset_required = false;
8748 trace_amdgpu_dm_atomic_commit_tail_begin(state);
8750 drm_atomic_helper_update_legacy_modeset_state(dev, state);
8752 dm_state = dm_atomic_get_new_state(state);
8753 if (dm_state && dm_state->context) {
8754 dc_state = dm_state->context;
8756 /* No state changes, retain current state. */
8757 dc_state_temp = dc_create_state(dm->dc);
8758 ASSERT(dc_state_temp);
8759 dc_state = dc_state_temp;
8760 dc_resource_state_copy_construct_current(dm->dc, dc_state);
8763 for_each_oldnew_crtc_in_state (state, crtc, old_crtc_state,
8764 new_crtc_state, i) {
8765 struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
8767 dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
8769 if (old_crtc_state->active &&
8770 (!new_crtc_state->active ||
8771 drm_atomic_crtc_needs_modeset(new_crtc_state))) {
8772 manage_dm_interrupts(adev, acrtc, false);
8773 dc_stream_release(dm_old_crtc_state->stream);
8777 drm_atomic_helper_calc_timestamping_constants(state);
8779 /* update changed items */
8780 for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
8781 struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
8783 dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
8784 dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
8787 "amdgpu_crtc id:%d crtc_state_flags: enable:%d, active:%d, "
8788 "planes_changed:%d, mode_changed:%d,active_changed:%d,"
8789 "connectors_changed:%d\n",
8791 new_crtc_state->enable,
8792 new_crtc_state->active,
8793 new_crtc_state->planes_changed,
8794 new_crtc_state->mode_changed,
8795 new_crtc_state->active_changed,
8796 new_crtc_state->connectors_changed);
8798 /* Disable cursor if disabling crtc */
8799 if (old_crtc_state->active && !new_crtc_state->active) {
8800 struct dc_cursor_position position;
8802 memset(&position, 0, sizeof(position));
8803 mutex_lock(&dm->dc_lock);
8804 dc_stream_set_cursor_position(dm_old_crtc_state->stream, &position);
8805 mutex_unlock(&dm->dc_lock);
8808 /* Copy all transient state flags into dc state */
8809 if (dm_new_crtc_state->stream) {
8810 amdgpu_dm_crtc_copy_transient_flags(&dm_new_crtc_state->base,
8811 dm_new_crtc_state->stream);
8814 /* handles headless hotplug case, updating new_state and
8815 * aconnector as needed
8818 if (modeset_required(new_crtc_state, dm_new_crtc_state->stream, dm_old_crtc_state->stream)) {
8820 DRM_DEBUG_ATOMIC("Atomic commit: SET crtc id %d: [%p]\n", acrtc->crtc_id, acrtc);
8822 if (!dm_new_crtc_state->stream) {
8824 * this can happen because of issues with
8825 * userspace notification delivery.
8826 * In this case userspace tries to set a mode on
8827 * a display that is in fact disconnected.
8828 * dc_sink is NULL in this case on aconnector.
8829 * We expect a mode reset to come soon.
8831 * This can also happen when an unplug occurs
8832 * during the resume sequence.
8834 * In either case, we want to pretend we still
8835 * have a sink to keep the pipe running so that
8836 * hw state is consistent with the sw state
8838 DRM_DEBUG_DRIVER("%s: Failed to create new stream for crtc %d\n",
8839 __func__, acrtc->base.base.id);
8843 if (dm_old_crtc_state->stream)
8844 remove_stream(adev, acrtc, dm_old_crtc_state->stream);
8846 pm_runtime_get_noresume(dev->dev);
8848 acrtc->enabled = true;
8849 acrtc->hw_mode = new_crtc_state->mode;
8850 crtc->hwmode = new_crtc_state->mode;
8851 mode_set_reset_required = true;
8852 } else if (modereset_required(new_crtc_state)) {
8853 DRM_DEBUG_ATOMIC("Atomic commit: RESET. crtc id %d:[%p]\n", acrtc->crtc_id, acrtc);
8854 /* i.e. reset mode */
8855 if (dm_old_crtc_state->stream)
8856 remove_stream(adev, acrtc, dm_old_crtc_state->stream);
8858 mode_set_reset_required = true;
8860 } /* for_each_crtc_in_state() */
8863 /* if there is a mode set or reset, disable eDP PSR */
8864 if (mode_set_reset_required)
8865 amdgpu_dm_psr_disable_all(dm);
8867 dm_enable_per_frame_crtc_master_sync(dc_state);
8868 mutex_lock(&dm->dc_lock);
8869 WARN_ON(!dc_commit_state(dm->dc, dc_state));
8870 #if defined(CONFIG_DRM_AMD_DC_DCN)
8871 /* Allow idle optimization when vblank count is 0 for display off */
8872 if (dm->active_vblank_irq_count == 0)
8873 dc_allow_idle_optimizations(dm->dc, true);
8875 mutex_unlock(&dm->dc_lock);
8878 for_each_new_crtc_in_state(state, crtc, new_crtc_state, i) {
8879 struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
8881 dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
8883 if (dm_new_crtc_state->stream != NULL) {
8884 const struct dc_stream_status *status =
8885 dc_stream_get_status(dm_new_crtc_state->stream);
8888 status = dc_stream_get_status_from_state(dc_state,
8889 dm_new_crtc_state->stream);
8891 DC_ERR("got no status for stream %p on acrtc%p\n", dm_new_crtc_state->stream, acrtc);
8893 acrtc->otg_inst = status->primary_otg_inst;
8896 #ifdef CONFIG_DRM_AMD_DC_HDCP
8897 for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) {
8898 struct dm_connector_state *dm_new_con_state = to_dm_connector_state(new_con_state);
8899 struct amdgpu_crtc *acrtc = to_amdgpu_crtc(dm_new_con_state->base.crtc);
8900 struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
8902 new_crtc_state = NULL;
8905 new_crtc_state = drm_atomic_get_new_crtc_state(state, &acrtc->base);
8907 dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
8909 if (dm_new_crtc_state && dm_new_crtc_state->stream == NULL &&
8910 connector->state->content_protection == DRM_MODE_CONTENT_PROTECTION_ENABLED) {
8911 hdcp_reset_display(adev->dm.hdcp_workqueue, aconnector->dc_link->link_index);
8912 new_con_state->content_protection = DRM_MODE_CONTENT_PROTECTION_DESIRED;
8913 dm_new_con_state->update_hdcp = true;
8917 if (is_content_protection_different(new_con_state, old_con_state, connector, adev->dm.hdcp_workqueue))
8918 hdcp_update_display(
8919 adev->dm.hdcp_workqueue, aconnector->dc_link->link_index, aconnector,
8920 new_con_state->hdcp_content_type,
8921 new_con_state->content_protection == DRM_MODE_CONTENT_PROTECTION_DESIRED);
8925 /* Handle connector state changes */
8926 for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) {
8927 struct dm_connector_state *dm_new_con_state = to_dm_connector_state(new_con_state);
8928 struct dm_connector_state *dm_old_con_state = to_dm_connector_state(old_con_state);
8929 struct amdgpu_crtc *acrtc = to_amdgpu_crtc(dm_new_con_state->base.crtc);
8930 struct dc_surface_update dummy_updates[MAX_SURFACES];
8931 struct dc_stream_update stream_update;
8932 struct dc_info_packet hdr_packet;
8933 struct dc_stream_status *status = NULL;
8934 bool abm_changed, hdr_changed, scaling_changed;
8936 memset(&dummy_updates, 0, sizeof(dummy_updates));
8937 memset(&stream_update, 0, sizeof(stream_update));
8940 new_crtc_state = drm_atomic_get_new_crtc_state(state, &acrtc->base);
8941 old_crtc_state = drm_atomic_get_old_crtc_state(state, &acrtc->base);
8944 /* Skip any modesets/resets */
8945 if (!acrtc || drm_atomic_crtc_needs_modeset(new_crtc_state))
8948 dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
8949 dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
8951 scaling_changed = is_scaling_state_different(dm_new_con_state,
8954 abm_changed = dm_new_crtc_state->abm_level !=
8955 dm_old_crtc_state->abm_level;
8958 is_hdr_metadata_different(old_con_state, new_con_state);
8960 if (!scaling_changed && !abm_changed && !hdr_changed)
8963 stream_update.stream = dm_new_crtc_state->stream;
8964 if (scaling_changed) {
8965 update_stream_scaling_settings(&dm_new_con_state->base.crtc->mode,
8966 dm_new_con_state, dm_new_crtc_state->stream);
8968 stream_update.src = dm_new_crtc_state->stream->src;
8969 stream_update.dst = dm_new_crtc_state->stream->dst;
8973 dm_new_crtc_state->stream->abm_level = dm_new_crtc_state->abm_level;
8975 stream_update.abm_level = &dm_new_crtc_state->abm_level;
8979 fill_hdr_info_packet(new_con_state, &hdr_packet);
8980 stream_update.hdr_static_metadata = &hdr_packet;
8983 status = dc_stream_get_status(dm_new_crtc_state->stream);
8985 WARN_ON(!status->plane_count);
8988 * TODO: DC refuses to perform stream updates without a dc_surface_update.
8989 * Here we create an empty update on each plane.
8990 * To fix this, DC should permit updating only stream properties.
8992 for (j = 0; j < status->plane_count; j++)
8993 dummy_updates[j].surface = status->plane_states[0];
8996 mutex_lock(&dm->dc_lock);
8997 dc_commit_updates_for_stream(dm->dc,
8999 status->plane_count,
9000 dm_new_crtc_state->stream,
9003 mutex_unlock(&dm->dc_lock);
9006 /* Count number of newly disabled CRTCs for dropping PM refs later. */
9007 for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state,
9008 new_crtc_state, i) {
9009 if (old_crtc_state->active && !new_crtc_state->active)
9010 crtc_disable_count++;
9012 dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
9013 dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
9015 /* For freesync config update on crtc state and params for irq */
9016 update_stream_irq_parameters(dm, dm_new_crtc_state);
9018 /* Handle vrr on->off / off->on transitions */
9019 amdgpu_dm_handle_vrr_transition(dm_old_crtc_state,
9024 * Enable interrupts for CRTCs that are newly enabled or went through
9025 * a modeset. It was intentionally deferred until after the front end
9026 * state was modified to wait until the OTG was on and so the IRQ
9027 * handlers didn't access stale or invalid state.
9029 for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
9030 struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
9031 #ifdef CONFIG_DEBUG_FS
9032 bool configure_crc = false;
9033 enum amdgpu_dm_pipe_crc_source cur_crc_src;
9034 #if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
9035 struct crc_rd_work *crc_rd_wrk = dm->crc_rd_wrk;
9037 spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);
9038 cur_crc_src = acrtc->dm_irq_params.crc_src;
9039 spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
9041 dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
9043 if (new_crtc_state->active &&
9044 (!old_crtc_state->active ||
9045 drm_atomic_crtc_needs_modeset(new_crtc_state))) {
9046 dc_stream_retain(dm_new_crtc_state->stream);
9047 acrtc->dm_irq_params.stream = dm_new_crtc_state->stream;
9048 manage_dm_interrupts(adev, acrtc, true);
9050 #ifdef CONFIG_DEBUG_FS
9052 * Frontend may have changed so reapply the CRC capture
9053 * settings for the stream.
9055 dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
9057 if (amdgpu_dm_is_valid_crc_source(cur_crc_src)) {
9058 configure_crc = true;
9059 #if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
9060 if (amdgpu_dm_crc_window_is_activated(crtc)) {
9061 spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);
9062 acrtc->dm_irq_params.crc_window.update_win = true;
9063 acrtc->dm_irq_params.crc_window.skip_frame_cnt = 2;
9064 spin_lock_irq(&crc_rd_wrk->crc_rd_work_lock);
9065 crc_rd_wrk->crtc = crtc;
9066 spin_unlock_irq(&crc_rd_wrk->crc_rd_work_lock);
9067 spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
9073 if (amdgpu_dm_crtc_configure_crc_source(
9074 crtc, dm_new_crtc_state, cur_crc_src))
9075 DRM_DEBUG_DRIVER("Failed to configure crc source");
9080 for_each_new_crtc_in_state(state, crtc, new_crtc_state, j)
9081 if (new_crtc_state->async_flip)
9082 wait_for_vblank = false;
9084 /* update planes when needed per crtc*/
9085 for_each_new_crtc_in_state(state, crtc, new_crtc_state, j) {
9086 dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
9088 if (dm_new_crtc_state->stream)
9089 amdgpu_dm_commit_planes(state, dc_state, dev,
9090 dm, crtc, wait_for_vblank);
9093 /* Update audio instances for each connector. */
9094 amdgpu_dm_commit_audio(dev, state);
9097 * send a vblank event for every event not handled in a flip and
9098 * mark the event consumed for drm_atomic_helper_commit_hw_done
9100 spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);
9101 for_each_new_crtc_in_state(state, crtc, new_crtc_state, i) {
9103 if (new_crtc_state->event)
9104 drm_send_event_locked(dev, &new_crtc_state->event->base);
9106 new_crtc_state->event = NULL;
9108 spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
9110 /* Signal HW programming completion */
9111 drm_atomic_helper_commit_hw_done(state);
9113 if (wait_for_vblank)
9114 drm_atomic_helper_wait_for_flip_done(dev, state);
9116 drm_atomic_helper_cleanup_planes(dev, state);
9118 /* return the stolen vga memory back to VRAM */
9119 if (!adev->mman.keep_stolen_vga_memory)
9120 amdgpu_bo_free_kernel(&adev->mman.stolen_vga_memory, NULL, NULL);
9121 amdgpu_bo_free_kernel(&adev->mman.stolen_extended_memory, NULL, NULL);
9124 * Finally, drop a runtime PM reference for each newly disabled CRTC,
9125 * so we can put the GPU into runtime suspend if we're not driving any displays anymore.
9128 for (i = 0; i < crtc_disable_count; i++)
9129 pm_runtime_put_autosuspend(dev->dev);
9130 pm_runtime_mark_last_busy(dev->dev);
9133 dc_release_state(dc_state_temp);
9137 static int dm_force_atomic_commit(struct drm_connector *connector)
9140 struct drm_device *ddev = connector->dev;
9141 struct drm_atomic_state *state = drm_atomic_state_alloc(ddev);
9142 struct amdgpu_crtc *disconnected_acrtc = to_amdgpu_crtc(connector->encoder->crtc);
9143 struct drm_plane *plane = disconnected_acrtc->base.primary;
9144 struct drm_connector_state *conn_state;
9145 struct drm_crtc_state *crtc_state;
9146 struct drm_plane_state *plane_state;
9151 state->acquire_ctx = ddev->mode_config.acquire_ctx;
9153 /* Construct an atomic state to restore previous display setting */
9156 * Attach connectors to drm_atomic_state
9158 conn_state = drm_atomic_get_connector_state(state, connector);
9160 ret = PTR_ERR_OR_ZERO(conn_state);
9164 /* Attach crtc to drm_atomic_state*/
9165 crtc_state = drm_atomic_get_crtc_state(state, &disconnected_acrtc->base);
9167 ret = PTR_ERR_OR_ZERO(crtc_state);
9171 /* force a restore */
9172 crtc_state->mode_changed = true;
9174 /* Attach plane to drm_atomic_state */
9175 plane_state = drm_atomic_get_plane_state(state, plane);
9177 ret = PTR_ERR_OR_ZERO(plane_state);
9181 /* Call commit internally with the state we just constructed */
9182 ret = drm_atomic_commit(state);
9185 drm_atomic_state_put(state);
9187 DRM_ERROR("Restoring old state failed with %i\n", ret);
9193 * This function handles all cases when set mode does not come upon hotplug.
9194 * This includes when a display is unplugged then plugged back into the
9195 * same port and when running without usermode desktop manager support.
9197 void dm_restore_drm_connector_state(struct drm_device *dev,
9198 struct drm_connector *connector)
9200 struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
9201 struct amdgpu_crtc *disconnected_acrtc;
9202 struct dm_crtc_state *acrtc_state;
9204 if (!aconnector->dc_sink || !connector->state || !connector->encoder)
9207 disconnected_acrtc = to_amdgpu_crtc(connector->encoder->crtc);
9208 if (!disconnected_acrtc)
9211 acrtc_state = to_dm_crtc_state(disconnected_acrtc->base.state);
9212 if (!acrtc_state->stream)
9216 * If the previous sink is not released and different from the current,
9217 * we deduce we are in a state where we cannot rely on a usermode call
9218 * to turn on the display, so we do it here
9220 if (acrtc_state->stream->sink != aconnector->dc_sink)
9221 dm_force_atomic_commit(&aconnector->base);
9225 * Grabs all modesetting locks to serialize against any blocking commits,
9226 * and waits for completion of all non-blocking commits.
9228 static int do_aquire_global_lock(struct drm_device *dev,
9229 struct drm_atomic_state *state)
9231 struct drm_crtc *crtc;
9232 struct drm_crtc_commit *commit;
9236 * Adding all modeset locks to acquire_ctx will
9237 * ensure that when the framework releases it the
9238 * extra locks we are locking here will get released too
9240 ret = drm_modeset_lock_all_ctx(dev, state->acquire_ctx);
9244 list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
9245 spin_lock(&crtc->commit_lock);
9246 commit = list_first_entry_or_null(&crtc->commit_list,
9247 struct drm_crtc_commit, commit_entry);
9249 drm_crtc_commit_get(commit);
9250 spin_unlock(&crtc->commit_lock);
9256 * Make sure all pending HW programming has completed and page flips are done
9259 ret = wait_for_completion_interruptible_timeout(&commit->hw_done, 10*HZ);
9262 ret = wait_for_completion_interruptible_timeout(
9263 &commit->flip_done, 10*HZ);
9266 DRM_ERROR("[CRTC:%d:%s] hw_done or flip_done "
9267 "timed out\n", crtc->base.id, crtc->name);
9269 drm_crtc_commit_put(commit);
9272 return ret < 0 ? ret : 0;
9275 static void get_freesync_config_for_crtc(
9276 struct dm_crtc_state *new_crtc_state,
9277 struct dm_connector_state *new_con_state)
9279 struct mod_freesync_config config = {0};
9280 struct amdgpu_dm_connector *aconnector =
9281 to_amdgpu_dm_connector(new_con_state->base.connector);
9282 struct drm_display_mode *mode = &new_crtc_state->base.mode;
9283 int vrefresh = drm_mode_vrefresh(mode);
9284 bool fs_vid_mode = false;
9286 new_crtc_state->vrr_supported = new_con_state->freesync_capable &&
9287 vrefresh >= aconnector->min_vfreq &&
9288 vrefresh <= aconnector->max_vfreq;
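/*
 * Illustrative example: on a panel reporting a 48-144 Hz FreeSync range,
 * a 60 Hz mode is treated as VRR capable while a 30 Hz mode is not, since
 * drm_mode_vrefresh() must fall within [min_vfreq, max_vfreq].
 */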
9290 if (new_crtc_state->vrr_supported) {
9291 new_crtc_state->stream->ignore_msa_timing_param = true;
9292 fs_vid_mode = new_crtc_state->freesync_config.state == VRR_STATE_ACTIVE_FIXED;
9294 config.min_refresh_in_uhz = aconnector->min_vfreq * 1000000;
9295 config.max_refresh_in_uhz = aconnector->max_vfreq * 1000000;
9296 config.vsif_supported = true;
9300 config.state = VRR_STATE_ACTIVE_FIXED;
9301 config.fixed_refresh_in_uhz = new_crtc_state->freesync_config.fixed_refresh_in_uhz;
9303 } else if (new_crtc_state->base.vrr_enabled) {
9304 config.state = VRR_STATE_ACTIVE_VARIABLE;
9306 config.state = VRR_STATE_INACTIVE;
9310 new_crtc_state->freesync_config = config;
9313 static void reset_freesync_config_for_crtc(
9314 struct dm_crtc_state *new_crtc_state)
9316 new_crtc_state->vrr_supported = false;
9318 memset(&new_crtc_state->vrr_infopacket, 0,
9319 sizeof(new_crtc_state->vrr_infopacket));
9323 is_timing_unchanged_for_freesync(struct drm_crtc_state *old_crtc_state,
9324 struct drm_crtc_state *new_crtc_state)
9326 struct drm_display_mode old_mode, new_mode;
9328 if (!old_crtc_state || !new_crtc_state)
9331 old_mode = old_crtc_state->mode;
9332 new_mode = new_crtc_state->mode;
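/*
 * The check below treats the timing as "unchanged for freesync" when
 * everything matches except the vertical total and vertical sync
 * position, with the vsync pulse width kept identical - i.e. only the
 * vertical front porch moved, which does not require a full modeset.
 */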
9334 if (old_mode.clock == new_mode.clock &&
9335 old_mode.hdisplay == new_mode.hdisplay &&
9336 old_mode.vdisplay == new_mode.vdisplay &&
9337 old_mode.htotal == new_mode.htotal &&
9338 old_mode.vtotal != new_mode.vtotal &&
9339 old_mode.hsync_start == new_mode.hsync_start &&
9340 old_mode.vsync_start != new_mode.vsync_start &&
9341 old_mode.hsync_end == new_mode.hsync_end &&
9342 old_mode.vsync_end != new_mode.vsync_end &&
9343 old_mode.hskew == new_mode.hskew &&
9344 old_mode.vscan == new_mode.vscan &&
9345 (old_mode.vsync_end - old_mode.vsync_start) ==
9346 (new_mode.vsync_end - new_mode.vsync_start))
9352 static void set_freesync_fixed_config(struct dm_crtc_state *dm_new_crtc_state) {
9353 uint64_t num, den, res;
9354 struct drm_crtc_state *new_crtc_state = &dm_new_crtc_state->base;
9356 dm_new_crtc_state->freesync_config.state = VRR_STATE_ACTIVE_FIXED;
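/*
 * Rough worked example (illustrative values only): a 1920x1080 mode with
 * a 148500 kHz pixel clock, htotal 2200 and vtotal 1125 yields
 * 148500 * 1000 * 1000000 / (2200 * 1125) = 60000000 uHz, i.e. 60 Hz.
 */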
9358 num = (unsigned long long)new_crtc_state->mode.clock * 1000 * 1000000;
9359 den = (unsigned long long)new_crtc_state->mode.htotal *
9360 (unsigned long long)new_crtc_state->mode.vtotal;
9362 res = div_u64(num, den);
9363 dm_new_crtc_state->freesync_config.fixed_refresh_in_uhz = res;
9366 static int dm_update_crtc_state(struct amdgpu_display_manager *dm,
9367 struct drm_atomic_state *state,
9368 struct drm_crtc *crtc,
9369 struct drm_crtc_state *old_crtc_state,
9370 struct drm_crtc_state *new_crtc_state,
9372 bool *lock_and_validation_needed)
9374 struct dm_atomic_state *dm_state = NULL;
9375 struct dm_crtc_state *dm_old_crtc_state, *dm_new_crtc_state;
9376 struct dc_stream_state *new_stream;
9380 * TODO Move this code into dm_crtc_atomic_check once we get rid of dc_validation_set
9381 * update changed items
9383 struct amdgpu_crtc *acrtc = NULL;
9384 struct amdgpu_dm_connector *aconnector = NULL;
9385 struct drm_connector_state *drm_new_conn_state = NULL, *drm_old_conn_state = NULL;
9386 struct dm_connector_state *dm_new_conn_state = NULL, *dm_old_conn_state = NULL;
9390 dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
9391 dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
9392 acrtc = to_amdgpu_crtc(crtc);
9393 aconnector = amdgpu_dm_find_first_crtc_matching_connector(state, crtc);
9395 /* TODO This hack should go away */
9396 if (aconnector && enable) {
9397 /* Make sure fake sink is created in plug-in scenario */
9398 drm_new_conn_state = drm_atomic_get_new_connector_state(state,
9400 drm_old_conn_state = drm_atomic_get_old_connector_state(state,
9403 if (IS_ERR(drm_new_conn_state)) {
9404 ret = PTR_ERR_OR_ZERO(drm_new_conn_state);
9408 dm_new_conn_state = to_dm_connector_state(drm_new_conn_state);
9409 dm_old_conn_state = to_dm_connector_state(drm_old_conn_state);
9411 if (!drm_atomic_crtc_needs_modeset(new_crtc_state))
9414 new_stream = create_validate_stream_for_sink(aconnector,
9415 &new_crtc_state->mode,
9417 dm_old_crtc_state->stream);
9420 * we can have no stream on ACTION_SET if a display
9421 * was disconnected during S3, in this case it is not an
9422 * error, the OS will be updated after detection, and
9423 * will do the right thing on next atomic commit
9427 DRM_DEBUG_DRIVER("%s: Failed to create new stream for crtc %d\n",
9428 __func__, acrtc->base.base.id);
9434 * TODO: Check VSDB bits to decide whether this should
9435 * be enabled or not.
9437 new_stream->triggered_crtc_reset.enabled =
9438 dm->force_timing_sync;
9440 dm_new_crtc_state->abm_level = dm_new_conn_state->abm_level;
9442 ret = fill_hdr_info_packet(drm_new_conn_state,
9443 &new_stream->hdr_static_metadata);
9448 * If we already removed the old stream from the context
9449 * (and set the new stream to NULL) then we can't reuse
9450 * the old stream even if the stream and scaling are unchanged.
9451 * We'll hit the BUG_ON and black screen.
9453 * TODO: Refactor this function to allow this check to work
9454 * in all conditions.
9456 if (amdgpu_freesync_vid_mode &&
9457 dm_new_crtc_state->stream &&
9458 is_timing_unchanged_for_freesync(new_crtc_state, old_crtc_state))
9461 if (dm_new_crtc_state->stream &&
9462 dc_is_stream_unchanged(new_stream, dm_old_crtc_state->stream) &&
9463 dc_is_stream_scaling_unchanged(new_stream, dm_old_crtc_state->stream)) {
9464 new_crtc_state->mode_changed = false;
9465 DRM_DEBUG_DRIVER("Mode change not required, setting mode_changed to %d",
9466 new_crtc_state->mode_changed);
9470 /* mode_changed flag may get updated above, need to check again */
9471 if (!drm_atomic_crtc_needs_modeset(new_crtc_state))
9475 "amdgpu_crtc id:%d crtc_state_flags: enable:%d, active:%d, "
9476 "planes_changed:%d, mode_changed:%d,active_changed:%d,"
9477 "connectors_changed:%d\n",
9479 new_crtc_state->enable,
9480 new_crtc_state->active,
9481 new_crtc_state->planes_changed,
9482 new_crtc_state->mode_changed,
9483 new_crtc_state->active_changed,
9484 new_crtc_state->connectors_changed);
9486 /* Remove stream for any changed/disabled CRTC */
9489 if (!dm_old_crtc_state->stream)
9492 if (amdgpu_freesync_vid_mode && dm_new_crtc_state->stream &&
9493 is_timing_unchanged_for_freesync(new_crtc_state,
9495 new_crtc_state->mode_changed = false;
9497 "Mode change not required for front porch change, "
9498 "setting mode_changed to %d",
9499 new_crtc_state->mode_changed);
9501 set_freesync_fixed_config(dm_new_crtc_state);
9504 } else if (amdgpu_freesync_vid_mode && aconnector &&
9505 is_freesync_video_mode(&new_crtc_state->mode,
9507 set_freesync_fixed_config(dm_new_crtc_state);
9510 ret = dm_atomic_get_state(state, &dm_state);
9514 DRM_DEBUG_DRIVER("Disabling DRM crtc: %d\n",
9517 /* i.e. reset mode */
9518 if (dc_remove_stream_from_ctx(
9521 dm_old_crtc_state->stream) != DC_OK) {
9526 dc_stream_release(dm_old_crtc_state->stream);
9527 dm_new_crtc_state->stream = NULL;
9529 reset_freesync_config_for_crtc(dm_new_crtc_state);
9531 *lock_and_validation_needed = true;
9533 } else {/* Add stream for any updated/enabled CRTC */
9535 * Quick fix to prevent a NULL pointer dereference on new_stream when
9536 * newly added MST connectors are not found in the existing crtc_state in chained mode.
9537 * TODO: need to dig out the root cause of this.
9539 if (!aconnector || (!aconnector->dc_sink && aconnector->mst_port))
9542 if (modereset_required(new_crtc_state))
9545 if (modeset_required(new_crtc_state, new_stream,
9546 dm_old_crtc_state->stream)) {
9548 WARN_ON(dm_new_crtc_state->stream);
9550 ret = dm_atomic_get_state(state, &dm_state);
9554 dm_new_crtc_state->stream = new_stream;
9556 dc_stream_retain(new_stream);
9558 DRM_DEBUG_ATOMIC("Enabling DRM crtc: %d\n",
9561 if (dc_add_stream_to_ctx(
9564 dm_new_crtc_state->stream) != DC_OK) {
9569 *lock_and_validation_needed = true;
9574 /* Release extra reference */
9576 dc_stream_release(new_stream);
9579 * We want to do dc stream updates that do not require a
9580 * full modeset below.
9582 if (!(enable && aconnector && new_crtc_state->active))
9585 * Given the above conditions, the dc state cannot be NULL because:
9586 * 1. We're in the process of enabling CRTCs (the stream has just been
9587 * added to the dc context, or is already on the context),
9588 * 2. the CRTC has a valid connector attached, and
9589 * 3. it is currently active and enabled.
9590 * => The dc stream state currently exists.
9592 BUG_ON(dm_new_crtc_state->stream == NULL);
9594 /* Scaling or underscan settings */
9595 if (is_scaling_state_different(dm_old_conn_state, dm_new_conn_state))
9596 update_stream_scaling_settings(
9597 &new_crtc_state->mode, dm_new_conn_state, dm_new_crtc_state->stream);
9600 dm_new_crtc_state->abm_level = dm_new_conn_state->abm_level;
9603 * Color management settings. We also update color properties
9604 * when a modeset is needed, to ensure it gets reprogrammed.
9606 if (dm_new_crtc_state->base.color_mgmt_changed ||
9607 drm_atomic_crtc_needs_modeset(new_crtc_state)) {
9608 ret = amdgpu_dm_update_crtc_color_mgmt(dm_new_crtc_state);
9613 /* Update Freesync settings. */
9614 get_freesync_config_for_crtc(dm_new_crtc_state,
9621 dc_stream_release(new_stream);
9625 static bool should_reset_plane(struct drm_atomic_state *state,
9626 struct drm_plane *plane,
9627 struct drm_plane_state *old_plane_state,
9628 struct drm_plane_state *new_plane_state)
9630 struct drm_plane *other;
9631 struct drm_plane_state *old_other_state, *new_other_state;
9632 struct drm_crtc_state *new_crtc_state;
9636 * TODO: Remove this hack once the checks below are sufficient to
9637 * determine when we need to reset all the planes on the CRTC.
9640 if (state->allow_modeset)
9643 /* Exit early if we know that we're adding or removing the plane. */
9644 if (old_plane_state->crtc != new_plane_state->crtc)
9647 /* old crtc == new_crtc == NULL, plane not in context. */
9648 if (!new_plane_state->crtc)
9652 drm_atomic_get_new_crtc_state(state, new_plane_state->crtc);
9654 if (!new_crtc_state)
9657 /* CRTC Degamma changes currently require us to recreate planes. */
9658 if (new_crtc_state->color_mgmt_changed)
9661 if (drm_atomic_crtc_needs_modeset(new_crtc_state))
9665 * If there are any new primary or overlay planes being added or
9666 * removed then the z-order can potentially change. To ensure
9667 * correct z-order and pipe acquisition the current DC architecture
9668 * requires us to remove and recreate all existing planes.
9670 * TODO: Come up with a more elegant solution for this.
9672 for_each_oldnew_plane_in_state(state, other, old_other_state, new_other_state, i) {
9673 struct amdgpu_framebuffer *old_afb, *new_afb;
9674 if (other->type == DRM_PLANE_TYPE_CURSOR)
9677 if (old_other_state->crtc != new_plane_state->crtc &&
9678 new_other_state->crtc != new_plane_state->crtc)
9681 if (old_other_state->crtc != new_other_state->crtc)
9684 /* Src/dst size and scaling updates. */
9685 if (old_other_state->src_w != new_other_state->src_w ||
9686 old_other_state->src_h != new_other_state->src_h ||
9687 old_other_state->crtc_w != new_other_state->crtc_w ||
9688 old_other_state->crtc_h != new_other_state->crtc_h)
9691 /* Rotation / mirroring updates. */
9692 if (old_other_state->rotation != new_other_state->rotation)
9695 /* Blending updates. */
9696 if (old_other_state->pixel_blend_mode !=
9697 new_other_state->pixel_blend_mode)
9700 /* Alpha updates. */
9701 if (old_other_state->alpha != new_other_state->alpha)
9704 /* Colorspace changes. */
9705 if (old_other_state->color_range != new_other_state->color_range ||
9706 old_other_state->color_encoding != new_other_state->color_encoding)
9709 /* Framebuffer checks fall at the end. */
9710 if (!old_other_state->fb || !new_other_state->fb)
9713 /* Pixel format changes can require bandwidth updates. */
9714 if (old_other_state->fb->format != new_other_state->fb->format)
9717 old_afb = (struct amdgpu_framebuffer *)old_other_state->fb;
9718 new_afb = (struct amdgpu_framebuffer *)new_other_state->fb;
9720 /* Tiling and DCC changes also require bandwidth updates. */
9721 if (old_afb->tiling_flags != new_afb->tiling_flags ||
9722 old_afb->base.modifier != new_afb->base.modifier)
9729 static int dm_check_cursor_fb(struct amdgpu_crtc *new_acrtc,
9730 struct drm_plane_state *new_plane_state,
9731 struct drm_framebuffer *fb)
9733 struct amdgpu_device *adev = drm_to_adev(new_acrtc->base.dev);
9734 struct amdgpu_framebuffer *afb = to_amdgpu_framebuffer(fb);
9738 if (fb->width > new_acrtc->max_cursor_width ||
9739 fb->height > new_acrtc->max_cursor_height) {
9740 DRM_DEBUG_ATOMIC("Bad cursor FB size %dx%d\n",
9741 new_plane_state->fb->width,
9742 new_plane_state->fb->height);
9745 if (new_plane_state->src_w != fb->width << 16 ||
9746 new_plane_state->src_h != fb->height << 16) {
9747 DRM_DEBUG_ATOMIC("Cropping not supported for cursor plane\n");
9751 /* Pitch in pixels */
9752 pitch = fb->pitches[0] / fb->format->cpp[0];
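/*
 * Illustrative example: an ARGB8888 cursor FB (4 bytes per pixel) that is
 * 64 pixels wide has pitches[0] == 256 bytes, giving a pitch of 64 px.
 */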
9754 if (fb->width != pitch) {
9755 DRM_DEBUG_ATOMIC("Cursor FB width %d doesn't match pitch %d",
9764 /* FB pitch is supported by cursor plane */
9767 DRM_DEBUG_ATOMIC("Bad cursor FB pitch %d px\n", pitch);
9771 /* Core DRM takes care of checking FB modifiers, so we only need to
9772 * check tiling flags when the FB doesn't have a modifier. */
9773 if (!(fb->flags & DRM_MODE_FB_MODIFIERS)) {
9774 if (adev->family < AMDGPU_FAMILY_AI) {
9775 linear = AMDGPU_TILING_GET(afb->tiling_flags, ARRAY_MODE) != DC_ARRAY_2D_TILED_THIN1 &&
9776 AMDGPU_TILING_GET(afb->tiling_flags, ARRAY_MODE) != DC_ARRAY_1D_TILED_THIN1 &&
9777 AMDGPU_TILING_GET(afb->tiling_flags, MICRO_TILE_MODE) == 0;
9779 linear = AMDGPU_TILING_GET(afb->tiling_flags, SWIZZLE_MODE) == 0;
9782 DRM_DEBUG_ATOMIC("Cursor FB not linear");
9790 static int dm_update_plane_state(struct dc *dc,
9791 struct drm_atomic_state *state,
9792 struct drm_plane *plane,
9793 struct drm_plane_state *old_plane_state,
9794 struct drm_plane_state *new_plane_state,
9796 bool *lock_and_validation_needed)
9799 struct dm_atomic_state *dm_state = NULL;
9800 struct drm_crtc *new_plane_crtc, *old_plane_crtc;
9801 struct drm_crtc_state *old_crtc_state, *new_crtc_state;
9802 struct dm_crtc_state *dm_new_crtc_state, *dm_old_crtc_state;
9803 struct dm_plane_state *dm_new_plane_state, *dm_old_plane_state;
9804 struct amdgpu_crtc *new_acrtc;
9809 new_plane_crtc = new_plane_state->crtc;
9810 old_plane_crtc = old_plane_state->crtc;
9811 dm_new_plane_state = to_dm_plane_state(new_plane_state);
9812 dm_old_plane_state = to_dm_plane_state(old_plane_state);
9814 if (plane->type == DRM_PLANE_TYPE_CURSOR) {
9815 if (!enable || !new_plane_crtc ||
9816 drm_atomic_plane_disabling(plane->state, new_plane_state))
9819 new_acrtc = to_amdgpu_crtc(new_plane_crtc);
9821 if (new_plane_state->src_x != 0 || new_plane_state->src_y != 0) {
9822 DRM_DEBUG_ATOMIC("Cropping not supported for cursor plane\n");
9826 if (new_plane_state->fb) {
9827 ret = dm_check_cursor_fb(new_acrtc, new_plane_state,
9828 new_plane_state->fb);
9836 needs_reset = should_reset_plane(state, plane, old_plane_state,
9839 /* Remove any changed/removed planes */
9844 if (!old_plane_crtc)
9847 old_crtc_state = drm_atomic_get_old_crtc_state(
9848 state, old_plane_crtc);
9849 dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
9851 if (!dm_old_crtc_state->stream)
9854 DRM_DEBUG_ATOMIC("Disabling DRM plane: %d on DRM crtc %d\n",
9855 plane->base.id, old_plane_crtc->base.id);
9857 ret = dm_atomic_get_state(state, &dm_state);
9861 if (!dc_remove_plane_from_context(
9863 dm_old_crtc_state->stream,
9864 dm_old_plane_state->dc_state,
9865 dm_state->context)) {
9871 dc_plane_state_release(dm_old_plane_state->dc_state);
9872 dm_new_plane_state->dc_state = NULL;
9874 *lock_and_validation_needed = true;
9876 } else { /* Add new planes */
9877 struct dc_plane_state *dc_new_plane_state;
9879 if (drm_atomic_plane_disabling(plane->state, new_plane_state))
9882 if (!new_plane_crtc)
9885 new_crtc_state = drm_atomic_get_new_crtc_state(state, new_plane_crtc);
9886 dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
9888 if (!dm_new_crtc_state->stream)
9894 ret = dm_plane_helper_check_state(new_plane_state, new_crtc_state);
9898 WARN_ON(dm_new_plane_state->dc_state);
9900 dc_new_plane_state = dc_create_plane_state(dc);
9901 if (!dc_new_plane_state)
9904 DRM_DEBUG_ATOMIC("Enabling DRM plane: %d on DRM crtc %d\n",
9905 plane->base.id, new_plane_crtc->base.id);
9907 ret = fill_dc_plane_attributes(
9908 drm_to_adev(new_plane_crtc->dev),
9913 dc_plane_state_release(dc_new_plane_state);
9917 ret = dm_atomic_get_state(state, &dm_state);
9919 dc_plane_state_release(dc_new_plane_state);
9924 * Any atomic check errors that occur after this will
9925 * not need a release. The plane state will be attached
9926 * to the stream, and therefore part of the atomic
9927 * state. It'll be released when the atomic state is cleaned up.
9930 if (!dc_add_plane_to_context(
9932 dm_new_crtc_state->stream,
9934 dm_state->context)) {
9936 dc_plane_state_release(dc_new_plane_state);
9940 dm_new_plane_state->dc_state = dc_new_plane_state;
9942 /* Tell DC to do a full surface update every time there
9943 * is a plane change. Inefficient, but works for now.
9945 dm_new_plane_state->dc_state->update_flags.bits.full_update = 1;
9947 *lock_and_validation_needed = true;
9954 static int dm_check_crtc_cursor(struct drm_atomic_state *state,
9955 struct drm_crtc *crtc,
9956 struct drm_crtc_state *new_crtc_state)
9958 struct drm_plane_state *new_cursor_state, *new_primary_state;
9959 int cursor_scale_w, cursor_scale_h, primary_scale_w, primary_scale_h;
9961 /* On DCE and DCN there is no dedicated hardware cursor plane. We get a
9962 * cursor per pipe but it's going to inherit the scaling and
9963 * positioning from the underlying pipe. Check the cursor plane's
9964 * blending properties match the primary plane's. */
9966 new_cursor_state = drm_atomic_get_new_plane_state(state, crtc->cursor);
9967 new_primary_state = drm_atomic_get_new_plane_state(state, crtc->primary);
9968 if (!new_cursor_state || !new_primary_state ||
9969 !new_cursor_state->fb || !new_primary_state->fb) {
9973 cursor_scale_w = new_cursor_state->crtc_w * 1000 /
9974 (new_cursor_state->src_w >> 16);
9975 cursor_scale_h = new_cursor_state->crtc_h * 1000 /
9976 (new_cursor_state->src_h >> 16);
9978 primary_scale_w = new_primary_state->crtc_w * 1000 /
9979 (new_primary_state->src_w >> 16);
9980 primary_scale_h = new_primary_state->crtc_h * 1000 /
9981 (new_primary_state->src_h >> 16);
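/*
 * Scales are in 1/1000 units. For illustration: a 64x64 cursor shown at
 * 64x64 gives a scale of 1000 (1.0x), while a primary plane scanned out
 * from a 1920-wide source to a 960-wide CRTC gives 500 (0.5x); such a
 * mismatch is rejected below.
 */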
9983 if (cursor_scale_w != primary_scale_w ||
9984 cursor_scale_h != primary_scale_h) {
9985 DRM_DEBUG_ATOMIC("Cursor plane scaling doesn't match primary plane\n");
9992 #if defined(CONFIG_DRM_AMD_DC_DCN)
9993 static int add_affected_mst_dsc_crtcs(struct drm_atomic_state *state, struct drm_crtc *crtc)
9995 struct drm_connector *connector;
9996 struct drm_connector_state *conn_state;
9997 struct amdgpu_dm_connector *aconnector = NULL;
9999 for_each_new_connector_in_state(state, connector, conn_state, i) {
10000 if (conn_state->crtc != crtc)
10003 aconnector = to_amdgpu_dm_connector(connector);
10004 if (!aconnector->port || !aconnector->mst_port)
10013 return drm_dp_mst_add_affected_dsc_crtcs(state, &aconnector->mst_port->mst_mgr);
10017 static int validate_overlay(struct drm_atomic_state *state)
10020 struct drm_plane *plane;
10021 struct drm_plane_state *old_plane_state, *new_plane_state;
10022 struct drm_plane_state *primary_state, *overlay_state = NULL;
10024 /* Check if primary plane is contained inside overlay */
10025 for_each_oldnew_plane_in_state_reverse(state, plane, old_plane_state, new_plane_state, i) {
10026 if (plane->type == DRM_PLANE_TYPE_OVERLAY) {
10027 if (drm_atomic_plane_disabling(plane->state, new_plane_state))
10030 overlay_state = new_plane_state;
10035 /* check if we're making changes to the overlay plane */
10036 if (!overlay_state)
10039 /* check if overlay plane is enabled */
10040 if (!overlay_state->crtc)
10043 /* find the primary plane for the CRTC that the overlay is enabled on */
10044 primary_state = drm_atomic_get_plane_state(state, overlay_state->crtc->primary);
10045 if (IS_ERR(primary_state))
10046 return PTR_ERR(primary_state);
10048 /* check if primary plane is enabled */
10049 if (!primary_state->crtc)
10052 /* Perform the bounds check to ensure the overlay plane covers the primary */
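/* In other words, the primary plane must lie entirely inside the overlay
 * rectangle: its top-left corner must not be above or left of the overlay's,
 * and its far edges must not extend past the overlay's.
 */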
10053 if (primary_state->crtc_x < overlay_state->crtc_x ||
10054 primary_state->crtc_y < overlay_state->crtc_y ||
10055 primary_state->crtc_x + primary_state->crtc_w > overlay_state->crtc_x + overlay_state->crtc_w ||
10056 primary_state->crtc_y + primary_state->crtc_h > overlay_state->crtc_y + overlay_state->crtc_h) {
10057 DRM_DEBUG_ATOMIC("Overlay plane is enabled with hardware cursor but does not fully cover primary plane\n");
10065 * amdgpu_dm_atomic_check() - Atomic check implementation for AMDgpu DM.
10066 * @dev: The DRM device
10067 * @state: The atomic state to commit
10069 * Validate that the given atomic state is programmable by DC into hardware.
10070 * This involves constructing a &struct dc_state reflecting the new hardware
10071 * state we wish to commit, then querying DC to see if it is programmable. It's
10072 * important not to modify the existing DC state. Otherwise, atomic_check
10073 * may unexpectedly commit hardware changes.
10075 * When validating the DC state, it's important that the right locks are
10076 * acquired. For full updates case which removes/adds/updates streams on one
10077 * CRTC while flipping on another CRTC, acquiring global lock will guarantee
10078 * that any such full update commit will wait for completion of any outstanding
10079 * flip using DRMs synchronization events.
10081 * Note that DM adds the affected connectors for all CRTCs in state, when that
10082 * might not seem necessary. This is because DC stream creation requires the
10083 * DC sink, which is tied to the DRM connector state. Cleaning this up should
10084 * be possible but non-trivial - a possible TODO item.
10086 * Return: -Error code if validation failed.
10088 static int amdgpu_dm_atomic_check(struct drm_device *dev,
10089 struct drm_atomic_state *state)
10091 struct amdgpu_device *adev = drm_to_adev(dev);
10092 struct dm_atomic_state *dm_state = NULL;
10093 struct dc *dc = adev->dm.dc;
10094 struct drm_connector *connector;
10095 struct drm_connector_state *old_con_state, *new_con_state;
10096 struct drm_crtc *crtc;
10097 struct drm_crtc_state *old_crtc_state, *new_crtc_state;
10098 struct drm_plane *plane;
10099 struct drm_plane_state *old_plane_state, *new_plane_state;
10100 enum dc_status status;
10102 bool lock_and_validation_needed = false;
10103 struct dm_crtc_state *dm_old_crtc_state;
10105 trace_amdgpu_dm_atomic_check_begin(state);
10107 ret = drm_atomic_helper_check_modeset(dev, state);
10111 /* Check connector changes */
10112 for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) {
10113 struct dm_connector_state *dm_old_con_state = to_dm_connector_state(old_con_state);
10114 struct dm_connector_state *dm_new_con_state = to_dm_connector_state(new_con_state);
10116 /* Skip connectors that are disabled or part of modeset already. */
10117 if (!old_con_state->crtc && !new_con_state->crtc)
10120 if (!new_con_state->crtc)
10123 new_crtc_state = drm_atomic_get_crtc_state(state, new_con_state->crtc);
10124 if (IS_ERR(new_crtc_state)) {
10125 ret = PTR_ERR(new_crtc_state);
10129 if (dm_old_con_state->abm_level !=
10130 dm_new_con_state->abm_level)
10131 new_crtc_state->connectors_changed = true;
10134 #if defined(CONFIG_DRM_AMD_DC_DCN)
10135 if (dc_resource_is_dsc_encoding_supported(dc)) {
10136 for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
10137 if (drm_atomic_crtc_needs_modeset(new_crtc_state)) {
10138 ret = add_affected_mst_dsc_crtcs(state, crtc);
10145 for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
10146 dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
10148 if (!drm_atomic_crtc_needs_modeset(new_crtc_state) &&
10149 !new_crtc_state->color_mgmt_changed &&
10150 old_crtc_state->vrr_enabled == new_crtc_state->vrr_enabled &&
10151 dm_old_crtc_state->dsc_force_changed == false)
10154 if (!new_crtc_state->enable)
10157 ret = drm_atomic_add_affected_connectors(state, crtc);
10161 ret = drm_atomic_add_affected_planes(state, crtc);
10165 if (dm_old_crtc_state->dsc_force_changed)
10166 new_crtc_state->mode_changed = true;
10170 * Add all primary and overlay planes on the CRTC to the state
10171 * whenever a plane is enabled to maintain correct z-ordering
10172 * and to enable fast surface updates.
10174 drm_for_each_crtc(crtc, dev) {
10175 bool modified = false;
10177 for_each_oldnew_plane_in_state(state, plane, old_plane_state, new_plane_state, i) {
10178 if (plane->type == DRM_PLANE_TYPE_CURSOR)
10181 if (new_plane_state->crtc == crtc ||
10182 old_plane_state->crtc == crtc) {
10191 drm_for_each_plane_mask(plane, state->dev, crtc->state->plane_mask) {
10192 if (plane->type == DRM_PLANE_TYPE_CURSOR)
10196 drm_atomic_get_plane_state(state, plane);
10198 if (IS_ERR(new_plane_state)) {
10199 ret = PTR_ERR(new_plane_state);
10205 /* Remove existing planes if they are modified */
10206 for_each_oldnew_plane_in_state_reverse(state, plane, old_plane_state, new_plane_state, i) {
10207 ret = dm_update_plane_state(dc, state, plane,
10208 old_plane_state,
10209 new_plane_state,
10210 false,
10211 &lock_and_validation_needed);
10212 if (ret)
10213 goto fail;
10214 }
10216 /* Disable all crtcs which require disable */
10217 for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
10218 ret = dm_update_crtc_state(&adev->dm, state, crtc,
10219 old_crtc_state,
10220 new_crtc_state,
10221 false,
10222 &lock_and_validation_needed);
10223 if (ret)
10224 goto fail;
10225 }
10227 /* Enable all crtcs which require enable */
10228 for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
10229 ret = dm_update_crtc_state(&adev->dm, state, crtc,
10230 old_crtc_state,
10231 new_crtc_state,
10232 true,
10233 &lock_and_validation_needed);
10234 if (ret)
10235 goto fail;
10236 }
10238 ret = validate_overlay(state);
10239 if (ret)
10240 goto fail;
10242 /* Add new/modified planes */
10243 for_each_oldnew_plane_in_state_reverse(state, plane, old_plane_state, new_plane_state, i) {
10244 ret = dm_update_plane_state(dc, state, plane,
10245 old_plane_state,
10246 new_plane_state,
10247 true,
10248 &lock_and_validation_needed);
10249 if (ret)
10250 goto fail;
10251 }
10253 /* Run this here since we want to validate the streams we created */
10254 ret = drm_atomic_helper_check_planes(dev, state);
10255 if (ret)
10256 goto fail;
10258 /* Check cursor planes scaling */
10259 for_each_new_crtc_in_state(state, crtc, new_crtc_state, i) {
10260 ret = dm_check_crtc_cursor(state, crtc, new_crtc_state);
10261 if (ret)
10262 goto fail;
10263 }
10265 if (state->legacy_cursor_update) {
10266 /*
10267  * This is a fast cursor update coming from the plane update
10268  * helper, check if it can be done asynchronously for better
10269  * performance.
10270  */
10271 state->async_update =
10272 !drm_atomic_helper_async_check(dev, state);
10274 /*
10275  * Skip the remaining global validation if this is an async
10276  * update. Cursor updates can be done without affecting
10277  * state or bandwidth calcs and this avoids the performance
10278  * penalty of locking the private state object and
10279  * allocating a new dc_state.
10280  */
10281 if (state->async_update)
10282 return 0;
10283 }
10285 /* Check scaling and underscan changes */
10286 /* TODO Removed scaling changes validation due to inability to commit
10287  * new stream into context w/o causing full reset. Need to
10288  * decide how to handle.
10289  */
10290 for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) {
10291 struct dm_connector_state *dm_old_con_state = to_dm_connector_state(old_con_state);
10292 struct dm_connector_state *dm_new_con_state = to_dm_connector_state(new_con_state);
10293 struct amdgpu_crtc *acrtc = to_amdgpu_crtc(dm_new_con_state->base.crtc);
10295 /* Skip any modesets/resets */
10296 if (!acrtc || drm_atomic_crtc_needs_modeset(
10297 drm_atomic_get_new_crtc_state(state, &acrtc->base)))
10298 continue;
10300 /* Skip anything that is not a scaling or underscan change */
10301 if (!is_scaling_state_different(dm_new_con_state, dm_old_con_state))
10302 continue;
10304 lock_and_validation_needed = true;
10305 }
10307 /*
10308  * Streams and planes are reset when there are changes that affect
10309  * bandwidth. Anything that affects bandwidth needs to go through
10310  * DC global validation to ensure that the configuration can be applied
10311  * to hardware.
10312  *
10313  * We have to currently stall out here in atomic_check for outstanding
10314  * commits to finish in this case because our IRQ handlers reference
10315  * DRM state directly - we can end up disabling interrupts too early
10316  * otherwise.
10317  *
10318  * TODO: Remove this stall and drop DM state private objects.
10319  */
10320 if (lock_and_validation_needed) {
10321 ret = dm_atomic_get_state(state, &dm_state);
10322 if (ret)
10323 goto fail;
10325 ret = do_aquire_global_lock(dev, state);
10326 if (ret)
10327 goto fail;
10329 #if defined(CONFIG_DRM_AMD_DC_DCN)
10330 if (!compute_mst_dsc_configs_for_state(state, dm_state->context))
10331 goto fail;
10333 ret = dm_update_mst_vcpi_slots_for_dsc(state, dm_state->context);
10334 if (ret)
10335 goto fail;
10336 #endif
10338 /*
10339  * Perform validation of MST topology in the state:
10340  * We need to perform MST atomic check before calling
10341  * dc_validate_global_state(), or there is a chance
10342  * to get stuck in an infinite loop and hang eventually.
10343  */
10344 ret = drm_dp_mst_atomic_check(state);
10345 if (ret)
10346 goto fail;
10347 status = dc_validate_global_state(dc, dm_state->context, false);
10348 if (status != DC_OK) {
10349 DC_LOG_WARNING("DC global validation failure: %s (%d)",
10350 dc_status_to_str(status), status);
10351 ret = -EINVAL;
10352 goto fail;
10353 }
10354 } else {
10355 /*
10356 * The commit is a fast update. Fast updates shouldn't change
10357 * the DC context, affect global validation, and can have their
10358 * commit work done in parallel with other commits not touching
10359 * the same resource. If we have a new DC context as part of
10360 * the DM atomic state from validation we need to free it and
10361 * retain the existing one instead.
10363 * Furthermore, since the DM atomic state only contains the DC
10364 * context and can safely be annulled, we can free the state
10365 * and clear the associated private object now to free
10366  * some memory and avoid a possible use-after-free later.
10367  */
10369 for (i = 0; i < state->num_private_objs; i++) {
10370 struct drm_private_obj *obj = state->private_objs[i].ptr;
10372 if (obj->funcs == adev->dm.atomic_obj.funcs) {
10373 int j = state->num_private_objs-1;
10375 dm_atomic_destroy_state(obj,
10376 state->private_objs[i].state);
10378 /* If i is not at the end of the array then the
10379 * last element needs to be moved to where i was
10380  * before the array can safely be truncated.
10381  */
10382 if (i != j)
10383 state->private_objs[i] =
10384 state->private_objs[j];
10386 state->private_objs[j].ptr = NULL;
10387 state->private_objs[j].state = NULL;
10388 state->private_objs[j].old_state = NULL;
10389 state->private_objs[j].new_state = NULL;
10391 state->num_private_objs = j;
10392 break;
10393 }
10394 }
10395 }
10397 /* Store the overall update type for use later in atomic check. */
10398 for_each_new_crtc_in_state (state, crtc, new_crtc_state, i) {
10399 struct dm_crtc_state *dm_new_crtc_state =
10400 to_dm_crtc_state(new_crtc_state);
10402 dm_new_crtc_state->update_type = lock_and_validation_needed ?
10403 UPDATE_TYPE_FULL :
10404 UPDATE_TYPE_FAST;
10405 }
10407 /* Must be success */
10408 WARN_ON(ret);
10410 trace_amdgpu_dm_atomic_check_finish(state, ret);
10412 return ret;
10414 fail:
10415 if (ret == -EDEADLK)
10416 DRM_DEBUG_DRIVER("Atomic check stopped to avoid deadlock.\n");
10417 else if (ret == -EINTR || ret == -EAGAIN || ret == -ERESTARTSYS)
10418 DRM_DEBUG_DRIVER("Atomic check stopped due to signal.\n");
10420 DRM_DEBUG_DRIVER("Atomic check failed with err: %d \n", ret);
10422 trace_amdgpu_dm_atomic_check_finish(state, ret);
10427 static bool is_dp_capable_without_timing_msa(struct dc *dc,
10428 struct amdgpu_dm_connector *amdgpu_dm_connector)
10429 {
10430 uint8_t dpcd_data;
10431 bool capable = false;
10433 if (amdgpu_dm_connector->dc_link &&
10434 dm_helpers_dp_read_dpcd(
10435 NULL,
10436 amdgpu_dm_connector->dc_link,
10437 DP_DOWN_STREAM_PORT_COUNT,
10438 &dpcd_data,
10439 sizeof(dpcd_data))) {
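/*
 * DP_MSA_TIMING_PAR_IGNORED in DP_DOWN_STREAM_PORT_COUNT means the sink
 * can ignore the MSA video timing parameters, which is a prerequisite for
 * driving it with variable (adaptive sync) vertical timing.
 */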
10440 capable = (dpcd_data & DP_MSA_TIMING_PAR_IGNORED) ? true:false;
10446 static bool parse_edid_cea(struct amdgpu_dm_connector *aconnector,
10447 uint8_t *edid_ext, int len,
10448 struct amdgpu_hdmi_vsdb_info *vsdb_info)
10449 {
10450 int i;
10451 struct amdgpu_device *adev = drm_to_adev(aconnector->base.dev);
10452 struct dc *dc = adev->dm.dc;
10454 /* send extension block to DMCU for parsing */
10455 for (i = 0; i < len; i += 8) {
10459 /* send 8 bytes a time */
10460 if (!dc_edid_parser_send_cea(dc, i, len, &edid_ext[i], 8))
10461 return false;
10463 if (i + 8 == len) {
10464 /* entire EDID block sent, expect the parse result */
10465 int version, min_rate, max_rate;
10467 res = dc_edid_parser_recv_amd_vsdb(dc, &version, &min_rate, &max_rate);
10468 if (res) {
10469 /* amd vsdb found */
10470 vsdb_info->freesync_supported = 1;
10471 vsdb_info->amd_vsdb_version = version;
10472 vsdb_info->min_refresh_rate_hz = min_rate;
10473 vsdb_info->max_refresh_rate_hz = max_rate;
10474 return true;
10475 }
10477 /* no AMD VSDB found in this block */
10478 return false;
10479 }
10481 res = dc_edid_parser_recv_cea_ack(dc, &offset);
10482 if (!res)
10483 return false;
10484 }
10486 return false;
10487 }
10489 static int parse_hdmi_amd_vsdb(struct amdgpu_dm_connector *aconnector,
10490 struct edid *edid, struct amdgpu_hdmi_vsdb_info *vsdb_info)
10491 {
10492 uint8_t *edid_ext = NULL;
10493 int i;
10494 bool valid_vsdb_found = false;
10496 /*----- drm_find_cea_extension() -----*/
10497 /* No EDID or EDID extensions */
10498 if (edid == NULL || edid->extensions == 0)
10499 return -ENODEV;
10501 /* Find CEA extension */
10502 for (i = 0; i < edid->extensions; i++) {
10503 edid_ext = (uint8_t *)edid + EDID_LENGTH * (i + 1);
10504 if (edid_ext[0] == CEA_EXT)
10505 break;
10506 }
10508 if (i == edid->extensions)
10509 return -ENODEV;
10511 /*----- cea_db_offsets() -----*/
10512 if (edid_ext[0] != CEA_EXT)
10513 return -ENODEV;
10515 valid_vsdb_found = parse_edid_cea(aconnector, edid_ext, EDID_LENGTH, vsdb_info);
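/*
 * On success the index of the CEA extension block that was parsed is
 * returned; -ENODEV means no usable FreeSync VSDB information was found.
 */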
10517 return valid_vsdb_found ? i : -ENODEV;
10518 }
10520 void amdgpu_dm_update_freesync_caps(struct drm_connector *connector,
10521 struct edid *edid)
10522 {
10524 struct detailed_timing *timing;
10525 struct detailed_non_pixel *data;
10526 struct detailed_data_monitor_range *range;
10527 struct amdgpu_dm_connector *amdgpu_dm_connector =
10528 to_amdgpu_dm_connector(connector);
10529 struct dm_connector_state *dm_con_state = NULL;
10530 int i;
10531 struct drm_device *dev = connector->dev;
10532 struct amdgpu_device *adev = drm_to_adev(dev);
10533 bool freesync_capable = false;
10534 struct amdgpu_hdmi_vsdb_info vsdb_info = {0};
10536 if (!connector->state) {
10537 DRM_ERROR("%s - Connector has no state", __func__);
10542 dm_con_state = to_dm_connector_state(connector->state);
10544 amdgpu_dm_connector->min_vfreq = 0;
10545 amdgpu_dm_connector->max_vfreq = 0;
10546 amdgpu_dm_connector->pixel_clock_mhz = 0;
10548 goto update;
10549 }
10551 dm_con_state = to_dm_connector_state(connector->state);
10553 if (!amdgpu_dm_connector->dc_sink) {
10554 DRM_ERROR("dc_sink NULL, could not add free_sync module.\n");
10555 goto update;
10556 }
10557 if (!adev->dm.freesync_module)
10558 goto update;
10561 if (amdgpu_dm_connector->dc_sink->sink_signal == SIGNAL_TYPE_DISPLAY_PORT
10562 || amdgpu_dm_connector->dc_sink->sink_signal == SIGNAL_TYPE_EDP) {
10563 bool edid_check_required = false;
10565 if (edid) {
10566 edid_check_required = is_dp_capable_without_timing_msa(
10567 adev->dm.dc,
10568 amdgpu_dm_connector);
10569 }
10571 if (edid_check_required == true && (edid->version > 1 ||
10572 (edid->version == 1 && edid->revision > 1))) {
10573 for (i = 0; i < 4; i++) {
10575 timing = &edid->detailed_timings[i];
10576 data = &timing->data.other_data;
10577 range = &data->data.range;
10578 /*
10579  * Check if monitor has continuous frequency mode
10580  */
10581 if (data->type != EDID_DETAIL_MONITOR_RANGE)
10582 continue;
10583 /*
10584  * Check for flag range limits only. If flag == 1 then
10585  * no additional timing information provided.
10586  * Default GTF, GTF Secondary curve and CVT are not
10587  * supported
10588  */
10589 if (range->flags != 1)
10590 continue;
10592 amdgpu_dm_connector->min_vfreq = range->min_vfreq;
10593 amdgpu_dm_connector->max_vfreq = range->max_vfreq;
10594 amdgpu_dm_connector->pixel_clock_mhz =
10595 range->pixel_clock_mhz * 10;
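/*
 * The EDID range-limits descriptor stores the maximum pixel clock in
 * units of 10 MHz, hence the multiplication by 10 above.
 */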
10597 connector->display_info.monitor_range.min_vfreq = range->min_vfreq;
10598 connector->display_info.monitor_range.max_vfreq = range->max_vfreq;
10600 break;
10601 }
10603 if (amdgpu_dm_connector->max_vfreq -
10604 amdgpu_dm_connector->min_vfreq > 10) {
10606 freesync_capable = true;
10607 }
10608 }
10609 } else if (edid && amdgpu_dm_connector->dc_sink->sink_signal == SIGNAL_TYPE_HDMI_TYPE_A) {
10610 i = parse_hdmi_amd_vsdb(amdgpu_dm_connector, edid, &vsdb_info);
10611 if (i >= 0 && vsdb_info.freesync_supported) {
10612 timing = &edid->detailed_timings[i];
10613 data = &timing->data.other_data;
10615 amdgpu_dm_connector->min_vfreq = vsdb_info.min_refresh_rate_hz;
10616 amdgpu_dm_connector->max_vfreq = vsdb_info.max_refresh_rate_hz;
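/*
 * Only report FreeSync capability when the advertised range gives a usable
 * VRR window, i.e. more than 10 Hz between min and max refresh rate.
 */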
10617 if (amdgpu_dm_connector->max_vfreq - amdgpu_dm_connector->min_vfreq > 10)
10618 freesync_capable = true;
10620 connector->display_info.monitor_range.min_vfreq = vsdb_info.min_refresh_rate_hz;
10621 connector->display_info.monitor_range.max_vfreq = vsdb_info.max_refresh_rate_hz;
10622 }
10623 }
10625 update:
10626 if (dm_con_state)
10627 dm_con_state->freesync_capable = freesync_capable;
10629 if (connector->vrr_capable_property)
10630 drm_connector_set_vrr_capable_property(connector,
10631 freesync_capable);
10632 }
10634 static void amdgpu_dm_set_psr_caps(struct dc_link *link)
10635 {
10636 uint8_t dpcd_data[EDP_PSR_RECEIVER_CAP_SIZE];
10638 if (!(link->connector_signal & SIGNAL_TYPE_EDP))
10639 return;
10640 if (link->type == dc_connection_none)
10641 return;
10642 if (dm_helpers_dp_read_dpcd(NULL, link, DP_PSR_SUPPORT,
10643 dpcd_data, sizeof(dpcd_data))) {
10644 link->dpcd_caps.psr_caps.psr_version = dpcd_data[0];
10646 if (dpcd_data[0] == 0) {
10647 link->psr_settings.psr_version = DC_PSR_VERSION_UNSUPPORTED;
10648 link->psr_settings.psr_feature_enabled = false;
10649 } else {
10650 link->psr_settings.psr_version = DC_PSR_VERSION_1;
10651 link->psr_settings.psr_feature_enabled = true;
10652 }
10654 DRM_INFO("PSR support:%d\n", link->psr_settings.psr_feature_enabled);
10655 }
10656 }
10658 /**
10659  * amdgpu_dm_link_setup_psr() - configure psr link
10660  * @stream: stream state
10661  *
10662  * Return: true if success
10663  */
10664 static bool amdgpu_dm_link_setup_psr(struct dc_stream_state *stream)
10665 {
10666 struct dc_link *link = NULL;
10667 struct psr_config psr_config = {0};
10668 struct psr_context psr_context = {0};
10670 bool ret = false;
10671 if (stream == NULL)
10672 return false;
10674 link = stream->link;
10676 psr_config.psr_version = link->dpcd_caps.psr_caps.psr_version;
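/*
 * psr_version comes from the sink DPCD cached by amdgpu_dm_set_psr_caps();
 * zero means the panel reported no PSR support, so setup is skipped.
 */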
10678 if (psr_config.psr_version > 0) {
10679 psr_config.psr_exit_link_training_required = 0x1;
10680 psr_config.psr_frame_capture_indication_req = 0;
10681 psr_config.psr_rfb_setup_time = 0x37;
10682 psr_config.psr_sdp_transmit_line_num_deadline = 0x20;
10683 psr_config.allow_smu_optimizations = 0x0;
10685 ret = dc_link_setup_psr(link, stream, &psr_config, &psr_context);
10688 DRM_DEBUG_DRIVER("PSR link: %d\n", link->psr_settings.psr_feature_enabled);
10693 /**
10694  * amdgpu_dm_psr_enable() - enable psr f/w
10695  * @stream: stream state
10696  *
10697  * Return: true if success
10698  */
10699 bool amdgpu_dm_psr_enable(struct dc_stream_state *stream)
10700 {
10701 struct dc_link *link = stream->link;
10702 unsigned int vsync_rate_hz = 0;
10703 struct dc_static_screen_params params = {0};
10704 /* Calculate number of static frames before generating interrupt to
10705  * the PSR firmware.
10706  */
10707 // Init fail safe of 2 frames static
10708 unsigned int num_frames_static = 2;
10710 DRM_DEBUG_DRIVER("Enabling psr...\n");
10712 vsync_rate_hz = div64_u64(div64_u64((
10713 stream->timing.pix_clk_100hz * 100),
10714 stream->timing.v_total),
10715 stream->timing.h_total);
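/*
 * Nominal refresh rate in Hz: pixel clock (in Hz) divided by the total
 * frame size. For a 1080p60 timing, 148500000 / (2200 * 1125) = 60 Hz.
 */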
10717 /*
10718  * Calculate number of frames such that at least 30 ms of time has
10719  * passed.
10720  */
10721 if (vsync_rate_hz != 0) {
10722 unsigned int frame_time_microsec = 1000000 / vsync_rate_hz;
10723 num_frames_static = (30000 / frame_time_microsec) + 1;
10724 }
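/*
 * Example: at 60 Hz the frame time is ~16666 us, so 30000 / 16666 + 1 = 2
 * frames, matching the 2-frame fail-safe default above.
 */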
10726 params.triggers.cursor_update = true;
10727 params.triggers.overlay_update = true;
10728 params.triggers.surface_update = true;
10729 params.num_frames = num_frames_static;
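/*
 * Cursor, overlay and surface updates all count as screen activity; only
 * after num_frames_static frames without any of them is the screen
 * treated as static.
 */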
10731 dc_stream_set_static_screen_params(link->ctx->dc,
10732 &stream, 1,
10733 &params);
10735 return dc_link_set_psr_allow_active(link, true, false, false);
10736 }
10738 /**
10739  * amdgpu_dm_psr_disable() - disable psr f/w
10740  * @stream: stream state
10741  *
10742  * Return: true if success
10743  */
10744 static bool amdgpu_dm_psr_disable(struct dc_stream_state *stream)
10745 {
10747 DRM_DEBUG_DRIVER("Disabling psr...\n");
10749 return dc_link_set_psr_allow_active(stream->link, false, true, false);
10750 }
10752 /**
10753  * amdgpu_dm_psr_disable_all() - disable psr f/w if psr is enabled on any stream
10754  * @dm: display manager
10755  *
10756  * Return: true if success
10757  */
10758 static bool amdgpu_dm_psr_disable_all(struct amdgpu_display_manager *dm)
10759 {
10760 DRM_DEBUG_DRIVER("Disabling psr if psr is enabled on any stream\n");
10761 return dc_set_psr_allow_active(dm->dc, false);
10762 }
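/*
 * Apply the force_timing_sync debugfs setting to every stream in the
 * current DC state and re-run CRTC timing synchronization.
 */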
10764 void amdgpu_dm_trigger_timing_sync(struct drm_device *dev)
10765 {
10766 struct amdgpu_device *adev = drm_to_adev(dev);
10767 struct dc *dc = adev->dm.dc;
10768 int i;
10770 mutex_lock(&adev->dm.dc_lock);
10771 if (dc->current_state) {
10772 for (i = 0; i < dc->current_state->stream_count; ++i)
10773 dc->current_state->streams[i]
10774 ->triggered_crtc_reset.enabled =
10775 adev->dm.force_timing_sync;
10777 dm_enable_per_frame_crtc_master_sync(dc->current_state);
10778 dc_trigger_sync(dc, dc->current_state);
10779 }
10780 mutex_unlock(&adev->dm.dc_lock);
10781 }
10783 void dm_write_reg_func(const struct dc_context *ctx, uint32_t address,
10784 uint32_t value, const char *func_name)
10785 {
10786 #ifdef DM_CHECK_ADDR_0
10787 if (address == 0) {
10788 DC_ERR("invalid register write. address = 0");
10792 cgs_write_register(ctx->cgs_device, address, value);
10793 trace_amdgpu_dc_wreg(&ctx->perf_trace->write_count, address, value);
10796 uint32_t dm_read_reg_func(const struct dc_context *ctx, uint32_t address,
10797 const char *func_name)
10800 #ifdef DM_CHECK_ADDR_0
10801 if (address == 0) {
10802 DC_ERR("invalid register read; address = 0\n");
10807 if (ctx->dmub_srv &&
10808 ctx->dmub_srv->reg_helper_offload.gather_in_progress &&
10809 !ctx->dmub_srv->reg_helper_offload.should_burst_write) {
10814 value = cgs_read_register(ctx->cgs_device, address);
10816 trace_amdgpu_dc_rreg(&ctx->perf_trace->read_count, address, value);
10821 int amdgpu_dm_process_dmub_aux_transfer_sync(struct dc_context *ctx, unsigned int linkIndex,
10822 struct aux_payload *payload, enum aux_return_code_type *operation_result)
10823 {
10824 struct amdgpu_device *adev = ctx->driver_context;
10825 int ret = 0;
10827 dc_process_dmub_aux_transfer_async(ctx->dc, linkIndex, payload);
10828 ret = wait_for_completion_interruptible_timeout(&adev->dm.dmub_aux_transfer_done, 10*HZ);
10829 if (ret == 0) {
10830 *operation_result = AUX_RET_ERROR_TIMEOUT;
10831 return -1;
10832 }
10833 *operation_result = (enum aux_return_code_type)adev->dm.dmub_notify->result;
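/*
 * adev->dm.dmub_notify was filled in before dmub_aux_transfer_done was
 * completed; it carries both the AUX return code and, for reads, the
 * reply data that is copied into the payload below.
 */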
10835 if (adev->dm.dmub_notify->result == AUX_RET_SUCCESS) {
10836 (*payload->reply) = adev->dm.dmub_notify->aux_reply.command;
10838 // For read case, Copy data to payload
10839 if (!payload->write && adev->dm.dmub_notify->aux_reply.length &&
10840 (*payload->reply == AUX_TRANSACTION_REPLY_AUX_ACK))
10841 memcpy(payload->data, adev->dm.dmub_notify->aux_reply.data,
10842 adev->dm.dmub_notify->aux_reply.length);
10843 }
10845 return adev->dm.dmub_notify->aux_reply.length;