2 * Copyright 2015 Advanced Micro Devices, Inc.
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
26 /* The caprices of the preprocessor require that this be declared right here */
27 #define CREATE_TRACE_POINTS
29 #include "dm_services_types.h"
31 #include "dc/inc/core_types.h"
32 #include "dal_asic_id.h"
33 #include "dmub/dmub_srv.h"
34 #include "dc/inc/hw/dmcu.h"
35 #include "dc/inc/hw/abm.h"
36 #include "dc/dc_dmub_srv.h"
37 #include "dc/dc_edid_parser.h"
38 #include "dc/dc_stat.h"
39 #include "amdgpu_dm_trace.h"
43 #include "amdgpu_display.h"
44 #include "amdgpu_ucode.h"
46 #include "amdgpu_dm.h"
47 #ifdef CONFIG_DRM_AMD_DC_HDCP
48 #include "amdgpu_dm_hdcp.h"
49 #include <drm/drm_hdcp.h>
51 #include "amdgpu_pm.h"
53 #include "amd_shared.h"
54 #include "amdgpu_dm_irq.h"
55 #include "dm_helpers.h"
56 #include "amdgpu_dm_mst_types.h"
57 #if defined(CONFIG_DEBUG_FS)
58 #include "amdgpu_dm_debugfs.h"
61 #include "ivsrcid/ivsrcid_vislands30.h"
63 #include "i2caux_interface.h"
64 #include <linux/module.h>
65 #include <linux/moduleparam.h>
66 #include <linux/types.h>
67 #include <linux/pm_runtime.h>
68 #include <linux/pci.h>
69 #include <linux/firmware.h>
70 #include <linux/component.h>
72 #include <drm/drm_atomic.h>
73 #include <drm/drm_atomic_uapi.h>
74 #include <drm/drm_atomic_helper.h>
75 #include <drm/drm_dp_mst_helper.h>
76 #include <drm/drm_fb_helper.h>
77 #include <drm/drm_fourcc.h>
78 #include <drm/drm_edid.h>
79 #include <drm/drm_vblank.h>
80 #include <drm/drm_audio_component.h>
82 #if defined(CONFIG_DRM_AMD_DC_DCN)
83 #include "ivsrcid/dcn/irqsrcs_dcn_1_0.h"
85 #include "dcn/dcn_1_0_offset.h"
86 #include "dcn/dcn_1_0_sh_mask.h"
87 #include "soc15_hw_ip.h"
88 #include "vega10_ip_offset.h"
90 #include "soc15_common.h"
93 #include "modules/inc/mod_freesync.h"
94 #include "modules/power/power_helpers.h"
95 #include "modules/inc/mod_info_packet.h"
97 #define FIRMWARE_RENOIR_DMUB "amdgpu/renoir_dmcub.bin"
98 MODULE_FIRMWARE(FIRMWARE_RENOIR_DMUB);
99 #define FIRMWARE_SIENNA_CICHLID_DMUB "amdgpu/sienna_cichlid_dmcub.bin"
100 MODULE_FIRMWARE(FIRMWARE_SIENNA_CICHLID_DMUB);
101 #define FIRMWARE_NAVY_FLOUNDER_DMUB "amdgpu/navy_flounder_dmcub.bin"
102 MODULE_FIRMWARE(FIRMWARE_NAVY_FLOUNDER_DMUB);
103 #define FIRMWARE_GREEN_SARDINE_DMUB "amdgpu/green_sardine_dmcub.bin"
104 MODULE_FIRMWARE(FIRMWARE_GREEN_SARDINE_DMUB);
105 #define FIRMWARE_VANGOGH_DMUB "amdgpu/vangogh_dmcub.bin"
106 MODULE_FIRMWARE(FIRMWARE_VANGOGH_DMUB);
107 #define FIRMWARE_DIMGREY_CAVEFISH_DMUB "amdgpu/dimgrey_cavefish_dmcub.bin"
108 MODULE_FIRMWARE(FIRMWARE_DIMGREY_CAVEFISH_DMUB);
110 #define FIRMWARE_RAVEN_DMCU "amdgpu/raven_dmcu.bin"
111 MODULE_FIRMWARE(FIRMWARE_RAVEN_DMCU);
113 #define FIRMWARE_NAVI12_DMCU "amdgpu/navi12_dmcu.bin"
114 MODULE_FIRMWARE(FIRMWARE_NAVI12_DMCU);
116 /* Number of bytes in PSP header for firmware. */
117 #define PSP_HEADER_BYTES 0x100
119 /* Number of bytes in PSP footer for firmware. */
120 #define PSP_FOOTER_BYTES 0x100
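/* The DMCUB firmware image wraps its inst_const section in a PSP header and
 * footer of this size; both are stripped when sizing and copying the CW0
 * region in dm_dmub_sw_init() and dm_dmub_hw_init() below.
 */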
125 * The AMDgpu display manager, **amdgpu_dm** (or even simpler,
126 * **dm**) sits between DRM and DC. It acts as a liaison, converting DRM
127 * requests into DC requests, and DC responses into DRM responses.
129 * The root control structure is &struct amdgpu_display_manager.
132 /* basic init/fini API */
133 static int amdgpu_dm_init(struct amdgpu_device *adev);
134 static void amdgpu_dm_fini(struct amdgpu_device *adev);
135 static bool is_freesync_video_mode(const struct drm_display_mode *mode, struct amdgpu_dm_connector *aconnector);
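/* Map the dongle type DC reports in the DPCD caps onto DRM's subconnector
 * enumeration, so userspace can tell what kind of adapter sits behind a
 * DisplayPort connector.
 */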
137 static enum drm_mode_subconnector get_subconnector_type(struct dc_link *link)
139 switch (link->dpcd_caps.dongle_type) {
140 case DISPLAY_DONGLE_NONE:
141 return DRM_MODE_SUBCONNECTOR_Native;
142 case DISPLAY_DONGLE_DP_VGA_CONVERTER:
143 return DRM_MODE_SUBCONNECTOR_VGA;
144 case DISPLAY_DONGLE_DP_DVI_CONVERTER:
145 case DISPLAY_DONGLE_DP_DVI_DONGLE:
146 return DRM_MODE_SUBCONNECTOR_DVID;
147 case DISPLAY_DONGLE_DP_HDMI_CONVERTER:
148 case DISPLAY_DONGLE_DP_HDMI_DONGLE:
149 return DRM_MODE_SUBCONNECTOR_HDMIA;
150 case DISPLAY_DONGLE_DP_HDMI_MISMATCHED_DONGLE:
152 return DRM_MODE_SUBCONNECTOR_Unknown;
156 static void update_subconnector_property(struct amdgpu_dm_connector *aconnector)
158 struct dc_link *link = aconnector->dc_link;
159 struct drm_connector *connector = &aconnector->base;
160 enum drm_mode_subconnector subconnector = DRM_MODE_SUBCONNECTOR_Unknown;
162 if (connector->connector_type != DRM_MODE_CONNECTOR_DisplayPort)
165 if (aconnector->dc_sink)
166 subconnector = get_subconnector_type(link);
168 drm_object_property_set_value(&connector->base,
169 connector->dev->mode_config.dp_subconnector_property,
174 * initializes drm_device display related structures, based on the information
175 * provided by DAL. The drm structures are: drm_crtc, drm_connector,
176 * drm_encoder, drm_mode_config
178 * Returns 0 on success
180 static int amdgpu_dm_initialize_drm_device(struct amdgpu_device *adev);
181 /* removes and deallocates the drm structures, created by the above function */
182 static void amdgpu_dm_destroy_drm_device(struct amdgpu_display_manager *dm);
184 static int amdgpu_dm_plane_init(struct amdgpu_display_manager *dm,
185 struct drm_plane *plane,
186 unsigned long possible_crtcs,
187 const struct dc_plane_cap *plane_cap);
188 static int amdgpu_dm_crtc_init(struct amdgpu_display_manager *dm,
189 struct drm_plane *plane,
190 uint32_t link_index);
191 static int amdgpu_dm_connector_init(struct amdgpu_display_manager *dm,
192 struct amdgpu_dm_connector *amdgpu_dm_connector,
194 struct amdgpu_encoder *amdgpu_encoder);
195 static int amdgpu_dm_encoder_init(struct drm_device *dev,
196 struct amdgpu_encoder *aencoder,
197 uint32_t link_index);
199 static int amdgpu_dm_connector_get_modes(struct drm_connector *connector);
201 static void amdgpu_dm_atomic_commit_tail(struct drm_atomic_state *state);
203 static int amdgpu_dm_atomic_check(struct drm_device *dev,
204 struct drm_atomic_state *state);
206 static void handle_cursor_update(struct drm_plane *plane,
207 struct drm_plane_state *old_plane_state);
209 static void amdgpu_dm_set_psr_caps(struct dc_link *link);
210 static bool amdgpu_dm_psr_enable(struct dc_stream_state *stream);
211 static bool amdgpu_dm_link_setup_psr(struct dc_stream_state *stream);
212 static bool amdgpu_dm_psr_disable(struct dc_stream_state *stream);
213 static bool amdgpu_dm_psr_disable_all(struct amdgpu_display_manager *dm);
215 static const struct drm_format_info *
216 amd_get_format_info(const struct drm_mode_fb_cmd2 *cmd);
219 is_timing_unchanged_for_freesync(struct drm_crtc_state *old_crtc_state,
220 struct drm_crtc_state *new_crtc_state);
222 * dm_vblank_get_counter
225 * Get counter for number of vertical blanks
228 * struct amdgpu_device *adev - [in] desired amdgpu device
229 * int crtc - [in] which CRTC to get the counter from
232 * Counter for vertical blanks
234 static u32 dm_vblank_get_counter(struct amdgpu_device *adev, int crtc)
236 if (crtc >= adev->mode_info.num_crtc)
239 struct amdgpu_crtc *acrtc = adev->mode_info.crtcs[crtc];
241 if (acrtc->dm_irq_params.stream == NULL) {
242 DRM_ERROR("dc_stream_state is NULL for crtc '%d'!\n",
247 return dc_stream_get_vblank_counter(acrtc->dm_irq_params.stream);
251 static int dm_crtc_get_scanoutpos(struct amdgpu_device *adev, int crtc,
252 u32 *vbl, u32 *position)
254 uint32_t v_blank_start, v_blank_end, h_position, v_position;
256 if ((crtc < 0) || (crtc >= adev->mode_info.num_crtc))
259 struct amdgpu_crtc *acrtc = adev->mode_info.crtcs[crtc];
261 if (acrtc->dm_irq_params.stream == NULL) {
262 DRM_ERROR("dc_stream_state is NULL for crtc '%d'!\n",
268 * TODO rework base driver to use values directly.
269 * for now parse it back into reg-format
271 dc_stream_get_scanoutpos(acrtc->dm_irq_params.stream,
277 *position = v_position | (h_position << 16);
278 *vbl = v_blank_start | (v_blank_end << 16);
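/* The packed layout above mirrors the legacy scanout-position registers:
 * vertical position and blank-start live in the low 16 bits, horizontal
 * position and blank-end in the high 16 bits.
 */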
284 static bool dm_is_idle(void *handle)
290 static int dm_wait_for_idle(void *handle)
296 static bool dm_check_soft_reset(void *handle)
301 static int dm_soft_reset(void *handle)
307 static struct amdgpu_crtc *
308 get_crtc_by_otg_inst(struct amdgpu_device *adev,
311 struct drm_device *dev = adev_to_drm(adev);
312 struct drm_crtc *crtc;
313 struct amdgpu_crtc *amdgpu_crtc;
315 if (otg_inst == -1) {
317 return adev->mode_info.crtcs[0];
320 list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
321 amdgpu_crtc = to_amdgpu_crtc(crtc);
323 if (amdgpu_crtc->otg_inst == otg_inst)
330 static inline bool amdgpu_dm_vrr_active_irq(struct amdgpu_crtc *acrtc)
332 return acrtc->dm_irq_params.freesync_config.state ==
333 VRR_STATE_ACTIVE_VARIABLE ||
334 acrtc->dm_irq_params.freesync_config.state ==
335 VRR_STATE_ACTIVE_FIXED;
338 static inline bool amdgpu_dm_vrr_active(struct dm_crtc_state *dm_state)
340 return dm_state->freesync_config.state == VRR_STATE_ACTIVE_VARIABLE ||
341 dm_state->freesync_config.state == VRR_STATE_ACTIVE_FIXED;
344 static inline bool is_dc_timing_adjust_needed(struct dm_crtc_state *old_state,
345 struct dm_crtc_state *new_state)
347 if (new_state->freesync_config.state == VRR_STATE_ACTIVE_FIXED)
349 else if (amdgpu_dm_vrr_active(old_state) != amdgpu_dm_vrr_active(new_state))
356 * dm_pflip_high_irq() - Handle pageflip interrupt
357 * @interrupt_params: interrupt parameters carrying the amdgpu device and IRQ source
359 * Handles the pageflip interrupt by notifying all interested parties
360 * that the pageflip has been completed.
362 static void dm_pflip_high_irq(void *interrupt_params)
364 struct amdgpu_crtc *amdgpu_crtc;
365 struct common_irq_params *irq_params = interrupt_params;
366 struct amdgpu_device *adev = irq_params->adev;
368 struct drm_pending_vblank_event *e;
369 uint32_t vpos, hpos, v_blank_start, v_blank_end;
372 amdgpu_crtc = get_crtc_by_otg_inst(adev, irq_params->irq_src - IRQ_TYPE_PFLIP);
374 /* IRQ could occur when in initial stage */
375 /* TODO work and BO cleanup */
376 if (amdgpu_crtc == NULL) {
377 DC_LOG_PFLIP("CRTC is null, returning.\n");
381 spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);
383 if (amdgpu_crtc->pflip_status != AMDGPU_FLIP_SUBMITTED) {
384 DC_LOG_PFLIP("amdgpu_crtc->pflip_status = %d != AMDGPU_FLIP_SUBMITTED(%d) on crtc:%d[%p]\n",
385 amdgpu_crtc->pflip_status,
386 AMDGPU_FLIP_SUBMITTED,
387 amdgpu_crtc->crtc_id,
389 spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
393 /* page flip completed. */
394 e = amdgpu_crtc->event;
395 amdgpu_crtc->event = NULL;
400 vrr_active = amdgpu_dm_vrr_active_irq(amdgpu_crtc);
402 /* Fixed refresh rate, or VRR scanout position outside front-porch? */
404 !dc_stream_get_scanoutpos(amdgpu_crtc->dm_irq_params.stream, &v_blank_start,
405 &v_blank_end, &hpos, &vpos) ||
406 (vpos < v_blank_start)) {
407 /* Update to correct count and vblank timestamp if racing with
408 * vblank irq. This also updates to the correct vblank timestamp
409 * even in VRR mode, as scanout is past the front-porch atm.
411 drm_crtc_accurate_vblank_count(&amdgpu_crtc->base);
413 /* Wake up userspace by sending the pageflip event with proper
414 * count and timestamp of vblank of flip completion.
417 drm_crtc_send_vblank_event(&amdgpu_crtc->base, e);
419 /* Event sent, so done with vblank for this flip */
420 drm_crtc_vblank_put(&amdgpu_crtc->base);
423 /* VRR active and inside front-porch: vblank count and
424 * timestamp for pageflip event will only be up to date after
425 * drm_crtc_handle_vblank() has been executed from late vblank
426 * irq handler after start of back-porch (vline 0). We queue the
427 * pageflip event for send-out by drm_crtc_handle_vblank() with
428 * updated timestamp and count, once it runs after us.
430 * We need to open-code this instead of using the helper
431 * drm_crtc_arm_vblank_event(), as that helper would
432 * call drm_crtc_accurate_vblank_count(), which we must
433 * not call in VRR mode while we are in front-porch!
436 /* sequence will be replaced by real count during send-out. */
437 e->sequence = drm_crtc_vblank_count(&amdgpu_crtc->base);
438 e->pipe = amdgpu_crtc->crtc_id;
440 list_add_tail(&e->base.link, &adev_to_drm(adev)->vblank_event_list);
444 /* Keep track of vblank of this flip for flip throttling. We use the
445 * cooked hw counter, as that one incremented at start of this vblank
446 * of pageflip completion, so last_flip_vblank is the forbidden count
447 * for queueing new pageflips if vsync + VRR is enabled.
449 amdgpu_crtc->dm_irq_params.last_flip_vblank =
450 amdgpu_get_vblank_counter_kms(&amdgpu_crtc->base);
452 amdgpu_crtc->pflip_status = AMDGPU_FLIP_NONE;
453 spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
455 DC_LOG_PFLIP("crtc:%d[%p], pflip_stat:AMDGPU_FLIP_NONE, vrr[%d]-fp %d\n",
456 amdgpu_crtc->crtc_id, amdgpu_crtc,
457 vrr_active, (int) !e);
460 static void dm_vupdate_high_irq(void *interrupt_params)
462 struct common_irq_params *irq_params = interrupt_params;
463 struct amdgpu_device *adev = irq_params->adev;
464 struct amdgpu_crtc *acrtc;
465 struct drm_device *drm_dev;
466 struct drm_vblank_crtc *vblank;
467 ktime_t frame_duration_ns, previous_timestamp;
471 acrtc = get_crtc_by_otg_inst(adev, irq_params->irq_src - IRQ_TYPE_VUPDATE);
474 vrr_active = amdgpu_dm_vrr_active_irq(acrtc);
475 drm_dev = acrtc->base.dev;
476 vblank = &drm_dev->vblank[acrtc->base.index];
477 previous_timestamp = atomic64_read(&irq_params->previous_timestamp);
478 frame_duration_ns = vblank->time - previous_timestamp;
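/* frame_duration_ns is the interval between this and the previous VUPDATE
 * timestamp; when valid it is logged as an instantaneous refresh rate
 * (NSEC_PER_SEC / duration) and the reference timestamp is advanced.
 */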
480 if (frame_duration_ns > 0) {
481 trace_amdgpu_refresh_rate_track(acrtc->base.index,
483 ktime_divns(NSEC_PER_SEC, frame_duration_ns));
484 atomic64_set(&irq_params->previous_timestamp, vblank->time);
487 DC_LOG_VBLANK("crtc:%d, vupdate-vrr:%d\n",
491 /* Core vblank handling is done here after end of front-porch in
492 * vrr mode, as vblank timestamping will give valid results
493 * while now done after front-porch. This will also deliver
494 * page-flip completion events that have been queued to us
495 * if a pageflip happened inside front-porch.
498 drm_crtc_handle_vblank(&acrtc->base);
500 /* BTR processing for pre-DCE12 ASICs */
501 if (acrtc->dm_irq_params.stream &&
502 adev->family < AMDGPU_FAMILY_AI) {
503 spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);
504 mod_freesync_handle_v_update(
505 adev->dm.freesync_module,
506 acrtc->dm_irq_params.stream,
507 &acrtc->dm_irq_params.vrr_params);
509 dc_stream_adjust_vmin_vmax(
511 acrtc->dm_irq_params.stream,
512 &acrtc->dm_irq_params.vrr_params.adjust);
513 spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
520 * dm_crtc_high_irq() - Handles CRTC interrupt
521 * @interrupt_params: used for determining the CRTC instance
523 * Handles the CRTC/VSYNC interrupt by notifying DRM's VBLANK
526 static void dm_crtc_high_irq(void *interrupt_params)
528 struct common_irq_params *irq_params = interrupt_params;
529 struct amdgpu_device *adev = irq_params->adev;
530 struct amdgpu_crtc *acrtc;
534 acrtc = get_crtc_by_otg_inst(adev, irq_params->irq_src - IRQ_TYPE_VBLANK);
538 vrr_active = amdgpu_dm_vrr_active_irq(acrtc);
540 DC_LOG_VBLANK("crtc:%d, vupdate-vrr:%d, planes:%d\n", acrtc->crtc_id,
541 vrr_active, acrtc->dm_irq_params.active_planes);
544 * Core vblank handling at start of front-porch is only possible
545 * in non-vrr mode, as only there vblank timestamping will give
546 * valid results while done in front-porch. Otherwise defer it
547 * to dm_vupdate_high_irq after end of front-porch.
550 drm_crtc_handle_vblank(&acrtc->base);
553 * Following stuff must happen at start of vblank, for crc
554 * computation and below-the-range btr support in vrr mode.
556 amdgpu_dm_crtc_handle_crc_irq(&acrtc->base);
558 /* BTR updates need to happen before VUPDATE on Vega and above. */
559 if (adev->family < AMDGPU_FAMILY_AI)
562 spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);
564 if (acrtc->dm_irq_params.stream &&
565 acrtc->dm_irq_params.vrr_params.supported &&
566 acrtc->dm_irq_params.freesync_config.state ==
567 VRR_STATE_ACTIVE_VARIABLE) {
568 mod_freesync_handle_v_update(adev->dm.freesync_module,
569 acrtc->dm_irq_params.stream,
570 &acrtc->dm_irq_params.vrr_params);
572 dc_stream_adjust_vmin_vmax(adev->dm.dc, acrtc->dm_irq_params.stream,
573 &acrtc->dm_irq_params.vrr_params.adjust);
577 * If there aren't any active_planes then DCH HUBP may be clock-gated.
578 * In that case, pageflip completion interrupts won't fire and pageflip
579 * completion events won't get delivered. Prevent this by sending
580 * pending pageflip events from here if a flip is still pending.
582 * If any planes are enabled, use dm_pflip_high_irq() instead, to
583 * avoid race conditions between flip programming and completion,
584 * which could cause too early flip completion events.
586 if (adev->family >= AMDGPU_FAMILY_RV &&
587 acrtc->pflip_status == AMDGPU_FLIP_SUBMITTED &&
588 acrtc->dm_irq_params.active_planes == 0) {
590 drm_crtc_send_vblank_event(&acrtc->base, acrtc->event);
592 drm_crtc_vblank_put(&acrtc->base);
594 acrtc->pflip_status = AMDGPU_FLIP_NONE;
597 spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
600 #if defined(CONFIG_DRM_AMD_DC_DCN)
602 * dm_dcn_vertical_interrupt0_high_irq() - Handles OTG Vertical interrupt0 for
603 * DCN generation ASICs
604 * @interrupt_params: interrupt parameters
606 * Used to set crc window/read out crc value at vertical line 0 position
608 #if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
609 static void dm_dcn_vertical_interrupt0_high_irq(void *interrupt_params)
611 struct common_irq_params *irq_params = interrupt_params;
612 struct amdgpu_device *adev = irq_params->adev;
613 struct amdgpu_crtc *acrtc;
615 acrtc = get_crtc_by_otg_inst(adev, irq_params->irq_src - IRQ_TYPE_VLINE0);
620 amdgpu_dm_crtc_handle_crc_window_irq(&acrtc->base);
626 * dm_dmub_outbox1_low_irq() - Handles Outbox interrupt
627 * @interrupt_params: used for determining the Outbox instance
629 * Handles the Outbox Interrupt
632 #define DMUB_TRACE_MAX_READ 64
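/* Upper bound on how many trace-buffer entries are drained per outbox
 * interrupt, so the handler cannot spin indefinitely if the firmware keeps
 * producing entries.
 */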
633 static void dm_dmub_outbox1_low_irq(void *interrupt_params)
635 struct dmub_notification notify;
636 struct common_irq_params *irq_params = interrupt_params;
637 struct amdgpu_device *adev = irq_params->adev;
638 struct amdgpu_display_manager *dm = &adev->dm;
639 struct dmcub_trace_buf_entry entry = { 0 };
642 if (dc_enable_dmub_notifications(adev->dm.dc)) {
643 if (irq_params->irq_src == DC_IRQ_SOURCE_DMCUB_OUTBOX) {
645 dc_stat_get_dmub_notification(adev->dm.dc, &notify);
646 } while (notify.pending_notification);
648 if (adev->dm.dmub_notify)
649 memcpy(adev->dm.dmub_notify, &notify, sizeof(struct dmub_notification));
650 if (notify.type == DMUB_NOTIFICATION_AUX_REPLY)
651 complete(&adev->dm.dmub_aux_transfer_done);
652 // TODO : HPD Implementation
655 DRM_ERROR("DM: Failed to receive correct outbox IRQ!\n");
661 if (dc_dmub_srv_get_dmub_outbox0_msg(dm->dc, &entry)) {
662 trace_amdgpu_dmub_trace_high_irq(entry.trace_code, entry.tick_count,
663 entry.param0, entry.param1);
665 DRM_DEBUG_DRIVER("trace_code:%u, tick_count:%u, param0:%u, param1:%u\n",
666 entry.trace_code, entry.tick_count, entry.param0, entry.param1);
672 } while (count <= DMUB_TRACE_MAX_READ);
674 ASSERT(count <= DMUB_TRACE_MAX_READ);
677 static int dm_set_clockgating_state(void *handle,
678 enum amd_clockgating_state state)
683 static int dm_set_powergating_state(void *handle,
684 enum amd_powergating_state state)
689 /* Prototypes of private functions */
690 static int dm_early_init(void* handle);
692 /* Allocate memory for FBC compressed data */
693 static void amdgpu_dm_fbc_init(struct drm_connector *connector)
695 struct drm_device *dev = connector->dev;
696 struct amdgpu_device *adev = drm_to_adev(dev);
697 struct dm_compressor_info *compressor = &adev->dm.compressor;
698 struct amdgpu_dm_connector *aconn = to_amdgpu_dm_connector(connector);
699 struct drm_display_mode *mode;
700 unsigned long max_size = 0;
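/* The compressed buffer is sized below for the largest mode on the
 * connector's mode list, at 4 bytes per pixel, and allocated from GTT so
 * DC can program it as fbc_gpu_addr.
 */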
702 if (adev->dm.dc->fbc_compressor == NULL)
705 if (aconn->dc_link->connector_signal != SIGNAL_TYPE_EDP)
708 if (compressor->bo_ptr)
712 list_for_each_entry(mode, &connector->modes, head) {
713 if (max_size < mode->htotal * mode->vtotal)
714 max_size = mode->htotal * mode->vtotal;
718 int r = amdgpu_bo_create_kernel(adev, max_size * 4, PAGE_SIZE,
719 AMDGPU_GEM_DOMAIN_GTT, &compressor->bo_ptr,
720 &compressor->gpu_addr, &compressor->cpu_addr);
723 DRM_ERROR("DM: Failed to initialize FBC\n");
725 adev->dm.dc->ctx->fbc_gpu_addr = compressor->gpu_addr;
726 DRM_INFO("DM: FBC alloc %lu\n", max_size*4);
733 static int amdgpu_dm_audio_component_get_eld(struct device *kdev, int port,
734 int pipe, bool *enabled,
735 unsigned char *buf, int max_bytes)
737 struct drm_device *dev = dev_get_drvdata(kdev);
738 struct amdgpu_device *adev = drm_to_adev(dev);
739 struct drm_connector *connector;
740 struct drm_connector_list_iter conn_iter;
741 struct amdgpu_dm_connector *aconnector;
746 mutex_lock(&adev->dm.audio_lock);
748 drm_connector_list_iter_begin(dev, &conn_iter);
749 drm_for_each_connector_iter(connector, &conn_iter) {
750 aconnector = to_amdgpu_dm_connector(connector);
751 if (aconnector->audio_inst != port)
755 ret = drm_eld_size(connector->eld);
756 memcpy(buf, connector->eld, min(max_bytes, ret));
760 drm_connector_list_iter_end(&conn_iter);
762 mutex_unlock(&adev->dm.audio_lock);
764 DRM_DEBUG_KMS("Get ELD : idx=%d ret=%d en=%d\n", port, ret, *enabled);
769 static const struct drm_audio_component_ops amdgpu_dm_audio_component_ops = {
770 .get_eld = amdgpu_dm_audio_component_get_eld,
773 static int amdgpu_dm_audio_component_bind(struct device *kdev,
774 struct device *hda_kdev, void *data)
776 struct drm_device *dev = dev_get_drvdata(kdev);
777 struct amdgpu_device *adev = drm_to_adev(dev);
778 struct drm_audio_component *acomp = data;
780 acomp->ops = &amdgpu_dm_audio_component_ops;
782 adev->dm.audio_component = acomp;
787 static void amdgpu_dm_audio_component_unbind(struct device *kdev,
788 struct device *hda_kdev, void *data)
790 struct drm_device *dev = dev_get_drvdata(kdev);
791 struct amdgpu_device *adev = drm_to_adev(dev);
792 struct drm_audio_component *acomp = data;
796 adev->dm.audio_component = NULL;
799 static const struct component_ops amdgpu_dm_audio_component_bind_ops = {
800 .bind = amdgpu_dm_audio_component_bind,
801 .unbind = amdgpu_dm_audio_component_unbind,
804 static int amdgpu_dm_audio_init(struct amdgpu_device *adev)
811 adev->mode_info.audio.enabled = true;
813 adev->mode_info.audio.num_pins = adev->dm.dc->res_pool->audio_count;
815 for (i = 0; i < adev->mode_info.audio.num_pins; i++) {
816 adev->mode_info.audio.pin[i].channels = -1;
817 adev->mode_info.audio.pin[i].rate = -1;
818 adev->mode_info.audio.pin[i].bits_per_sample = -1;
819 adev->mode_info.audio.pin[i].status_bits = 0;
820 adev->mode_info.audio.pin[i].category_code = 0;
821 adev->mode_info.audio.pin[i].connected = false;
822 adev->mode_info.audio.pin[i].id =
823 adev->dm.dc->res_pool->audios[i]->inst;
824 adev->mode_info.audio.pin[i].offset = 0;
827 ret = component_add(adev->dev, &amdgpu_dm_audio_component_bind_ops);
831 adev->dm.audio_registered = true;
836 static void amdgpu_dm_audio_fini(struct amdgpu_device *adev)
841 if (!adev->mode_info.audio.enabled)
844 if (adev->dm.audio_registered) {
845 component_del(adev->dev, &amdgpu_dm_audio_component_bind_ops);
846 adev->dm.audio_registered = false;
849 /* TODO: Disable audio? */
851 adev->mode_info.audio.enabled = false;
854 static void amdgpu_dm_audio_eld_notify(struct amdgpu_device *adev, int pin)
856 struct drm_audio_component *acomp = adev->dm.audio_component;
858 if (acomp && acomp->audio_ops && acomp->audio_ops->pin_eld_notify) {
859 DRM_DEBUG_KMS("Notify ELD: %d\n", pin);
861 acomp->audio_ops->pin_eld_notify(acomp->audio_ops->audio_ptr,
866 static int dm_dmub_hw_init(struct amdgpu_device *adev)
868 const struct dmcub_firmware_header_v1_0 *hdr;
869 struct dmub_srv *dmub_srv = adev->dm.dmub_srv;
870 struct dmub_srv_fb_info *fb_info = adev->dm.dmub_fb_info;
871 const struct firmware *dmub_fw = adev->dm.dmub_fw;
872 struct dmcu *dmcu = adev->dm.dc->res_pool->dmcu;
873 struct abm *abm = adev->dm.dc->res_pool->abm;
874 struct dmub_srv_hw_params hw_params;
875 enum dmub_status status;
876 const unsigned char *fw_inst_const, *fw_bss_data;
877 uint32_t i, fw_inst_const_size, fw_bss_data_size;
881 /* DMUB isn't supported on the ASIC. */
885 DRM_ERROR("No framebuffer info for DMUB service.\n");
890 /* Firmware required for DMUB support. */
891 DRM_ERROR("No firmware provided for DMUB.\n");
895 status = dmub_srv_has_hw_support(dmub_srv, &has_hw_support);
896 if (status != DMUB_STATUS_OK) {
897 DRM_ERROR("Error checking HW support for DMUB: %d\n", status);
901 if (!has_hw_support) {
902 DRM_INFO("DMUB unsupported on ASIC\n");
906 hdr = (const struct dmcub_firmware_header_v1_0 *)dmub_fw->data;
908 fw_inst_const = dmub_fw->data +
909 le32_to_cpu(hdr->header.ucode_array_offset_bytes) +
912 fw_bss_data = dmub_fw->data +
913 le32_to_cpu(hdr->header.ucode_array_offset_bytes) +
914 le32_to_cpu(hdr->inst_const_bytes);
916 /* Copy firmware and bios info into FB memory. */
917 fw_inst_const_size = le32_to_cpu(hdr->inst_const_bytes) -
918 PSP_HEADER_BYTES - PSP_FOOTER_BYTES;
920 fw_bss_data_size = le32_to_cpu(hdr->bss_data_bytes);
922 /* if adev->firmware.load_type == AMDGPU_FW_LOAD_PSP,
923 * amdgpu_ucode_init_single_fw will load dmub firmware
924 * fw_inst_const part to cw0; otherwise, the firmware back door load
925 * will be done by dm_dmub_hw_init
927 if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP) {
928 memcpy(fb_info->fb[DMUB_WINDOW_0_INST_CONST].cpu_addr, fw_inst_const,
932 if (fw_bss_data_size)
933 memcpy(fb_info->fb[DMUB_WINDOW_2_BSS_DATA].cpu_addr,
934 fw_bss_data, fw_bss_data_size);
936 /* Copy firmware bios info into FB memory. */
937 memcpy(fb_info->fb[DMUB_WINDOW_3_VBIOS].cpu_addr, adev->bios,
940 /* Reset regions that need to be reset. */
941 memset(fb_info->fb[DMUB_WINDOW_4_MAILBOX].cpu_addr, 0,
942 fb_info->fb[DMUB_WINDOW_4_MAILBOX].size);
944 memset(fb_info->fb[DMUB_WINDOW_5_TRACEBUFF].cpu_addr, 0,
945 fb_info->fb[DMUB_WINDOW_5_TRACEBUFF].size);
947 memset(fb_info->fb[DMUB_WINDOW_6_FW_STATE].cpu_addr, 0,
948 fb_info->fb[DMUB_WINDOW_6_FW_STATE].size);
950 /* Initialize hardware. */
951 memset(&hw_params, 0, sizeof(hw_params));
952 hw_params.fb_base = adev->gmc.fb_start;
953 hw_params.fb_offset = adev->gmc.aper_base;
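/* fb_base is the GPU (MC) address where VRAM starts and fb_offset the
 * CPU-visible aperture base; the pair describes the framebuffer aperture
 * handed to the DMUB firmware.
 */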
955 /* backdoor load firmware and trigger dmub running */
956 if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP)
957 hw_params.load_inst_const = true;
960 hw_params.psp_version = dmcu->psp_version;
962 for (i = 0; i < fb_info->num_fb; ++i)
963 hw_params.fb[i] = &fb_info->fb[i];
965 status = dmub_srv_hw_init(dmub_srv, &hw_params);
966 if (status != DMUB_STATUS_OK) {
967 DRM_ERROR("Error initializing DMUB HW: %d\n", status);
971 /* Wait for firmware load to finish. */
972 status = dmub_srv_wait_for_auto_load(dmub_srv, 100000);
973 if (status != DMUB_STATUS_OK)
974 DRM_WARN("Wait for DMUB auto-load failed: %d\n", status);
976 /* Init DMCU and ABM if available. */
978 dmcu->funcs->dmcu_init(dmcu);
979 abm->dmcu_is_running = dmcu->funcs->is_dmcu_initialized(dmcu);
982 adev->dm.dc->ctx->dmub_srv = dc_dmub_srv_create(adev->dm.dc, dmub_srv);
983 if (!adev->dm.dc->ctx->dmub_srv) {
984 DRM_ERROR("Couldn't allocate DC DMUB server!\n");
988 DRM_INFO("DMUB hardware initialized: version=0x%08X\n",
989 adev->dm.dmcub_fw_version);
994 #if defined(CONFIG_DRM_AMD_DC_DCN)
995 static void mmhub_read_system_context(struct amdgpu_device *adev, struct dc_phy_addr_space_config *pa_config)
998 uint32_t logical_addr_low;
999 uint32_t logical_addr_high;
1000 uint32_t agp_base, agp_bot, agp_top;
1001 PHYSICAL_ADDRESS_LOC page_table_start, page_table_end, page_table_base;
1003 logical_addr_low = min(adev->gmc.fb_start, adev->gmc.agp_start) >> 18;
1004 pt_base = amdgpu_gmc_pd_addr(adev->gart.bo);
1006 if (adev->apu_flags & AMD_APU_IS_RAVEN2)
1008 * Raven2 has a HW issue that it is unable to use the vram which
1009 * is out of MC_VM_SYSTEM_APERTURE_HIGH_ADDR. So here is the
1010 * workaround that increase system aperture high address (add 1)
1011 * to get rid of the VM fault and hardware hang.
1013 logical_addr_high = max((adev->gmc.fb_end >> 18) + 0x1, adev->gmc.agp_end >> 18);
1015 logical_addr_high = max(adev->gmc.fb_end, adev->gmc.agp_end) >> 18;
1018 agp_bot = adev->gmc.agp_start >> 24;
1019 agp_top = adev->gmc.agp_end >> 24;
1022 page_table_start.high_part = (u32)(adev->gmc.gart_start >> 44) & 0xF;
1023 page_table_start.low_part = (u32)(adev->gmc.gart_start >> 12);
1024 page_table_end.high_part = (u32)(adev->gmc.gart_end >> 44) & 0xF;
1025 page_table_end.low_part = (u32)(adev->gmc.gart_end >> 12);
1026 page_table_base.high_part = upper_32_bits(pt_base) & 0xF;
1027 page_table_base.low_part = lower_32_bits(pt_base);
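/* Convert everything back to byte addresses for DC: the system aperture
 * was computed in 256KB units (shift by 18), the AGP window in 16MB units
 * (shift by 24) and the GART page table bounds in 4KB pages (shift by 12).
 */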
1029 pa_config->system_aperture.start_addr = (uint64_t)logical_addr_low << 18;
1030 pa_config->system_aperture.end_addr = (uint64_t)logical_addr_high << 18;
1032 pa_config->system_aperture.agp_base = (uint64_t)agp_base << 24;
1033 pa_config->system_aperture.agp_bot = (uint64_t)agp_bot << 24;
1034 pa_config->system_aperture.agp_top = (uint64_t)agp_top << 24;
1036 pa_config->system_aperture.fb_base = adev->gmc.fb_start;
1037 pa_config->system_aperture.fb_offset = adev->gmc.aper_base;
1038 pa_config->system_aperture.fb_top = adev->gmc.fb_end;
1040 pa_config->gart_config.page_table_start_addr = page_table_start.quad_part << 12;
1041 pa_config->gart_config.page_table_end_addr = page_table_end.quad_part << 12;
1042 pa_config->gart_config.page_table_base_addr = page_table_base.quad_part;
1044 pa_config->is_hvm_enabled = 0;
1048 #if defined(CONFIG_DRM_AMD_DC_DCN)
1049 static void event_mall_stutter(struct work_struct *work)
1052 struct vblank_workqueue *vblank_work = container_of(work, struct vblank_workqueue, mall_work);
1053 struct amdgpu_display_manager *dm = vblank_work->dm;
1055 mutex_lock(&dm->dc_lock);
1057 if (vblank_work->enable)
1058 dm->active_vblank_irq_count++;
1059 else if (dm->active_vblank_irq_count)
1060 dm->active_vblank_irq_count--;
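/* Idle optimizations (MALL stutter) are only safe while no CRTC needs
 * vblank interrupts, so allow them exactly when the refcount is zero.
 */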
1062 dc_allow_idle_optimizations(dm->dc, dm->active_vblank_irq_count == 0);
1064 DRM_DEBUG_KMS("Allow idle optimizations (MALL): %d\n", dm->active_vblank_irq_count == 0);
1066 mutex_unlock(&dm->dc_lock);
1069 static struct vblank_workqueue *vblank_create_workqueue(struct amdgpu_device *adev, struct dc *dc)
1072 int max_caps = dc->caps.max_links;
1073 struct vblank_workqueue *vblank_work;
1076 vblank_work = kcalloc(max_caps, sizeof(*vblank_work), GFP_KERNEL);
1077 if (ZERO_OR_NULL_PTR(vblank_work)) {
1082 for (i = 0; i < max_caps; i++)
1083 INIT_WORK(&vblank_work[i].mall_work, event_mall_stutter);
1088 static int amdgpu_dm_init(struct amdgpu_device *adev)
1090 struct dc_init_data init_data;
1091 #ifdef CONFIG_DRM_AMD_DC_HDCP
1092 struct dc_callback_init init_params;
1096 adev->dm.ddev = adev_to_drm(adev);
1097 adev->dm.adev = adev;
1099 /* Zero all the fields */
1100 memset(&init_data, 0, sizeof(init_data));
1101 #ifdef CONFIG_DRM_AMD_DC_HDCP
1102 memset(&init_params, 0, sizeof(init_params));
1105 mutex_init(&adev->dm.dc_lock);
1106 mutex_init(&adev->dm.audio_lock);
1107 #if defined(CONFIG_DRM_AMD_DC_DCN)
1108 spin_lock_init(&adev->dm.vblank_lock);
1111 if (amdgpu_dm_irq_init(adev)) {
1112 DRM_ERROR("amdgpu: failed to initialize DM IRQ support.\n");
1116 init_data.asic_id.chip_family = adev->family;
1118 init_data.asic_id.pci_revision_id = adev->pdev->revision;
1119 init_data.asic_id.hw_internal_rev = adev->external_rev_id;
1121 init_data.asic_id.vram_width = adev->gmc.vram_width;
1122 /* TODO: initialize init_data.asic_id.vram_type here!!!! */
1123 init_data.asic_id.atombios_base_address =
1124 adev->mode_info.atom_context->bios;
1126 init_data.driver = adev;
1128 adev->dm.cgs_device = amdgpu_cgs_create_device(adev);
1130 if (!adev->dm.cgs_device) {
1131 DRM_ERROR("amdgpu: failed to create cgs device.\n");
1135 init_data.cgs_device = adev->dm.cgs_device;
1137 init_data.dce_environment = DCE_ENV_PRODUCTION_DRV;
1139 switch (adev->asic_type) {
1144 init_data.flags.gpu_vm_support = true;
1145 if (ASICREV_IS_GREEN_SARDINE(adev->external_rev_id))
1146 init_data.flags.disable_dmcu = true;
1148 #if defined(CONFIG_DRM_AMD_DC_DCN)
1150 init_data.flags.gpu_vm_support = true;
1157 if (amdgpu_dc_feature_mask & DC_FBC_MASK)
1158 init_data.flags.fbc_support = true;
1160 if (amdgpu_dc_feature_mask & DC_MULTI_MON_PP_MCLK_SWITCH_MASK)
1161 init_data.flags.multi_mon_pp_mclk_switch = true;
1163 if (amdgpu_dc_feature_mask & DC_DISABLE_FRACTIONAL_PWM_MASK)
1164 init_data.flags.disable_fractional_pwm = true;
1166 init_data.flags.power_down_display_on_boot = true;
1168 INIT_LIST_HEAD(&adev->dm.da_list);
1169 /* Display Core create. */
1170 adev->dm.dc = dc_create(&init_data);
1173 DRM_INFO("Display Core initialized with v%s!\n", DC_VER);
1175 DRM_INFO("Display Core failed to initialize with v%s!\n", DC_VER);
1179 if (amdgpu_dc_debug_mask & DC_DISABLE_PIPE_SPLIT) {
1180 adev->dm.dc->debug.force_single_disp_pipe_split = false;
1181 adev->dm.dc->debug.pipe_split_policy = MPC_SPLIT_AVOID;
1184 if (adev->asic_type != CHIP_CARRIZO && adev->asic_type != CHIP_STONEY)
1185 adev->dm.dc->debug.disable_stutter = !(amdgpu_pp_feature_mask & PP_STUTTER_MODE);
1187 if (amdgpu_dc_debug_mask & DC_DISABLE_STUTTER)
1188 adev->dm.dc->debug.disable_stutter = true;
1190 if (amdgpu_dc_debug_mask & DC_DISABLE_DSC)
1191 adev->dm.dc->debug.disable_dsc = true;
1193 if (amdgpu_dc_debug_mask & DC_DISABLE_CLOCK_GATING)
1194 adev->dm.dc->debug.disable_clock_gate = true;
1196 r = dm_dmub_hw_init(adev);
1198 DRM_ERROR("DMUB interface failed to initialize: status=%d\n", r);
1202 dc_hardware_init(adev->dm.dc);
1204 #if defined(CONFIG_DRM_AMD_DC_DCN)
1205 if (adev->apu_flags) {
1206 struct dc_phy_addr_space_config pa_config;
1208 mmhub_read_system_context(adev, &pa_config);
1210 // Call the DC init_memory func
1211 dc_setup_system_context(adev->dm.dc, &pa_config);
1215 adev->dm.freesync_module = mod_freesync_create(adev->dm.dc);
1216 if (!adev->dm.freesync_module) {
1218 "amdgpu: failed to initialize freesync_module.\n");
1220 DRM_DEBUG_DRIVER("amdgpu: freesync_module init done %p.\n",
1221 adev->dm.freesync_module);
1223 amdgpu_dm_init_color_mod();
1225 #if defined(CONFIG_DRM_AMD_DC_DCN)
1226 if (adev->dm.dc->caps.max_links > 0) {
1227 adev->dm.vblank_workqueue = vblank_create_workqueue(adev, adev->dm.dc);
1229 if (!adev->dm.vblank_workqueue)
1230 DRM_ERROR("amdgpu: failed to initialize vblank_workqueue.\n");
1232 DRM_DEBUG_DRIVER("amdgpu: vblank_workqueue init done %p.\n", adev->dm.vblank_workqueue);
1236 #ifdef CONFIG_DRM_AMD_DC_HDCP
1237 if (adev->dm.dc->caps.max_links > 0 && adev->asic_type >= CHIP_RAVEN) {
1238 adev->dm.hdcp_workqueue = hdcp_create_workqueue(adev, &init_params.cp_psp, adev->dm.dc);
1240 if (!adev->dm.hdcp_workqueue)
1241 DRM_ERROR("amdgpu: failed to initialize hdcp_workqueue.\n");
1243 DRM_DEBUG_DRIVER("amdgpu: hdcp_workqueue init done %p.\n", adev->dm.hdcp_workqueue);
1245 dc_init_callbacks(adev->dm.dc, &init_params);
1248 #if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
1249 adev->dm.crc_rd_wrk = amdgpu_dm_crtc_secure_display_create_work();
1251 if (dc_enable_dmub_notifications(adev->dm.dc)) {
1252 init_completion(&adev->dm.dmub_aux_transfer_done);
1253 adev->dm.dmub_notify = kzalloc(sizeof(struct dmub_notification), GFP_KERNEL);
1254 if (!adev->dm.dmub_notify) {
1255 DRM_INFO("amdgpu: failed to allocate adev->dm.dmub_notify\n");
1258 amdgpu_dm_outbox_init(adev);
1261 if (amdgpu_dm_initialize_drm_device(adev)) {
1263 "amdgpu: failed to initialize sw for display support.\n");
1267 /* create fake encoders for MST */
1268 dm_dp_create_fake_mst_encoders(adev);
1270 /* TODO: Add_display_info? */
1272 /* TODO use dynamic cursor width */
1273 adev_to_drm(adev)->mode_config.cursor_width = adev->dm.dc->caps.max_cursor_size;
1274 adev_to_drm(adev)->mode_config.cursor_height = adev->dm.dc->caps.max_cursor_size;
1276 if (drm_vblank_init(adev_to_drm(adev), adev->dm.display_indexes_num)) {
1278 "amdgpu: failed to initialize sw for display support.\n");
1283 DRM_DEBUG_DRIVER("KMS initialized.\n");
1287 amdgpu_dm_fini(adev);
1292 static void amdgpu_dm_fini(struct amdgpu_device *adev)
1296 for (i = 0; i < adev->dm.display_indexes_num; i++) {
1297 drm_encoder_cleanup(&adev->dm.mst_encoders[i].base);
1300 amdgpu_dm_audio_fini(adev);
1302 amdgpu_dm_destroy_drm_device(&adev->dm);
1304 #if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
1305 if (adev->dm.crc_rd_wrk) {
1306 flush_work(&adev->dm.crc_rd_wrk->notify_ta_work);
1307 kfree(adev->dm.crc_rd_wrk);
1308 adev->dm.crc_rd_wrk = NULL;
1311 #ifdef CONFIG_DRM_AMD_DC_HDCP
1312 if (adev->dm.hdcp_workqueue) {
1313 hdcp_destroy(&adev->dev->kobj, adev->dm.hdcp_workqueue);
1314 adev->dm.hdcp_workqueue = NULL;
1318 dc_deinit_callbacks(adev->dm.dc);
1321 #if defined(CONFIG_DRM_AMD_DC_DCN)
1322 if (adev->dm.vblank_workqueue) {
1323 adev->dm.vblank_workqueue->dm = NULL;
1324 kfree(adev->dm.vblank_workqueue);
1325 adev->dm.vblank_workqueue = NULL;
1329 if (adev->dm.dc->ctx->dmub_srv) {
1330 dc_dmub_srv_destroy(&adev->dm.dc->ctx->dmub_srv);
1331 adev->dm.dc->ctx->dmub_srv = NULL;
1334 if (dc_enable_dmub_notifications(adev->dm.dc)) {
1335 kfree(adev->dm.dmub_notify);
1336 adev->dm.dmub_notify = NULL;
1339 if (adev->dm.dmub_bo)
1340 amdgpu_bo_free_kernel(&adev->dm.dmub_bo,
1341 &adev->dm.dmub_bo_gpu_addr,
1342 &adev->dm.dmub_bo_cpu_addr);
1344 /* DC Destroy TODO: Replace destroy DAL */
1346 dc_destroy(&adev->dm.dc);
1348 * TODO: pageflip, vlank interrupt
1350 * amdgpu_dm_irq_fini(adev);
1353 if (adev->dm.cgs_device) {
1354 amdgpu_cgs_destroy_device(adev->dm.cgs_device);
1355 adev->dm.cgs_device = NULL;
1357 if (adev->dm.freesync_module) {
1358 mod_freesync_destroy(adev->dm.freesync_module);
1359 adev->dm.freesync_module = NULL;
1362 mutex_destroy(&adev->dm.audio_lock);
1363 mutex_destroy(&adev->dm.dc_lock);
1368 static int load_dmcu_fw(struct amdgpu_device *adev)
1370 const char *fw_name_dmcu = NULL;
1372 const struct dmcu_firmware_header_v1_0 *hdr;
1374 switch (adev->asic_type) {
1375 #if defined(CONFIG_DRM_AMD_DC_SI)
1390 case CHIP_POLARIS11:
1391 case CHIP_POLARIS10:
1392 case CHIP_POLARIS12:
1400 case CHIP_SIENNA_CICHLID:
1401 case CHIP_NAVY_FLOUNDER:
1402 case CHIP_DIMGREY_CAVEFISH:
1406 fw_name_dmcu = FIRMWARE_NAVI12_DMCU;
1409 if (ASICREV_IS_PICASSO(adev->external_rev_id))
1410 fw_name_dmcu = FIRMWARE_RAVEN_DMCU;
1411 else if (ASICREV_IS_RAVEN2(adev->external_rev_id))
1412 fw_name_dmcu = FIRMWARE_RAVEN_DMCU;
1417 DRM_ERROR("Unsupported ASIC type: 0x%X\n", adev->asic_type);
1421 if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP) {
1422 DRM_DEBUG_KMS("dm: DMCU firmware not supported on direct or SMU loading\n");
1426 r = request_firmware_direct(&adev->dm.fw_dmcu, fw_name_dmcu, adev->dev);
1428 /* DMCU firmware is not necessary, so don't raise a fuss if it's missing */
1429 DRM_DEBUG_KMS("dm: DMCU firmware not found\n");
1430 adev->dm.fw_dmcu = NULL;
1434 dev_err(adev->dev, "amdgpu_dm: Can't load firmware \"%s\"\n",
1439 r = amdgpu_ucode_validate(adev->dm.fw_dmcu);
1441 dev_err(adev->dev, "amdgpu_dm: Can't validate firmware \"%s\"\n",
1443 release_firmware(adev->dm.fw_dmcu);
1444 adev->dm.fw_dmcu = NULL;
1448 hdr = (const struct dmcu_firmware_header_v1_0 *)adev->dm.fw_dmcu->data;
1449 adev->firmware.ucode[AMDGPU_UCODE_ID_DMCU_ERAM].ucode_id = AMDGPU_UCODE_ID_DMCU_ERAM;
1450 adev->firmware.ucode[AMDGPU_UCODE_ID_DMCU_ERAM].fw = adev->dm.fw_dmcu;
1451 adev->firmware.fw_size +=
1452 ALIGN(le32_to_cpu(hdr->header.ucode_size_bytes) - le32_to_cpu(hdr->intv_size_bytes), PAGE_SIZE);
1454 adev->firmware.ucode[AMDGPU_UCODE_ID_DMCU_INTV].ucode_id = AMDGPU_UCODE_ID_DMCU_INTV;
1455 adev->firmware.ucode[AMDGPU_UCODE_ID_DMCU_INTV].fw = adev->dm.fw_dmcu;
1456 adev->firmware.fw_size +=
1457 ALIGN(le32_to_cpu(hdr->intv_size_bytes), PAGE_SIZE);
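/* The DMCU image is split into an ERAM section and an INTV section
 * (interrupt vectors); each is registered with the PSP loader separately
 * and both contribute page-aligned sizes to the total firmware footprint.
 */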
1459 adev->dm.dmcu_fw_version = le32_to_cpu(hdr->header.ucode_version);
1461 DRM_DEBUG_KMS("PSP loading DMCU firmware\n");
1466 static uint32_t amdgpu_dm_dmub_reg_read(void *ctx, uint32_t address)
1468 struct amdgpu_device *adev = ctx;
1470 return dm_read_reg(adev->dm.dc->ctx, address);
1473 static void amdgpu_dm_dmub_reg_write(void *ctx, uint32_t address,
1476 struct amdgpu_device *adev = ctx;
1478 return dm_write_reg(adev->dm.dc->ctx, address, value);
1481 static int dm_dmub_sw_init(struct amdgpu_device *adev)
1483 struct dmub_srv_create_params create_params;
1484 struct dmub_srv_region_params region_params;
1485 struct dmub_srv_region_info region_info;
1486 struct dmub_srv_fb_params fb_params;
1487 struct dmub_srv_fb_info *fb_info;
1488 struct dmub_srv *dmub_srv;
1489 const struct dmcub_firmware_header_v1_0 *hdr;
1490 const char *fw_name_dmub;
1491 enum dmub_asic dmub_asic;
1492 enum dmub_status status;
1495 switch (adev->asic_type) {
1497 dmub_asic = DMUB_ASIC_DCN21;
1498 fw_name_dmub = FIRMWARE_RENOIR_DMUB;
1499 if (ASICREV_IS_GREEN_SARDINE(adev->external_rev_id))
1500 fw_name_dmub = FIRMWARE_GREEN_SARDINE_DMUB;
1502 case CHIP_SIENNA_CICHLID:
1503 dmub_asic = DMUB_ASIC_DCN30;
1504 fw_name_dmub = FIRMWARE_SIENNA_CICHLID_DMUB;
1506 case CHIP_NAVY_FLOUNDER:
1507 dmub_asic = DMUB_ASIC_DCN30;
1508 fw_name_dmub = FIRMWARE_NAVY_FLOUNDER_DMUB;
1511 dmub_asic = DMUB_ASIC_DCN301;
1512 fw_name_dmub = FIRMWARE_VANGOGH_DMUB;
1514 case CHIP_DIMGREY_CAVEFISH:
1515 dmub_asic = DMUB_ASIC_DCN302;
1516 fw_name_dmub = FIRMWARE_DIMGREY_CAVEFISH_DMUB;
1520 /* ASIC doesn't support DMUB. */
1524 r = request_firmware_direct(&adev->dm.dmub_fw, fw_name_dmub, adev->dev);
1526 DRM_ERROR("DMUB firmware loading failed: %d\n", r);
1530 r = amdgpu_ucode_validate(adev->dm.dmub_fw);
1532 DRM_ERROR("Couldn't validate DMUB firmware: %d\n", r);
1536 hdr = (const struct dmcub_firmware_header_v1_0 *)adev->dm.dmub_fw->data;
1538 if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) {
1539 adev->firmware.ucode[AMDGPU_UCODE_ID_DMCUB].ucode_id =
1540 AMDGPU_UCODE_ID_DMCUB;
1541 adev->firmware.ucode[AMDGPU_UCODE_ID_DMCUB].fw =
1543 adev->firmware.fw_size +=
1544 ALIGN(le32_to_cpu(hdr->inst_const_bytes), PAGE_SIZE);
1546 DRM_INFO("Loading DMUB firmware via PSP: version=0x%08X\n",
1547 adev->dm.dmcub_fw_version);
1550 adev->dm.dmcub_fw_version = le32_to_cpu(hdr->header.ucode_version);
1552 adev->dm.dmub_srv = kzalloc(sizeof(*adev->dm.dmub_srv), GFP_KERNEL);
1553 dmub_srv = adev->dm.dmub_srv;
1556 DRM_ERROR("Failed to allocate DMUB service!\n");
1560 memset(&create_params, 0, sizeof(create_params));
1561 create_params.user_ctx = adev;
1562 create_params.funcs.reg_read = amdgpu_dm_dmub_reg_read;
1563 create_params.funcs.reg_write = amdgpu_dm_dmub_reg_write;
1564 create_params.asic = dmub_asic;
1566 /* Create the DMUB service. */
1567 status = dmub_srv_create(dmub_srv, &create_params);
1568 if (status != DMUB_STATUS_OK) {
1569 DRM_ERROR("Error creating DMUB service: %d\n", status);
1573 /* Calculate the size of all the regions for the DMUB service. */
1574 memset(&region_params, 0, sizeof(region_params));
1576 region_params.inst_const_size = le32_to_cpu(hdr->inst_const_bytes) -
1577 PSP_HEADER_BYTES - PSP_FOOTER_BYTES;
1578 region_params.bss_data_size = le32_to_cpu(hdr->bss_data_bytes);
1579 region_params.vbios_size = adev->bios_size;
1580 region_params.fw_bss_data = region_params.bss_data_size ?
1581 adev->dm.dmub_fw->data +
1582 le32_to_cpu(hdr->header.ucode_array_offset_bytes) +
1583 le32_to_cpu(hdr->inst_const_bytes) : NULL;
1584 region_params.fw_inst_const =
1585 adev->dm.dmub_fw->data +
1586 le32_to_cpu(hdr->header.ucode_array_offset_bytes) +
1589 status = dmub_srv_calc_region_info(dmub_srv, &region_params,
1592 if (status != DMUB_STATUS_OK) {
1593 DRM_ERROR("Error calculating DMUB region info: %d\n", status);
1598 * Allocate a framebuffer based on the total size of all the regions.
1599 * TODO: Move this into GART.
1601 r = amdgpu_bo_create_kernel(adev, region_info.fb_size, PAGE_SIZE,
1602 AMDGPU_GEM_DOMAIN_VRAM, &adev->dm.dmub_bo,
1603 &adev->dm.dmub_bo_gpu_addr,
1604 &adev->dm.dmub_bo_cpu_addr);
1608 /* Rebase the regions on the framebuffer address. */
1609 memset(&fb_params, 0, sizeof(fb_params));
1610 fb_params.cpu_addr = adev->dm.dmub_bo_cpu_addr;
1611 fb_params.gpu_addr = adev->dm.dmub_bo_gpu_addr;
1612 fb_params.region_info = &region_info;
1614 adev->dm.dmub_fb_info =
1615 kzalloc(sizeof(*adev->dm.dmub_fb_info), GFP_KERNEL);
1616 fb_info = adev->dm.dmub_fb_info;
1620 "Failed to allocate framebuffer info for DMUB service!\n");
1624 status = dmub_srv_calc_fb_info(dmub_srv, &fb_params, fb_info);
1625 if (status != DMUB_STATUS_OK) {
1626 DRM_ERROR("Error calculating DMUB FB info: %d\n", status);
1633 static int dm_sw_init(void *handle)
1635 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1638 r = dm_dmub_sw_init(adev);
1642 return load_dmcu_fw(adev);
1645 static int dm_sw_fini(void *handle)
1647 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1649 kfree(adev->dm.dmub_fb_info);
1650 adev->dm.dmub_fb_info = NULL;
1652 if (adev->dm.dmub_srv) {
1653 dmub_srv_destroy(adev->dm.dmub_srv);
1654 adev->dm.dmub_srv = NULL;
1657 release_firmware(adev->dm.dmub_fw);
1658 adev->dm.dmub_fw = NULL;
1660 release_firmware(adev->dm.fw_dmcu);
1661 adev->dm.fw_dmcu = NULL;
1666 static int detect_mst_link_for_all_connectors(struct drm_device *dev)
1668 struct amdgpu_dm_connector *aconnector;
1669 struct drm_connector *connector;
1670 struct drm_connector_list_iter iter;
1673 drm_connector_list_iter_begin(dev, &iter);
1674 drm_for_each_connector_iter(connector, &iter) {
1675 aconnector = to_amdgpu_dm_connector(connector);
1676 if (aconnector->dc_link->type == dc_connection_mst_branch &&
1677 aconnector->mst_mgr.aux) {
1678 DRM_DEBUG_DRIVER("DM_MST: starting TM on aconnector: %p [id: %d]\n",
1680 aconnector->base.base.id);
1682 ret = drm_dp_mst_topology_mgr_set_mst(&aconnector->mst_mgr, true);
1684 DRM_ERROR("DM_MST: Failed to start MST\n");
1685 aconnector->dc_link->type =
1686 dc_connection_single;
1691 drm_connector_list_iter_end(&iter);
1696 static int dm_late_init(void *handle)
1698 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1700 struct dmcu_iram_parameters params;
1701 unsigned int linear_lut[16];
1703 struct dmcu *dmcu = NULL;
1706 dmcu = adev->dm.dc->res_pool->dmcu;
1708 for (i = 0; i < 16; i++)
1709 linear_lut[i] = 0xFFFF * i / 15;
1712 params.backlight_ramping_start = 0xCCCC;
1713 params.backlight_ramping_reduction = 0xCCCCCCCC;
1714 params.backlight_lut_array_size = 16;
1715 params.backlight_lut_array = linear_lut;
1717 /* Min backlight level after ABM reduction; don't allow below 1%:
1718 * 0xFFFF x 0.01 = 0x28F
1720 params.min_abm_backlight = 0x28F;
1722 /* In the case where abm is implemented on dmcub,
1723 * dmcu object will be null.
1724 * ABM 2.4 and up are implemented on dmcub.
1727 ret = dmcu_load_iram(dmcu, params);
1728 else if (adev->dm.dc->ctx->dmub_srv)
1729 ret = dmub_init_abm_config(adev->dm.dc->res_pool, params);
1734 return detect_mst_link_for_all_connectors(adev_to_drm(adev));
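/* On suspend, every MST topology manager is quiesced; on resume the
 * topology is re-probed and, if it can no longer be reached, MST is torn
 * down and a hotplug event is sent so userspace re-detects the outputs.
 */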
1737 static void s3_handle_mst(struct drm_device *dev, bool suspend)
1739 struct amdgpu_dm_connector *aconnector;
1740 struct drm_connector *connector;
1741 struct drm_connector_list_iter iter;
1742 struct drm_dp_mst_topology_mgr *mgr;
1744 bool need_hotplug = false;
1746 drm_connector_list_iter_begin(dev, &iter);
1747 drm_for_each_connector_iter(connector, &iter) {
1748 aconnector = to_amdgpu_dm_connector(connector);
1749 if (aconnector->dc_link->type != dc_connection_mst_branch ||
1750 aconnector->mst_port)
1753 mgr = &aconnector->mst_mgr;
1756 drm_dp_mst_topology_mgr_suspend(mgr);
1758 ret = drm_dp_mst_topology_mgr_resume(mgr, true);
1760 drm_dp_mst_topology_mgr_set_mst(mgr, false);
1761 need_hotplug = true;
1765 drm_connector_list_iter_end(&iter);
1768 drm_kms_helper_hotplug_event(dev);
1771 static int amdgpu_dm_smu_write_watermarks_table(struct amdgpu_device *adev)
1773 struct smu_context *smu = &adev->smu;
1776 if (!is_support_sw_smu(adev))
1779 /* This interface is for dGPU Navi1x. Linux dc-pplib interface depends
1780 * on the Windows driver dc implementation.
1781 * For Navi1x, clock settings of dcn watermarks are fixed. the settings
1782 * should be passed to smu during boot up and resume from s3.
1783 * boot up: dc calculate dcn watermark clock settings within dc_create,
1784 * dcn20_resource_construct
1785 * then call pplib functions below to pass the settings to smu:
1786 * smu_set_watermarks_for_clock_ranges
1787 * smu_set_watermarks_table
1788 * navi10_set_watermarks_table
1789 * smu_write_watermarks_table
1791 * For Renoir, clock settings of dcn watermark are also fixed values.
1792 * dc has implemented different flow for window driver:
1793 * dc_hardware_init / dc_set_power_state
1798 * smu_set_watermarks_for_clock_ranges
1799 * renoir_set_watermarks_table
1800 * smu_write_watermarks_table
1803 * dc_hardware_init -> amdgpu_dm_init
1804 * dc_set_power_state --> dm_resume
1806 * therefore, this function applies to navi10/12/14 but not Renoir
1809 switch (adev->asic_type) {
1818 ret = smu_write_watermarks_table(smu);
1820 DRM_ERROR("Failed to update WMTABLE!\n");
1828 * dm_hw_init() - Initialize DC device
1829 * @handle: The base driver device containing the amdgpu_dm device.
1831 * Initialize the &struct amdgpu_display_manager device. This involves calling
1832 * the initializers of each DM component, then populating the struct with them.
1834 * Although the function implies hardware initialization, both hardware and
1835 * software are initialized here. Splitting them out to their relevant init
1836 * hooks is a future TODO item.
1838 * Some notable things that are initialized here:
1840 * - Display Core, both software and hardware
1841 * - DC modules that we need (freesync and color management)
1842 * - DRM software states
1843 * - Interrupt sources and handlers
1845 * - Debug FS entries, if enabled
1847 static int dm_hw_init(void *handle)
1849 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1850 /* Create DAL display manager */
1851 amdgpu_dm_init(adev);
1852 amdgpu_dm_hpd_init(adev);
1858 * dm_hw_fini() - Teardown DC device
1859 * @handle: The base driver device containing the amdgpu_dm device.
1861 * Teardown components within &struct amdgpu_display_manager that require
1862 * cleanup. This involves cleaning up the DRM device, DC, and any modules that
1863 * were loaded. Also flush IRQ workqueues and disable them.
1865 static int dm_hw_fini(void *handle)
1867 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1869 amdgpu_dm_hpd_fini(adev);
1871 amdgpu_dm_irq_fini(adev);
1872 amdgpu_dm_fini(adev);
1877 static int dm_enable_vblank(struct drm_crtc *crtc);
1878 static void dm_disable_vblank(struct drm_crtc *crtc);
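/* Toggle pageflip/vblank interrupts for every stream in the given DC
 * state. Used on the GPU reset path so that no display interrupt fires
 * while the cached DC state is torn down in dm_suspend() and recommitted
 * in dm_resume().
 */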
1880 static void dm_gpureset_toggle_interrupts(struct amdgpu_device *adev,
1881 struct dc_state *state, bool enable)
1883 enum dc_irq_source irq_source;
1884 struct amdgpu_crtc *acrtc;
1888 for (i = 0; i < state->stream_count; i++) {
1889 acrtc = get_crtc_by_otg_inst(
1890 adev, state->stream_status[i].primary_otg_inst);
1892 if (acrtc && state->stream_status[i].plane_count != 0) {
1893 irq_source = IRQ_TYPE_PFLIP + acrtc->otg_inst;
1894 rc = dc_interrupt_set(adev->dm.dc, irq_source, enable) ? 0 : -EBUSY;
1895 DRM_DEBUG_VBL("crtc %d - pflip irq %sabling: r=%d\n",
1896 acrtc->crtc_id, enable ? "en" : "dis", rc);
1898 DRM_WARN("Failed to %s pflip interrupts\n",
1899 enable ? "enable" : "disable");
1902 rc = dm_enable_vblank(&acrtc->base);
1904 DRM_WARN("Failed to enable vblank interrupts\n");
1906 dm_disable_vblank(&acrtc->base);
1914 static enum dc_status amdgpu_dm_commit_zero_streams(struct dc *dc)
1916 struct dc_state *context = NULL;
1917 enum dc_status res = DC_ERROR_UNEXPECTED;
1919 struct dc_stream_state *del_streams[MAX_PIPES];
1920 int del_streams_count = 0;
1922 memset(del_streams, 0, sizeof(del_streams));
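/* Build a copy of the current DC state, strip every stream (and its
 * planes) from it and commit the emptied state, leaving the hardware with
 * zero active streams for suspend/reset.
 */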
1924 context = dc_create_state(dc);
1925 if (context == NULL)
1926 goto context_alloc_fail;
1928 dc_resource_state_copy_construct_current(dc, context);
1930 /* First remove from context all streams */
1931 for (i = 0; i < context->stream_count; i++) {
1932 struct dc_stream_state *stream = context->streams[i];
1934 del_streams[del_streams_count++] = stream;
1937 /* Remove all planes for removed streams and then remove the streams */
1938 for (i = 0; i < del_streams_count; i++) {
1939 if (!dc_rem_all_planes_for_stream(dc, del_streams[i], context)) {
1940 res = DC_FAIL_DETACH_SURFACES;
1944 res = dc_remove_stream_from_ctx(dc, context, del_streams[i]);
1950 res = dc_validate_global_state(dc, context, false);
1953 DRM_ERROR("%s: resource validation failed, dc_status:%d\n", __func__, res);
1957 res = dc_commit_state(dc, context);
1960 dc_release_state(context);
1966 static int dm_suspend(void *handle)
1968 struct amdgpu_device *adev = handle;
1969 struct amdgpu_display_manager *dm = &adev->dm;
1972 if (amdgpu_in_reset(adev)) {
1973 mutex_lock(&dm->dc_lock);
1975 #if defined(CONFIG_DRM_AMD_DC_DCN)
1976 dc_allow_idle_optimizations(adev->dm.dc, false);
1979 dm->cached_dc_state = dc_copy_state(dm->dc->current_state);
1981 dm_gpureset_toggle_interrupts(adev, dm->cached_dc_state, false);
1983 amdgpu_dm_commit_zero_streams(dm->dc);
1985 amdgpu_dm_irq_suspend(adev);
1990 #ifdef CONFIG_DRM_AMD_SECURE_DISPLAY
1991 amdgpu_dm_crtc_secure_display_suspend(adev);
1993 WARN_ON(adev->dm.cached_state);
1994 adev->dm.cached_state = drm_atomic_helper_suspend(adev_to_drm(adev));
1996 s3_handle_mst(adev_to_drm(adev), true);
1998 amdgpu_dm_irq_suspend(adev);
2001 dc_set_power_state(dm->dc, DC_ACPI_CM_POWER_STATE_D3);
2006 static struct amdgpu_dm_connector *
2007 amdgpu_dm_find_first_crtc_matching_connector(struct drm_atomic_state *state,
2008 struct drm_crtc *crtc)
2011 struct drm_connector_state *new_con_state;
2012 struct drm_connector *connector;
2013 struct drm_crtc *crtc_from_state;
2015 for_each_new_connector_in_state(state, connector, new_con_state, i) {
2016 crtc_from_state = new_con_state->crtc;
2018 if (crtc_from_state == crtc)
2019 return to_amdgpu_dm_connector(connector);
2025 static void emulated_link_detect(struct dc_link *link)
2027 struct dc_sink_init_data sink_init_data = { 0 };
2028 struct display_sink_capability sink_caps = { 0 };
2029 enum dc_edid_status edid_status;
2030 struct dc_context *dc_ctx = link->ctx;
2031 struct dc_sink *sink = NULL;
2032 struct dc_sink *prev_sink = NULL;
2034 link->type = dc_connection_none;
2035 prev_sink = link->local_sink;
2038 dc_sink_release(prev_sink);
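/* No physical detection is possible here, so derive the DDC transaction
 * type and signal from the connector type, create a matching dc_sink and
 * read the EDID through the DM helpers.
 */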
2040 switch (link->connector_signal) {
2041 case SIGNAL_TYPE_HDMI_TYPE_A: {
2042 sink_caps.transaction_type = DDC_TRANSACTION_TYPE_I2C;
2043 sink_caps.signal = SIGNAL_TYPE_HDMI_TYPE_A;
2047 case SIGNAL_TYPE_DVI_SINGLE_LINK: {
2048 sink_caps.transaction_type = DDC_TRANSACTION_TYPE_I2C;
2049 sink_caps.signal = SIGNAL_TYPE_DVI_SINGLE_LINK;
2053 case SIGNAL_TYPE_DVI_DUAL_LINK: {
2054 sink_caps.transaction_type = DDC_TRANSACTION_TYPE_I2C;
2055 sink_caps.signal = SIGNAL_TYPE_DVI_DUAL_LINK;
2059 case SIGNAL_TYPE_LVDS: {
2060 sink_caps.transaction_type = DDC_TRANSACTION_TYPE_I2C;
2061 sink_caps.signal = SIGNAL_TYPE_LVDS;
2065 case SIGNAL_TYPE_EDP: {
2066 sink_caps.transaction_type =
2067 DDC_TRANSACTION_TYPE_I2C_OVER_AUX;
2068 sink_caps.signal = SIGNAL_TYPE_EDP;
2072 case SIGNAL_TYPE_DISPLAY_PORT: {
2073 sink_caps.transaction_type =
2074 DDC_TRANSACTION_TYPE_I2C_OVER_AUX;
2075 sink_caps.signal = SIGNAL_TYPE_VIRTUAL;
2080 DC_ERROR("Invalid connector type! signal:%d\n",
2081 link->connector_signal);
2085 sink_init_data.link = link;
2086 sink_init_data.sink_signal = sink_caps.signal;
2088 sink = dc_sink_create(&sink_init_data);
2090 DC_ERROR("Failed to create sink!\n");
2094 /* dc_sink_create returns a new reference */
2095 link->local_sink = sink;
2097 edid_status = dm_helpers_read_local_edid(
2102 if (edid_status != EDID_OK)
2103 DC_ERROR("Failed to read EDID\n");
2107 static void dm_gpureset_commit_state(struct dc_state *dc_state,
2108 struct amdgpu_display_manager *dm)
2111 struct dc_surface_update surface_updates[MAX_SURFACES];
2112 struct dc_plane_info plane_infos[MAX_SURFACES];
2113 struct dc_scaling_info scaling_infos[MAX_SURFACES];
2114 struct dc_flip_addrs flip_addrs[MAX_SURFACES];
2115 struct dc_stream_update stream_update;
2119 bundle = kzalloc(sizeof(*bundle), GFP_KERNEL);
2122 dm_error("Failed to allocate update bundle\n");
2126 for (k = 0; k < dc_state->stream_count; k++) {
2127 bundle->stream_update.stream = dc_state->streams[k];
2129 for (m = 0; m < dc_state->stream_status->plane_count; m++) {
2130 bundle->surface_updates[m].surface =
2131 dc_state->stream_status->plane_states[m];
2132 bundle->surface_updates[m].surface->force_full_update =
2135 dc_commit_updates_for_stream(
2136 dm->dc, bundle->surface_updates,
2137 dc_state->stream_status->plane_count,
2138 dc_state->streams[k], &bundle->stream_update, dc_state);
2147 static void dm_set_dpms_off(struct dc_link *link)
2149 struct dc_stream_state *stream_state;
2150 struct amdgpu_dm_connector *aconnector = link->priv;
2151 struct amdgpu_device *adev = drm_to_adev(aconnector->base.dev);
2152 struct dc_stream_update stream_update;
2153 bool dpms_off = true;
2155 memset(&stream_update, 0, sizeof(stream_update));
2156 stream_update.dpms_off = &dpms_off;
2158 mutex_lock(&adev->dm.dc_lock);
2159 stream_state = dc_stream_find_from_link(link);
2161 if (stream_state == NULL) {
2162 DRM_DEBUG_DRIVER("Error finding stream state associated with link!\n");
2163 mutex_unlock(&adev->dm.dc_lock);
2167 stream_update.stream = stream_state;
2168 dc_commit_updates_for_stream(stream_state->ctx->dc, NULL, 0,
2169 stream_state, &stream_update,
2170 stream_state->ctx->dc->current_state);
2171 mutex_unlock(&adev->dm.dc_lock);
2174 static int dm_resume(void *handle)
2176 struct amdgpu_device *adev = handle;
2177 struct drm_device *ddev = adev_to_drm(adev);
2178 struct amdgpu_display_manager *dm = &adev->dm;
2179 struct amdgpu_dm_connector *aconnector;
2180 struct drm_connector *connector;
2181 struct drm_connector_list_iter iter;
2182 struct drm_crtc *crtc;
2183 struct drm_crtc_state *new_crtc_state;
2184 struct dm_crtc_state *dm_new_crtc_state;
2185 struct drm_plane *plane;
2186 struct drm_plane_state *new_plane_state;
2187 struct dm_plane_state *dm_new_plane_state;
2188 struct dm_atomic_state *dm_state = to_dm_atomic_state(dm->atomic_obj.state);
2189 enum dc_connection_type new_connection_type = dc_connection_none;
2190 struct dc_state *dc_state;
2193 if (amdgpu_in_reset(adev)) {
2194 dc_state = dm->cached_dc_state;
2196 r = dm_dmub_hw_init(adev);
2198 DRM_ERROR("DMUB interface failed to initialize: status=%d\n", r);
2200 dc_set_power_state(dm->dc, DC_ACPI_CM_POWER_STATE_D0);
2203 amdgpu_dm_irq_resume_early(adev);
2205 for (i = 0; i < dc_state->stream_count; i++) {
2206 dc_state->streams[i]->mode_changed = true;
2207 for (j = 0; j < dc_state->stream_status->plane_count; j++) {
2208 dc_state->stream_status->plane_states[j]->update_flags.raw
2213 WARN_ON(!dc_commit_state(dm->dc, dc_state));
2215 dm_gpureset_commit_state(dm->cached_dc_state, dm);
2217 dm_gpureset_toggle_interrupts(adev, dm->cached_dc_state, true);
2219 dc_release_state(dm->cached_dc_state);
2220 dm->cached_dc_state = NULL;
2222 amdgpu_dm_irq_resume_late(adev);
2224 mutex_unlock(&dm->dc_lock);
2228 /* Recreate dc_state - DC invalidates it when setting power state to S3. */
2229 dc_release_state(dm_state->context);
2230 dm_state->context = dc_create_state(dm->dc);
2231 /* TODO: Remove dc_state->dccg, use dc->dccg directly. */
2232 dc_resource_state_construct(dm->dc, dm_state->context);
2234 /* Before powering on DC we need to re-initialize DMUB. */
2235 r = dm_dmub_hw_init(adev);
2237 DRM_ERROR("DMUB interface failed to initialize: status=%d\n", r);
2239 /* power on hardware */
2240 dc_set_power_state(dm->dc, DC_ACPI_CM_POWER_STATE_D0);
2242 /* program HPD filter */
2246 * Early enable HPD Rx IRQ; this should be done before setting the mode,
2247 * as short-pulse interrupts are used for MST
2249 amdgpu_dm_irq_resume_early(adev);
2251 /* On resume we need to rewrite the MSTM control bits to enable MST */
2252 s3_handle_mst(ddev, false);
2255 drm_connector_list_iter_begin(ddev, &iter);
2256 drm_for_each_connector_iter(connector, &iter) {
2257 aconnector = to_amdgpu_dm_connector(connector);
2260 * This is the case when traversing through already created
2261 * MST connectors; they should be skipped
2263 if (aconnector->mst_port)
2266 mutex_lock(&aconnector->hpd_lock);
2267 if (!dc_link_detect_sink(aconnector->dc_link, &new_connection_type))
2268 DRM_ERROR("KMS: Failed to detect connector\n");
2270 if (aconnector->base.force && new_connection_type == dc_connection_none)
2271 emulated_link_detect(aconnector->dc_link);
2273 dc_link_detect(aconnector->dc_link, DETECT_REASON_HPD);
2275 if (aconnector->fake_enable && aconnector->dc_link->local_sink)
2276 aconnector->fake_enable = false;
2278 if (aconnector->dc_sink)
2279 dc_sink_release(aconnector->dc_sink);
2280 aconnector->dc_sink = NULL;
2281 amdgpu_dm_update_connector_after_detect(aconnector);
2282 mutex_unlock(&aconnector->hpd_lock);
2284 drm_connector_list_iter_end(&iter);
2286 /* Force mode set in atomic commit */
2287 for_each_new_crtc_in_state(dm->cached_state, crtc, new_crtc_state, i)
2288 new_crtc_state->active_changed = true;
2291 * atomic_check is expected to create the dc states. We need to release
2292 * them here, since they were duplicated as part of the suspend procedure.
2295 for_each_new_crtc_in_state(dm->cached_state, crtc, new_crtc_state, i) {
2296 dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
2297 if (dm_new_crtc_state->stream) {
2298 WARN_ON(kref_read(&dm_new_crtc_state->stream->refcount) > 1);
2299 dc_stream_release(dm_new_crtc_state->stream);
2300 dm_new_crtc_state->stream = NULL;
2304 for_each_new_plane_in_state(dm->cached_state, plane, new_plane_state, i) {
2305 dm_new_plane_state = to_dm_plane_state(new_plane_state);
2306 if (dm_new_plane_state->dc_state) {
2307 WARN_ON(kref_read(&dm_new_plane_state->dc_state->refcount) > 1);
2308 dc_plane_state_release(dm_new_plane_state->dc_state);
2309 dm_new_plane_state->dc_state = NULL;
2313 drm_atomic_helper_resume(ddev, dm->cached_state);
2315 dm->cached_state = NULL;
2317 #ifdef CONFIG_DRM_AMD_SECURE_DISPLAY
2318 amdgpu_dm_crtc_secure_display_resume(adev);
2321 amdgpu_dm_irq_resume_late(adev);
2323 amdgpu_dm_smu_write_watermarks_table(adev);
2331 * DM (and consequently DC) is registered in the amdgpu base driver as an IP
2332 * block. When CONFIG_DRM_AMD_DC is enabled, the DM device IP block is added to
2333 * the base driver's device list to be initialized and torn down accordingly.
2335 * The functions to do so are provided as hooks in &struct amd_ip_funcs.
2338 static const struct amd_ip_funcs amdgpu_dm_funcs = {
2340 .early_init = dm_early_init,
2341 .late_init = dm_late_init,
2342 .sw_init = dm_sw_init,
2343 .sw_fini = dm_sw_fini,
2344 .hw_init = dm_hw_init,
2345 .hw_fini = dm_hw_fini,
2346 .suspend = dm_suspend,
2347 .resume = dm_resume,
2348 .is_idle = dm_is_idle,
2349 .wait_for_idle = dm_wait_for_idle,
2350 .check_soft_reset = dm_check_soft_reset,
2351 .soft_reset = dm_soft_reset,
2352 .set_clockgating_state = dm_set_clockgating_state,
2353 .set_powergating_state = dm_set_powergating_state,
2356 const struct amdgpu_ip_block_version dm_ip_block =
2358 .type = AMD_IP_BLOCK_TYPE_DCE,
2362 .funcs = &amdgpu_dm_funcs,
2372 static const struct drm_mode_config_funcs amdgpu_dm_mode_funcs = {
2373 .fb_create = amdgpu_display_user_framebuffer_create,
2374 .get_format_info = amd_get_format_info,
2375 .output_poll_changed = drm_fb_helper_output_poll_changed,
2376 .atomic_check = amdgpu_dm_atomic_check,
2377 .atomic_commit = drm_atomic_helper_commit,
2380 static struct drm_mode_config_helper_funcs amdgpu_dm_mode_config_helperfuncs = {
2381 .atomic_commit_tail = amdgpu_dm_atomic_commit_tail
2384 static void update_connector_ext_caps(struct amdgpu_dm_connector *aconnector)
2386 u32 max_cll, min_cll, max, min, q, r;
2387 struct amdgpu_dm_backlight_caps *caps;
2388 struct amdgpu_display_manager *dm;
2389 struct drm_connector *conn_base;
2390 struct amdgpu_device *adev;
2391 struct dc_link *link = NULL;
2392 static const u8 pre_computed_values[] = {
2393 50, 51, 52, 53, 55, 56, 57, 58, 59, 61, 62, 63, 65, 66, 68, 69,
2394 71, 72, 74, 75, 77, 79, 81, 82, 84, 86, 88, 90, 92, 94, 96, 98};
2396 if (!aconnector || !aconnector->dc_link)
2399 link = aconnector->dc_link;
2400 if (link->connector_signal != SIGNAL_TYPE_EDP)
2403 conn_base = &aconnector->base;
2404 adev = drm_to_adev(conn_base->dev);
2406 caps = &dm->backlight_caps;
2407 caps->ext_caps = &aconnector->dc_link->dpcd_sink_ext_caps;
2408 caps->aux_support = false;
2409 max_cll = conn_base->hdr_sink_metadata.hdmi_type1.max_cll;
2410 min_cll = conn_base->hdr_sink_metadata.hdmi_type1.min_cll;
2412 if (caps->ext_caps->bits.oled == 1 ||
2413 caps->ext_caps->bits.sdr_aux_backlight_control == 1 ||
2414 caps->ext_caps->bits.hdr_aux_backlight_control == 1)
2415 caps->aux_support = true;
2417 if (amdgpu_backlight == 0)
2418 caps->aux_support = false;
2419 else if (amdgpu_backlight == 1)
2420 caps->aux_support = true;
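/*
 * Summary of the amdgpu_backlight override handled above: 0 forces PWM
 * backlight control (aux_support = false), 1 forces AUX control
 * (aux_support = true), and any other value keeps the default derived
 * from the sink's extended capabilities.
 */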
2422 /* From the specification (CTA-861-G), for calculating the maximum
2423 * luminance we need to use:
2424 * Luminance = 50*2**(CV/32)
2425 * Where CV is a one-byte value.
2426 * Calculating this expression directly would require floating-point
2427 * precision; to avoid that complexity we take advantage of the fact that
2428 * CV is divided by a constant. From Euclid's division algorithm, we know
2429 * that CV can be written as: CV = 32*q + r. Next, we replace CV in the
2430 * Luminance expression and get 50*(2**q)*(2**(r/32)), hence we just
2431 * need to pre-compute the values of 50*(2**(r/32)). The values were
2432 * pre-computed with the following Ruby line:
2433 * (0...32).each {|cv| puts (50*2**(cv/32.0)).round}
2434 * The results of that expression are stored in
2435 * pre_computed_values.
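 * A quick worked example (illustrative numbers only, as a sanity check of
 * the table): for max_cll = 100 the decomposition gives q = 3 and r = 4,
 * so max = (1 << 3) * pre_computed_values[4] = 8 * 55 = 440, which matches
 * the exact value 50*2**(100/32) ~= 436 up to the rounding in the table.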
2439 max = (1 << q) * pre_computed_values[r];
2441 // min luminance: maxLum * (CV/255)^2 / 100
2442 q = DIV_ROUND_CLOSEST(min_cll, 255);
2443 min = max * DIV_ROUND_CLOSEST((q * q), 100);
2445 caps->aux_max_input_signal = max;
2446 caps->aux_min_input_signal = min;
2449 void amdgpu_dm_update_connector_after_detect(
2450 struct amdgpu_dm_connector *aconnector)
2452 struct drm_connector *connector = &aconnector->base;
2453 struct drm_device *dev = connector->dev;
2454 struct dc_sink *sink;
2456 /* MST handled by drm_mst framework */
2457 if (aconnector->mst_mgr.mst_state == true)
2460 sink = aconnector->dc_link->local_sink;
2462 dc_sink_retain(sink);
2465 * The EDID-managed connector gets its first update only in the mode_valid hook;
2466 * then the connector sink is set to either a fake or a physical sink, depending on link status.
2467 * Skip if already done during boot.
2469 if (aconnector->base.force != DRM_FORCE_UNSPECIFIED
2470 && aconnector->dc_em_sink) {
2473 * For S3 resume with headless, use the emulated sink (dc_em_sink) to fake the stream,
2474 * because on resume connector->sink is set to NULL
2476 mutex_lock(&dev->mode_config.mutex);
2479 if (aconnector->dc_sink) {
2480 amdgpu_dm_update_freesync_caps(connector, NULL);
2482 * The retain and release below are used to
2483 * bump up the refcount for the sink because the link doesn't point
2484 * to it anymore after disconnect; otherwise, on the next crtc-to-connector
2485 * reshuffle by the UMD we would run into an unwanted dc_sink release
2487 dc_sink_release(aconnector->dc_sink);
2489 aconnector->dc_sink = sink;
2490 dc_sink_retain(aconnector->dc_sink);
2491 amdgpu_dm_update_freesync_caps(connector,
2494 amdgpu_dm_update_freesync_caps(connector, NULL);
2495 if (!aconnector->dc_sink) {
2496 aconnector->dc_sink = aconnector->dc_em_sink;
2497 dc_sink_retain(aconnector->dc_sink);
2501 mutex_unlock(&dev->mode_config.mutex);
2504 dc_sink_release(sink);
2509 * TODO: temporary guard while looking for a proper fix;
2510 * if this sink is an MST sink, we should not do anything
2512 if (sink && sink->sink_signal == SIGNAL_TYPE_DISPLAY_PORT_MST) {
2513 dc_sink_release(sink);
2517 if (aconnector->dc_sink == sink) {
2519 * We got a DP short pulse (Link Loss, DP CTS, etc...).
2522 DRM_DEBUG_DRIVER("DCHPD: connector_id=%d: dc_sink didn't change.\n",
2523 aconnector->connector_id);
2525 dc_sink_release(sink);
2529 DRM_DEBUG_DRIVER("DCHPD: connector_id=%d: Old sink=%p New sink=%p\n",
2530 aconnector->connector_id, aconnector->dc_sink, sink);
2532 mutex_lock(&dev->mode_config.mutex);
2535 * 1. Update status of the drm connector
2536 * 2. Send an event and let userspace tell us what to do
2540 * TODO: check if we still need the S3 mode update workaround.
2541 * If yes, put it here.
2543 if (aconnector->dc_sink) {
2544 amdgpu_dm_update_freesync_caps(connector, NULL);
2545 dc_sink_release(aconnector->dc_sink);
2548 aconnector->dc_sink = sink;
2549 dc_sink_retain(aconnector->dc_sink);
2550 if (sink->dc_edid.length == 0) {
2551 aconnector->edid = NULL;
2552 if (aconnector->dc_link->aux_mode) {
2553 drm_dp_cec_unset_edid(
2554 &aconnector->dm_dp_aux.aux);
2558 (struct edid *)sink->dc_edid.raw_edid;
2560 drm_connector_update_edid_property(connector,
2562 if (aconnector->dc_link->aux_mode)
2563 drm_dp_cec_set_edid(&aconnector->dm_dp_aux.aux,
2567 amdgpu_dm_update_freesync_caps(connector, aconnector->edid);
2568 update_connector_ext_caps(aconnector);
2570 drm_dp_cec_unset_edid(&aconnector->dm_dp_aux.aux);
2571 amdgpu_dm_update_freesync_caps(connector, NULL);
2572 drm_connector_update_edid_property(connector, NULL);
2573 aconnector->num_modes = 0;
2574 dc_sink_release(aconnector->dc_sink);
2575 aconnector->dc_sink = NULL;
2576 aconnector->edid = NULL;
2577 #ifdef CONFIG_DRM_AMD_DC_HDCP
2578 /* Set CP to DESIRED if it was ENABLED, so we can re-enable it again on hotplug */
2579 if (connector->state->content_protection == DRM_MODE_CONTENT_PROTECTION_ENABLED)
2580 connector->state->content_protection = DRM_MODE_CONTENT_PROTECTION_DESIRED;
2584 mutex_unlock(&dev->mode_config.mutex);
2586 update_subconnector_property(aconnector);
2589 dc_sink_release(sink);
2592 static void handle_hpd_irq(void *param)
2594 struct amdgpu_dm_connector *aconnector = (struct amdgpu_dm_connector *)param;
2595 struct drm_connector *connector = &aconnector->base;
2596 struct drm_device *dev = connector->dev;
2597 enum dc_connection_type new_connection_type = dc_connection_none;
2598 struct amdgpu_device *adev = drm_to_adev(dev);
2599 #ifdef CONFIG_DRM_AMD_DC_HDCP
2600 struct dm_connector_state *dm_con_state = to_dm_connector_state(connector->state);
2603 if (adev->dm.disable_hpd_irq)
2607 * In case of failure or MST there is no need to update the connector status or notify the OS,
2608 * since (for the MST case) MST does this in its own context.
2610 mutex_lock(&aconnector->hpd_lock);
2612 #ifdef CONFIG_DRM_AMD_DC_HDCP
2613 if (adev->dm.hdcp_workqueue) {
2614 hdcp_reset_display(adev->dm.hdcp_workqueue, aconnector->dc_link->link_index);
2615 dm_con_state->update_hdcp = true;
2618 if (aconnector->fake_enable)
2619 aconnector->fake_enable = false;
2621 if (!dc_link_detect_sink(aconnector->dc_link, &new_connection_type))
2622 DRM_ERROR("KMS: Failed to detect connector\n");
2624 if (aconnector->base.force && new_connection_type == dc_connection_none) {
2625 emulated_link_detect(aconnector->dc_link);
2628 drm_modeset_lock_all(dev);
2629 dm_restore_drm_connector_state(dev, connector);
2630 drm_modeset_unlock_all(dev);
2632 if (aconnector->base.force == DRM_FORCE_UNSPECIFIED)
2633 drm_kms_helper_hotplug_event(dev);
2635 } else if (dc_link_detect(aconnector->dc_link, DETECT_REASON_HPD)) {
2636 if (new_connection_type == dc_connection_none &&
2637 aconnector->dc_link->type == dc_connection_none)
2638 dm_set_dpms_off(aconnector->dc_link);
2640 amdgpu_dm_update_connector_after_detect(aconnector);
2642 drm_modeset_lock_all(dev);
2643 dm_restore_drm_connector_state(dev, connector);
2644 drm_modeset_unlock_all(dev);
2646 if (aconnector->base.force == DRM_FORCE_UNSPECIFIED)
2647 drm_kms_helper_hotplug_event(dev);
2649 mutex_unlock(&aconnector->hpd_lock);
2653 static void dm_handle_hpd_rx_irq(struct amdgpu_dm_connector *aconnector)
2655 uint8_t esi[DP_PSR_ERROR_STATUS - DP_SINK_COUNT_ESI] = { 0 };
2657 bool new_irq_handled = false;
2659 int dpcd_bytes_to_read;
2661 const int max_process_count = 30;
2662 int process_count = 0;
2664 const struct dc_link_status *link_status = dc_link_get_status(aconnector->dc_link);
2666 if (link_status->dpcd_caps->dpcd_rev.raw < 0x12) {
2667 dpcd_bytes_to_read = DP_LANE0_1_STATUS - DP_SINK_COUNT;
2668 /* DPCD 0x200 - 0x201 for downstream IRQ */
2669 dpcd_addr = DP_SINK_COUNT;
2671 dpcd_bytes_to_read = DP_PSR_ERROR_STATUS - DP_SINK_COUNT_ESI;
2672 /* DPCD 0x2002 - 0x2005 for downstream IRQ */
2673 dpcd_addr = DP_SINK_COUNT_ESI;
2676 dret = drm_dp_dpcd_read(
2677 &aconnector->dm_dp_aux.aux,
2680 dpcd_bytes_to_read);
2682 while (dret == dpcd_bytes_to_read &&
2683 process_count < max_process_count) {
2689 DRM_DEBUG_DRIVER("ESI %02x %02x %02x\n", esi[0], esi[1], esi[2]);
2690 /* handle HPD short pulse irq */
2691 if (aconnector->mst_mgr.mst_state)
2693 &aconnector->mst_mgr,
2697 if (new_irq_handled) {
2698 /* ACK at DPCD to notify downstream */
2699 const int ack_dpcd_bytes_to_write =
2700 dpcd_bytes_to_read - 1;
2702 for (retry = 0; retry < 3; retry++) {
2705 wret = drm_dp_dpcd_write(
2706 &aconnector->dm_dp_aux.aux,
2709 ack_dpcd_bytes_to_write);
2710 if (wret == ack_dpcd_bytes_to_write)
2714 /* check if there is new irq to be handled */
2715 dret = drm_dp_dpcd_read(
2716 &aconnector->dm_dp_aux.aux,
2719 dpcd_bytes_to_read);
2721 new_irq_handled = false;
2727 if (process_count == max_process_count)
2728 DRM_DEBUG_DRIVER("Loop exceeded max iterations\n");
2731 static void handle_hpd_rx_irq(void *param)
2733 struct amdgpu_dm_connector *aconnector = (struct amdgpu_dm_connector *)param;
2734 struct drm_connector *connector = &aconnector->base;
2735 struct drm_device *dev = connector->dev;
2736 struct dc_link *dc_link = aconnector->dc_link;
2737 bool is_mst_root_connector = aconnector->mst_mgr.mst_state;
2738 bool result = false;
2739 enum dc_connection_type new_connection_type = dc_connection_none;
2740 struct amdgpu_device *adev = drm_to_adev(dev);
2741 union hpd_irq_data hpd_irq_data;
2743 memset(&hpd_irq_data, 0, sizeof(hpd_irq_data));
2745 if (adev->dm.disable_hpd_irq)
2750 * TODO: temporarily add a mutex so the hpd interrupt does not run into a gpio
2751 * conflict; once the i2c helper is implemented, this mutex should be retired.
2754 mutex_lock(&aconnector->hpd_lock);
2756 read_hpd_rx_irq_data(dc_link, &hpd_irq_data);
2758 if ((dc_link->cur_link_settings.lane_count != LANE_COUNT_UNKNOWN) ||
2759 (dc_link->type == dc_connection_mst_branch)) {
2760 if (hpd_irq_data.bytes.device_service_irq.bits.UP_REQ_MSG_RDY) {
2762 dm_handle_hpd_rx_irq(aconnector);
2764 } else if (hpd_irq_data.bytes.device_service_irq.bits.DOWN_REP_MSG_RDY) {
2766 dm_handle_hpd_rx_irq(aconnector);
2771 if (!amdgpu_in_reset(adev)) {
2772 mutex_lock(&adev->dm.dc_lock);
2773 #ifdef CONFIG_DRM_AMD_DC_HDCP
2774 result = dc_link_handle_hpd_rx_irq(dc_link, &hpd_irq_data, NULL);
2776 result = dc_link_handle_hpd_rx_irq(dc_link, NULL, NULL);
2778 mutex_unlock(&adev->dm.dc_lock);
2782 if (result && !is_mst_root_connector) {
2783 /* Downstream Port status changed. */
2784 if (!dc_link_detect_sink(dc_link, &new_connection_type))
2785 DRM_ERROR("KMS: Failed to detect connector\n");
2787 if (aconnector->base.force && new_connection_type == dc_connection_none) {
2788 emulated_link_detect(dc_link);
2790 if (aconnector->fake_enable)
2791 aconnector->fake_enable = false;
2793 amdgpu_dm_update_connector_after_detect(aconnector);
2796 drm_modeset_lock_all(dev);
2797 dm_restore_drm_connector_state(dev, connector);
2798 drm_modeset_unlock_all(dev);
2800 drm_kms_helper_hotplug_event(dev);
2801 } else if (dc_link_detect(dc_link, DETECT_REASON_HPDRX)) {
2803 if (aconnector->fake_enable)
2804 aconnector->fake_enable = false;
2806 amdgpu_dm_update_connector_after_detect(aconnector);
2809 drm_modeset_lock_all(dev);
2810 dm_restore_drm_connector_state(dev, connector);
2811 drm_modeset_unlock_all(dev);
2813 drm_kms_helper_hotplug_event(dev);
2816 #ifdef CONFIG_DRM_AMD_DC_HDCP
2817 if (hpd_irq_data.bytes.device_service_irq.bits.CP_IRQ) {
2818 if (adev->dm.hdcp_workqueue)
2819 hdcp_handle_cpirq(adev->dm.hdcp_workqueue, aconnector->base.index);
2823 if (dc_link->type != dc_connection_mst_branch)
2824 drm_dp_cec_irq(&aconnector->dm_dp_aux.aux);
2826 mutex_unlock(&aconnector->hpd_lock);
2829 static void register_hpd_handlers(struct amdgpu_device *adev)
2831 struct drm_device *dev = adev_to_drm(adev);
2832 struct drm_connector *connector;
2833 struct amdgpu_dm_connector *aconnector;
2834 const struct dc_link *dc_link;
2835 struct dc_interrupt_params int_params = {0};
2837 int_params.requested_polarity = INTERRUPT_POLARITY_DEFAULT;
2838 int_params.current_polarity = INTERRUPT_POLARITY_DEFAULT;
2840 list_for_each_entry(connector,
2841 &dev->mode_config.connector_list, head) {
2843 aconnector = to_amdgpu_dm_connector(connector);
2844 dc_link = aconnector->dc_link;
2846 if (DC_IRQ_SOURCE_INVALID != dc_link->irq_source_hpd) {
2847 int_params.int_context = INTERRUPT_LOW_IRQ_CONTEXT;
2848 int_params.irq_source = dc_link->irq_source_hpd;
2850 amdgpu_dm_irq_register_interrupt(adev, &int_params,
2852 (void *) aconnector);
2855 if (DC_IRQ_SOURCE_INVALID != dc_link->irq_source_hpd_rx) {
2857 /* Also register for DP short pulse (hpd_rx). */
2858 int_params.int_context = INTERRUPT_LOW_IRQ_CONTEXT;
2859 int_params.irq_source = dc_link->irq_source_hpd_rx;
2861 amdgpu_dm_irq_register_interrupt(adev, &int_params,
2863 (void *) aconnector);
2868 #if defined(CONFIG_DRM_AMD_DC_SI)
2869 /* Register IRQ sources and initialize IRQ callbacks */
2870 static int dce60_register_irq_handlers(struct amdgpu_device *adev)
2872 struct dc *dc = adev->dm.dc;
2873 struct common_irq_params *c_irq_params;
2874 struct dc_interrupt_params int_params = {0};
2877 unsigned client_id = AMDGPU_IRQ_CLIENTID_LEGACY;
2879 int_params.requested_polarity = INTERRUPT_POLARITY_DEFAULT;
2880 int_params.current_polarity = INTERRUPT_POLARITY_DEFAULT;
2883 * Actions of amdgpu_irq_add_id():
2884 * 1. Register a set() function with base driver.
2885 * Base driver will call set() function to enable/disable an
2886 * interrupt in DC hardware.
2887 * 2. Register amdgpu_dm_irq_handler().
2888 * Base driver will call amdgpu_dm_irq_handler() for ALL interrupts
2889 * coming from DC hardware.
2890 * amdgpu_dm_irq_handler() will re-direct the interrupt to DC
2891 * for acknowledging and handling. */
2893 /* Use VBLANK interrupt */
2894 for (i = 0; i < adev->mode_info.num_crtc; i++) {
2895 r = amdgpu_irq_add_id(adev, client_id, i + 1, &adev->crtc_irq);
2897 DRM_ERROR("Failed to add crtc irq id!\n");
2901 int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
2902 int_params.irq_source =
2903 dc_interrupt_to_irq_source(dc, i + 1, 0);
2905 c_irq_params = &adev->dm.vblank_params[int_params.irq_source - DC_IRQ_SOURCE_VBLANK1];
2907 c_irq_params->adev = adev;
2908 c_irq_params->irq_src = int_params.irq_source;
2910 amdgpu_dm_irq_register_interrupt(adev, &int_params,
2911 dm_crtc_high_irq, c_irq_params);
2914 /* Use GRPH_PFLIP interrupt */
2915 for (i = VISLANDS30_IV_SRCID_D1_GRPH_PFLIP;
2916 i <= VISLANDS30_IV_SRCID_D6_GRPH_PFLIP; i += 2) {
2917 r = amdgpu_irq_add_id(adev, client_id, i, &adev->pageflip_irq);
2919 DRM_ERROR("Failed to add page flip irq id!\n");
2923 int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
2924 int_params.irq_source =
2925 dc_interrupt_to_irq_source(dc, i, 0);
2927 c_irq_params = &adev->dm.pflip_params[int_params.irq_source - DC_IRQ_SOURCE_PFLIP_FIRST];
2929 c_irq_params->adev = adev;
2930 c_irq_params->irq_src = int_params.irq_source;
2932 amdgpu_dm_irq_register_interrupt(adev, &int_params,
2933 dm_pflip_high_irq, c_irq_params);
2938 r = amdgpu_irq_add_id(adev, client_id,
2939 VISLANDS30_IV_SRCID_HOTPLUG_DETECT_A, &adev->hpd_irq);
2941 DRM_ERROR("Failed to add hpd irq id!\n");
2945 register_hpd_handlers(adev);
2951 /* Register IRQ sources and initialize IRQ callbacks */
2952 static int dce110_register_irq_handlers(struct amdgpu_device *adev)
2954 struct dc *dc = adev->dm.dc;
2955 struct common_irq_params *c_irq_params;
2956 struct dc_interrupt_params int_params = {0};
2959 unsigned client_id = AMDGPU_IRQ_CLIENTID_LEGACY;
2961 if (adev->asic_type >= CHIP_VEGA10)
2962 client_id = SOC15_IH_CLIENTID_DCE;
2964 int_params.requested_polarity = INTERRUPT_POLARITY_DEFAULT;
2965 int_params.current_polarity = INTERRUPT_POLARITY_DEFAULT;
2968 * Actions of amdgpu_irq_add_id():
2969 * 1. Register a set() function with base driver.
2970 * Base driver will call set() function to enable/disable an
2971 * interrupt in DC hardware.
2972 * 2. Register amdgpu_dm_irq_handler().
2973 * Base driver will call amdgpu_dm_irq_handler() for ALL interrupts
2974 * coming from DC hardware.
2975 * amdgpu_dm_irq_handler() will re-direct the interrupt to DC
2976 * for acknowledging and handling. */
2978 /* Use VBLANK interrupt */
2979 for (i = VISLANDS30_IV_SRCID_D1_VERTICAL_INTERRUPT0; i <= VISLANDS30_IV_SRCID_D6_VERTICAL_INTERRUPT0; i++) {
2980 r = amdgpu_irq_add_id(adev, client_id, i, &adev->crtc_irq);
2982 DRM_ERROR("Failed to add crtc irq id!\n");
2986 int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
2987 int_params.irq_source =
2988 dc_interrupt_to_irq_source(dc, i, 0);
2990 c_irq_params = &adev->dm.vblank_params[int_params.irq_source - DC_IRQ_SOURCE_VBLANK1];
2992 c_irq_params->adev = adev;
2993 c_irq_params->irq_src = int_params.irq_source;
2995 amdgpu_dm_irq_register_interrupt(adev, &int_params,
2996 dm_crtc_high_irq, c_irq_params);
2999 /* Use VUPDATE interrupt */
3000 for (i = VISLANDS30_IV_SRCID_D1_V_UPDATE_INT; i <= VISLANDS30_IV_SRCID_D6_V_UPDATE_INT; i += 2) {
3001 r = amdgpu_irq_add_id(adev, client_id, i, &adev->vupdate_irq);
3003 DRM_ERROR("Failed to add vupdate irq id!\n");
3007 int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
3008 int_params.irq_source =
3009 dc_interrupt_to_irq_source(dc, i, 0);
3011 c_irq_params = &adev->dm.vupdate_params[int_params.irq_source - DC_IRQ_SOURCE_VUPDATE1];
3013 c_irq_params->adev = adev;
3014 c_irq_params->irq_src = int_params.irq_source;
3016 amdgpu_dm_irq_register_interrupt(adev, &int_params,
3017 dm_vupdate_high_irq, c_irq_params);
3020 /* Use GRPH_PFLIP interrupt */
3021 for (i = VISLANDS30_IV_SRCID_D1_GRPH_PFLIP;
3022 i <= VISLANDS30_IV_SRCID_D6_GRPH_PFLIP; i += 2) {
3023 r = amdgpu_irq_add_id(adev, client_id, i, &adev->pageflip_irq);
3025 DRM_ERROR("Failed to add page flip irq id!\n");
3029 int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
3030 int_params.irq_source =
3031 dc_interrupt_to_irq_source(dc, i, 0);
3033 c_irq_params = &adev->dm.pflip_params[int_params.irq_source - DC_IRQ_SOURCE_PFLIP_FIRST];
3035 c_irq_params->adev = adev;
3036 c_irq_params->irq_src = int_params.irq_source;
3038 amdgpu_dm_irq_register_interrupt(adev, &int_params,
3039 dm_pflip_high_irq, c_irq_params);
3044 r = amdgpu_irq_add_id(adev, client_id,
3045 VISLANDS30_IV_SRCID_HOTPLUG_DETECT_A, &adev->hpd_irq);
3047 DRM_ERROR("Failed to add hpd irq id!\n");
3051 register_hpd_handlers(adev);
3056 #if defined(CONFIG_DRM_AMD_DC_DCN)
3057 /* Register IRQ sources and initialize IRQ callbacks */
3058 static int dcn10_register_irq_handlers(struct amdgpu_device *adev)
3060 struct dc *dc = adev->dm.dc;
3061 struct common_irq_params *c_irq_params;
3062 struct dc_interrupt_params int_params = {0};
3065 #if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
3066 static const unsigned int vrtl_int_srcid[] = {
3067 DCN_1_0__SRCID__OTG1_VERTICAL_INTERRUPT0_CONTROL,
3068 DCN_1_0__SRCID__OTG2_VERTICAL_INTERRUPT0_CONTROL,
3069 DCN_1_0__SRCID__OTG3_VERTICAL_INTERRUPT0_CONTROL,
3070 DCN_1_0__SRCID__OTG4_VERTICAL_INTERRUPT0_CONTROL,
3071 DCN_1_0__SRCID__OTG5_VERTICAL_INTERRUPT0_CONTROL,
3072 DCN_1_0__SRCID__OTG6_VERTICAL_INTERRUPT0_CONTROL
3076 int_params.requested_polarity = INTERRUPT_POLARITY_DEFAULT;
3077 int_params.current_polarity = INTERRUPT_POLARITY_DEFAULT;
3080 * Actions of amdgpu_irq_add_id():
3081 * 1. Register a set() function with base driver.
3082 * Base driver will call set() function to enable/disable an
3083 * interrupt in DC hardware.
3084 * 2. Register amdgpu_dm_irq_handler().
3085 * Base driver will call amdgpu_dm_irq_handler() for ALL interrupts
3086 * coming from DC hardware.
3087 * amdgpu_dm_irq_handler() will re-direct the interrupt to DC
3088 * for acknowledging and handling.
3091 /* Use VSTARTUP interrupt */
3092 for (i = DCN_1_0__SRCID__DC_D1_OTG_VSTARTUP;
3093 i <= DCN_1_0__SRCID__DC_D1_OTG_VSTARTUP + adev->mode_info.num_crtc - 1;
3095 r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_DCE, i, &adev->crtc_irq);
3098 DRM_ERROR("Failed to add crtc irq id!\n");
3102 int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
3103 int_params.irq_source =
3104 dc_interrupt_to_irq_source(dc, i, 0);
3106 c_irq_params = &adev->dm.vblank_params[int_params.irq_source - DC_IRQ_SOURCE_VBLANK1];
3108 c_irq_params->adev = adev;
3109 c_irq_params->irq_src = int_params.irq_source;
3111 amdgpu_dm_irq_register_interrupt(
3112 adev, &int_params, dm_crtc_high_irq, c_irq_params);
3115 /* Use otg vertical line interrupt */
3116 #if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
3117 for (i = 0; i <= adev->mode_info.num_crtc - 1; i++) {
3118 r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_DCE,
3119 vrtl_int_srcid[i], &adev->vline0_irq);
3122 DRM_ERROR("Failed to add vline0 irq id!\n");
3126 int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
3127 int_params.irq_source =
3128 dc_interrupt_to_irq_source(dc, vrtl_int_srcid[i], 0);
3130 if (int_params.irq_source == DC_IRQ_SOURCE_INVALID) {
3131 DRM_ERROR("Failed to register vline0 irq %d!\n", vrtl_int_srcid[i]);
3135 c_irq_params = &adev->dm.vline0_params[int_params.irq_source
3136 - DC_IRQ_SOURCE_DC1_VLINE0];
3138 c_irq_params->adev = adev;
3139 c_irq_params->irq_src = int_params.irq_source;
3141 amdgpu_dm_irq_register_interrupt(adev, &int_params,
3142 dm_dcn_vertical_interrupt0_high_irq, c_irq_params);
3146 /* Use VUPDATE_NO_LOCK interrupt on DCN, which seems to correspond to
3147 * the regular VUPDATE interrupt on DCE. We want DC_IRQ_SOURCE_VUPDATEx
3148 * to trigger at end of each vblank, regardless of state of the lock,
3149 * matching DCE behaviour.
3151 for (i = DCN_1_0__SRCID__OTG0_IHC_V_UPDATE_NO_LOCK_INTERRUPT;
3152 i <= DCN_1_0__SRCID__OTG0_IHC_V_UPDATE_NO_LOCK_INTERRUPT + adev->mode_info.num_crtc - 1;
3154 r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_DCE, i, &adev->vupdate_irq);
3157 DRM_ERROR("Failed to add vupdate irq id!\n");
3161 int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
3162 int_params.irq_source =
3163 dc_interrupt_to_irq_source(dc, i, 0);
3165 c_irq_params = &adev->dm.vupdate_params[int_params.irq_source - DC_IRQ_SOURCE_VUPDATE1];
3167 c_irq_params->adev = adev;
3168 c_irq_params->irq_src = int_params.irq_source;
3170 amdgpu_dm_irq_register_interrupt(adev, &int_params,
3171 dm_vupdate_high_irq, c_irq_params);
3174 /* Use GRPH_PFLIP interrupt */
3175 for (i = DCN_1_0__SRCID__HUBP0_FLIP_INTERRUPT;
3176 i <= DCN_1_0__SRCID__HUBP0_FLIP_INTERRUPT + adev->mode_info.num_crtc - 1;
3178 r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_DCE, i, &adev->pageflip_irq);
3180 DRM_ERROR("Failed to add page flip irq id!\n");
3184 int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
3185 int_params.irq_source =
3186 dc_interrupt_to_irq_source(dc, i, 0);
3188 c_irq_params = &adev->dm.pflip_params[int_params.irq_source - DC_IRQ_SOURCE_PFLIP_FIRST];
3190 c_irq_params->adev = adev;
3191 c_irq_params->irq_src = int_params.irq_source;
3193 amdgpu_dm_irq_register_interrupt(adev, &int_params,
3194 dm_pflip_high_irq, c_irq_params);
3199 r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_DCE, DCN_1_0__SRCID__DC_HPD1_INT,
3202 DRM_ERROR("Failed to add hpd irq id!\n");
3206 register_hpd_handlers(adev);
3210 /* Register Outbox IRQ sources and initialize IRQ callbacks */
3211 static int register_outbox_irq_handlers(struct amdgpu_device *adev)
3213 struct dc *dc = adev->dm.dc;
3214 struct common_irq_params *c_irq_params;
3215 struct dc_interrupt_params int_params = {0};
3218 int_params.requested_polarity = INTERRUPT_POLARITY_DEFAULT;
3219 int_params.current_polarity = INTERRUPT_POLARITY_DEFAULT;
3221 r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_DCE, DCN_1_0__SRCID__DMCUB_OUTBOX_LOW_PRIORITY_READY_INT,
3222 &adev->dmub_outbox_irq);
3224 DRM_ERROR("Failed to add outbox irq id!\n");
3228 if (dc->ctx->dmub_srv) {
3229 i = DCN_1_0__SRCID__DMCUB_OUTBOX_LOW_PRIORITY_READY_INT;
3230 int_params.int_context = INTERRUPT_LOW_IRQ_CONTEXT;
3231 int_params.irq_source =
3232 dc_interrupt_to_irq_source(dc, i, 0);
3234 c_irq_params = &adev->dm.dmub_outbox_params[0];
3236 c_irq_params->adev = adev;
3237 c_irq_params->irq_src = int_params.irq_source;
3239 amdgpu_dm_irq_register_interrupt(adev, &int_params,
3240 dm_dmub_outbox1_low_irq, c_irq_params);
3248 * Acquires the lock for the atomic state object and returns
3249 * the new atomic state.
3251 * This should only be called during atomic check.
3253 static int dm_atomic_get_state(struct drm_atomic_state *state,
3254 struct dm_atomic_state **dm_state)
3256 struct drm_device *dev = state->dev;
3257 struct amdgpu_device *adev = drm_to_adev(dev);
3258 struct amdgpu_display_manager *dm = &adev->dm;
3259 struct drm_private_state *priv_state;
3264 priv_state = drm_atomic_get_private_obj_state(state, &dm->atomic_obj);
3265 if (IS_ERR(priv_state))
3266 return PTR_ERR(priv_state);
3268 *dm_state = to_dm_atomic_state(priv_state);
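/*
 * Illustrative usage from an atomic-check path (hypothetical caller, shown
 * only as a sketch):
 *
 *	struct dm_atomic_state *dm_state;
 *	int ret = dm_atomic_get_state(state, &dm_state);
 *
 *	if (ret)
 *		return ret;
 *	// dm_state->context can now be inspected or modified safely
 */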
3273 static struct dm_atomic_state *
3274 dm_atomic_get_new_state(struct drm_atomic_state *state)
3276 struct drm_device *dev = state->dev;
3277 struct amdgpu_device *adev = drm_to_adev(dev);
3278 struct amdgpu_display_manager *dm = &adev->dm;
3279 struct drm_private_obj *obj;
3280 struct drm_private_state *new_obj_state;
3283 for_each_new_private_obj_in_state(state, obj, new_obj_state, i) {
3284 if (obj->funcs == dm->atomic_obj.funcs)
3285 return to_dm_atomic_state(new_obj_state);
3291 static struct drm_private_state *
3292 dm_atomic_duplicate_state(struct drm_private_obj *obj)
3294 struct dm_atomic_state *old_state, *new_state;
3296 new_state = kzalloc(sizeof(*new_state), GFP_KERNEL);
3300 __drm_atomic_helper_private_obj_duplicate_state(obj, &new_state->base);
3302 old_state = to_dm_atomic_state(obj->state);
3304 if (old_state && old_state->context)
3305 new_state->context = dc_copy_state(old_state->context);
3307 if (!new_state->context) {
3312 return &new_state->base;
3315 static void dm_atomic_destroy_state(struct drm_private_obj *obj,
3316 struct drm_private_state *state)
3318 struct dm_atomic_state *dm_state = to_dm_atomic_state(state);
3320 if (dm_state && dm_state->context)
3321 dc_release_state(dm_state->context);
3326 static struct drm_private_state_funcs dm_atomic_state_funcs = {
3327 .atomic_duplicate_state = dm_atomic_duplicate_state,
3328 .atomic_destroy_state = dm_atomic_destroy_state,
3331 static int amdgpu_dm_mode_config_init(struct amdgpu_device *adev)
3333 struct dm_atomic_state *state;
3336 adev->mode_info.mode_config_initialized = true;
3338 adev_to_drm(adev)->mode_config.funcs = (void *)&amdgpu_dm_mode_funcs;
3339 adev_to_drm(adev)->mode_config.helper_private = &amdgpu_dm_mode_config_helperfuncs;
3341 adev_to_drm(adev)->mode_config.max_width = 16384;
3342 adev_to_drm(adev)->mode_config.max_height = 16384;
3344 adev_to_drm(adev)->mode_config.preferred_depth = 24;
3345 adev_to_drm(adev)->mode_config.prefer_shadow = 1;
3346 /* indicates support for immediate flip */
3347 adev_to_drm(adev)->mode_config.async_page_flip = true;
3349 adev_to_drm(adev)->mode_config.fb_base = adev->gmc.aper_base;
3351 state = kzalloc(sizeof(*state), GFP_KERNEL);
3355 state->context = dc_create_state(adev->dm.dc);
3356 if (!state->context) {
3361 dc_resource_state_copy_construct_current(adev->dm.dc, state->context);
3363 drm_atomic_private_obj_init(adev_to_drm(adev),
3364 &adev->dm.atomic_obj,
3366 &dm_atomic_state_funcs);
3368 r = amdgpu_display_modeset_create_props(adev);
3370 dc_release_state(state->context);
3375 r = amdgpu_dm_audio_init(adev);
3377 dc_release_state(state->context);
3385 #define AMDGPU_DM_DEFAULT_MIN_BACKLIGHT 12
3386 #define AMDGPU_DM_DEFAULT_MAX_BACKLIGHT 255
3387 #define AUX_BL_DEFAULT_TRANSITION_TIME_MS 50
3389 #if defined(CONFIG_BACKLIGHT_CLASS_DEVICE) ||\
3390 defined(CONFIG_BACKLIGHT_CLASS_DEVICE_MODULE)
3392 static void amdgpu_dm_update_backlight_caps(struct amdgpu_display_manager *dm)
3394 #if defined(CONFIG_ACPI)
3395 struct amdgpu_dm_backlight_caps caps;
3397 memset(&caps, 0, sizeof(caps));
3399 if (dm->backlight_caps.caps_valid)
3402 amdgpu_acpi_get_backlight_caps(dm->adev, &caps);
3403 if (caps.caps_valid) {
3404 dm->backlight_caps.caps_valid = true;
3405 if (caps.aux_support)
3407 dm->backlight_caps.min_input_signal = caps.min_input_signal;
3408 dm->backlight_caps.max_input_signal = caps.max_input_signal;
3410 dm->backlight_caps.min_input_signal =
3411 AMDGPU_DM_DEFAULT_MIN_BACKLIGHT;
3412 dm->backlight_caps.max_input_signal =
3413 AMDGPU_DM_DEFAULT_MAX_BACKLIGHT;
3416 if (dm->backlight_caps.aux_support)
3419 dm->backlight_caps.min_input_signal = AMDGPU_DM_DEFAULT_MIN_BACKLIGHT;
3420 dm->backlight_caps.max_input_signal = AMDGPU_DM_DEFAULT_MAX_BACKLIGHT;
3424 static int get_brightness_range(const struct amdgpu_dm_backlight_caps *caps,
3425 unsigned *min, unsigned *max)
3430 if (caps->aux_support) {
3431 // Firmware limits are in nits, DC API wants millinits.
3432 *max = 1000 * caps->aux_max_input_signal;
3433 *min = 1000 * caps->aux_min_input_signal;
3435 // Firmware limits are 8-bit, PWM control is 16-bit.
3436 *max = 0x101 * caps->max_input_signal;
3437 *min = 0x101 * caps->min_input_signal;
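/*
 * A short sanity check of the conversions above (illustrative numbers):
 * for AUX, a 500 nit firmware limit becomes 500 * 1000 = 500000 millinits;
 * for PWM, an 8-bit firmware max of 0xFF becomes 0x101 * 0xFF = 0xFFFF,
 * i.e. the full 16-bit PWM range.
 */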
3442 static u32 convert_brightness_from_user(const struct amdgpu_dm_backlight_caps *caps,
3443 uint32_t brightness)
3447 if (!get_brightness_range(caps, &min, &max))
3450 // Rescale 0..255 to min..max
3451 return min + DIV_ROUND_CLOSEST((max - min) * brightness,
3452 AMDGPU_MAX_BL_LEVEL);
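/*
 * Sanity check of the linear rescale above (assuming AMDGPU_MAX_BL_LEVEL
 * is 255): a user brightness of 0 maps to min, 255 maps to
 * min + (max - min) = max, and intermediate values are rounded to the
 * nearest step in between.
 */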
3455 static u32 convert_brightness_to_user(const struct amdgpu_dm_backlight_caps *caps,
3456 uint32_t brightness)
3460 if (!get_brightness_range(caps, &min, &max))
3463 if (brightness < min)
3465 // Rescale min..max to 0..255
3466 return DIV_ROUND_CLOSEST(AMDGPU_MAX_BL_LEVEL * (brightness - min),
3470 static int amdgpu_dm_backlight_update_status(struct backlight_device *bd)
3472 struct amdgpu_display_manager *dm = bl_get_data(bd);
3473 struct amdgpu_dm_backlight_caps caps;
3474 struct dc_link *link[AMDGPU_DM_MAX_NUM_EDP];
3479 amdgpu_dm_update_backlight_caps(dm);
3480 caps = dm->backlight_caps;
3482 for (i = 0; i < dm->num_of_edps; i++)
3483 link[i] = (struct dc_link *)dm->backlight_link[i];
3485 brightness = convert_brightness_from_user(&caps, bd->props.brightness);
3486 // Change brightness based on AUX property
3487 if (caps.aux_support) {
3488 for (i = 0; i < dm->num_of_edps; i++) {
3489 rc = dc_link_set_backlight_level_nits(link[i], true, brightness,
3490 AUX_BL_DEFAULT_TRANSITION_TIME_MS);
3492 DRM_ERROR("DM: Failed to update backlight via AUX on eDP[%d]\n", i);
3497 for (i = 0; i < dm->num_of_edps; i++) {
3498 rc = dc_link_set_backlight_level(dm->backlight_link[i], brightness, 0);
3500 DRM_ERROR("DM: Failed to update backlight on eDP[%d]\n", i);
3509 static int amdgpu_dm_backlight_get_brightness(struct backlight_device *bd)
3511 struct amdgpu_display_manager *dm = bl_get_data(bd);
3512 struct amdgpu_dm_backlight_caps caps;
3514 amdgpu_dm_update_backlight_caps(dm);
3515 caps = dm->backlight_caps;
3517 if (caps.aux_support) {
3518 struct dc_link *link = (struct dc_link *)dm->backlight_link[0];
3522 rc = dc_link_get_backlight_level_nits(link, &avg, &peak);
3524 return bd->props.brightness;
3525 return convert_brightness_to_user(&caps, avg);
3527 int ret = dc_link_get_backlight_level(dm->backlight_link[0]);
3529 if (ret == DC_ERROR_UNEXPECTED)
3530 return bd->props.brightness;
3531 return convert_brightness_to_user(&caps, ret);
3535 static const struct backlight_ops amdgpu_dm_backlight_ops = {
3536 .options = BL_CORE_SUSPENDRESUME,
3537 .get_brightness = amdgpu_dm_backlight_get_brightness,
3538 .update_status = amdgpu_dm_backlight_update_status,
3542 amdgpu_dm_register_backlight_device(struct amdgpu_display_manager *dm)
3545 struct backlight_properties props = { 0 };
3547 amdgpu_dm_update_backlight_caps(dm);
3549 props.max_brightness = AMDGPU_MAX_BL_LEVEL;
3550 props.brightness = AMDGPU_MAX_BL_LEVEL;
3551 props.type = BACKLIGHT_RAW;
3553 snprintf(bl_name, sizeof(bl_name), "amdgpu_bl%d",
3554 adev_to_drm(dm->adev)->primary->index);
3556 dm->backlight_dev = backlight_device_register(bl_name,
3557 adev_to_drm(dm->adev)->dev,
3559 &amdgpu_dm_backlight_ops,
3562 if (IS_ERR(dm->backlight_dev))
3563 DRM_ERROR("DM: Backlight registration failed!\n");
3565 DRM_DEBUG_DRIVER("DM: Registered Backlight device: %s\n", bl_name);
3570 static int initialize_plane(struct amdgpu_display_manager *dm,
3571 struct amdgpu_mode_info *mode_info, int plane_id,
3572 enum drm_plane_type plane_type,
3573 const struct dc_plane_cap *plane_cap)
3575 struct drm_plane *plane;
3576 unsigned long possible_crtcs;
3579 plane = kzalloc(sizeof(struct drm_plane), GFP_KERNEL);
3581 DRM_ERROR("KMS: Failed to allocate plane\n");
3584 plane->type = plane_type;
3587 * HACK: IGT tests expect that the primary plane for a CRTC
3588 * can only have one possible CRTC. Only expose support for
3589 * any CRTC if they're not going to be used as a primary plane
3590 * for a CRTC - like overlay or underlay planes.
3592 possible_crtcs = 1 << plane_id;
3593 if (plane_id >= dm->dc->caps.max_streams)
3594 possible_crtcs = 0xff;
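/*
 * For example (illustrative): with max_streams == 4, planes 0-3 each get
 * possible_crtcs = 1 << plane_id, i.e. they are bound to exactly one CRTC,
 * while any additional overlay/underlay planes get 0xff and may be placed
 * on any CRTC.
 */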
3596 ret = amdgpu_dm_plane_init(dm, plane, possible_crtcs, plane_cap);
3599 DRM_ERROR("KMS: Failed to initialize plane\n");
3605 mode_info->planes[plane_id] = plane;
3611 static void register_backlight_device(struct amdgpu_display_manager *dm,
3612 struct dc_link *link)
3614 #if defined(CONFIG_BACKLIGHT_CLASS_DEVICE) ||\
3615 defined(CONFIG_BACKLIGHT_CLASS_DEVICE_MODULE)
3617 if ((link->connector_signal & (SIGNAL_TYPE_EDP | SIGNAL_TYPE_LVDS)) &&
3618 link->type != dc_connection_none) {
3620 * Even if registration fails, we should continue with
3621 * DM initialization, because not having backlight control
3622 * is better than a black screen.
3624 if (!dm->backlight_dev)
3625 amdgpu_dm_register_backlight_device(dm);
3627 if (dm->backlight_dev) {
3628 dm->backlight_link[dm->num_of_edps] = link;
3637 * In this architecture, the association
3638 * connector -> encoder -> crtc
3639 * is not really required. The crtc and connector will hold the
3640 * display_index as an abstraction to use with the DAL component.
3642 * Returns 0 on success
3644 static int amdgpu_dm_initialize_drm_device(struct amdgpu_device *adev)
3646 struct amdgpu_display_manager *dm = &adev->dm;
3648 struct amdgpu_dm_connector *aconnector = NULL;
3649 struct amdgpu_encoder *aencoder = NULL;
3650 struct amdgpu_mode_info *mode_info = &adev->mode_info;
3652 int32_t primary_planes;
3653 enum dc_connection_type new_connection_type = dc_connection_none;
3654 const struct dc_plane_cap *plane;
3656 dm->display_indexes_num = dm->dc->caps.max_streams;
3657 /* Update the number of crtcs actually used */
3658 adev->mode_info.num_crtc = adev->dm.display_indexes_num;
3660 link_cnt = dm->dc->caps.max_links;
3661 if (amdgpu_dm_mode_config_init(dm->adev)) {
3662 DRM_ERROR("DM: Failed to initialize mode config\n");
3666 /* There is one primary plane per CRTC */
3667 primary_planes = dm->dc->caps.max_streams;
3668 ASSERT(primary_planes <= AMDGPU_MAX_PLANES);
3671 * Initialize primary planes, implicit planes for legacy IOCTLS.
3672 * Order is reversed to match iteration order in atomic check.
3674 for (i = (primary_planes - 1); i >= 0; i--) {
3675 plane = &dm->dc->caps.planes[i];
3677 if (initialize_plane(dm, mode_info, i,
3678 DRM_PLANE_TYPE_PRIMARY, plane)) {
3679 DRM_ERROR("KMS: Failed to initialize primary plane\n");
3685 * Initialize overlay planes, index starting after primary planes.
3686 * These planes have a higher DRM index than the primary planes since
3687 * they should be considered as having a higher z-order.
3688 * Order is reversed to match iteration order in atomic check.
3690 * Only support DCN for now, and only expose one so we don't encourage
3691 * userspace to use up all the pipes.
3693 for (i = 0; i < dm->dc->caps.max_planes; ++i) {
3694 struct dc_plane_cap *plane = &dm->dc->caps.planes[i];
3696 if (plane->type != DC_PLANE_TYPE_DCN_UNIVERSAL)
3699 if (!plane->blends_with_above || !plane->blends_with_below)
3702 if (!plane->pixel_format_support.argb8888)
3705 if (initialize_plane(dm, NULL, primary_planes + i,
3706 DRM_PLANE_TYPE_OVERLAY, plane)) {
3707 DRM_ERROR("KMS: Failed to initialize overlay plane\n");
3711 /* Only create one overlay plane. */
3715 for (i = 0; i < dm->dc->caps.max_streams; i++)
3716 if (amdgpu_dm_crtc_init(dm, mode_info->planes[i], i)) {
3717 DRM_ERROR("KMS: Failed to initialize crtc\n");
3721 /* Use Outbox interrupt */
3722 switch (adev->asic_type) {
3723 #if defined(CONFIG_DRM_AMD_DC_DCN3_0)
3724 case CHIP_SIENNA_CICHLID:
3725 case CHIP_NAVY_FLOUNDER:
3728 if (register_outbox_irq_handlers(dm->adev)) {
3729 DRM_ERROR("DM: Failed to initialize IRQ\n");
3734 DRM_DEBUG_KMS("Unsupported ASIC type for outbox: 0x%X\n", adev->asic_type);
3737 /* loops over all connectors on the board */
3738 for (i = 0; i < link_cnt; i++) {
3739 struct dc_link *link = NULL;
3741 if (i > AMDGPU_DM_MAX_DISPLAY_INDEX) {
3743 "KMS: Cannot support more than %d display indexes\n",
3744 AMDGPU_DM_MAX_DISPLAY_INDEX);
3748 aconnector = kzalloc(sizeof(*aconnector), GFP_KERNEL);
3752 aencoder = kzalloc(sizeof(*aencoder), GFP_KERNEL);
3756 if (amdgpu_dm_encoder_init(dm->ddev, aencoder, i)) {
3757 DRM_ERROR("KMS: Failed to initialize encoder\n");
3761 if (amdgpu_dm_connector_init(dm, aconnector, i, aencoder)) {
3762 DRM_ERROR("KMS: Failed to initialize connector\n");
3766 link = dc_get_link_at_index(dm->dc, i);
3768 if (!dc_link_detect_sink(link, &new_connection_type))
3769 DRM_ERROR("KMS: Failed to detect connector\n");
3771 if (aconnector->base.force && new_connection_type == dc_connection_none) {
3772 emulated_link_detect(link);
3773 amdgpu_dm_update_connector_after_detect(aconnector);
3775 } else if (dc_link_detect(link, DETECT_REASON_BOOT)) {
3776 amdgpu_dm_update_connector_after_detect(aconnector);
3777 register_backlight_device(dm, link);
3778 if (amdgpu_dc_feature_mask & DC_PSR_MASK)
3779 amdgpu_dm_set_psr_caps(link);
3785 /* Software is initialized. Now we can register interrupt handlers. */
3786 switch (adev->asic_type) {
3787 #if defined(CONFIG_DRM_AMD_DC_SI)
3792 if (dce60_register_irq_handlers(dm->adev)) {
3793 DRM_ERROR("DM: Failed to initialize IRQ\n");
3807 case CHIP_POLARIS11:
3808 case CHIP_POLARIS10:
3809 case CHIP_POLARIS12:
3814 if (dce110_register_irq_handlers(dm->adev)) {
3815 DRM_ERROR("DM: Failed to initialize IRQ\n");
3819 #if defined(CONFIG_DRM_AMD_DC_DCN)
3825 case CHIP_SIENNA_CICHLID:
3826 case CHIP_NAVY_FLOUNDER:
3827 case CHIP_DIMGREY_CAVEFISH:
3829 if (dcn10_register_irq_handlers(dm->adev)) {
3830 DRM_ERROR("DM: Failed to initialize IRQ\n");
3836 DRM_ERROR("Unsupported ASIC type: 0x%X\n", adev->asic_type);
3848 static void amdgpu_dm_destroy_drm_device(struct amdgpu_display_manager *dm)
3850 drm_mode_config_cleanup(dm->ddev);
3851 drm_atomic_private_obj_fini(&dm->atomic_obj);
3855 /******************************************************************************
3856 * amdgpu_display_funcs functions
3857 *****************************************************************************/
3860 * dm_bandwidth_update - program display watermarks
3862 * @adev: amdgpu_device pointer
3864 * Calculate and program the display watermarks and line buffer allocation.
3866 static void dm_bandwidth_update(struct amdgpu_device *adev)
3868 /* TODO: implement later */
3871 static const struct amdgpu_display_funcs dm_display_funcs = {
3872 .bandwidth_update = dm_bandwidth_update, /* called unconditionally */
3873 .vblank_get_counter = dm_vblank_get_counter,/* called unconditionally */
3874 .backlight_set_level = NULL, /* never called for DC */
3875 .backlight_get_level = NULL, /* never called for DC */
3876 .hpd_sense = NULL,/* called unconditionally */
3877 .hpd_set_polarity = NULL, /* called unconditionally */
3878 .hpd_get_gpio_reg = NULL, /* VBIOS parsing. DAL does it. */
3879 .page_flip_get_scanoutpos =
3880 dm_crtc_get_scanoutpos,/* called unconditionally */
3881 .add_encoder = NULL, /* VBIOS parsing. DAL does it. */
3882 .add_connector = NULL, /* VBIOS parsing. DAL does it. */
3885 #if defined(CONFIG_DEBUG_KERNEL_DC)
3887 static ssize_t s3_debug_store(struct device *device,
3888 struct device_attribute *attr,
3894 struct drm_device *drm_dev = dev_get_drvdata(device);
3895 struct amdgpu_device *adev = drm_to_adev(drm_dev);
3897 ret = kstrtoint(buf, 0, &s3_state);
3902 drm_kms_helper_hotplug_event(adev_to_drm(adev));
3907 return ret == 0 ? count : 0;
3910 DEVICE_ATTR_WO(s3_debug);
3914 static int dm_early_init(void *handle)
3916 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
3918 switch (adev->asic_type) {
3919 #if defined(CONFIG_DRM_AMD_DC_SI)
3923 adev->mode_info.num_crtc = 6;
3924 adev->mode_info.num_hpd = 6;
3925 adev->mode_info.num_dig = 6;
3928 adev->mode_info.num_crtc = 2;
3929 adev->mode_info.num_hpd = 2;
3930 adev->mode_info.num_dig = 2;
3935 adev->mode_info.num_crtc = 6;
3936 adev->mode_info.num_hpd = 6;
3937 adev->mode_info.num_dig = 6;
3940 adev->mode_info.num_crtc = 4;
3941 adev->mode_info.num_hpd = 6;
3942 adev->mode_info.num_dig = 7;
3946 adev->mode_info.num_crtc = 2;
3947 adev->mode_info.num_hpd = 6;
3948 adev->mode_info.num_dig = 6;
3952 adev->mode_info.num_crtc = 6;
3953 adev->mode_info.num_hpd = 6;
3954 adev->mode_info.num_dig = 7;
3957 adev->mode_info.num_crtc = 3;
3958 adev->mode_info.num_hpd = 6;
3959 adev->mode_info.num_dig = 9;
3962 adev->mode_info.num_crtc = 2;
3963 adev->mode_info.num_hpd = 6;
3964 adev->mode_info.num_dig = 9;
3966 case CHIP_POLARIS11:
3967 case CHIP_POLARIS12:
3968 adev->mode_info.num_crtc = 5;
3969 adev->mode_info.num_hpd = 5;
3970 adev->mode_info.num_dig = 5;
3972 case CHIP_POLARIS10:
3974 adev->mode_info.num_crtc = 6;
3975 adev->mode_info.num_hpd = 6;
3976 adev->mode_info.num_dig = 6;
3981 adev->mode_info.num_crtc = 6;
3982 adev->mode_info.num_hpd = 6;
3983 adev->mode_info.num_dig = 6;
3985 #if defined(CONFIG_DRM_AMD_DC_DCN)
3989 adev->mode_info.num_crtc = 4;
3990 adev->mode_info.num_hpd = 4;
3991 adev->mode_info.num_dig = 4;
3995 case CHIP_SIENNA_CICHLID:
3996 case CHIP_NAVY_FLOUNDER:
3997 adev->mode_info.num_crtc = 6;
3998 adev->mode_info.num_hpd = 6;
3999 adev->mode_info.num_dig = 6;
4002 case CHIP_DIMGREY_CAVEFISH:
4003 adev->mode_info.num_crtc = 5;
4004 adev->mode_info.num_hpd = 5;
4005 adev->mode_info.num_dig = 5;
4009 DRM_ERROR("Unsupported ASIC type: 0x%X\n", adev->asic_type);
4013 amdgpu_dm_set_irq_funcs(adev);
4015 if (adev->mode_info.funcs == NULL)
4016 adev->mode_info.funcs = &dm_display_funcs;
4019 * Note: Do NOT change adev->audio_endpt_rreg and
4020 * adev->audio_endpt_wreg because they are initialised in
4021 * amdgpu_device_init()
4023 #if defined(CONFIG_DEBUG_KERNEL_DC)
4025 adev_to_drm(adev)->dev,
4026 &dev_attr_s3_debug);
4032 static bool modeset_required(struct drm_crtc_state *crtc_state,
4033 struct dc_stream_state *new_stream,
4034 struct dc_stream_state *old_stream)
4036 return crtc_state->active && drm_atomic_crtc_needs_modeset(crtc_state);
4039 static bool modereset_required(struct drm_crtc_state *crtc_state)
4041 return !crtc_state->active && drm_atomic_crtc_needs_modeset(crtc_state);
4044 static void amdgpu_dm_encoder_destroy(struct drm_encoder *encoder)
4046 drm_encoder_cleanup(encoder);
4050 static const struct drm_encoder_funcs amdgpu_dm_encoder_funcs = {
4051 .destroy = amdgpu_dm_encoder_destroy,
4055 static void get_min_max_dc_plane_scaling(struct drm_device *dev,
4056 struct drm_framebuffer *fb,
4057 int *min_downscale, int *max_upscale)
4059 struct amdgpu_device *adev = drm_to_adev(dev);
4060 struct dc *dc = adev->dm.dc;
4061 /* Caps for all supported planes are the same on DCE and DCN 1 - 3 */
4062 struct dc_plane_cap *plane_cap = &dc->caps.planes[0];
4064 switch (fb->format->format) {
4065 case DRM_FORMAT_P010:
4066 case DRM_FORMAT_NV12:
4067 case DRM_FORMAT_NV21:
4068 *max_upscale = plane_cap->max_upscale_factor.nv12;
4069 *min_downscale = plane_cap->max_downscale_factor.nv12;
4072 case DRM_FORMAT_XRGB16161616F:
4073 case DRM_FORMAT_ARGB16161616F:
4074 case DRM_FORMAT_XBGR16161616F:
4075 case DRM_FORMAT_ABGR16161616F:
4076 *max_upscale = plane_cap->max_upscale_factor.fp16;
4077 *min_downscale = plane_cap->max_downscale_factor.fp16;
4081 *max_upscale = plane_cap->max_upscale_factor.argb8888;
4082 *min_downscale = plane_cap->max_downscale_factor.argb8888;
4087 * A factor of 1 in the plane_cap means scaling is not allowed, i.e. use a
4088 * scaling factor of 1.0 == 1000 units.
4090 if (*max_upscale == 1)
4091 *max_upscale = 1000;
4093 if (*min_downscale == 1)
4094 *min_downscale = 1000;
4098 static int fill_dc_scaling_info(const struct drm_plane_state *state,
4099 struct dc_scaling_info *scaling_info)
4101 int scale_w, scale_h, min_downscale, max_upscale;
4103 memset(scaling_info, 0, sizeof(*scaling_info));
4105 /* Source is fixed 16.16 but we ignore mantissa for now... */
4106 scaling_info->src_rect.x = state->src_x >> 16;
4107 scaling_info->src_rect.y = state->src_y >> 16;
4110 * For reasons we don't (yet) fully understand a non-zero
4111 * src_y coordinate into an NV12 buffer can cause a
4112 * system hang. To avoid hangs (and maybe be overly cautious)
4113 * let's reject both non-zero src_x and src_y.
4115 * We currently know of only one use-case to reproduce a
4116 * scenario with non-zero src_x and src_y for NV12, which
4117 * is to gesture the YouTube Android app into full screen
4121 state->fb->format->format == DRM_FORMAT_NV12 &&
4122 (scaling_info->src_rect.x != 0 ||
4123 scaling_info->src_rect.y != 0))
4126 scaling_info->src_rect.width = state->src_w >> 16;
4127 if (scaling_info->src_rect.width == 0)
4130 scaling_info->src_rect.height = state->src_h >> 16;
4131 if (scaling_info->src_rect.height == 0)
4134 scaling_info->dst_rect.x = state->crtc_x;
4135 scaling_info->dst_rect.y = state->crtc_y;
4137 if (state->crtc_w == 0)
4140 scaling_info->dst_rect.width = state->crtc_w;
4142 if (state->crtc_h == 0)
4145 scaling_info->dst_rect.height = state->crtc_h;
4147 /* DRM doesn't specify clipping on destination output. */
4148 scaling_info->clip_rect = scaling_info->dst_rect;
4150 /* Validate scaling per-format with DC plane caps */
4151 if (state->plane && state->plane->dev && state->fb) {
4152 get_min_max_dc_plane_scaling(state->plane->dev, state->fb,
4153 &min_downscale, &max_upscale);
4155 min_downscale = 250;
4156 max_upscale = 16000;
4159 scale_w = scaling_info->dst_rect.width * 1000 /
4160 scaling_info->src_rect.width;
4162 if (scale_w < min_downscale || scale_w > max_upscale)
4165 scale_h = scaling_info->dst_rect.height * 1000 /
4166 scaling_info->src_rect.height;
4168 if (scale_h < min_downscale || scale_h > max_upscale)
4172 * The "scaling_quality" can be ignored for now, quality = 0 has DC
4173 * assume reasonable defaults based on the format.
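/*
 * Worked example for the checks above (illustrative numbers): scanning a
 * 3840-pixel-wide source rect out to a 1920-pixel-wide destination gives
 * scale_w = 1920 * 1000 / 3840 = 500, a 2x downscale, which passes the
 * fallback limits of min_downscale = 250 (4x) and max_upscale = 16000 (16x).
 */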
4180 fill_gfx8_tiling_info_from_flags(union dc_tiling_info *tiling_info,
4181 uint64_t tiling_flags)
4183 /* Fill GFX8 params */
4184 if (AMDGPU_TILING_GET(tiling_flags, ARRAY_MODE) == DC_ARRAY_2D_TILED_THIN1) {
4185 unsigned int bankw, bankh, mtaspect, tile_split, num_banks;
4187 bankw = AMDGPU_TILING_GET(tiling_flags, BANK_WIDTH);
4188 bankh = AMDGPU_TILING_GET(tiling_flags, BANK_HEIGHT);
4189 mtaspect = AMDGPU_TILING_GET(tiling_flags, MACRO_TILE_ASPECT);
4190 tile_split = AMDGPU_TILING_GET(tiling_flags, TILE_SPLIT);
4191 num_banks = AMDGPU_TILING_GET(tiling_flags, NUM_BANKS);
4193 /* XXX fix me for VI */
4194 tiling_info->gfx8.num_banks = num_banks;
4195 tiling_info->gfx8.array_mode =
4196 DC_ARRAY_2D_TILED_THIN1;
4197 tiling_info->gfx8.tile_split = tile_split;
4198 tiling_info->gfx8.bank_width = bankw;
4199 tiling_info->gfx8.bank_height = bankh;
4200 tiling_info->gfx8.tile_aspect = mtaspect;
4201 tiling_info->gfx8.tile_mode =
4202 DC_ADDR_SURF_MICRO_TILING_DISPLAY;
4203 } else if (AMDGPU_TILING_GET(tiling_flags, ARRAY_MODE)
4204 == DC_ARRAY_1D_TILED_THIN1) {
4205 tiling_info->gfx8.array_mode = DC_ARRAY_1D_TILED_THIN1;
4208 tiling_info->gfx8.pipe_config =
4209 AMDGPU_TILING_GET(tiling_flags, PIPE_CONFIG);
4213 fill_gfx9_tiling_info_from_device(const struct amdgpu_device *adev,
4214 union dc_tiling_info *tiling_info)
4216 tiling_info->gfx9.num_pipes =
4217 adev->gfx.config.gb_addr_config_fields.num_pipes;
4218 tiling_info->gfx9.num_banks =
4219 adev->gfx.config.gb_addr_config_fields.num_banks;
4220 tiling_info->gfx9.pipe_interleave =
4221 adev->gfx.config.gb_addr_config_fields.pipe_interleave_size;
4222 tiling_info->gfx9.num_shader_engines =
4223 adev->gfx.config.gb_addr_config_fields.num_se;
4224 tiling_info->gfx9.max_compressed_frags =
4225 adev->gfx.config.gb_addr_config_fields.max_compress_frags;
4226 tiling_info->gfx9.num_rb_per_se =
4227 adev->gfx.config.gb_addr_config_fields.num_rb_per_se;
4228 tiling_info->gfx9.shaderEnable = 1;
4229 if (adev->asic_type == CHIP_SIENNA_CICHLID ||
4230 adev->asic_type == CHIP_NAVY_FLOUNDER ||
4231 adev->asic_type == CHIP_DIMGREY_CAVEFISH ||
4232 adev->asic_type == CHIP_VANGOGH)
4233 tiling_info->gfx9.num_pkrs = adev->gfx.config.gb_addr_config_fields.num_pkrs;
4237 validate_dcc(struct amdgpu_device *adev,
4238 const enum surface_pixel_format format,
4239 const enum dc_rotation_angle rotation,
4240 const union dc_tiling_info *tiling_info,
4241 const struct dc_plane_dcc_param *dcc,
4242 const struct dc_plane_address *address,
4243 const struct plane_size *plane_size)
4245 struct dc *dc = adev->dm.dc;
4246 struct dc_dcc_surface_param input;
4247 struct dc_surface_dcc_cap output;
4249 memset(&input, 0, sizeof(input));
4250 memset(&output, 0, sizeof(output));
4255 if (format >= SURFACE_PIXEL_FORMAT_VIDEO_BEGIN ||
4256 !dc->cap_funcs.get_dcc_compression_cap)
4259 input.format = format;
4260 input.surface_size.width = plane_size->surface_size.width;
4261 input.surface_size.height = plane_size->surface_size.height;
4262 input.swizzle_mode = tiling_info->gfx9.swizzle;
4264 if (rotation == ROTATION_ANGLE_0 || rotation == ROTATION_ANGLE_180)
4265 input.scan = SCAN_DIRECTION_HORIZONTAL;
4266 else if (rotation == ROTATION_ANGLE_90 || rotation == ROTATION_ANGLE_270)
4267 input.scan = SCAN_DIRECTION_VERTICAL;
4269 if (!dc->cap_funcs.get_dcc_compression_cap(dc, &input, &output))
4272 if (!output.capable)
4275 if (dcc->independent_64b_blks == 0 &&
4276 output.grph.rgb.independent_64b_blks != 0)
4283 modifier_has_dcc(uint64_t modifier)
4285 return IS_AMD_FMT_MOD(modifier) && AMD_FMT_MOD_GET(DCC, modifier);
4289 modifier_gfx9_swizzle_mode(uint64_t modifier)
4291 if (modifier == DRM_FORMAT_MOD_LINEAR)
4294 return AMD_FMT_MOD_GET(TILE, modifier);
4297 static const struct drm_format_info *
4298 amd_get_format_info(const struct drm_mode_fb_cmd2 *cmd)
4300 return amdgpu_lookup_format_info(cmd->pixel_format, cmd->modifier[0]);
4304 fill_gfx9_tiling_info_from_modifier(const struct amdgpu_device *adev,
4305 union dc_tiling_info *tiling_info,
4308 unsigned int mod_bank_xor_bits = AMD_FMT_MOD_GET(BANK_XOR_BITS, modifier);
4309 unsigned int mod_pipe_xor_bits = AMD_FMT_MOD_GET(PIPE_XOR_BITS, modifier);
4310 unsigned int pkrs_log2 = AMD_FMT_MOD_GET(PACKERS, modifier);
4311 unsigned int pipes_log2 = min(4u, mod_pipe_xor_bits);
4313 fill_gfx9_tiling_info_from_device(adev, tiling_info);
4315 if (!IS_AMD_FMT_MOD(modifier))
4318 tiling_info->gfx9.num_pipes = 1u << pipes_log2;
4319 tiling_info->gfx9.num_shader_engines = 1u << (mod_pipe_xor_bits - pipes_log2);
4321 if (adev->family >= AMDGPU_FAMILY_NV) {
4322 tiling_info->gfx9.num_pkrs = 1u << pkrs_log2;
4324 tiling_info->gfx9.num_banks = 1u << mod_bank_xor_bits;
4326 /* for DCC we know it isn't rb aligned, so rb_per_se doesn't matter. */
4330 enum dm_micro_swizzle {
4331 MICRO_SWIZZLE_Z = 0,
4332 MICRO_SWIZZLE_S = 1,
4333 MICRO_SWIZZLE_D = 2,
4337 static bool dm_plane_format_mod_supported(struct drm_plane *plane,
4341 struct amdgpu_device *adev = drm_to_adev(plane->dev);
4342 const struct drm_format_info *info = drm_format_info(format);
4345 enum dm_micro_swizzle microtile = modifier_gfx9_swizzle_mode(modifier) & 3;
4351 * We always have to allow these modifiers:
4352 * 1. Core DRM checks for LINEAR support if userspace does not provide modifiers.
4353 * 2. Not passing any modifiers is the same as explicitly passing INVALID.
4355 if (modifier == DRM_FORMAT_MOD_LINEAR ||
4356 modifier == DRM_FORMAT_MOD_INVALID) {
4360 /* Check that the modifier is on the list of the plane's supported modifiers. */
4361 for (i = 0; i < plane->modifier_count; i++) {
4362 if (modifier == plane->modifiers[i])
4365 if (i == plane->modifier_count)
4369 * For D swizzle the canonical modifier depends on the bpp, so check it against the format's bpp below.
4372 if (AMD_FMT_MOD_GET(TILE_VERSION, modifier) == AMD_FMT_MOD_TILE_VER_GFX9 &&
4373 adev->family >= AMDGPU_FAMILY_NV) {
4374 if (microtile == MICRO_SWIZZLE_D && info->cpp[0] == 4)
4378 if (adev->family >= AMDGPU_FAMILY_RV && microtile == MICRO_SWIZZLE_D &&
4382 if (modifier_has_dcc(modifier)) {
4383 /* Per radeonsi comments 16/64 bpp are more complicated. */
4384 if (info->cpp[0] != 4)
4386 /* We support multi-planar formats, but not when combined with
4387 * additional DCC metadata planes. */
4388 if (info->num_planes > 1)
4396 add_modifier(uint64_t **mods, uint64_t *size, uint64_t *cap, uint64_t mod)
4401 if (*cap - *size < 1) {
4402 uint64_t new_cap = *cap * 2;
4403 uint64_t *new_mods = kmalloc(new_cap * sizeof(uint64_t), GFP_KERNEL);
4411 memcpy(new_mods, *mods, sizeof(uint64_t) * *size);
4417 (*mods)[*size] = mod;
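/*
 * Note: add_modifier() grows the backing array geometrically (the capacity
 * doubles whenever it runs out of room, see above), so building a list of N
 * modifiers takes amortized O(N) copies rather than O(N^2).
 */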
4422 add_gfx9_modifiers(const struct amdgpu_device *adev,
4423 uint64_t **mods, uint64_t *size, uint64_t *capacity)
4425 int pipes = ilog2(adev->gfx.config.gb_addr_config_fields.num_pipes);
4426 int pipe_xor_bits = min(8, pipes +
4427 ilog2(adev->gfx.config.gb_addr_config_fields.num_se));
4428 int bank_xor_bits = min(8 - pipe_xor_bits,
4429 ilog2(adev->gfx.config.gb_addr_config_fields.num_banks));
4430 int rb = ilog2(adev->gfx.config.gb_addr_config_fields.num_se) +
4431 ilog2(adev->gfx.config.gb_addr_config_fields.num_rb_per_se);
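/*
 * Illustrative example (hypothetical GFX9 configuration, not taken from real
 * hardware): with 4 pipes, 1 shader engine, 8 banks and 2 RBs per SE this
 * yields pipes = 2, pipe_xor_bits = min(8, 2 + 0) = 2,
 * bank_xor_bits = min(8 - 2, 3) = 3 and rb = 0 + 1 = 1.
 */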
4434 if (adev->family == AMDGPU_FAMILY_RV) {
4435 /* Raven2 and later */
4436 bool has_constant_encode = adev->asic_type > CHIP_RAVEN || adev->external_rev_id >= 0x81;
4439 * No _D DCC swizzles yet because we only allow 32bpp, which
4440 * doesn't support _D on DCN
4443 if (has_constant_encode) {
4444 add_modifier(mods, size, capacity, AMD_FMT_MOD |
4445 AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S_X) |
4446 AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9) |
4447 AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
4448 AMD_FMT_MOD_SET(BANK_XOR_BITS, bank_xor_bits) |
4449 AMD_FMT_MOD_SET(DCC, 1) |
4450 AMD_FMT_MOD_SET(DCC_INDEPENDENT_64B, 1) |
4451 AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_64B) |
4452 AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 1));
4455 add_modifier(mods, size, capacity, AMD_FMT_MOD |
4456 AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S_X) |
4457 AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9) |
4458 AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
4459 AMD_FMT_MOD_SET(BANK_XOR_BITS, bank_xor_bits) |
4460 AMD_FMT_MOD_SET(DCC, 1) |
4461 AMD_FMT_MOD_SET(DCC_INDEPENDENT_64B, 1) |
4462 AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_64B) |
4463 AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 0));
4465 if (has_constant_encode) {
4466 add_modifier(mods, size, capacity, AMD_FMT_MOD |
4467 AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S_X) |
4468 AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9) |
4469 AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
4470 AMD_FMT_MOD_SET(BANK_XOR_BITS, bank_xor_bits) |
4471 AMD_FMT_MOD_SET(DCC, 1) |
4472 AMD_FMT_MOD_SET(DCC_RETILE, 1) |
4473 AMD_FMT_MOD_SET(DCC_INDEPENDENT_64B, 1) |
4474 AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_64B) |
4476 AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 1) |
4477 AMD_FMT_MOD_SET(RB, rb) |
4478 AMD_FMT_MOD_SET(PIPE, pipes));
4481 add_modifier(mods, size, capacity, AMD_FMT_MOD |
4482 AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S_X) |
4483 AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9) |
4484 AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
4485 AMD_FMT_MOD_SET(BANK_XOR_BITS, bank_xor_bits) |
4486 AMD_FMT_MOD_SET(DCC, 1) |
4487 AMD_FMT_MOD_SET(DCC_RETILE, 1) |
4488 AMD_FMT_MOD_SET(DCC_INDEPENDENT_64B, 1) |
4489 AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_64B) |
4490 AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 0) |
4491 AMD_FMT_MOD_SET(RB, rb) |
4492 AMD_FMT_MOD_SET(PIPE, pipes));
4496 * Only supported for 64bpp on Raven, will be filtered on format in
4497 * dm_plane_format_mod_supported.
4499 add_modifier(mods, size, capacity, AMD_FMT_MOD |
4500 AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_D_X) |
4501 AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9) |
4502 AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
4503 AMD_FMT_MOD_SET(BANK_XOR_BITS, bank_xor_bits));
4505 if (adev->family == AMDGPU_FAMILY_RV) {
4506 add_modifier(mods, size, capacity, AMD_FMT_MOD |
4507 AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S_X) |
4508 AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9) |
4509 AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
4510 AMD_FMT_MOD_SET(BANK_XOR_BITS, bank_xor_bits));
4514 * Only supported for 64bpp on Raven, will be filtered on format in
4515 * dm_plane_format_mod_supported.
4517 add_modifier(mods, size, capacity, AMD_FMT_MOD |
4518 AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_D) |
4519 AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9));
4521 if (adev->family == AMDGPU_FAMILY_RV) {
4522 add_modifier(mods, size, capacity, AMD_FMT_MOD |
4523 AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S) |
4524 AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9));
4529 add_gfx10_1_modifiers(const struct amdgpu_device *adev,
4530 uint64_t **mods, uint64_t *size, uint64_t *capacity)
4532 int pipe_xor_bits = ilog2(adev->gfx.config.gb_addr_config_fields.num_pipes);
4534 add_modifier(mods, size, capacity, AMD_FMT_MOD |
4535 AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_R_X) |
4536 AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10) |
4537 AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
4538 AMD_FMT_MOD_SET(DCC, 1) |
4539 AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 1) |
4540 AMD_FMT_MOD_SET(DCC_INDEPENDENT_64B, 1) |
4541 AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_64B));
4543 add_modifier(mods, size, capacity, AMD_FMT_MOD |
4544 AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_R_X) |
4545 AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10) |
4546 AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
4547 AMD_FMT_MOD_SET(DCC, 1) |
4548 AMD_FMT_MOD_SET(DCC_RETILE, 1) |
4549 AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 1) |
4550 AMD_FMT_MOD_SET(DCC_INDEPENDENT_64B, 1) |
4551 AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_64B));
4553 add_modifier(mods, size, capacity, AMD_FMT_MOD |
4554 AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_R_X) |
4555 AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10) |
4556 AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits));
4558 add_modifier(mods, size, capacity, AMD_FMT_MOD |
4559 AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S_X) |
4560 AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10) |
4561 AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits));
4564 /* Only supported for 64bpp, will be filtered in dm_plane_format_mod_supported */
4565 add_modifier(mods, size, capacity, AMD_FMT_MOD |
4566 AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_D) |
4567 AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9));
4569 add_modifier(mods, size, capacity, AMD_FMT_MOD |
4570 AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S) |
4571 AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9));
4575 add_gfx10_3_modifiers(const struct amdgpu_device *adev,
4576 uint64_t **mods, uint64_t *size, uint64_t *capacity)
4578 int pipe_xor_bits = ilog2(adev->gfx.config.gb_addr_config_fields.num_pipes);
4579 int pkrs = ilog2(adev->gfx.config.gb_addr_config_fields.num_pkrs);
4581 add_modifier(mods, size, capacity, AMD_FMT_MOD |
4582 AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_R_X) |
4583 AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10_RBPLUS) |
4584 AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
4585 AMD_FMT_MOD_SET(PACKERS, pkrs) |
4586 AMD_FMT_MOD_SET(DCC, 1) |
4587 AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 1) |
4588 AMD_FMT_MOD_SET(DCC_INDEPENDENT_64B, 1) |
4589 AMD_FMT_MOD_SET(DCC_INDEPENDENT_128B, 1) |
4590 AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_64B));
4592 add_modifier(mods, size, capacity, AMD_FMT_MOD |
4593 AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_R_X) |
4594 AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10_RBPLUS) |
4595 AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
4596 AMD_FMT_MOD_SET(PACKERS, pkrs) |
4597 AMD_FMT_MOD_SET(DCC, 1) |
4598 AMD_FMT_MOD_SET(DCC_RETILE, 1) |
4599 AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 1) |
4600 AMD_FMT_MOD_SET(DCC_INDEPENDENT_64B, 1) |
4601 AMD_FMT_MOD_SET(DCC_INDEPENDENT_128B, 1) |
4602 AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_64B));
4604 add_modifier(mods, size, capacity, AMD_FMT_MOD |
4605 AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_R_X) |
4606 AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10_RBPLUS) |
4607 AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
4608 AMD_FMT_MOD_SET(PACKERS, pkrs));
4610 add_modifier(mods, size, capacity, AMD_FMT_MOD |
4611 AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S_X) |
4612 AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10_RBPLUS) |
4613 AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
4614 AMD_FMT_MOD_SET(PACKERS, pkrs));
4616 /* Only supported for 64bpp, will be filtered in dm_plane_format_mod_supported */
4617 add_modifier(mods, size, capacity, AMD_FMT_MOD |
4618 AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_D) |
4619 AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9));
4621 add_modifier(mods, size, capacity, AMD_FMT_MOD |
4622 AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S) |
4623 AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9));
4627 get_plane_modifiers(const struct amdgpu_device *adev, unsigned int plane_type, uint64_t **mods)
4629 uint64_t size = 0, capacity = 128;
4632 /* We have not hooked up any pre-GFX9 modifiers. */
4633 if (adev->family < AMDGPU_FAMILY_AI)
4636 *mods = kmalloc(capacity * sizeof(uint64_t), GFP_KERNEL);
4638 if (plane_type == DRM_PLANE_TYPE_CURSOR) {
4639 add_modifier(mods, &size, &capacity, DRM_FORMAT_MOD_LINEAR);
4640 add_modifier(mods, &size, &capacity, DRM_FORMAT_MOD_INVALID);
4641 return *mods ? 0 : -ENOMEM;
4644 switch (adev->family) {
4645 case AMDGPU_FAMILY_AI:
4646 case AMDGPU_FAMILY_RV:
4647 add_gfx9_modifiers(adev, mods, &size, &capacity);
4649 case AMDGPU_FAMILY_NV:
4650 case AMDGPU_FAMILY_VGH:
4651 if (adev->asic_type >= CHIP_SIENNA_CICHLID)
4652 add_gfx10_3_modifiers(adev, mods, &size, &capacity);
4654 add_gfx10_1_modifiers(adev, mods, &size, &capacity);
4658 add_modifier(mods, &size, &capacity, DRM_FORMAT_MOD_LINEAR);
4660 /* INVALID marks the end of the list. */
4661 add_modifier(mods, &size, &capacity, DRM_FORMAT_MOD_INVALID);
4670 fill_gfx9_plane_attributes_from_modifiers(struct amdgpu_device *adev,
4671 const struct amdgpu_framebuffer *afb,
4672 const enum surface_pixel_format format,
4673 const enum dc_rotation_angle rotation,
4674 const struct plane_size *plane_size,
4675 union dc_tiling_info *tiling_info,
4676 struct dc_plane_dcc_param *dcc,
4677 struct dc_plane_address *address,
4678 const bool force_disable_dcc)
4680 const uint64_t modifier = afb->base.modifier;
4683 fill_gfx9_tiling_info_from_modifier(adev, tiling_info, modifier);
4684 tiling_info->gfx9.swizzle = modifier_gfx9_swizzle_mode(modifier);
4686 if (modifier_has_dcc(modifier) && !force_disable_dcc) {
4687 uint64_t dcc_address = afb->address + afb->base.offsets[1];
4690 dcc->meta_pitch = afb->base.pitches[1];
4691 dcc->independent_64b_blks = AMD_FMT_MOD_GET(DCC_INDEPENDENT_64B, modifier);
4693 address->grph.meta_addr.low_part = lower_32_bits(dcc_address);
4694 address->grph.meta_addr.high_part = upper_32_bits(dcc_address);
4697 ret = validate_dcc(adev, format, rotation, tiling_info, dcc, address, plane_size);
4705 fill_plane_buffer_attributes(struct amdgpu_device *adev,
4706 const struct amdgpu_framebuffer *afb,
4707 const enum surface_pixel_format format,
4708 const enum dc_rotation_angle rotation,
4709 const uint64_t tiling_flags,
4710 union dc_tiling_info *tiling_info,
4711 struct plane_size *plane_size,
4712 struct dc_plane_dcc_param *dcc,
4713 struct dc_plane_address *address,
4715 bool force_disable_dcc)
4717 const struct drm_framebuffer *fb = &afb->base;
4720 memset(tiling_info, 0, sizeof(*tiling_info));
4721 memset(plane_size, 0, sizeof(*plane_size));
4722 memset(dcc, 0, sizeof(*dcc));
4723 memset(address, 0, sizeof(*address));
4725 address->tmz_surface = tmz_surface;
4727 if (format < SURFACE_PIXEL_FORMAT_VIDEO_BEGIN) {
4728 uint64_t addr = afb->address + fb->offsets[0];
4730 plane_size->surface_size.x = 0;
4731 plane_size->surface_size.y = 0;
4732 plane_size->surface_size.width = fb->width;
4733 plane_size->surface_size.height = fb->height;
4734 plane_size->surface_pitch =
4735 fb->pitches[0] / fb->format->cpp[0];
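/*
 * Note: DRM framebuffer pitches are in bytes, while the pitch handed to DC
 * here is in pixels, hence the division by bytes-per-pixel (cpp) above and
 * in the video path below.
 */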
4737 address->type = PLN_ADDR_TYPE_GRAPHICS;
4738 address->grph.addr.low_part = lower_32_bits(addr);
4739 address->grph.addr.high_part = upper_32_bits(addr);
4740 } else if (format < SURFACE_PIXEL_FORMAT_INVALID) {
4741 uint64_t luma_addr = afb->address + fb->offsets[0];
4742 uint64_t chroma_addr = afb->address + fb->offsets[1];
4744 plane_size->surface_size.x = 0;
4745 plane_size->surface_size.y = 0;
4746 plane_size->surface_size.width = fb->width;
4747 plane_size->surface_size.height = fb->height;
4748 plane_size->surface_pitch =
4749 fb->pitches[0] / fb->format->cpp[0];
4751 plane_size->chroma_size.x = 0;
4752 plane_size->chroma_size.y = 0;
4753 /* TODO: set these based on surface format */
4754 plane_size->chroma_size.width = fb->width / 2;
4755 plane_size->chroma_size.height = fb->height / 2;
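/*
 * Note: the /2 above matches 4:2:0 chroma subsampling (NV12/NV21/P010),
 * where the chroma plane is half the luma plane in both dimensions.
 */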
4757 plane_size->chroma_pitch =
4758 fb->pitches[1] / fb->format->cpp[1];
4760 address->type = PLN_ADDR_TYPE_VIDEO_PROGRESSIVE;
4761 address->video_progressive.luma_addr.low_part =
4762 lower_32_bits(luma_addr);
4763 address->video_progressive.luma_addr.high_part =
4764 upper_32_bits(luma_addr);
4765 address->video_progressive.chroma_addr.low_part =
4766 lower_32_bits(chroma_addr);
4767 address->video_progressive.chroma_addr.high_part =
4768 upper_32_bits(chroma_addr);
4771 if (adev->family >= AMDGPU_FAMILY_AI) {
4772 ret = fill_gfx9_plane_attributes_from_modifiers(adev, afb, format,
4773 rotation, plane_size,
4780 fill_gfx8_tiling_info_from_flags(tiling_info, tiling_flags);
4787 fill_blending_from_plane_state(const struct drm_plane_state *plane_state,
4788 bool *per_pixel_alpha, bool *global_alpha,
4789 int *global_alpha_value)
4791 *per_pixel_alpha = false;
4792 *global_alpha = false;
4793 *global_alpha_value = 0xff;
4795 if (plane_state->plane->type != DRM_PLANE_TYPE_OVERLAY)
4798 if (plane_state->pixel_blend_mode == DRM_MODE_BLEND_PREMULTI) {
4799 static const uint32_t alpha_formats[] = {
4800 DRM_FORMAT_ARGB8888,
4801 DRM_FORMAT_RGBA8888,
4802 DRM_FORMAT_ABGR8888,
4804 uint32_t format = plane_state->fb->format->format;
4807 for (i = 0; i < ARRAY_SIZE(alpha_formats); ++i) {
4808 if (format == alpha_formats[i]) {
4809 *per_pixel_alpha = true;
4815 if (plane_state->alpha < 0xffff) {
4816 *global_alpha = true;
4817 *global_alpha_value = plane_state->alpha >> 8;
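/*
 * Note: the DRM plane "alpha" property is 16 bit (0x0000..0xffff); shifting
 * right by 8 maps it onto the 8 bit 0..255 range used here, e.g. 0x8080
 * becomes 0x80.
 */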
4822 fill_plane_color_attributes(const struct drm_plane_state *plane_state,
4823 const enum surface_pixel_format format,
4824 enum dc_color_space *color_space)
4828 *color_space = COLOR_SPACE_SRGB;
4830 /* DRM color properties only affect non-RGB formats. */
4831 if (format < SURFACE_PIXEL_FORMAT_VIDEO_BEGIN)
4834 full_range = (plane_state->color_range == DRM_COLOR_YCBCR_FULL_RANGE);
4836 switch (plane_state->color_encoding) {
4837 case DRM_COLOR_YCBCR_BT601:
4839 *color_space = COLOR_SPACE_YCBCR601;
4841 *color_space = COLOR_SPACE_YCBCR601_LIMITED;
4844 case DRM_COLOR_YCBCR_BT709:
4846 *color_space = COLOR_SPACE_YCBCR709;
4848 *color_space = COLOR_SPACE_YCBCR709_LIMITED;
4851 case DRM_COLOR_YCBCR_BT2020:
4853 *color_space = COLOR_SPACE_2020_YCBCR;
4866 fill_dc_plane_info_and_addr(struct amdgpu_device *adev,
4867 const struct drm_plane_state *plane_state,
4868 const uint64_t tiling_flags,
4869 struct dc_plane_info *plane_info,
4870 struct dc_plane_address *address,
4872 bool force_disable_dcc)
4874 const struct drm_framebuffer *fb = plane_state->fb;
4875 const struct amdgpu_framebuffer *afb =
4876 to_amdgpu_framebuffer(plane_state->fb);
4879 memset(plane_info, 0, sizeof(*plane_info));
4881 switch (fb->format->format) {
4883 plane_info->format =
4884 SURFACE_PIXEL_FORMAT_GRPH_PALETA_256_COLORS;
4886 case DRM_FORMAT_RGB565:
4887 plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_RGB565;
4889 case DRM_FORMAT_XRGB8888:
4890 case DRM_FORMAT_ARGB8888:
4891 plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ARGB8888;
4893 case DRM_FORMAT_XRGB2101010:
4894 case DRM_FORMAT_ARGB2101010:
4895 plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ARGB2101010;
4897 case DRM_FORMAT_XBGR2101010:
4898 case DRM_FORMAT_ABGR2101010:
4899 plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ABGR2101010;
4901 case DRM_FORMAT_XBGR8888:
4902 case DRM_FORMAT_ABGR8888:
4903 plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ABGR8888;
4905 case DRM_FORMAT_NV21:
4906 plane_info->format = SURFACE_PIXEL_FORMAT_VIDEO_420_YCbCr;
4908 case DRM_FORMAT_NV12:
4909 plane_info->format = SURFACE_PIXEL_FORMAT_VIDEO_420_YCrCb;
4911 case DRM_FORMAT_P010:
4912 plane_info->format = SURFACE_PIXEL_FORMAT_VIDEO_420_10bpc_YCrCb;
4914 case DRM_FORMAT_XRGB16161616F:
4915 case DRM_FORMAT_ARGB16161616F:
4916 plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ARGB16161616F;
4918 case DRM_FORMAT_XBGR16161616F:
4919 case DRM_FORMAT_ABGR16161616F:
4920 plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ABGR16161616F;
4924 "Unsupported screen format %p4cc\n",
4925 &fb->format->format);
4929 switch (plane_state->rotation & DRM_MODE_ROTATE_MASK) {
4930 case DRM_MODE_ROTATE_0:
4931 plane_info->rotation = ROTATION_ANGLE_0;
4933 case DRM_MODE_ROTATE_90:
4934 plane_info->rotation = ROTATION_ANGLE_90;
4936 case DRM_MODE_ROTATE_180:
4937 plane_info->rotation = ROTATION_ANGLE_180;
4939 case DRM_MODE_ROTATE_270:
4940 plane_info->rotation = ROTATION_ANGLE_270;
4943 plane_info->rotation = ROTATION_ANGLE_0;
4947 plane_info->visible = true;
4948 plane_info->stereo_format = PLANE_STEREO_FORMAT_NONE;
4950 plane_info->layer_index = 0;
4952 ret = fill_plane_color_attributes(plane_state, plane_info->format,
4953 &plane_info->color_space);
4957 ret = fill_plane_buffer_attributes(adev, afb, plane_info->format,
4958 plane_info->rotation, tiling_flags,
4959 &plane_info->tiling_info,
4960 &plane_info->plane_size,
4961 &plane_info->dcc, address, tmz_surface,
4966 fill_blending_from_plane_state(
4967 plane_state, &plane_info->per_pixel_alpha,
4968 &plane_info->global_alpha, &plane_info->global_alpha_value);
4973 static int fill_dc_plane_attributes(struct amdgpu_device *adev,
4974 struct dc_plane_state *dc_plane_state,
4975 struct drm_plane_state *plane_state,
4976 struct drm_crtc_state *crtc_state)
4978 struct dm_crtc_state *dm_crtc_state = to_dm_crtc_state(crtc_state);
4979 struct amdgpu_framebuffer *afb = (struct amdgpu_framebuffer *)plane_state->fb;
4980 struct dc_scaling_info scaling_info;
4981 struct dc_plane_info plane_info;
4983 bool force_disable_dcc = false;
4985 ret = fill_dc_scaling_info(plane_state, &scaling_info);
4989 dc_plane_state->src_rect = scaling_info.src_rect;
4990 dc_plane_state->dst_rect = scaling_info.dst_rect;
4991 dc_plane_state->clip_rect = scaling_info.clip_rect;
4992 dc_plane_state->scaling_quality = scaling_info.scaling_quality;
4994 force_disable_dcc = adev->asic_type == CHIP_RAVEN && adev->in_suspend;
4995 ret = fill_dc_plane_info_and_addr(adev, plane_state,
4998 &dc_plane_state->address,
5004 dc_plane_state->format = plane_info.format;
5005 dc_plane_state->color_space = plane_info.color_space;
5007 dc_plane_state->plane_size = plane_info.plane_size;
5008 dc_plane_state->rotation = plane_info.rotation;
5009 dc_plane_state->horizontal_mirror = plane_info.horizontal_mirror;
5010 dc_plane_state->stereo_format = plane_info.stereo_format;
5011 dc_plane_state->tiling_info = plane_info.tiling_info;
5012 dc_plane_state->visible = plane_info.visible;
5013 dc_plane_state->per_pixel_alpha = plane_info.per_pixel_alpha;
5014 dc_plane_state->global_alpha = plane_info.global_alpha;
5015 dc_plane_state->global_alpha_value = plane_info.global_alpha_value;
5016 dc_plane_state->dcc = plane_info.dcc;
5017 dc_plane_state->layer_index = plane_info.layer_index; // Always returns 0
5018 dc_plane_state->flip_int_enabled = true;
5021 * Always set the input transfer function, since plane state is refreshed every time.
5024 ret = amdgpu_dm_update_plane_color_mgmt(dm_crtc_state, dc_plane_state);
5031 static void update_stream_scaling_settings(const struct drm_display_mode *mode,
5032 const struct dm_connector_state *dm_state,
5033 struct dc_stream_state *stream)
5035 enum amdgpu_rmx_type rmx_type;
5037 struct rect src = { 0 }; /* viewport in composition space*/
5038 struct rect dst = { 0 }; /* stream addressable area */
5040 /* no mode. nothing to be done */
5044 /* Full screen scaling by default */
5045 src.width = mode->hdisplay;
5046 src.height = mode->vdisplay;
5047 dst.width = stream->timing.h_addressable;
5048 dst.height = stream->timing.v_addressable;
5051 rmx_type = dm_state->scaling;
5052 if (rmx_type == RMX_ASPECT || rmx_type == RMX_OFF) {
5053 if (src.width * dst.height <
5054 src.height * dst.width) {
5055 /* height needs less upscaling/more downscaling */
5056 dst.width = src.width *
5057 dst.height / src.height;
5059 /* width needs less upscaling/more downscaling */
5060 dst.height = src.height *
5061 dst.width / src.width;
5063 } else if (rmx_type == RMX_CENTER) {
5067 dst.x = (stream->timing.h_addressable - dst.width) / 2;
5068 dst.y = (stream->timing.v_addressable - dst.height) / 2;
5070 if (dm_state->underscan_enable) {
5071 dst.x += dm_state->underscan_hborder / 2;
5072 dst.y += dm_state->underscan_vborder / 2;
5073 dst.width -= dm_state->underscan_hborder;
5074 dst.height -= dm_state->underscan_vborder;
5081 DRM_DEBUG_KMS("Destination Rectangle x:%d y:%d width:%d height:%d\n",
5082 dst.x, dst.y, dst.width, dst.height);
5086 static enum dc_color_depth
5087 convert_color_depth_from_display_info(const struct drm_connector *connector,
5088 bool is_y420, int requested_bpc)
5095 /* Cap display bpc based on HDMI 2.0 HF-VSDB */
5096 if (connector->display_info.hdmi.y420_dc_modes & DRM_EDID_YCBCR420_DC_48)
5098 else if (connector->display_info.hdmi.y420_dc_modes & DRM_EDID_YCBCR420_DC_36)
5100 else if (connector->display_info.hdmi.y420_dc_modes & DRM_EDID_YCBCR420_DC_30)
5103 bpc = (uint8_t)connector->display_info.bpc;
5104 /* Assume 8 bpc by default if no bpc is specified. */
5105 bpc = bpc ? bpc : 8;
5108 if (requested_bpc > 0) {
5110 * Cap display bpc based on the user requested value.
5112 * The value for state->max_bpc may not be correctly updated
5113 * depending on when the connector gets added to the state
5114 * or if this was called outside of atomic check, so it
5115 * can't be used directly.
5117 bpc = min_t(u8, bpc, requested_bpc);
5119 /* Round down to the nearest even number. */
5120 bpc = bpc - (bpc & 1);
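/*
 * Illustrative example (hypothetical values): an EDID reporting 12 bpc with
 * a requested max of 10 gives min(12, 10) = 10; a request of 11 would become
 * min(12, 11) = 11 and then be rounded down to 10 by clearing the low bit.
 */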
5126 * Temporary workaround: DRM doesn't parse color depth for
5127 * EDID revisions before 1.4.
5128 * TODO: Fix EDID parsing
5130 return COLOR_DEPTH_888;
5132 return COLOR_DEPTH_666;
5134 return COLOR_DEPTH_888;
5136 return COLOR_DEPTH_101010;
5138 return COLOR_DEPTH_121212;
5140 return COLOR_DEPTH_141414;
5142 return COLOR_DEPTH_161616;
5144 return COLOR_DEPTH_UNDEFINED;
5148 static enum dc_aspect_ratio
5149 get_aspect_ratio(const struct drm_display_mode *mode_in)
5151 /* 1-1 mapping, since both enums follow the HDMI spec. */
5152 return (enum dc_aspect_ratio) mode_in->picture_aspect_ratio;
5155 static enum dc_color_space
5156 get_output_color_space(const struct dc_crtc_timing *dc_crtc_timing)
5158 enum dc_color_space color_space = COLOR_SPACE_SRGB;
5160 switch (dc_crtc_timing->pixel_encoding) {
5161 case PIXEL_ENCODING_YCBCR422:
5162 case PIXEL_ENCODING_YCBCR444:
5163 case PIXEL_ENCODING_YCBCR420:
5166 * 27030 kHz is the separation point between HDTV and SDTV;
5167 * per the HDMI spec we use YCbCr709 above it and YCbCr601 below it
5170 if (dc_crtc_timing->pix_clk_100hz > 270300) {
5171 if (dc_crtc_timing->flags.Y_ONLY)
5173 COLOR_SPACE_YCBCR709_LIMITED;
5175 color_space = COLOR_SPACE_YCBCR709;
5177 if (dc_crtc_timing->flags.Y_ONLY)
5179 COLOR_SPACE_YCBCR601_LIMITED;
5181 color_space = COLOR_SPACE_YCBCR601;
5186 case PIXEL_ENCODING_RGB:
5187 color_space = COLOR_SPACE_SRGB;
5198 static bool adjust_colour_depth_from_display_info(
5199 struct dc_crtc_timing *timing_out,
5200 const struct drm_display_info *info)
5202 enum dc_color_depth depth = timing_out->display_color_depth;
5205 normalized_clk = timing_out->pix_clk_100hz / 10;
5206 /* YCbCr 4:2:0 requires additional adjustment of 1/2 */
5207 if (timing_out->pixel_encoding == PIXEL_ENCODING_YCBCR420)
5208 normalized_clk /= 2;
5209 /* Adjust the pixel clock per the HDMI spec, based on the colour depth */
5211 case COLOR_DEPTH_888:
5213 case COLOR_DEPTH_101010:
5214 normalized_clk = (normalized_clk * 30) / 24;
5216 case COLOR_DEPTH_121212:
5217 normalized_clk = (normalized_clk * 36) / 24;
5219 case COLOR_DEPTH_161616:
5220 normalized_clk = (normalized_clk * 48) / 24;
5223 /* The above depths are the only ones valid for HDMI. */
5226 if (normalized_clk <= info->max_tmds_clock) {
5227 timing_out->display_color_depth = depth;
5230 } while (--depth > COLOR_DEPTH_666);
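/*
 * Worked example (illustrative numbers): a 4k60 RGB mode has
 * pix_clk_100hz = 5940000, i.e. normalized_clk = 594000 kHz. At 12 bpc that
 * becomes 594000 * 36 / 24 = 891000 kHz; if the sink reports
 * max_tmds_clock = 600000 kHz the loop steps the depth down until 8 bpc
 * (594000 kHz) fits, and only then updates display_color_depth.
 */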
5234 static void fill_stream_properties_from_drm_display_mode(
5235 struct dc_stream_state *stream,
5236 const struct drm_display_mode *mode_in,
5237 const struct drm_connector *connector,
5238 const struct drm_connector_state *connector_state,
5239 const struct dc_stream_state *old_stream,
5242 struct dc_crtc_timing *timing_out = &stream->timing;
5243 const struct drm_display_info *info = &connector->display_info;
5244 struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
5245 struct hdmi_vendor_infoframe hv_frame;
5246 struct hdmi_avi_infoframe avi_frame;
5248 memset(&hv_frame, 0, sizeof(hv_frame));
5249 memset(&avi_frame, 0, sizeof(avi_frame));
5251 timing_out->h_border_left = 0;
5252 timing_out->h_border_right = 0;
5253 timing_out->v_border_top = 0;
5254 timing_out->v_border_bottom = 0;
5255 /* TODO: un-hardcode */
5256 if (drm_mode_is_420_only(info, mode_in)
5257 && stream->signal == SIGNAL_TYPE_HDMI_TYPE_A)
5258 timing_out->pixel_encoding = PIXEL_ENCODING_YCBCR420;
5259 else if (drm_mode_is_420_also(info, mode_in)
5260 && aconnector->force_yuv420_output)
5261 timing_out->pixel_encoding = PIXEL_ENCODING_YCBCR420;
5262 else if ((connector->display_info.color_formats & DRM_COLOR_FORMAT_YCRCB444)
5263 && stream->signal == SIGNAL_TYPE_HDMI_TYPE_A)
5264 timing_out->pixel_encoding = PIXEL_ENCODING_YCBCR444;
5266 timing_out->pixel_encoding = PIXEL_ENCODING_RGB;
5268 timing_out->timing_3d_format = TIMING_3D_FORMAT_NONE;
5269 timing_out->display_color_depth = convert_color_depth_from_display_info(
5271 (timing_out->pixel_encoding == PIXEL_ENCODING_YCBCR420),
5273 timing_out->scan_type = SCANNING_TYPE_NODATA;
5274 timing_out->hdmi_vic = 0;
5277 timing_out->vic = old_stream->timing.vic;
5278 timing_out->flags.HSYNC_POSITIVE_POLARITY = old_stream->timing.flags.HSYNC_POSITIVE_POLARITY;
5279 timing_out->flags.VSYNC_POSITIVE_POLARITY = old_stream->timing.flags.VSYNC_POSITIVE_POLARITY;
5281 timing_out->vic = drm_match_cea_mode(mode_in);
5282 if (mode_in->flags & DRM_MODE_FLAG_PHSYNC)
5283 timing_out->flags.HSYNC_POSITIVE_POLARITY = 1;
5284 if (mode_in->flags & DRM_MODE_FLAG_PVSYNC)
5285 timing_out->flags.VSYNC_POSITIVE_POLARITY = 1;
5288 if (stream->signal == SIGNAL_TYPE_HDMI_TYPE_A) {
5289 drm_hdmi_avi_infoframe_from_display_mode(&avi_frame, (struct drm_connector *)connector, mode_in);
5290 timing_out->vic = avi_frame.video_code;
5291 drm_hdmi_vendor_infoframe_from_display_mode(&hv_frame, (struct drm_connector *)connector, mode_in);
5292 timing_out->hdmi_vic = hv_frame.vic;
5295 if (is_freesync_video_mode(mode_in, aconnector)) {
5296 timing_out->h_addressable = mode_in->hdisplay;
5297 timing_out->h_total = mode_in->htotal;
5298 timing_out->h_sync_width = mode_in->hsync_end - mode_in->hsync_start;
5299 timing_out->h_front_porch = mode_in->hsync_start - mode_in->hdisplay;
5300 timing_out->v_total = mode_in->vtotal;
5301 timing_out->v_addressable = mode_in->vdisplay;
5302 timing_out->v_front_porch = mode_in->vsync_start - mode_in->vdisplay;
5303 timing_out->v_sync_width = mode_in->vsync_end - mode_in->vsync_start;
5304 timing_out->pix_clk_100hz = mode_in->clock * 10;
5306 timing_out->h_addressable = mode_in->crtc_hdisplay;
5307 timing_out->h_total = mode_in->crtc_htotal;
5308 timing_out->h_sync_width = mode_in->crtc_hsync_end - mode_in->crtc_hsync_start;
5309 timing_out->h_front_porch = mode_in->crtc_hsync_start - mode_in->crtc_hdisplay;
5310 timing_out->v_total = mode_in->crtc_vtotal;
5311 timing_out->v_addressable = mode_in->crtc_vdisplay;
5312 timing_out->v_front_porch = mode_in->crtc_vsync_start - mode_in->crtc_vdisplay;
5313 timing_out->v_sync_width = mode_in->crtc_vsync_end - mode_in->crtc_vsync_start;
5314 timing_out->pix_clk_100hz = mode_in->crtc_clock * 10;
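/*
 * Note: DRM mode clocks are in kHz while DC uses units of 100 Hz, hence the
 * multiplication by 10 above (e.g. 148500 kHz -> 1485000).
 */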
5317 timing_out->aspect_ratio = get_aspect_ratio(mode_in);
5319 stream->output_color_space = get_output_color_space(timing_out);
5321 stream->out_transfer_func->type = TF_TYPE_PREDEFINED;
5322 stream->out_transfer_func->tf = TRANSFER_FUNCTION_SRGB;
5323 if (stream->signal == SIGNAL_TYPE_HDMI_TYPE_A) {
5324 if (!adjust_colour_depth_from_display_info(timing_out, info) &&
5325 drm_mode_is_420_also(info, mode_in) &&
5326 timing_out->pixel_encoding != PIXEL_ENCODING_YCBCR420) {
5327 timing_out->pixel_encoding = PIXEL_ENCODING_YCBCR420;
5328 adjust_colour_depth_from_display_info(timing_out, info);
5333 static void fill_audio_info(struct audio_info *audio_info,
5334 const struct drm_connector *drm_connector,
5335 const struct dc_sink *dc_sink)
5338 int cea_revision = 0;
5339 const struct dc_edid_caps *edid_caps = &dc_sink->edid_caps;
5341 audio_info->manufacture_id = edid_caps->manufacturer_id;
5342 audio_info->product_id = edid_caps->product_id;
5344 cea_revision = drm_connector->display_info.cea_rev;
5346 strscpy(audio_info->display_name,
5347 edid_caps->display_name,
5348 AUDIO_INFO_DISPLAY_NAME_SIZE_IN_CHARS);
5350 if (cea_revision >= 3) {
5351 audio_info->mode_count = edid_caps->audio_mode_count;
5353 for (i = 0; i < audio_info->mode_count; ++i) {
5354 audio_info->modes[i].format_code =
5355 (enum audio_format_code)
5356 (edid_caps->audio_modes[i].format_code);
5357 audio_info->modes[i].channel_count =
5358 edid_caps->audio_modes[i].channel_count;
5359 audio_info->modes[i].sample_rates.all =
5360 edid_caps->audio_modes[i].sample_rate;
5361 audio_info->modes[i].sample_size =
5362 edid_caps->audio_modes[i].sample_size;
5366 audio_info->flags.all = edid_caps->speaker_flags;
5368 /* TODO: We only check for the progressive mode, check for interlace mode too */
5369 if (drm_connector->latency_present[0]) {
5370 audio_info->video_latency = drm_connector->video_latency[0];
5371 audio_info->audio_latency = drm_connector->audio_latency[0];
5374 /* TODO: For DP, video and audio latency should be calculated from DPCD caps */
5379 copy_crtc_timing_for_drm_display_mode(const struct drm_display_mode *src_mode,
5380 struct drm_display_mode *dst_mode)
5382 dst_mode->crtc_hdisplay = src_mode->crtc_hdisplay;
5383 dst_mode->crtc_vdisplay = src_mode->crtc_vdisplay;
5384 dst_mode->crtc_clock = src_mode->crtc_clock;
5385 dst_mode->crtc_hblank_start = src_mode->crtc_hblank_start;
5386 dst_mode->crtc_hblank_end = src_mode->crtc_hblank_end;
5387 dst_mode->crtc_hsync_start = src_mode->crtc_hsync_start;
5388 dst_mode->crtc_hsync_end = src_mode->crtc_hsync_end;
5389 dst_mode->crtc_htotal = src_mode->crtc_htotal;
5390 dst_mode->crtc_hskew = src_mode->crtc_hskew;
5391 dst_mode->crtc_vblank_start = src_mode->crtc_vblank_start;
5392 dst_mode->crtc_vblank_end = src_mode->crtc_vblank_end;
5393 dst_mode->crtc_vsync_start = src_mode->crtc_vsync_start;
5394 dst_mode->crtc_vsync_end = src_mode->crtc_vsync_end;
5395 dst_mode->crtc_vtotal = src_mode->crtc_vtotal;
5399 decide_crtc_timing_for_drm_display_mode(struct drm_display_mode *drm_mode,
5400 const struct drm_display_mode *native_mode,
5403 if (scale_enabled) {
5404 copy_crtc_timing_for_drm_display_mode(native_mode, drm_mode);
5405 } else if (native_mode->clock == drm_mode->clock &&
5406 native_mode->htotal == drm_mode->htotal &&
5407 native_mode->vtotal == drm_mode->vtotal) {
5408 copy_crtc_timing_for_drm_display_mode(native_mode, drm_mode);
5410 /* neither scaling nor an amdgpu-inserted mode, no need to patch */
5414 static struct dc_sink *
5415 create_fake_sink(struct amdgpu_dm_connector *aconnector)
5417 struct dc_sink_init_data sink_init_data = { 0 };
5418 struct dc_sink *sink = NULL;
5419 sink_init_data.link = aconnector->dc_link;
5420 sink_init_data.sink_signal = aconnector->dc_link->connector_signal;
5422 sink = dc_sink_create(&sink_init_data);
5424 DRM_ERROR("Failed to create sink!\n");
5427 sink->sink_signal = SIGNAL_TYPE_VIRTUAL;
5432 static void set_multisync_trigger_params(
5433 struct dc_stream_state *stream)
5435 struct dc_stream_state *master = NULL;
5437 if (stream->triggered_crtc_reset.enabled) {
5438 master = stream->triggered_crtc_reset.event_source;
5439 stream->triggered_crtc_reset.event =
5440 master->timing.flags.VSYNC_POSITIVE_POLARITY ?
5441 CRTC_EVENT_VSYNC_RISING : CRTC_EVENT_VSYNC_FALLING;
5442 stream->triggered_crtc_reset.delay = TRIGGER_DELAY_NEXT_PIXEL;
5446 static void set_master_stream(struct dc_stream_state *stream_set[],
5449 int j, highest_rfr = 0, master_stream = 0;
5451 for (j = 0; j < stream_count; j++) {
5452 if (stream_set[j] && stream_set[j]->triggered_crtc_reset.enabled) {
5453 int refresh_rate = 0;
5455 refresh_rate = (stream_set[j]->timing.pix_clk_100hz*100)/
5456 (stream_set[j]->timing.h_total*stream_set[j]->timing.v_total);
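/*
 * Note: this recovers the refresh rate in Hz; e.g. for a 1080p60 CEA timing
 * pix_clk_100hz = 1485000, h_total = 2200, v_total = 1125, so
 * (1485000 * 100) / (2200 * 1125) = 60.
 */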
5457 if (refresh_rate > highest_rfr) {
5458 highest_rfr = refresh_rate;
5463 for (j = 0; j < stream_count; j++) {
5465 stream_set[j]->triggered_crtc_reset.event_source = stream_set[master_stream];
5469 static void dm_enable_per_frame_crtc_master_sync(struct dc_state *context)
5472 struct dc_stream_state *stream;
5474 if (context->stream_count < 2)
5476 for (i = 0; i < context->stream_count ; i++) {
5477 if (!context->streams[i])
5480 * TODO: add a function to read AMD VSDB bits and set
5481 * crtc_sync_master.multi_sync_enabled flag
5482 * For now it's set to false
5486 set_master_stream(context->streams, context->stream_count);
5488 for (i = 0; i < context->stream_count ; i++) {
5489 stream = context->streams[i];
5494 set_multisync_trigger_params(stream);
5498 static struct drm_display_mode *
5499 get_highest_refresh_rate_mode(struct amdgpu_dm_connector *aconnector,
5500 bool use_probed_modes)
5502 struct drm_display_mode *m, *m_pref = NULL;
5503 u16 current_refresh, highest_refresh;
5504 struct list_head *list_head = use_probed_modes ?
5505 &aconnector->base.probed_modes :
5506 &aconnector->base.modes;
5508 if (aconnector->freesync_vid_base.clock != 0)
5509 return &aconnector->freesync_vid_base;
5511 /* Find the preferred mode */
5512 list_for_each_entry (m, list_head, head) {
5513 if (m->type & DRM_MODE_TYPE_PREFERRED) {
5520 /* Probably an EDID with no preferred mode. Fall back to the first entry */
5521 m_pref = list_first_entry_or_null(
5522 &aconnector->base.modes, struct drm_display_mode, head);
5524 DRM_DEBUG_DRIVER("No preferred mode found in EDID\n");
5529 highest_refresh = drm_mode_vrefresh(m_pref);
5532 * Find the mode with highest refresh rate with same resolution.
5533 * For some monitors, preferred mode is not the mode with highest
5534 * supported refresh rate.
5536 list_for_each_entry (m, list_head, head) {
5537 current_refresh = drm_mode_vrefresh(m);
5539 if (m->hdisplay == m_pref->hdisplay &&
5540 m->vdisplay == m_pref->vdisplay &&
5541 highest_refresh < current_refresh) {
5542 highest_refresh = current_refresh;
5547 aconnector->freesync_vid_base = *m_pref;
5551 static bool is_freesync_video_mode(const struct drm_display_mode *mode,
5552 struct amdgpu_dm_connector *aconnector)
5554 struct drm_display_mode *high_mode;
5557 high_mode = get_highest_refresh_rate_mode(aconnector, false);
5558 if (!high_mode || !mode)
5561 timing_diff = high_mode->vtotal - mode->vtotal;
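/*
 * A freesync "video" variant is expected to differ from the base mode only
 * by a stretched vertical front porch: everything checked below must match
 * except vtotal, and vsync_start/vsync_end must shift by exactly that same
 * vtotal delta (timing_diff).
 */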
5563 if (high_mode->clock == 0 || high_mode->clock != mode->clock ||
5564 high_mode->hdisplay != mode->hdisplay ||
5565 high_mode->vdisplay != mode->vdisplay ||
5566 high_mode->hsync_start != mode->hsync_start ||
5567 high_mode->hsync_end != mode->hsync_end ||
5568 high_mode->htotal != mode->htotal ||
5569 high_mode->hskew != mode->hskew ||
5570 high_mode->vscan != mode->vscan ||
5571 high_mode->vsync_start - mode->vsync_start != timing_diff ||
5572 high_mode->vsync_end - mode->vsync_end != timing_diff)
5578 static struct dc_stream_state *
5579 create_stream_for_sink(struct amdgpu_dm_connector *aconnector,
5580 const struct drm_display_mode *drm_mode,
5581 const struct dm_connector_state *dm_state,
5582 const struct dc_stream_state *old_stream,
5585 struct drm_display_mode *preferred_mode = NULL;
5586 struct drm_connector *drm_connector;
5587 const struct drm_connector_state *con_state =
5588 dm_state ? &dm_state->base : NULL;
5589 struct dc_stream_state *stream = NULL;
5590 struct drm_display_mode mode = *drm_mode;
5591 struct drm_display_mode saved_mode;
5592 struct drm_display_mode *freesync_mode = NULL;
5593 bool native_mode_found = false;
5594 bool recalculate_timing = dm_state ? (dm_state->scaling != RMX_OFF) : false;
5596 int preferred_refresh = 0;
5597 #if defined(CONFIG_DRM_AMD_DC_DCN)
5598 struct dsc_dec_dpcd_caps dsc_caps;
5599 uint32_t link_bandwidth_kbps;
5601 struct dc_sink *sink = NULL;
5603 memset(&saved_mode, 0, sizeof(saved_mode));
5605 if (aconnector == NULL) {
5606 DRM_ERROR("aconnector is NULL!\n");
5610 drm_connector = &aconnector->base;
5612 if (!aconnector->dc_sink) {
5613 sink = create_fake_sink(aconnector);
5617 sink = aconnector->dc_sink;
5618 dc_sink_retain(sink);
5621 stream = dc_create_stream_for_sink(sink);
5623 if (stream == NULL) {
5624 DRM_ERROR("Failed to create stream for sink!\n");
5628 stream->dm_stream_context = aconnector;
5630 stream->timing.flags.LTE_340MCSC_SCRAMBLE =
5631 drm_connector->display_info.hdmi.scdc.scrambling.low_rates;
5633 list_for_each_entry(preferred_mode, &aconnector->base.modes, head) {
5634 /* Search for preferred mode */
5635 if (preferred_mode->type & DRM_MODE_TYPE_PREFERRED) {
5636 native_mode_found = true;
5640 if (!native_mode_found)
5641 preferred_mode = list_first_entry_or_null(
5642 &aconnector->base.modes,
5643 struct drm_display_mode,
5646 mode_refresh = drm_mode_vrefresh(&mode);
5648 if (preferred_mode == NULL) {
5650 * This may not be an error: the use case is when we have no
5651 * usermode calls to reset and set the mode upon hotplug. In this
5652 * case, we call set mode ourselves to restore the previous mode,
5653 * and the mode list may not be filled in in time.
5655 DRM_DEBUG_DRIVER("No preferred mode found\n");
5657 recalculate_timing |= amdgpu_freesync_vid_mode &&
5658 is_freesync_video_mode(&mode, aconnector);
5659 if (recalculate_timing) {
5660 freesync_mode = get_highest_refresh_rate_mode(aconnector, false);
5662 mode = *freesync_mode;
5664 decide_crtc_timing_for_drm_display_mode(
5665 &mode, preferred_mode,
5666 dm_state ? (dm_state->scaling != RMX_OFF) : false);
5669 preferred_refresh = drm_mode_vrefresh(preferred_mode);
5672 if (recalculate_timing)
5673 drm_mode_set_crtcinfo(&saved_mode, 0);
5675 drm_mode_set_crtcinfo(&mode, 0);
5678 * If scaling is enabled and the refresh rate didn't change,
5679 * we copy the VIC and polarities of the old timings
5681 if (!recalculate_timing || mode_refresh != preferred_refresh)
5682 fill_stream_properties_from_drm_display_mode(
5683 stream, &mode, &aconnector->base, con_state, NULL,
5686 fill_stream_properties_from_drm_display_mode(
5687 stream, &mode, &aconnector->base, con_state, old_stream,
5690 stream->timing.flags.DSC = 0;
5692 if (aconnector->dc_link && sink->sink_signal == SIGNAL_TYPE_DISPLAY_PORT) {
5693 #if defined(CONFIG_DRM_AMD_DC_DCN)
5694 dc_dsc_parse_dsc_dpcd(aconnector->dc_link->ctx->dc,
5695 aconnector->dc_link->dpcd_caps.dsc_caps.dsc_basic_caps.raw,
5696 aconnector->dc_link->dpcd_caps.dsc_caps.dsc_branch_decoder_caps.raw,
5698 link_bandwidth_kbps = dc_link_bandwidth_kbps(aconnector->dc_link,
5699 dc_link_get_link_cap(aconnector->dc_link));
5701 if (aconnector->dsc_settings.dsc_force_enable != DSC_CLK_FORCE_DISABLE && dsc_caps.is_dsc_supported) {
5702 /* Set DSC policy according to dsc_clock_en */
5703 dc_dsc_policy_set_enable_dsc_when_not_needed(
5704 aconnector->dsc_settings.dsc_force_enable == DSC_CLK_FORCE_ENABLE);
5706 if (dc_dsc_compute_config(aconnector->dc_link->ctx->dc->res_pool->dscs[0],
5708 aconnector->dc_link->ctx->dc->debug.dsc_min_slice_height_override,
5710 link_bandwidth_kbps,
5712 &stream->timing.dsc_cfg))
5713 stream->timing.flags.DSC = 1;
5714 /* Overwrite the stream flag if DSC is enabled through debugfs */
5715 if (aconnector->dsc_settings.dsc_force_enable == DSC_CLK_FORCE_ENABLE)
5716 stream->timing.flags.DSC = 1;
5718 if (stream->timing.flags.DSC && aconnector->dsc_settings.dsc_num_slices_h)
5719 stream->timing.dsc_cfg.num_slices_h = aconnector->dsc_settings.dsc_num_slices_h;
5721 if (stream->timing.flags.DSC && aconnector->dsc_settings.dsc_num_slices_v)
5722 stream->timing.dsc_cfg.num_slices_v = aconnector->dsc_settings.dsc_num_slices_v;
5724 if (stream->timing.flags.DSC && aconnector->dsc_settings.dsc_bits_per_pixel)
5725 stream->timing.dsc_cfg.bits_per_pixel = aconnector->dsc_settings.dsc_bits_per_pixel;
5730 update_stream_scaling_settings(&mode, dm_state, stream);
5733 &stream->audio_info,
5737 update_stream_signal(stream, sink);
5739 if (stream->signal == SIGNAL_TYPE_HDMI_TYPE_A)
5740 mod_build_hf_vsif_infopacket(stream, &stream->vsp_infopacket);
5742 if (stream->link->psr_settings.psr_feature_enabled) {
5744 // Decide whether the stream supports VSC SDP colorimetry
5745 // before building the VSC info packet
5747 stream->use_vsc_sdp_for_colorimetry = false;
5748 if (aconnector->dc_sink->sink_signal == SIGNAL_TYPE_DISPLAY_PORT_MST) {
5749 stream->use_vsc_sdp_for_colorimetry =
5750 aconnector->dc_sink->is_vsc_sdp_colorimetry_supported;
5752 if (stream->link->dpcd_caps.dprx_feature.bits.VSC_SDP_COLORIMETRY_SUPPORTED)
5753 stream->use_vsc_sdp_for_colorimetry = true;
5755 mod_build_vsc_infopacket(stream, &stream->vsc_infopacket);
5758 dc_sink_release(sink);
5763 static void amdgpu_dm_crtc_destroy(struct drm_crtc *crtc)
5765 drm_crtc_cleanup(crtc);
5769 static void dm_crtc_destroy_state(struct drm_crtc *crtc,
5770 struct drm_crtc_state *state)
5772 struct dm_crtc_state *cur = to_dm_crtc_state(state);
5774 /* TODO: destroy dc_stream objects once the stream object is flattened */
5776 dc_stream_release(cur->stream);
5779 __drm_atomic_helper_crtc_destroy_state(state);
5785 static void dm_crtc_reset_state(struct drm_crtc *crtc)
5787 struct dm_crtc_state *state;
5790 dm_crtc_destroy_state(crtc, crtc->state);
5792 state = kzalloc(sizeof(*state), GFP_KERNEL);
5793 if (WARN_ON(!state))
5796 __drm_atomic_helper_crtc_reset(crtc, &state->base);
5799 static struct drm_crtc_state *
5800 dm_crtc_duplicate_state(struct drm_crtc *crtc)
5802 struct dm_crtc_state *state, *cur;
5804 cur = to_dm_crtc_state(crtc->state);
5806 if (WARN_ON(!crtc->state))
5809 state = kzalloc(sizeof(*state), GFP_KERNEL);
5813 __drm_atomic_helper_crtc_duplicate_state(crtc, &state->base);
5816 state->stream = cur->stream;
5817 dc_stream_retain(state->stream);
5820 state->active_planes = cur->active_planes;
5821 state->vrr_infopacket = cur->vrr_infopacket;
5822 state->abm_level = cur->abm_level;
5823 state->vrr_supported = cur->vrr_supported;
5824 state->freesync_config = cur->freesync_config;
5825 state->cm_has_degamma = cur->cm_has_degamma;
5826 state->cm_is_degamma_srgb = cur->cm_is_degamma_srgb;
5827 /* TODO: duplicate dc_stream once the stream object is flattened */
5829 return &state->base;
5832 #ifdef CONFIG_DRM_AMD_SECURE_DISPLAY
5833 static int amdgpu_dm_crtc_late_register(struct drm_crtc *crtc)
5835 crtc_debugfs_init(crtc);
5841 static inline int dm_set_vupdate_irq(struct drm_crtc *crtc, bool enable)
5843 enum dc_irq_source irq_source;
5844 struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
5845 struct amdgpu_device *adev = drm_to_adev(crtc->dev);
5848 irq_source = IRQ_TYPE_VUPDATE + acrtc->otg_inst;
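/*
 * Assumption worth noting: DC lays out its interrupt sources per OTG
 * instance, so adding acrtc->otg_inst to the IRQ_TYPE_VUPDATE base selects
 * the vupdate interrupt of this particular pipe (the vblank source in
 * dm_set_vblank() follows the same pattern).
 */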
5850 rc = dc_interrupt_set(adev->dm.dc, irq_source, enable) ? 0 : -EBUSY;
5852 DRM_DEBUG_VBL("crtc %d - vupdate irq %sabling: r=%d\n",
5853 acrtc->crtc_id, enable ? "en" : "dis", rc);
5857 static inline int dm_set_vblank(struct drm_crtc *crtc, bool enable)
5859 enum dc_irq_source irq_source;
5860 struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
5861 struct amdgpu_device *adev = drm_to_adev(crtc->dev);
5862 struct dm_crtc_state *acrtc_state = to_dm_crtc_state(crtc->state);
5863 #if defined(CONFIG_DRM_AMD_DC_DCN)
5864 struct amdgpu_display_manager *dm = &adev->dm;
5865 unsigned long flags;
5870 /* vblank irq on -> Only need vupdate irq in vrr mode */
5871 if (amdgpu_dm_vrr_active(acrtc_state))
5872 rc = dm_set_vupdate_irq(crtc, true);
5874 /* vblank irq off -> vupdate irq off */
5875 rc = dm_set_vupdate_irq(crtc, false);
5881 irq_source = IRQ_TYPE_VBLANK + acrtc->otg_inst;
5883 if (!dc_interrupt_set(adev->dm.dc, irq_source, enable))
5886 if (amdgpu_in_reset(adev))
5889 #if defined(CONFIG_DRM_AMD_DC_DCN)
5890 spin_lock_irqsave(&dm->vblank_lock, flags);
5891 dm->vblank_workqueue->dm = dm;
5892 dm->vblank_workqueue->otg_inst = acrtc->otg_inst;
5893 dm->vblank_workqueue->enable = enable;
5894 spin_unlock_irqrestore(&dm->vblank_lock, flags);
5895 schedule_work(&dm->vblank_workqueue->mall_work);
5901 static int dm_enable_vblank(struct drm_crtc *crtc)
5903 return dm_set_vblank(crtc, true);
5906 static void dm_disable_vblank(struct drm_crtc *crtc)
5908 dm_set_vblank(crtc, false);
5911 /* Only the options currently available to the driver are implemented */
5912 static const struct drm_crtc_funcs amdgpu_dm_crtc_funcs = {
5913 .reset = dm_crtc_reset_state,
5914 .destroy = amdgpu_dm_crtc_destroy,
5915 .set_config = drm_atomic_helper_set_config,
5916 .page_flip = drm_atomic_helper_page_flip,
5917 .atomic_duplicate_state = dm_crtc_duplicate_state,
5918 .atomic_destroy_state = dm_crtc_destroy_state,
5919 .set_crc_source = amdgpu_dm_crtc_set_crc_source,
5920 .verify_crc_source = amdgpu_dm_crtc_verify_crc_source,
5921 .get_crc_sources = amdgpu_dm_crtc_get_crc_sources,
5922 .get_vblank_counter = amdgpu_get_vblank_counter_kms,
5923 .enable_vblank = dm_enable_vblank,
5924 .disable_vblank = dm_disable_vblank,
5925 .get_vblank_timestamp = drm_crtc_vblank_helper_get_vblank_timestamp,
5926 #if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
5927 .late_register = amdgpu_dm_crtc_late_register,
5931 static enum drm_connector_status
5932 amdgpu_dm_connector_detect(struct drm_connector *connector, bool force)
5935 struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
5939 * 1. This interface is NOT called in the context of the HPD irq.
5940 * 2. This interface *is called* in the context of a user-mode ioctl,
5941 * which makes it a bad place for *any* MST-related activity.
5944 if (aconnector->base.force == DRM_FORCE_UNSPECIFIED &&
5945 !aconnector->fake_enable)
5946 connected = (aconnector->dc_sink != NULL);
5948 connected = (aconnector->base.force == DRM_FORCE_ON);
5950 update_subconnector_property(aconnector);
5952 return (connected ? connector_status_connected :
5953 connector_status_disconnected);
5956 int amdgpu_dm_connector_atomic_set_property(struct drm_connector *connector,
5957 struct drm_connector_state *connector_state,
5958 struct drm_property *property,
5961 struct drm_device *dev = connector->dev;
5962 struct amdgpu_device *adev = drm_to_adev(dev);
5963 struct dm_connector_state *dm_old_state =
5964 to_dm_connector_state(connector->state);
5965 struct dm_connector_state *dm_new_state =
5966 to_dm_connector_state(connector_state);
5970 if (property == dev->mode_config.scaling_mode_property) {
5971 enum amdgpu_rmx_type rmx_type;
5974 case DRM_MODE_SCALE_CENTER:
5975 rmx_type = RMX_CENTER;
5977 case DRM_MODE_SCALE_ASPECT:
5978 rmx_type = RMX_ASPECT;
5980 case DRM_MODE_SCALE_FULLSCREEN:
5981 rmx_type = RMX_FULL;
5983 case DRM_MODE_SCALE_NONE:
5989 if (dm_old_state->scaling == rmx_type)
5992 dm_new_state->scaling = rmx_type;
5994 } else if (property == adev->mode_info.underscan_hborder_property) {
5995 dm_new_state->underscan_hborder = val;
5997 } else if (property == adev->mode_info.underscan_vborder_property) {
5998 dm_new_state->underscan_vborder = val;
6000 } else if (property == adev->mode_info.underscan_property) {
6001 dm_new_state->underscan_enable = val;
6003 } else if (property == adev->mode_info.abm_level_property) {
6004 dm_new_state->abm_level = val;
6011 int amdgpu_dm_connector_atomic_get_property(struct drm_connector *connector,
6012 const struct drm_connector_state *state,
6013 struct drm_property *property,
6016 struct drm_device *dev = connector->dev;
6017 struct amdgpu_device *adev = drm_to_adev(dev);
6018 struct dm_connector_state *dm_state =
6019 to_dm_connector_state(state);
6022 if (property == dev->mode_config.scaling_mode_property) {
6023 switch (dm_state->scaling) {
6025 *val = DRM_MODE_SCALE_CENTER;
6028 *val = DRM_MODE_SCALE_ASPECT;
6031 *val = DRM_MODE_SCALE_FULLSCREEN;
6035 *val = DRM_MODE_SCALE_NONE;
6039 } else if (property == adev->mode_info.underscan_hborder_property) {
6040 *val = dm_state->underscan_hborder;
6042 } else if (property == adev->mode_info.underscan_vborder_property) {
6043 *val = dm_state->underscan_vborder;
6045 } else if (property == adev->mode_info.underscan_property) {
6046 *val = dm_state->underscan_enable;
6048 } else if (property == adev->mode_info.abm_level_property) {
6049 *val = dm_state->abm_level;
6056 static void amdgpu_dm_connector_unregister(struct drm_connector *connector)
6058 struct amdgpu_dm_connector *amdgpu_dm_connector = to_amdgpu_dm_connector(connector);
6060 drm_dp_aux_unregister(&amdgpu_dm_connector->dm_dp_aux.aux);
6063 static void amdgpu_dm_connector_destroy(struct drm_connector *connector)
6065 struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
6066 const struct dc_link *link = aconnector->dc_link;
6067 struct amdgpu_device *adev = drm_to_adev(connector->dev);
6068 struct amdgpu_display_manager *dm = &adev->dm;
6071 * Call only if mst_mgr was initialized before, since it's not done
6072 * for all connector types.
6074 if (aconnector->mst_mgr.dev)
6075 drm_dp_mst_topology_mgr_destroy(&aconnector->mst_mgr);
6077 #if defined(CONFIG_BACKLIGHT_CLASS_DEVICE) ||\
6078 defined(CONFIG_BACKLIGHT_CLASS_DEVICE_MODULE)
6080 if ((link->connector_signal & (SIGNAL_TYPE_EDP | SIGNAL_TYPE_LVDS)) &&
6081 link->type != dc_connection_none &&
6082 dm->backlight_dev) {
6083 backlight_device_unregister(dm->backlight_dev);
6084 dm->backlight_dev = NULL;
6088 if (aconnector->dc_em_sink)
6089 dc_sink_release(aconnector->dc_em_sink);
6090 aconnector->dc_em_sink = NULL;
6091 if (aconnector->dc_sink)
6092 dc_sink_release(aconnector->dc_sink);
6093 aconnector->dc_sink = NULL;
6095 drm_dp_cec_unregister_connector(&aconnector->dm_dp_aux.aux);
6096 drm_connector_unregister(connector);
6097 drm_connector_cleanup(connector);
6098 if (aconnector->i2c) {
6099 i2c_del_adapter(&aconnector->i2c->base);
6100 kfree(aconnector->i2c);
6102 kfree(aconnector->dm_dp_aux.aux.name);
6107 void amdgpu_dm_connector_funcs_reset(struct drm_connector *connector)
6109 struct dm_connector_state *state =
6110 to_dm_connector_state(connector->state);
6112 if (connector->state)
6113 __drm_atomic_helper_connector_destroy_state(connector->state);
6117 state = kzalloc(sizeof(*state), GFP_KERNEL);
6120 state->scaling = RMX_OFF;
6121 state->underscan_enable = false;
6122 state->underscan_hborder = 0;
6123 state->underscan_vborder = 0;
6124 state->base.max_requested_bpc = 8;
6125 state->vcpi_slots = 0;
6127 if (connector->connector_type == DRM_MODE_CONNECTOR_eDP)
6128 state->abm_level = amdgpu_dm_abm_level;
6130 __drm_atomic_helper_connector_reset(connector, &state->base);
6134 struct drm_connector_state *
6135 amdgpu_dm_connector_atomic_duplicate_state(struct drm_connector *connector)
6137 struct dm_connector_state *state =
6138 to_dm_connector_state(connector->state);
6140 struct dm_connector_state *new_state =
6141 kmemdup(state, sizeof(*state), GFP_KERNEL);
6146 __drm_atomic_helper_connector_duplicate_state(connector, &new_state->base);
6148 new_state->freesync_capable = state->freesync_capable;
6149 new_state->abm_level = state->abm_level;
6150 new_state->scaling = state->scaling;
6151 new_state->underscan_enable = state->underscan_enable;
6152 new_state->underscan_hborder = state->underscan_hborder;
6153 new_state->underscan_vborder = state->underscan_vborder;
6154 new_state->vcpi_slots = state->vcpi_slots;
6155 new_state->pbn = state->pbn;
6156 return &new_state->base;
6160 amdgpu_dm_connector_late_register(struct drm_connector *connector)
6162 struct amdgpu_dm_connector *amdgpu_dm_connector =
6163 to_amdgpu_dm_connector(connector);
6166 if ((connector->connector_type == DRM_MODE_CONNECTOR_DisplayPort) ||
6167 (connector->connector_type == DRM_MODE_CONNECTOR_eDP)) {
6168 amdgpu_dm_connector->dm_dp_aux.aux.dev = connector->kdev;
6169 r = drm_dp_aux_register(&amdgpu_dm_connector->dm_dp_aux.aux);
6174 #if defined(CONFIG_DEBUG_FS)
6175 connector_debugfs_init(amdgpu_dm_connector);
6181 static const struct drm_connector_funcs amdgpu_dm_connector_funcs = {
6182 .reset = amdgpu_dm_connector_funcs_reset,
6183 .detect = amdgpu_dm_connector_detect,
6184 .fill_modes = drm_helper_probe_single_connector_modes,
6185 .destroy = amdgpu_dm_connector_destroy,
6186 .atomic_duplicate_state = amdgpu_dm_connector_atomic_duplicate_state,
6187 .atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
6188 .atomic_set_property = amdgpu_dm_connector_atomic_set_property,
6189 .atomic_get_property = amdgpu_dm_connector_atomic_get_property,
6190 .late_register = amdgpu_dm_connector_late_register,
6191 .early_unregister = amdgpu_dm_connector_unregister
6194 static int get_modes(struct drm_connector *connector)
6196 return amdgpu_dm_connector_get_modes(connector);
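/*
 * Build an emulated (virtual) sink from the EDID that userspace forced on
 * this connector.  The EDID blob is handed to DC as a remote sink with
 * SIGNAL_TYPE_VIRTUAL; with "force on", that emulated sink also becomes the
 * connector's active dc_sink (unless a real local sink exists), so modes can
 * be exposed without a physically detected display.
 */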
6199 static void create_eml_sink(struct amdgpu_dm_connector *aconnector)
6201 struct dc_sink_init_data init_params = {
6202 .link = aconnector->dc_link,
6203 .sink_signal = SIGNAL_TYPE_VIRTUAL
6207 if (!aconnector->base.edid_blob_ptr) {
6208 DRM_ERROR("No EDID firmware found on connector: %s ,forcing to OFF!\n",
6209 aconnector->base.name);
6211 aconnector->base.force = DRM_FORCE_OFF;
6212 aconnector->base.override_edid = false;
6216 edid = (struct edid *) aconnector->base.edid_blob_ptr->data;
6218 aconnector->edid = edid;
6220 aconnector->dc_em_sink = dc_link_add_remote_sink(
6221 aconnector->dc_link,
6223 (edid->extensions + 1) * EDID_LENGTH,
6226 if (aconnector->base.force == DRM_FORCE_ON) {
6227 aconnector->dc_sink = aconnector->dc_link->local_sink ?
6228 aconnector->dc_link->local_sink :
6229 aconnector->dc_em_sink;
6230 dc_sink_retain(aconnector->dc_sink);
6234 static void handle_edid_mgmt(struct amdgpu_dm_connector *aconnector)
6236 struct dc_link *link = (struct dc_link *)aconnector->dc_link;
6239	 * In case of a headless boot with force on for a DP managed connector,
6240	 * those settings have to be != 0 to get an initial modeset
6242 if (link->connector_signal == SIGNAL_TYPE_DISPLAY_PORT) {
6243 link->verified_link_cap.lane_count = LANE_COUNT_FOUR;
6244 link->verified_link_cap.link_rate = LINK_RATE_HIGH2;
6248 aconnector->base.override_edid = true;
6249 create_eml_sink(aconnector);
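/*
 * Create a dc_stream_state for the given mode and validate it with DC.  If
 * validation fails, retry with a progressively lower colour depth (starting
 * from max_requested_bpc, minus 2 per attempt, down to 6 bpc).  If encoder
 * validation still fails (DC_FAIL_ENC_VALIDATE), retry once more with
 * YCbCr 4:2:0 output forced, which lowers the required link bandwidth.
 */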
6252 static struct dc_stream_state *
6253 create_validate_stream_for_sink(struct amdgpu_dm_connector *aconnector,
6254 const struct drm_display_mode *drm_mode,
6255 const struct dm_connector_state *dm_state,
6256 const struct dc_stream_state *old_stream)
6258 struct drm_connector *connector = &aconnector->base;
6259 struct amdgpu_device *adev = drm_to_adev(connector->dev);
6260 struct dc_stream_state *stream;
6261 const struct drm_connector_state *drm_state = dm_state ? &dm_state->base : NULL;
6262 int requested_bpc = drm_state ? drm_state->max_requested_bpc : 8;
6263 enum dc_status dc_result = DC_OK;
6266 stream = create_stream_for_sink(aconnector, drm_mode,
6267 dm_state, old_stream,
6269 if (stream == NULL) {
6270 DRM_ERROR("Failed to create stream for sink!\n");
6274 dc_result = dc_validate_stream(adev->dm.dc, stream);
6276 if (dc_result != DC_OK) {
6277 DRM_DEBUG_KMS("Mode %dx%d (clk %d) failed DC validation with error %d (%s)\n",
6282 dc_status_to_str(dc_result));
6284 dc_stream_release(stream);
6286 requested_bpc -= 2; /* lower bpc to retry validation */
6289 } while (stream == NULL && requested_bpc >= 6);
6291 if (dc_result == DC_FAIL_ENC_VALIDATE && !aconnector->force_yuv420_output) {
6292 DRM_DEBUG_KMS("Retry forcing YCbCr420 encoding\n");
6294 aconnector->force_yuv420_output = true;
6295 stream = create_validate_stream_for_sink(aconnector, drm_mode,
6296 dm_state, old_stream);
6297 aconnector->force_yuv420_output = false;
6303 enum drm_mode_status amdgpu_dm_connector_mode_valid(struct drm_connector *connector,
6304 struct drm_display_mode *mode)
6306 int result = MODE_ERROR;
6307 struct dc_sink *dc_sink;
6308 /* TODO: Unhardcode stream count */
6309 struct dc_stream_state *stream;
6310 struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
6312 if ((mode->flags & DRM_MODE_FLAG_INTERLACE) ||
6313 (mode->flags & DRM_MODE_FLAG_DBLSCAN))
6317	 * Only run this the first time mode_valid is called to initialize
6320 if (aconnector->base.force != DRM_FORCE_UNSPECIFIED &&
6321 !aconnector->dc_em_sink)
6322 handle_edid_mgmt(aconnector);
6324 dc_sink = to_amdgpu_dm_connector(connector)->dc_sink;
6326 if (dc_sink == NULL && aconnector->base.force != DRM_FORCE_ON_DIGITAL &&
6327 aconnector->base.force != DRM_FORCE_ON) {
6328 DRM_ERROR("dc_sink is NULL!\n");
6332 stream = create_validate_stream_for_sink(aconnector, mode, NULL, NULL);
6334 dc_stream_release(stream);
6339	/* TODO: error handling */
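/*
 * Pack the connector's hdr_output_metadata blob into the infopacket DC
 * expects: an HDMI Dynamic Range and Mastering (DRM) infoframe for HDMI
 * sinks, or the equivalent SDP framing for DP/eDP sinks.  The payload is
 * always the fixed 26-byte static metadata; only the header bytes differ
 * per connector type.
 */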
6343 static int fill_hdr_info_packet(const struct drm_connector_state *state,
6344 struct dc_info_packet *out)
6346 struct hdmi_drm_infoframe frame;
6347 unsigned char buf[30]; /* 26 + 4 */
6351 memset(out, 0, sizeof(*out));
6353 if (!state->hdr_output_metadata)
6356 ret = drm_hdmi_infoframe_set_hdr_metadata(&frame, state);
6360 len = hdmi_drm_infoframe_pack_only(&frame, buf, sizeof(buf));
6364 /* Static metadata is a fixed 26 bytes + 4 byte header. */
6368 /* Prepare the infopacket for DC. */
6369 switch (state->connector->connector_type) {
6370 case DRM_MODE_CONNECTOR_HDMIA:
6371 out->hb0 = 0x87; /* type */
6372 out->hb1 = 0x01; /* version */
6373 out->hb2 = 0x1A; /* length */
6374 out->sb[0] = buf[3]; /* checksum */
6378 case DRM_MODE_CONNECTOR_DisplayPort:
6379 case DRM_MODE_CONNECTOR_eDP:
6380 out->hb0 = 0x00; /* sdp id, zero */
6381 out->hb1 = 0x87; /* type */
6382 out->hb2 = 0x1D; /* payload len - 1 */
6383 out->hb3 = (0x13 << 2); /* sdp version */
6384 out->sb[0] = 0x01; /* version */
6385 out->sb[1] = 0x1A; /* length */
6393 memcpy(&out->sb[i], &buf[4], 26);
6396 print_hex_dump(KERN_DEBUG, "HDR SB:", DUMP_PREFIX_NONE, 16, 1, out->sb,
6397 sizeof(out->sb), false);
6403 is_hdr_metadata_different(const struct drm_connector_state *old_state,
6404 const struct drm_connector_state *new_state)
6406 struct drm_property_blob *old_blob = old_state->hdr_output_metadata;
6407 struct drm_property_blob *new_blob = new_state->hdr_output_metadata;
6409 if (old_blob != new_blob) {
6410 if (old_blob && new_blob &&
6411 old_blob->length == new_blob->length)
6412 return memcmp(old_blob->data, new_blob->data,
6422 amdgpu_dm_connector_atomic_check(struct drm_connector *conn,
6423 struct drm_atomic_state *state)
6425 struct drm_connector_state *new_con_state =
6426 drm_atomic_get_new_connector_state(state, conn);
6427 struct drm_connector_state *old_con_state =
6428 drm_atomic_get_old_connector_state(state, conn);
6429 struct drm_crtc *crtc = new_con_state->crtc;
6430 struct drm_crtc_state *new_crtc_state;
6433 trace_amdgpu_dm_connector_atomic_check(new_con_state);
6438 if (is_hdr_metadata_different(old_con_state, new_con_state)) {
6439 struct dc_info_packet hdr_infopacket;
6441 ret = fill_hdr_info_packet(new_con_state, &hdr_infopacket);
6445 new_crtc_state = drm_atomic_get_crtc_state(state, crtc);
6446 if (IS_ERR(new_crtc_state))
6447 return PTR_ERR(new_crtc_state);
6450 * DC considers the stream backends changed if the
6451 * static metadata changes. Forcing the modeset also
6452 * gives a simple way for userspace to switch from
6453 * 8bpc to 10bpc when setting the metadata to enter
6456 * Changing the static metadata after it's been
6457 * set is permissible, however. So only force a
6458 * modeset if we're entering or exiting HDR.
6460 new_crtc_state->mode_changed =
6461 !old_con_state->hdr_output_metadata ||
6462 !new_con_state->hdr_output_metadata;
6468 static const struct drm_connector_helper_funcs
6469 amdgpu_dm_connector_helper_funcs = {
6471 * If hotplugging a second bigger display in FB Con mode, bigger resolution
6472 * modes will be filtered by drm_mode_validate_size(), and those modes
6473	 * are missing after the user starts lightdm. So we need to renew the modes list
6474	 * in the get_modes callback, not just return the modes count
6476 .get_modes = get_modes,
6477 .mode_valid = amdgpu_dm_connector_mode_valid,
6478 .atomic_check = amdgpu_dm_connector_atomic_check,
6481 static void dm_crtc_helper_disable(struct drm_crtc *crtc)
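/*
 * Count the non-cursor planes on this CRTC that have a framebuffer bound.
 * Planes without a new state in the commit are assumed to keep their
 * previous (validated, enabled) configuration.  The result is cached in the
 * dm_crtc_state as active_planes by dm_update_crtc_active_planes() below.
 */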
6485 static int count_crtc_active_planes(struct drm_crtc_state *new_crtc_state)
6487 struct drm_atomic_state *state = new_crtc_state->state;
6488 struct drm_plane *plane;
6491 drm_for_each_plane_mask(plane, state->dev, new_crtc_state->plane_mask) {
6492 struct drm_plane_state *new_plane_state;
6494 /* Cursor planes are "fake". */
6495 if (plane->type == DRM_PLANE_TYPE_CURSOR)
6498 new_plane_state = drm_atomic_get_new_plane_state(state, plane);
6500 if (!new_plane_state) {
6502			 * The plane is enabled on the CRTC and hasn't changed
6503 * state. This means that it previously passed
6504 * validation and is therefore enabled.
6510 /* We need a framebuffer to be considered enabled. */
6511 num_active += (new_plane_state->fb != NULL);
6517 static void dm_update_crtc_active_planes(struct drm_crtc *crtc,
6518 struct drm_crtc_state *new_crtc_state)
6520 struct dm_crtc_state *dm_new_crtc_state =
6521 to_dm_crtc_state(new_crtc_state);
6523 dm_new_crtc_state->active_planes = 0;
6525 if (!dm_new_crtc_state->stream)
6528 dm_new_crtc_state->active_planes =
6529 count_crtc_active_planes(new_crtc_state);
6532 static int dm_crtc_helper_atomic_check(struct drm_crtc *crtc,
6533 struct drm_atomic_state *state)
6535 struct drm_crtc_state *crtc_state = drm_atomic_get_new_crtc_state(state,
6537 struct amdgpu_device *adev = drm_to_adev(crtc->dev);
6538 struct dc *dc = adev->dm.dc;
6539 struct dm_crtc_state *dm_crtc_state = to_dm_crtc_state(crtc_state);
6542 trace_amdgpu_dm_crtc_atomic_check(crtc_state);
6544 dm_update_crtc_active_planes(crtc, crtc_state);
6546 if (unlikely(!dm_crtc_state->stream &&
6547 modeset_required(crtc_state, NULL, dm_crtc_state->stream))) {
6553 * We require the primary plane to be enabled whenever the CRTC is, otherwise
6554 * drm_mode_cursor_universal may end up trying to enable the cursor plane while all other
6555 * planes are disabled, which is not supported by the hardware. And there is legacy
6556 * userspace which stops using the HW cursor altogether in response to the resulting EINVAL.
6558 if (crtc_state->enable &&
6559 !(crtc_state->plane_mask & drm_plane_mask(crtc->primary))) {
6560 DRM_DEBUG_ATOMIC("Can't enable a CRTC without enabling the primary plane\n");
6564 /* In some use cases, like reset, no stream is attached */
6565 if (!dm_crtc_state->stream)
6568 if (dc_validate_stream(dc, dm_crtc_state->stream) == DC_OK)
6571 DRM_DEBUG_ATOMIC("Failed DC stream validation\n");
6575 static bool dm_crtc_helper_mode_fixup(struct drm_crtc *crtc,
6576 const struct drm_display_mode *mode,
6577 struct drm_display_mode *adjusted_mode)
6582 static const struct drm_crtc_helper_funcs amdgpu_dm_crtc_helper_funcs = {
6583 .disable = dm_crtc_helper_disable,
6584 .atomic_check = dm_crtc_helper_atomic_check,
6585 .mode_fixup = dm_crtc_helper_mode_fixup,
6586 .get_scanout_position = amdgpu_crtc_get_scanout_position,
6589 static void dm_encoder_helper_disable(struct drm_encoder *encoder)
6594	static int convert_dc_color_depth_into_bpc(enum dc_color_depth display_color_depth)
6596 switch (display_color_depth) {
6597 case COLOR_DEPTH_666:
6599 case COLOR_DEPTH_888:
6601 case COLOR_DEPTH_101010:
6603 case COLOR_DEPTH_121212:
6605 case COLOR_DEPTH_141414:
6607 case COLOR_DEPTH_161616:
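/*
 * Encoder atomic check for MST connectors: derive the stream's bandwidth in
 * PBN from the adjusted mode's pixel clock and effective colour depth
 * (honouring a forced YCbCr 4:2:0 output), then ask the MST topology manager
 * for the matching number of VCPI time slots.  A negative slot count from
 * drm_dp_atomic_find_vcpi_slots() fails the check.
 */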
6615 static int dm_encoder_helper_atomic_check(struct drm_encoder *encoder,
6616 struct drm_crtc_state *crtc_state,
6617 struct drm_connector_state *conn_state)
6619 struct drm_atomic_state *state = crtc_state->state;
6620 struct drm_connector *connector = conn_state->connector;
6621 struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
6622 struct dm_connector_state *dm_new_connector_state = to_dm_connector_state(conn_state);
6623 const struct drm_display_mode *adjusted_mode = &crtc_state->adjusted_mode;
6624 struct drm_dp_mst_topology_mgr *mst_mgr;
6625 struct drm_dp_mst_port *mst_port;
6626 enum dc_color_depth color_depth;
6628 bool is_y420 = false;
6630 if (!aconnector->port || !aconnector->dc_sink)
6633 mst_port = aconnector->port;
6634 mst_mgr = &aconnector->mst_port->mst_mgr;
6636 if (!crtc_state->connectors_changed && !crtc_state->mode_changed)
6639 if (!state->duplicated) {
6640 int max_bpc = conn_state->max_requested_bpc;
6641 is_y420 = drm_mode_is_420_also(&connector->display_info, adjusted_mode) &&
6642 aconnector->force_yuv420_output;
6643 color_depth = convert_color_depth_from_display_info(connector,
6646 bpp = convert_dc_color_depth_into_bpc(color_depth) * 3;
6647 clock = adjusted_mode->clock;
6648 dm_new_connector_state->pbn = drm_dp_calc_pbn_mode(clock, bpp, false);
6650 dm_new_connector_state->vcpi_slots = drm_dp_atomic_find_vcpi_slots(state,
6653 dm_new_connector_state->pbn,
6654 dm_mst_get_pbn_divider(aconnector->dc_link));
6655 if (dm_new_connector_state->vcpi_slots < 0) {
6656 DRM_DEBUG_ATOMIC("failed finding vcpi slots: %d\n", (int)dm_new_connector_state->vcpi_slots);
6657 return dm_new_connector_state->vcpi_slots;
6662 const struct drm_encoder_helper_funcs amdgpu_dm_encoder_helper_funcs = {
6663 .disable = dm_encoder_helper_disable,
6664 .atomic_check = dm_encoder_helper_atomic_check
6667 #if defined(CONFIG_DRM_AMD_DC_DCN)
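/*
 * After DC has computed the final stream configuration (including whether
 * DSC is used), walk the MST connectors in the atomic state and recompute
 * PBN/VCPI with the DSC bits-per-pixel so the topology manager's bandwidth
 * accounting matches what will actually be transmitted.  Streams that do
 * not use DSC simply have DSC disabled on their MST port.
 */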
6668 static int dm_update_mst_vcpi_slots_for_dsc(struct drm_atomic_state *state,
6669 struct dc_state *dc_state)
6671 struct dc_stream_state *stream = NULL;
6672 struct drm_connector *connector;
6673 struct drm_connector_state *new_con_state;
6674 struct amdgpu_dm_connector *aconnector;
6675 struct dm_connector_state *dm_conn_state;
6676 int i, j, clock, bpp;
6677 int vcpi, pbn_div, pbn = 0;
6679 for_each_new_connector_in_state(state, connector, new_con_state, i) {
6681 aconnector = to_amdgpu_dm_connector(connector);
6683 if (!aconnector->port)
6686 if (!new_con_state || !new_con_state->crtc)
6689 dm_conn_state = to_dm_connector_state(new_con_state);
6691 for (j = 0; j < dc_state->stream_count; j++) {
6692 stream = dc_state->streams[j];
6696 if ((struct amdgpu_dm_connector*)stream->dm_stream_context == aconnector)
6705 if (stream->timing.flags.DSC != 1) {
6706 drm_dp_mst_atomic_enable_dsc(state,
6714 pbn_div = dm_mst_get_pbn_divider(stream->link);
6715 bpp = stream->timing.dsc_cfg.bits_per_pixel;
6716 clock = stream->timing.pix_clk_100hz / 10;
6717 pbn = drm_dp_calc_pbn_mode(clock, bpp, true);
6718 vcpi = drm_dp_mst_atomic_enable_dsc(state,
6725 dm_conn_state->pbn = pbn;
6726 dm_conn_state->vcpi_slots = vcpi;
6732 static void dm_drm_plane_reset(struct drm_plane *plane)
6734 struct dm_plane_state *amdgpu_state = NULL;
6737 plane->funcs->atomic_destroy_state(plane, plane->state);
6739 amdgpu_state = kzalloc(sizeof(*amdgpu_state), GFP_KERNEL);
6740 WARN_ON(amdgpu_state == NULL);
6743 __drm_atomic_helper_plane_reset(plane, &amdgpu_state->base);
6746 static struct drm_plane_state *
6747 dm_drm_plane_duplicate_state(struct drm_plane *plane)
6749 struct dm_plane_state *dm_plane_state, *old_dm_plane_state;
6751 old_dm_plane_state = to_dm_plane_state(plane->state);
6752 dm_plane_state = kzalloc(sizeof(*dm_plane_state), GFP_KERNEL);
6753 if (!dm_plane_state)
6756 __drm_atomic_helper_plane_duplicate_state(plane, &dm_plane_state->base);
6758 if (old_dm_plane_state->dc_state) {
6759 dm_plane_state->dc_state = old_dm_plane_state->dc_state;
6760 dc_plane_state_retain(dm_plane_state->dc_state);
6763 return &dm_plane_state->base;
6766 static void dm_drm_plane_destroy_state(struct drm_plane *plane,
6767 struct drm_plane_state *state)
6769 struct dm_plane_state *dm_plane_state = to_dm_plane_state(state);
6771 if (dm_plane_state->dc_state)
6772 dc_plane_state_release(dm_plane_state->dc_state);
6774 drm_atomic_helper_plane_destroy_state(plane, state);
6777 static const struct drm_plane_funcs dm_plane_funcs = {
6778 .update_plane = drm_atomic_helper_update_plane,
6779 .disable_plane = drm_atomic_helper_disable_plane,
6780 .destroy = drm_primary_helper_destroy,
6781 .reset = dm_drm_plane_reset,
6782 .atomic_duplicate_state = dm_drm_plane_duplicate_state,
6783 .atomic_destroy_state = dm_drm_plane_destroy_state,
6784 .format_mod_supported = dm_plane_format_mod_supported,
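/*
 * prepare_fb: pin the framebuffer's BO in a domain the display hardware can
 * scan out from (VRAM, or GTT where supported), make sure it has a GART
 * mapping, and record the resulting GPU address in the amdgpu_framebuffer.
 * For freshly created plane states the DC buffer attributes (tiling, DCC,
 * addresses) are filled in here as well, since they are not known at
 * atomic-check time.
 */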
6787 static int dm_plane_helper_prepare_fb(struct drm_plane *plane,
6788 struct drm_plane_state *new_state)
6790 struct amdgpu_framebuffer *afb;
6791 struct drm_gem_object *obj;
6792 struct amdgpu_device *adev;
6793 struct amdgpu_bo *rbo;
6794 struct dm_plane_state *dm_plane_state_new, *dm_plane_state_old;
6795 struct list_head list;
6796 struct ttm_validate_buffer tv;
6797 struct ww_acquire_ctx ticket;
6801 if (!new_state->fb) {
6802 DRM_DEBUG_KMS("No FB bound\n");
6806 afb = to_amdgpu_framebuffer(new_state->fb);
6807 obj = new_state->fb->obj[0];
6808 rbo = gem_to_amdgpu_bo(obj);
6809 adev = amdgpu_ttm_adev(rbo->tbo.bdev);
6810 INIT_LIST_HEAD(&list);
6814 list_add(&tv.head, &list);
6816 r = ttm_eu_reserve_buffers(&ticket, &list, false, NULL);
6818		dev_err(adev->dev, "failed to reserve bo (%d)\n", r);
6822 if (plane->type != DRM_PLANE_TYPE_CURSOR)
6823 domain = amdgpu_display_supported_domains(adev, rbo->flags);
6825 domain = AMDGPU_GEM_DOMAIN_VRAM;
6827 r = amdgpu_bo_pin(rbo, domain);
6828 if (unlikely(r != 0)) {
6829 if (r != -ERESTARTSYS)
6830 DRM_ERROR("Failed to pin framebuffer with error %d\n", r);
6831 ttm_eu_backoff_reservation(&ticket, &list);
6835 r = amdgpu_ttm_alloc_gart(&rbo->tbo);
6836 if (unlikely(r != 0)) {
6837 amdgpu_bo_unpin(rbo);
6838 ttm_eu_backoff_reservation(&ticket, &list);
6839 DRM_ERROR("%p bind failed\n", rbo);
6843 ttm_eu_backoff_reservation(&ticket, &list);
6845 afb->address = amdgpu_bo_gpu_offset(rbo);
6850 * We don't do surface updates on planes that have been newly created,
6851 * but we also don't have the afb->address during atomic check.
6853 * Fill in buffer attributes depending on the address here, but only on
6854 * newly created planes since they're not being used by DC yet and this
6855 * won't modify global state.
6857 dm_plane_state_old = to_dm_plane_state(plane->state);
6858 dm_plane_state_new = to_dm_plane_state(new_state);
6860 if (dm_plane_state_new->dc_state &&
6861 dm_plane_state_old->dc_state != dm_plane_state_new->dc_state) {
6862 struct dc_plane_state *plane_state =
6863 dm_plane_state_new->dc_state;
6864 bool force_disable_dcc = !plane_state->dcc.enable;
6866 fill_plane_buffer_attributes(
6867 adev, afb, plane_state->format, plane_state->rotation,
6869 &plane_state->tiling_info, &plane_state->plane_size,
6870 &plane_state->dcc, &plane_state->address,
6871 afb->tmz_surface, force_disable_dcc);
6877 static void dm_plane_helper_cleanup_fb(struct drm_plane *plane,
6878 struct drm_plane_state *old_state)
6880 struct amdgpu_bo *rbo;
6886 rbo = gem_to_amdgpu_bo(old_state->fb->obj[0]);
6887 r = amdgpu_bo_reserve(rbo, false);
6889 DRM_ERROR("failed to reserve rbo before unpin\n");
6893 amdgpu_bo_unpin(rbo);
6894 amdgpu_bo_unreserve(rbo);
6895 amdgpu_bo_unref(&rbo);
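/*
 * Validate a plane state against its CRTC: clip the viewport to the CRTC
 * mode, reject planes that end up entirely off-screen or below the minimum
 * viewport size (doubled horizontally to account for pipe split), translate
 * DC's 1000-based scaling limits into DRM 16.16 fixed point, and let
 * drm_atomic_helper_check_plane_state() do the rest.
 */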
6898 static int dm_plane_helper_check_state(struct drm_plane_state *state,
6899 struct drm_crtc_state *new_crtc_state)
6901 struct drm_framebuffer *fb = state->fb;
6902 int min_downscale, max_upscale;
6904 int max_scale = INT_MAX;
6906 /* Plane enabled? Validate viewport and get scaling factors from plane caps. */
6907 if (fb && state->crtc) {
6908 /* Validate viewport to cover the case when only the position changes */
6909 if (state->plane->type != DRM_PLANE_TYPE_CURSOR) {
6910 int viewport_width = state->crtc_w;
6911 int viewport_height = state->crtc_h;
6913 if (state->crtc_x < 0)
6914 viewport_width += state->crtc_x;
6915 else if (state->crtc_x + state->crtc_w > new_crtc_state->mode.crtc_hdisplay)
6916 viewport_width = new_crtc_state->mode.crtc_hdisplay - state->crtc_x;
6918 if (state->crtc_y < 0)
6919 viewport_height += state->crtc_y;
6920 else if (state->crtc_y + state->crtc_h > new_crtc_state->mode.crtc_vdisplay)
6921 viewport_height = new_crtc_state->mode.crtc_vdisplay - state->crtc_y;
6923 if (viewport_width < 0 || viewport_height < 0) {
6924 DRM_DEBUG_ATOMIC("Plane completely outside of screen\n");
6926 } else if (viewport_width < MIN_VIEWPORT_SIZE*2) { /* x2 for width is because of pipe-split. */
6927 DRM_DEBUG_ATOMIC("Viewport width %d smaller than %d\n", viewport_width, MIN_VIEWPORT_SIZE*2);
6929 } else if (viewport_height < MIN_VIEWPORT_SIZE) {
6930 DRM_DEBUG_ATOMIC("Viewport height %d smaller than %d\n", viewport_height, MIN_VIEWPORT_SIZE);
6936 /* Get min/max allowed scaling factors from plane caps. */
6937 get_min_max_dc_plane_scaling(state->crtc->dev, fb,
6938 &min_downscale, &max_upscale);
6940 * Convert to drm convention: 16.16 fixed point, instead of dc's
6941 * 1.0 == 1000. Also drm scaling is src/dst instead of dc's
6942 * dst/src, so min_scale = 1.0 / max_upscale, etc.
6944 min_scale = (1000 << 16) / max_upscale;
6945 max_scale = (1000 << 16) / min_downscale;
6948 return drm_atomic_helper_check_plane_state(
6949 state, new_crtc_state, min_scale, max_scale, true, true);
6952 static int dm_plane_atomic_check(struct drm_plane *plane,
6953 struct drm_atomic_state *state)
6955 struct drm_plane_state *new_plane_state = drm_atomic_get_new_plane_state(state,
6957 struct amdgpu_device *adev = drm_to_adev(plane->dev);
6958 struct dc *dc = adev->dm.dc;
6959 struct dm_plane_state *dm_plane_state;
6960 struct dc_scaling_info scaling_info;
6961 struct drm_crtc_state *new_crtc_state;
6964 trace_amdgpu_dm_plane_atomic_check(new_plane_state);
6966 dm_plane_state = to_dm_plane_state(new_plane_state);
6968 if (!dm_plane_state->dc_state)
6972 drm_atomic_get_new_crtc_state(state,
6973 new_plane_state->crtc);
6974 if (!new_crtc_state)
6977 ret = dm_plane_helper_check_state(new_plane_state, new_crtc_state);
6981 ret = fill_dc_scaling_info(new_plane_state, &scaling_info);
6985 if (dc_validate_plane(dc, dm_plane_state->dc_state) == DC_OK)
6991 static int dm_plane_atomic_async_check(struct drm_plane *plane,
6992 struct drm_atomic_state *state)
6994 /* Only support async updates on cursor planes. */
6995 if (plane->type != DRM_PLANE_TYPE_CURSOR)
7001 static void dm_plane_atomic_async_update(struct drm_plane *plane,
7002 struct drm_atomic_state *state)
7004 struct drm_plane_state *new_state = drm_atomic_get_new_plane_state(state,
7006 struct drm_plane_state *old_state =
7007 drm_atomic_get_old_plane_state(state, plane);
7009 trace_amdgpu_dm_atomic_update_cursor(new_state);
7011 swap(plane->state->fb, new_state->fb);
7013 plane->state->src_x = new_state->src_x;
7014 plane->state->src_y = new_state->src_y;
7015 plane->state->src_w = new_state->src_w;
7016 plane->state->src_h = new_state->src_h;
7017 plane->state->crtc_x = new_state->crtc_x;
7018 plane->state->crtc_y = new_state->crtc_y;
7019 plane->state->crtc_w = new_state->crtc_w;
7020 plane->state->crtc_h = new_state->crtc_h;
7022 handle_cursor_update(plane, old_state);
7025 static const struct drm_plane_helper_funcs dm_plane_helper_funcs = {
7026 .prepare_fb = dm_plane_helper_prepare_fb,
7027 .cleanup_fb = dm_plane_helper_cleanup_fb,
7028 .atomic_check = dm_plane_atomic_check,
7029 .atomic_async_check = dm_plane_atomic_async_check,
7030 .atomic_async_update = dm_plane_atomic_async_update
7034 * TODO: these are currently initialized to rgb formats only.
7035 * For future use cases we should either initialize them dynamically based on
7036	 * plane capabilities, or initialize this array to all formats, so the internal drm
7037	 * check will succeed, and let DC implement the proper check
7039 static const uint32_t rgb_formats[] = {
7040 DRM_FORMAT_XRGB8888,
7041 DRM_FORMAT_ARGB8888,
7042 DRM_FORMAT_RGBA8888,
7043 DRM_FORMAT_XRGB2101010,
7044 DRM_FORMAT_XBGR2101010,
7045 DRM_FORMAT_ARGB2101010,
7046 DRM_FORMAT_ABGR2101010,
7047 DRM_FORMAT_XBGR8888,
7048 DRM_FORMAT_ABGR8888,
7052 static const uint32_t overlay_formats[] = {
7053 DRM_FORMAT_XRGB8888,
7054 DRM_FORMAT_ARGB8888,
7055 DRM_FORMAT_RGBA8888,
7056 DRM_FORMAT_XBGR8888,
7057 DRM_FORMAT_ABGR8888,
7061 static const u32 cursor_formats[] = {
7065 static int get_plane_formats(const struct drm_plane *plane,
7066 const struct dc_plane_cap *plane_cap,
7067 uint32_t *formats, int max_formats)
7069 int i, num_formats = 0;
7072 * TODO: Query support for each group of formats directly from
7073 * DC plane caps. This will require adding more formats to the
7077 switch (plane->type) {
7078 case DRM_PLANE_TYPE_PRIMARY:
7079 for (i = 0; i < ARRAY_SIZE(rgb_formats); ++i) {
7080 if (num_formats >= max_formats)
7083 formats[num_formats++] = rgb_formats[i];
7086 if (plane_cap && plane_cap->pixel_format_support.nv12)
7087 formats[num_formats++] = DRM_FORMAT_NV12;
7088 if (plane_cap && plane_cap->pixel_format_support.p010)
7089 formats[num_formats++] = DRM_FORMAT_P010;
7090 if (plane_cap && plane_cap->pixel_format_support.fp16) {
7091 formats[num_formats++] = DRM_FORMAT_XRGB16161616F;
7092 formats[num_formats++] = DRM_FORMAT_ARGB16161616F;
7093 formats[num_formats++] = DRM_FORMAT_XBGR16161616F;
7094 formats[num_formats++] = DRM_FORMAT_ABGR16161616F;
7098 case DRM_PLANE_TYPE_OVERLAY:
7099 for (i = 0; i < ARRAY_SIZE(overlay_formats); ++i) {
7100 if (num_formats >= max_formats)
7103 formats[num_formats++] = overlay_formats[i];
7107 case DRM_PLANE_TYPE_CURSOR:
7108 for (i = 0; i < ARRAY_SIZE(cursor_formats); ++i) {
7109 if (num_formats >= max_formats)
7112 formats[num_formats++] = cursor_formats[i];
7120 static int amdgpu_dm_plane_init(struct amdgpu_display_manager *dm,
7121 struct drm_plane *plane,
7122 unsigned long possible_crtcs,
7123 const struct dc_plane_cap *plane_cap)
7125 uint32_t formats[32];
7128 unsigned int supported_rotations;
7129 uint64_t *modifiers = NULL;
7131 num_formats = get_plane_formats(plane, plane_cap, formats,
7132 ARRAY_SIZE(formats));
7134 res = get_plane_modifiers(dm->adev, plane->type, &modifiers);
7138 res = drm_universal_plane_init(adev_to_drm(dm->adev), plane, possible_crtcs,
7139 &dm_plane_funcs, formats, num_formats,
7140 modifiers, plane->type, NULL);
7145 if (plane->type == DRM_PLANE_TYPE_OVERLAY &&
7146 plane_cap && plane_cap->per_pixel_alpha) {
7147 unsigned int blend_caps = BIT(DRM_MODE_BLEND_PIXEL_NONE) |
7148 BIT(DRM_MODE_BLEND_PREMULTI);
7150 drm_plane_create_alpha_property(plane);
7151 drm_plane_create_blend_mode_property(plane, blend_caps);
7154 if (plane->type == DRM_PLANE_TYPE_PRIMARY &&
7156 (plane_cap->pixel_format_support.nv12 ||
7157 plane_cap->pixel_format_support.p010)) {
7158 /* This only affects YUV formats. */
7159 drm_plane_create_color_properties(
7161 BIT(DRM_COLOR_YCBCR_BT601) |
7162 BIT(DRM_COLOR_YCBCR_BT709) |
7163 BIT(DRM_COLOR_YCBCR_BT2020),
7164 BIT(DRM_COLOR_YCBCR_LIMITED_RANGE) |
7165 BIT(DRM_COLOR_YCBCR_FULL_RANGE),
7166 DRM_COLOR_YCBCR_BT709, DRM_COLOR_YCBCR_LIMITED_RANGE);
7169 supported_rotations =
7170 DRM_MODE_ROTATE_0 | DRM_MODE_ROTATE_90 |
7171 DRM_MODE_ROTATE_180 | DRM_MODE_ROTATE_270;
7173 if (dm->adev->asic_type >= CHIP_BONAIRE &&
7174 plane->type != DRM_PLANE_TYPE_CURSOR)
7175 drm_plane_create_rotation_property(plane, DRM_MODE_ROTATE_0,
7176 supported_rotations);
7178 drm_plane_helper_add(plane, &dm_plane_helper_funcs);
7180 /* Create (reset) the plane state */
7181 if (plane->funcs->reset)
7182 plane->funcs->reset(plane);
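/*
 * CRTC creation: allocate a dedicated cursor plane, register the CRTC with
 * the given primary and that cursor plane, attach the DM CRTC helper funcs,
 * and advertise colour management with DC's LUT sizes.  Cursor limits come
 * from dc->caps.max_cursor_size.
 */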
7187 static int amdgpu_dm_crtc_init(struct amdgpu_display_manager *dm,
7188 struct drm_plane *plane,
7189 uint32_t crtc_index)
7191 struct amdgpu_crtc *acrtc = NULL;
7192 struct drm_plane *cursor_plane;
7196 cursor_plane = kzalloc(sizeof(*cursor_plane), GFP_KERNEL);
7200 cursor_plane->type = DRM_PLANE_TYPE_CURSOR;
7201 res = amdgpu_dm_plane_init(dm, cursor_plane, 0, NULL);
7203 acrtc = kzalloc(sizeof(struct amdgpu_crtc), GFP_KERNEL);
7207 res = drm_crtc_init_with_planes(
7212 &amdgpu_dm_crtc_funcs, NULL);
7217 drm_crtc_helper_add(&acrtc->base, &amdgpu_dm_crtc_helper_funcs);
7219 /* Create (reset) the plane state */
7220 if (acrtc->base.funcs->reset)
7221 acrtc->base.funcs->reset(&acrtc->base);
7223 acrtc->max_cursor_width = dm->adev->dm.dc->caps.max_cursor_size;
7224 acrtc->max_cursor_height = dm->adev->dm.dc->caps.max_cursor_size;
7226 acrtc->crtc_id = crtc_index;
7227 acrtc->base.enabled = false;
7228 acrtc->otg_inst = -1;
7230 dm->adev->mode_info.crtcs[crtc_index] = acrtc;
7231 drm_crtc_enable_color_mgmt(&acrtc->base, MAX_COLOR_LUT_ENTRIES,
7232 true, MAX_COLOR_LUT_ENTRIES);
7233 drm_mode_crtc_set_gamma_size(&acrtc->base, MAX_COLOR_LEGACY_LUT_ENTRIES);
7239 kfree(cursor_plane);
7244 static int to_drm_connector_type(enum signal_type st)
7247 case SIGNAL_TYPE_HDMI_TYPE_A:
7248 return DRM_MODE_CONNECTOR_HDMIA;
7249 case SIGNAL_TYPE_EDP:
7250 return DRM_MODE_CONNECTOR_eDP;
7251 case SIGNAL_TYPE_LVDS:
7252 return DRM_MODE_CONNECTOR_LVDS;
7253 case SIGNAL_TYPE_RGB:
7254 return DRM_MODE_CONNECTOR_VGA;
7255 case SIGNAL_TYPE_DISPLAY_PORT:
7256 case SIGNAL_TYPE_DISPLAY_PORT_MST:
7257 return DRM_MODE_CONNECTOR_DisplayPort;
7258 case SIGNAL_TYPE_DVI_DUAL_LINK:
7259 case SIGNAL_TYPE_DVI_SINGLE_LINK:
7260 return DRM_MODE_CONNECTOR_DVID;
7261 case SIGNAL_TYPE_VIRTUAL:
7262 return DRM_MODE_CONNECTOR_VIRTUAL;
7265 return DRM_MODE_CONNECTOR_Unknown;
7269 static struct drm_encoder *amdgpu_dm_connector_to_encoder(struct drm_connector *connector)
7271 struct drm_encoder *encoder;
7273 /* There is only one encoder per connector */
7274 drm_connector_for_each_possible_encoder(connector, encoder)
7280 static void amdgpu_dm_get_native_mode(struct drm_connector *connector)
7282 struct drm_encoder *encoder;
7283 struct amdgpu_encoder *amdgpu_encoder;
7285 encoder = amdgpu_dm_connector_to_encoder(connector);
7287 if (encoder == NULL)
7290 amdgpu_encoder = to_amdgpu_encoder(encoder);
7292 amdgpu_encoder->native_mode.clock = 0;
7294 if (!list_empty(&connector->probed_modes)) {
7295 struct drm_display_mode *preferred_mode = NULL;
7297 list_for_each_entry(preferred_mode,
7298 &connector->probed_modes,
7300 if (preferred_mode->type & DRM_MODE_TYPE_PREFERRED)
7301 amdgpu_encoder->native_mode = *preferred_mode;
7309 static struct drm_display_mode *
7310 amdgpu_dm_create_common_mode(struct drm_encoder *encoder,
7312 int hdisplay, int vdisplay)
7314 struct drm_device *dev = encoder->dev;
7315 struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
7316 struct drm_display_mode *mode = NULL;
7317 struct drm_display_mode *native_mode = &amdgpu_encoder->native_mode;
7319 mode = drm_mode_duplicate(dev, native_mode);
7324 mode->hdisplay = hdisplay;
7325 mode->vdisplay = vdisplay;
7326 mode->type &= ~DRM_MODE_TYPE_PREFERRED;
7327 strscpy(mode->name, name, DRM_DISPLAY_MODE_LEN);
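/*
 * Add a fixed list of common resolutions (640x480 .. 1920x1200), derived
 * from the encoder's native mode, so users can pick a scaled desktop
 * resolution.  Modes larger than the native mode, equal to it, or already
 * present in the probed list are skipped.
 */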
7333 static void amdgpu_dm_connector_add_common_modes(struct drm_encoder *encoder,
7334 struct drm_connector *connector)
7336 struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
7337 struct drm_display_mode *mode = NULL;
7338 struct drm_display_mode *native_mode = &amdgpu_encoder->native_mode;
7339 struct amdgpu_dm_connector *amdgpu_dm_connector =
7340 to_amdgpu_dm_connector(connector);
7344 char name[DRM_DISPLAY_MODE_LEN];
7347 } common_modes[] = {
7348 { "640x480", 640, 480},
7349 { "800x600", 800, 600},
7350 { "1024x768", 1024, 768},
7351 { "1280x720", 1280, 720},
7352 { "1280x800", 1280, 800},
7353 {"1280x1024", 1280, 1024},
7354 { "1440x900", 1440, 900},
7355 {"1680x1050", 1680, 1050},
7356 {"1600x1200", 1600, 1200},
7357 {"1920x1080", 1920, 1080},
7358 {"1920x1200", 1920, 1200}
7361 n = ARRAY_SIZE(common_modes);
7363 for (i = 0; i < n; i++) {
7364 struct drm_display_mode *curmode = NULL;
7365 bool mode_existed = false;
7367 if (common_modes[i].w > native_mode->hdisplay ||
7368 common_modes[i].h > native_mode->vdisplay ||
7369 (common_modes[i].w == native_mode->hdisplay &&
7370 common_modes[i].h == native_mode->vdisplay))
7373 list_for_each_entry(curmode, &connector->probed_modes, head) {
7374 if (common_modes[i].w == curmode->hdisplay &&
7375 common_modes[i].h == curmode->vdisplay) {
7376 mode_existed = true;
7384 mode = amdgpu_dm_create_common_mode(encoder,
7385 common_modes[i].name, common_modes[i].w,
7387 drm_mode_probed_add(connector, mode);
7388 amdgpu_dm_connector->num_modes++;
7392 static void amdgpu_dm_connector_ddc_get_modes(struct drm_connector *connector,
7395 struct amdgpu_dm_connector *amdgpu_dm_connector =
7396 to_amdgpu_dm_connector(connector);
7399 /* empty probed_modes */
7400 INIT_LIST_HEAD(&connector->probed_modes);
7401 amdgpu_dm_connector->num_modes =
7402 drm_add_edid_modes(connector, edid);
7404		/* Sort the probed modes before calling
7405		 * amdgpu_dm_get_native_mode(), since the EDID can have
7406		 * more than one preferred mode. Modes that appear
7407		 * later in the probed mode list could be of higher
7408		 * (and preferred) resolution. For example, a 3840x2160
7409		 * resolution in the base EDID preferred timing and a 4096x2160
7410		 * preferred resolution in a DID extension block later.
7412 drm_mode_sort(&connector->probed_modes);
7413 amdgpu_dm_get_native_mode(connector);
7415 /* Freesync capabilities are reset by calling
7416		 * drm_add_edid_modes() and need to be restored here.
7419 amdgpu_dm_update_freesync_caps(connector, edid);
7421 amdgpu_dm_connector->num_modes = 0;
7425 static bool is_duplicate_mode(struct amdgpu_dm_connector *aconnector,
7426 struct drm_display_mode *mode)
7428 struct drm_display_mode *m;
7430 list_for_each_entry (m, &aconnector->base.probed_modes, head) {
7431 if (drm_mode_equal(m, mode))
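/*
 * Synthesize "freesync video" modes: starting from the highest-refresh
 * probed mode at the preferred mode's resolution, stretch the vertical
 * total to approximate a set of common fixed rates (23.976 .. 96 Hz) that
 * fall inside the monitor's VRR range, and add each valid variant as a
 * driver mode.
 */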
7438 static uint add_fs_modes(struct amdgpu_dm_connector *aconnector)
7440 const struct drm_display_mode *m;
7441 struct drm_display_mode *new_mode;
7443 uint32_t new_modes_count = 0;
7445 /* Standard FPS values
7454 * 60 - Commonly used
7455 * 48,72,96 - Multiples of 24
7457 const uint32_t common_rates[] = { 23976, 24000, 25000, 29970, 30000,
7458 48000, 50000, 60000, 72000, 96000 };
7461 * Find mode with highest refresh rate with the same resolution
7462 * as the preferred mode. Some monitors report a preferred mode
7463 * with lower resolution than the highest refresh rate supported.
7466 m = get_highest_refresh_rate_mode(aconnector, true);
7470 for (i = 0; i < ARRAY_SIZE(common_rates); i++) {
7471 uint64_t target_vtotal, target_vtotal_diff;
7474 if (drm_mode_vrefresh(m) * 1000 < common_rates[i])
7477 if (common_rates[i] < aconnector->min_vfreq * 1000 ||
7478 common_rates[i] > aconnector->max_vfreq * 1000)
7481 num = (unsigned long long)m->clock * 1000 * 1000;
7482 den = common_rates[i] * (unsigned long long)m->htotal;
7483 target_vtotal = div_u64(num, den);
7484 target_vtotal_diff = target_vtotal - m->vtotal;
7486 /* Check for illegal modes */
7487 if (m->vsync_start + target_vtotal_diff < m->vdisplay ||
7488 m->vsync_end + target_vtotal_diff < m->vsync_start ||
7489 m->vtotal + target_vtotal_diff < m->vsync_end)
7492 new_mode = drm_mode_duplicate(aconnector->base.dev, m);
7496 new_mode->vtotal += (u16)target_vtotal_diff;
7497 new_mode->vsync_start += (u16)target_vtotal_diff;
7498 new_mode->vsync_end += (u16)target_vtotal_diff;
7499 new_mode->type &= ~DRM_MODE_TYPE_PREFERRED;
7500 new_mode->type |= DRM_MODE_TYPE_DRIVER;
7502 if (!is_duplicate_mode(aconnector, new_mode)) {
7503 drm_mode_probed_add(&aconnector->base, new_mode);
7504 new_modes_count += 1;
7506 drm_mode_destroy(aconnector->base.dev, new_mode);
7509 return new_modes_count;
7512 static void amdgpu_dm_connector_add_freesync_modes(struct drm_connector *connector,
7515 struct amdgpu_dm_connector *amdgpu_dm_connector =
7516 to_amdgpu_dm_connector(connector);
7518 if (!(amdgpu_freesync_vid_mode && edid))
7521 if (amdgpu_dm_connector->max_vfreq - amdgpu_dm_connector->min_vfreq > 10)
7522 amdgpu_dm_connector->num_modes +=
7523 add_fs_modes(amdgpu_dm_connector);
7526 static int amdgpu_dm_connector_get_modes(struct drm_connector *connector)
7528 struct amdgpu_dm_connector *amdgpu_dm_connector =
7529 to_amdgpu_dm_connector(connector);
7530 struct drm_encoder *encoder;
7531 struct edid *edid = amdgpu_dm_connector->edid;
7533 encoder = amdgpu_dm_connector_to_encoder(connector);
7535 if (!drm_edid_is_valid(edid)) {
7536 amdgpu_dm_connector->num_modes =
7537 drm_add_modes_noedid(connector, 640, 480);
7539 amdgpu_dm_connector_ddc_get_modes(connector, edid);
7540 amdgpu_dm_connector_add_common_modes(encoder, connector);
7541 amdgpu_dm_connector_add_freesync_modes(connector, edid);
7543 amdgpu_dm_fbc_init(connector);
7545 return amdgpu_dm_connector->num_modes;
7548 void amdgpu_dm_connector_init_helper(struct amdgpu_display_manager *dm,
7549 struct amdgpu_dm_connector *aconnector,
7551 struct dc_link *link,
7554 struct amdgpu_device *adev = drm_to_adev(dm->ddev);
7557 * Some of the properties below require access to state, like bpc.
7558 * Allocate some default initial connector state with our reset helper.
7560 if (aconnector->base.funcs->reset)
7561 aconnector->base.funcs->reset(&aconnector->base);
7563 aconnector->connector_id = link_index;
7564 aconnector->dc_link = link;
7565 aconnector->base.interlace_allowed = false;
7566 aconnector->base.doublescan_allowed = false;
7567 aconnector->base.stereo_allowed = false;
7568 aconnector->base.dpms = DRM_MODE_DPMS_OFF;
7569 aconnector->hpd.hpd = AMDGPU_HPD_NONE; /* not used */
7570 aconnector->audio_inst = -1;
7571 mutex_init(&aconnector->hpd_lock);
7574	 * Configure HPD hot plug support: connector->polled's default value is 0,
7575	 * which means HPD hot plug is not supported.
7577 switch (connector_type) {
7578 case DRM_MODE_CONNECTOR_HDMIA:
7579 aconnector->base.polled = DRM_CONNECTOR_POLL_HPD;
7580 aconnector->base.ycbcr_420_allowed =
7581 link->link_enc->features.hdmi_ycbcr420_supported ? true : false;
7583 case DRM_MODE_CONNECTOR_DisplayPort:
7584 aconnector->base.polled = DRM_CONNECTOR_POLL_HPD;
7585 aconnector->base.ycbcr_420_allowed =
7586 link->link_enc->features.dp_ycbcr420_supported ? true : false;
7588 case DRM_MODE_CONNECTOR_DVID:
7589 aconnector->base.polled = DRM_CONNECTOR_POLL_HPD;
7595 drm_object_attach_property(&aconnector->base.base,
7596 dm->ddev->mode_config.scaling_mode_property,
7597 DRM_MODE_SCALE_NONE);
7599 drm_object_attach_property(&aconnector->base.base,
7600 adev->mode_info.underscan_property,
7602 drm_object_attach_property(&aconnector->base.base,
7603 adev->mode_info.underscan_hborder_property,
7605 drm_object_attach_property(&aconnector->base.base,
7606 adev->mode_info.underscan_vborder_property,
7609 if (!aconnector->mst_port)
7610 drm_connector_attach_max_bpc_property(&aconnector->base, 8, 16);
7612 /* This defaults to the max in the range, but we want 8bpc for non-edp. */
7613 aconnector->base.state->max_bpc = (connector_type == DRM_MODE_CONNECTOR_eDP) ? 16 : 8;
7614 aconnector->base.state->max_requested_bpc = aconnector->base.state->max_bpc;
7616 if (connector_type == DRM_MODE_CONNECTOR_eDP &&
7617 (dc_is_dmcu_initialized(adev->dm.dc) || adev->dm.dc->ctx->dmub_srv)) {
7618 drm_object_attach_property(&aconnector->base.base,
7619 adev->mode_info.abm_level_property, 0);
7622 if (connector_type == DRM_MODE_CONNECTOR_HDMIA ||
7623 connector_type == DRM_MODE_CONNECTOR_DisplayPort ||
7624 connector_type == DRM_MODE_CONNECTOR_eDP) {
7625 drm_object_attach_property(
7626 &aconnector->base.base,
7627 dm->ddev->mode_config.hdr_output_metadata_property, 0);
7629 if (!aconnector->mst_port)
7630 drm_connector_attach_vrr_capable_property(&aconnector->base);
7632 #ifdef CONFIG_DRM_AMD_DC_HDCP
7633 if (adev->dm.hdcp_workqueue)
7634 drm_connector_attach_content_protection_property(&aconnector->base, true);
7639 static int amdgpu_dm_i2c_xfer(struct i2c_adapter *i2c_adap,
7640 struct i2c_msg *msgs, int num)
7642 struct amdgpu_i2c_adapter *i2c = i2c_get_adapdata(i2c_adap);
7643 struct ddc_service *ddc_service = i2c->ddc_service;
7644 struct i2c_command cmd;
7648 cmd.payloads = kcalloc(num, sizeof(struct i2c_payload), GFP_KERNEL);
7653 cmd.number_of_payloads = num;
7654 cmd.engine = I2C_COMMAND_ENGINE_DEFAULT;
7657 for (i = 0; i < num; i++) {
7658 cmd.payloads[i].write = !(msgs[i].flags & I2C_M_RD);
7659 cmd.payloads[i].address = msgs[i].addr;
7660 cmd.payloads[i].length = msgs[i].len;
7661 cmd.payloads[i].data = msgs[i].buf;
7665 ddc_service->ctx->dc,
7666 ddc_service->ddc_pin->hw_info.ddc_channel,
7670 kfree(cmd.payloads);
7674 static u32 amdgpu_dm_i2c_func(struct i2c_adapter *adap)
7676 return I2C_FUNC_I2C | I2C_FUNC_SMBUS_EMUL;
7679 static const struct i2c_algorithm amdgpu_dm_i2c_algo = {
7680 .master_xfer = amdgpu_dm_i2c_xfer,
7681 .functionality = amdgpu_dm_i2c_func,
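/*
 * Allocate an i2c adapter that forwards transfers to DC's DDC service for
 * this link.  amdgpu_dm_i2c_xfer() above converts the i2c_msg array into a
 * DC i2c_command executed on the link's DDC channel.
 */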
7684 static struct amdgpu_i2c_adapter *
7685 create_i2c(struct ddc_service *ddc_service,
7689 struct amdgpu_device *adev = ddc_service->ctx->driver_context;
7690 struct amdgpu_i2c_adapter *i2c;
7692 i2c = kzalloc(sizeof(struct amdgpu_i2c_adapter), GFP_KERNEL);
7695 i2c->base.owner = THIS_MODULE;
7696 i2c->base.class = I2C_CLASS_DDC;
7697 i2c->base.dev.parent = &adev->pdev->dev;
7698 i2c->base.algo = &amdgpu_dm_i2c_algo;
7699 snprintf(i2c->base.name, sizeof(i2c->base.name), "AMDGPU DM i2c hw bus %d", link_index);
7700 i2c_set_adapdata(&i2c->base, i2c);
7701 i2c->ddc_service = ddc_service;
7702 i2c->ddc_service->ddc_pin->hw_info.ddc_channel = link_index;
7709 * Note: this function assumes that dc_link_detect() was called for the
7710 * dc_link which will be represented by this aconnector.
7712 static int amdgpu_dm_connector_init(struct amdgpu_display_manager *dm,
7713 struct amdgpu_dm_connector *aconnector,
7714 uint32_t link_index,
7715 struct amdgpu_encoder *aencoder)
7719 struct dc *dc = dm->dc;
7720 struct dc_link *link = dc_get_link_at_index(dc, link_index);
7721 struct amdgpu_i2c_adapter *i2c;
7723 link->priv = aconnector;
7725 DRM_DEBUG_DRIVER("%s()\n", __func__);
7727 i2c = create_i2c(link->ddc, link->link_index, &res);
7729 DRM_ERROR("Failed to create i2c adapter data\n");
7733 aconnector->i2c = i2c;
7734 res = i2c_add_adapter(&i2c->base);
7737 DRM_ERROR("Failed to register hw i2c %d\n", link->link_index);
7741 connector_type = to_drm_connector_type(link->connector_signal);
7743 res = drm_connector_init_with_ddc(
7746 &amdgpu_dm_connector_funcs,
7751 DRM_ERROR("connector_init failed\n");
7752 aconnector->connector_id = -1;
7756 drm_connector_helper_add(
7758 &amdgpu_dm_connector_helper_funcs);
7760 amdgpu_dm_connector_init_helper(
7767 drm_connector_attach_encoder(
7768 &aconnector->base, &aencoder->base);
7770 if (connector_type == DRM_MODE_CONNECTOR_DisplayPort
7771 || connector_type == DRM_MODE_CONNECTOR_eDP)
7772 amdgpu_dm_initialize_dp_connector(dm, aconnector, link->link_index);
7777 aconnector->i2c = NULL;
7782 int amdgpu_dm_get_encoder_crtc_mask(struct amdgpu_device *adev)
7784 switch (adev->mode_info.num_crtc) {
7801 static int amdgpu_dm_encoder_init(struct drm_device *dev,
7802 struct amdgpu_encoder *aencoder,
7803 uint32_t link_index)
7805 struct amdgpu_device *adev = drm_to_adev(dev);
7807 int res = drm_encoder_init(dev,
7809 &amdgpu_dm_encoder_funcs,
7810 DRM_MODE_ENCODER_TMDS,
7813 aencoder->base.possible_crtcs = amdgpu_dm_get_encoder_crtc_mask(adev);
7816 aencoder->encoder_id = link_index;
7818 aencoder->encoder_id = -1;
7820 drm_encoder_helper_add(&aencoder->base, &amdgpu_dm_encoder_helper_funcs);
7825 static void manage_dm_interrupts(struct amdgpu_device *adev,
7826 struct amdgpu_crtc *acrtc,
7830 * We have no guarantee that the frontend index maps to the same
7831 * backend index - some even map to more than one.
7833 * TODO: Use a different interrupt or check DC itself for the mapping.
7836 amdgpu_display_crtc_idx_to_irq_type(
7841 drm_crtc_vblank_on(&acrtc->base);
7844 &adev->pageflip_irq,
7846 #if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
7853 #if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
7861 &adev->pageflip_irq,
7863 drm_crtc_vblank_off(&acrtc->base);
7867 static void dm_update_pflip_irq_state(struct amdgpu_device *adev,
7868 struct amdgpu_crtc *acrtc)
7871 amdgpu_display_crtc_idx_to_irq_type(adev, acrtc->crtc_id);
7874	 * This reads the current state for the IRQ and forcibly reapplies
7875 * the setting to hardware.
7877 amdgpu_irq_update(adev, &adev->pageflip_irq, irq_type);
7881 is_scaling_state_different(const struct dm_connector_state *dm_state,
7882 const struct dm_connector_state *old_dm_state)
7884 if (dm_state->scaling != old_dm_state->scaling)
7886 if (!dm_state->underscan_enable && old_dm_state->underscan_enable) {
7887 if (old_dm_state->underscan_hborder != 0 && old_dm_state->underscan_vborder != 0)
7889 } else if (dm_state->underscan_enable && !old_dm_state->underscan_enable) {
7890 if (dm_state->underscan_hborder != 0 && dm_state->underscan_vborder != 0)
7892 } else if (dm_state->underscan_hborder != old_dm_state->underscan_hborder ||
7893 dm_state->underscan_vborder != old_dm_state->underscan_vborder)
7898 #ifdef CONFIG_DRM_AMD_DC_HDCP
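/*
 * Decide whether the HDCP state machine needs to run for this connector by
 * normalizing the DRM content-protection transitions: ENABLED is a
 * driver-owned state, so several old/new combinations collapse back to
 * DESIRED before the real comparison is made (see the per-case comments
 * below).
 */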
7899 static bool is_content_protection_different(struct drm_connector_state *state,
7900 const struct drm_connector_state *old_state,
7901 const struct drm_connector *connector, struct hdcp_workqueue *hdcp_w)
7903 struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
7904 struct dm_connector_state *dm_con_state = to_dm_connector_state(connector->state);
7906 /* Handle: Type0/1 change */
7907 if (old_state->hdcp_content_type != state->hdcp_content_type &&
7908 state->content_protection != DRM_MODE_CONTENT_PROTECTION_UNDESIRED) {
7909 state->content_protection = DRM_MODE_CONTENT_PROTECTION_DESIRED;
7913	/* CP is being re-enabled, ignore this
7915 * Handles: ENABLED -> DESIRED
7917 if (old_state->content_protection == DRM_MODE_CONTENT_PROTECTION_ENABLED &&
7918 state->content_protection == DRM_MODE_CONTENT_PROTECTION_DESIRED) {
7919 state->content_protection = DRM_MODE_CONTENT_PROTECTION_ENABLED;
7923 /* S3 resume case, since old state will always be 0 (UNDESIRED) and the restored state will be ENABLED
7925 * Handles: UNDESIRED -> ENABLED
7927 if (old_state->content_protection == DRM_MODE_CONTENT_PROTECTION_UNDESIRED &&
7928 state->content_protection == DRM_MODE_CONTENT_PROTECTION_ENABLED)
7929 state->content_protection = DRM_MODE_CONTENT_PROTECTION_DESIRED;
7931	/* Check if something is connected or enabled; otherwise we would start HDCP while nothing is
7932	 * connected/enabled: hot-plug, headless S3, DPMS.
7934 * Handles: DESIRED -> DESIRED (Special case)
7936 if (dm_con_state->update_hdcp && state->content_protection == DRM_MODE_CONTENT_PROTECTION_DESIRED &&
7937 connector->dpms == DRM_MODE_DPMS_ON && aconnector->dc_sink != NULL) {
7938 dm_con_state->update_hdcp = false;
7943 * Handles: UNDESIRED -> UNDESIRED
7944 * DESIRED -> DESIRED
7945 * ENABLED -> ENABLED
7947 if (old_state->content_protection == state->content_protection)
7951 * Handles: UNDESIRED -> DESIRED
7952 * DESIRED -> UNDESIRED
7953 * ENABLED -> UNDESIRED
7955 if (state->content_protection != DRM_MODE_CONTENT_PROTECTION_ENABLED)
7959 * Handles: DESIRED -> ENABLED
7965 static void remove_stream(struct amdgpu_device *adev,
7966 struct amdgpu_crtc *acrtc,
7967 struct dc_stream_state *stream)
7969 /* this is the update mode case */
7971 acrtc->otg_inst = -1;
7972 acrtc->enabled = false;
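/*
 * Translate the cursor plane's CRTC coordinates into a dc_cursor_position.
 * Oversized cursors are rejected, and positions that hang off the top/left
 * edge are expressed as a clamped position plus a hotspot offset rather
 * than a negative coordinate.
 */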
7975 static int get_cursor_position(struct drm_plane *plane, struct drm_crtc *crtc,
7976 struct dc_cursor_position *position)
7978 struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
7980 int xorigin = 0, yorigin = 0;
7982 if (!crtc || !plane->state->fb)
7985 if ((plane->state->crtc_w > amdgpu_crtc->max_cursor_width) ||
7986 (plane->state->crtc_h > amdgpu_crtc->max_cursor_height)) {
7987 DRM_ERROR("%s: bad cursor width or height %d x %d\n",
7989 plane->state->crtc_w,
7990 plane->state->crtc_h);
7994 x = plane->state->crtc_x;
7995 y = plane->state->crtc_y;
7997 if (x <= -amdgpu_crtc->max_cursor_width ||
7998 y <= -amdgpu_crtc->max_cursor_height)
8002 xorigin = min(-x, amdgpu_crtc->max_cursor_width - 1);
8006 yorigin = min(-y, amdgpu_crtc->max_cursor_height - 1);
8009 position->enable = true;
8010 position->translate_by_source = true;
8013 position->x_hotspot = xorigin;
8014 position->y_hotspot = yorigin;
8019 static void handle_cursor_update(struct drm_plane *plane,
8020 struct drm_plane_state *old_plane_state)
8022 struct amdgpu_device *adev = drm_to_adev(plane->dev);
8023 struct amdgpu_framebuffer *afb = to_amdgpu_framebuffer(plane->state->fb);
8024 struct drm_crtc *crtc = afb ? plane->state->crtc : old_plane_state->crtc;
8025 struct dm_crtc_state *crtc_state = crtc ? to_dm_crtc_state(crtc->state) : NULL;
8026 struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
8027 uint64_t address = afb ? afb->address : 0;
8028 struct dc_cursor_position position = {0};
8029 struct dc_cursor_attributes attributes;
8032 if (!plane->state->fb && !old_plane_state->fb)
8035 DC_LOG_CURSOR("%s: crtc_id=%d with size %d to %d\n",
8037 amdgpu_crtc->crtc_id,
8038 plane->state->crtc_w,
8039 plane->state->crtc_h);
8041 ret = get_cursor_position(plane, crtc, &position);
8045 if (!position.enable) {
8046 /* turn off cursor */
8047 if (crtc_state && crtc_state->stream) {
8048 mutex_lock(&adev->dm.dc_lock);
8049 dc_stream_set_cursor_position(crtc_state->stream,
8051 mutex_unlock(&adev->dm.dc_lock);
8056 amdgpu_crtc->cursor_width = plane->state->crtc_w;
8057 amdgpu_crtc->cursor_height = plane->state->crtc_h;
8059 memset(&attributes, 0, sizeof(attributes));
8060 attributes.address.high_part = upper_32_bits(address);
8061 attributes.address.low_part = lower_32_bits(address);
8062 attributes.width = plane->state->crtc_w;
8063 attributes.height = plane->state->crtc_h;
8064 attributes.color_format = CURSOR_MODE_COLOR_PRE_MULTIPLIED_ALPHA;
8065 attributes.rotation_angle = 0;
8066 attributes.attribute_flags.value = 0;
8068 attributes.pitch = afb->base.pitches[0] / afb->base.format->cpp[0];
8070 if (crtc_state->stream) {
8071 mutex_lock(&adev->dm.dc_lock);
8072 if (!dc_stream_set_cursor_attributes(crtc_state->stream,
8074 DRM_ERROR("DC failed to set cursor attributes\n");
8076 if (!dc_stream_set_cursor_position(crtc_state->stream,
8078 DRM_ERROR("DC failed to set cursor position\n");
8079 mutex_unlock(&adev->dm.dc_lock);
8083 static void prepare_flip_isr(struct amdgpu_crtc *acrtc)
8086 assert_spin_locked(&acrtc->base.dev->event_lock);
8087 WARN_ON(acrtc->event);
8089 acrtc->event = acrtc->base.state->event;
8091 /* Set the flip status */
8092 acrtc->pflip_status = AMDGPU_FLIP_SUBMITTED;
8094 /* Mark this event as consumed */
8095 acrtc->base.state->event = NULL;
8097 DC_LOG_PFLIP("crtc:%d, pflip_stat:AMDGPU_FLIP_SUBMITTED\n",
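/*
 * Per-flip VRR bookkeeping: feed the flip timestamp to the freesync module,
 * rebuild the VRR infopacket, and on pre-AMDGPU_FAMILY_AI parts adjust
 * vmin/vmax before the frame ends.  The results are cached in dm_irq_params
 * under the event lock so the interrupt handlers see a consistent snapshot.
 */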
8101 static void update_freesync_state_on_stream(
8102 struct amdgpu_display_manager *dm,
8103 struct dm_crtc_state *new_crtc_state,
8104 struct dc_stream_state *new_stream,
8105 struct dc_plane_state *surface,
8106 u32 flip_timestamp_in_us)
8108 struct mod_vrr_params vrr_params;
8109 struct dc_info_packet vrr_infopacket = {0};
8110 struct amdgpu_device *adev = dm->adev;
8111 struct amdgpu_crtc *acrtc = to_amdgpu_crtc(new_crtc_state->base.crtc);
8112 unsigned long flags;
8113 bool pack_sdp_v1_3 = false;
8119 * TODO: Determine why min/max totals and vrefresh can be 0 here.
8120 * For now it's sufficient to just guard against these conditions.
8123 if (!new_stream->timing.h_total || !new_stream->timing.v_total)
8126 spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);
8127 vrr_params = acrtc->dm_irq_params.vrr_params;
8130 mod_freesync_handle_preflip(
8131 dm->freesync_module,
8134 flip_timestamp_in_us,
8137 if (adev->family < AMDGPU_FAMILY_AI &&
8138 amdgpu_dm_vrr_active(new_crtc_state)) {
8139 mod_freesync_handle_v_update(dm->freesync_module,
8140 new_stream, &vrr_params);
8142 /* Need to call this before the frame ends. */
8143 dc_stream_adjust_vmin_vmax(dm->dc,
8144 new_crtc_state->stream,
8145 &vrr_params.adjust);
8149 mod_freesync_build_vrr_infopacket(
8150 dm->freesync_module,
8154 TRANSFER_FUNC_UNKNOWN,
8158 new_crtc_state->freesync_timing_changed |=
8159 (memcmp(&acrtc->dm_irq_params.vrr_params.adjust,
8161 sizeof(vrr_params.adjust)) != 0);
8163 new_crtc_state->freesync_vrr_info_changed |=
8164 (memcmp(&new_crtc_state->vrr_infopacket,
8166 sizeof(vrr_infopacket)) != 0);
8168 acrtc->dm_irq_params.vrr_params = vrr_params;
8169 new_crtc_state->vrr_infopacket = vrr_infopacket;
8171 new_stream->adjust = acrtc->dm_irq_params.vrr_params.adjust;
8172 new_stream->vrr_infopacket = vrr_infopacket;
8174 if (new_crtc_state->freesync_vrr_info_changed)
8175 DRM_DEBUG_KMS("VRR packet update: crtc=%u enabled=%d state=%d",
8176 new_crtc_state->base.crtc->base.id,
8177 (int)new_crtc_state->base.vrr_enabled,
8178 (int)vrr_params.state);
8180 spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
8183 static void update_stream_irq_parameters(
8184 struct amdgpu_display_manager *dm,
8185 struct dm_crtc_state *new_crtc_state)
8187 struct dc_stream_state *new_stream = new_crtc_state->stream;
8188 struct mod_vrr_params vrr_params;
8189 struct mod_freesync_config config = new_crtc_state->freesync_config;
8190 struct amdgpu_device *adev = dm->adev;
8191 struct amdgpu_crtc *acrtc = to_amdgpu_crtc(new_crtc_state->base.crtc);
8192 unsigned long flags;
8198 * TODO: Determine why min/max totals and vrefresh can be 0 here.
8199 * For now it's sufficient to just guard against these conditions.
8201 if (!new_stream->timing.h_total || !new_stream->timing.v_total)
8204 spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);
8205 vrr_params = acrtc->dm_irq_params.vrr_params;
8207 if (new_crtc_state->vrr_supported &&
8208 config.min_refresh_in_uhz &&
8209 config.max_refresh_in_uhz) {
8211 * if freesync compatible mode was set, config.state will be set
8214 if (config.state == VRR_STATE_ACTIVE_FIXED && config.fixed_refresh_in_uhz &&
8215 (!drm_atomic_crtc_needs_modeset(&new_crtc_state->base) ||
8216 new_crtc_state->freesync_config.state == VRR_STATE_ACTIVE_FIXED)) {
8217 vrr_params.max_refresh_in_uhz = config.max_refresh_in_uhz;
8218 vrr_params.min_refresh_in_uhz = config.min_refresh_in_uhz;
8219 vrr_params.fixed_refresh_in_uhz = config.fixed_refresh_in_uhz;
8220 vrr_params.state = VRR_STATE_ACTIVE_FIXED;
8222 config.state = new_crtc_state->base.vrr_enabled ?
8223 VRR_STATE_ACTIVE_VARIABLE :
8227 config.state = VRR_STATE_UNSUPPORTED;
8230 mod_freesync_build_vrr_params(dm->freesync_module,
8232 &config, &vrr_params);
8234 new_crtc_state->freesync_timing_changed |=
8235 (memcmp(&acrtc->dm_irq_params.vrr_params.adjust,
8236 &vrr_params.adjust, sizeof(vrr_params.adjust)) != 0);
8238 new_crtc_state->freesync_config = config;
8239 /* Copy state for access from DM IRQ handler */
8240 acrtc->dm_irq_params.freesync_config = config;
8241 acrtc->dm_irq_params.active_planes = new_crtc_state->active_planes;
8242 acrtc->dm_irq_params.vrr_params = vrr_params;
8243 spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
8246 static void amdgpu_dm_handle_vrr_transition(struct dm_crtc_state *old_state,
8247 struct dm_crtc_state *new_state)
8249 bool old_vrr_active = amdgpu_dm_vrr_active(old_state);
8250 bool new_vrr_active = amdgpu_dm_vrr_active(new_state);
8252 if (!old_vrr_active && new_vrr_active) {
8253 /* Transition VRR inactive -> active:
8254 * While VRR is active, we must not disable vblank irq, as a
8255		 * re-enable after a disable would compute bogus vblank/pflip
8256		 * timestamps if it happened inside the display front-porch.
8258 * We also need vupdate irq for the actual core vblank handling
8261 dm_set_vupdate_irq(new_state->base.crtc, true);
8262 drm_crtc_vblank_get(new_state->base.crtc);
8263 DRM_DEBUG_DRIVER("%s: crtc=%u VRR off->on: Get vblank ref\n",
8264 __func__, new_state->base.crtc->base.id);
8265 } else if (old_vrr_active && !new_vrr_active) {
8266 /* Transition VRR active -> inactive:
8267 * Allow vblank irq disable again for fixed refresh rate.
8269 dm_set_vupdate_irq(new_state->base.crtc, false);
8270 drm_crtc_vblank_put(new_state->base.crtc);
8271 DRM_DEBUG_DRIVER("%s: crtc=%u VRR on->off: Drop vblank ref\n",
8272 __func__, new_state->base.crtc->base.id);
8276 static void amdgpu_dm_commit_cursors(struct drm_atomic_state *state)
8278 struct drm_plane *plane;
8279 struct drm_plane_state *old_plane_state;
8283 * TODO: Make this per-stream so we don't issue redundant updates for
8284 * commits with multiple streams.
8286 for_each_old_plane_in_state(state, plane, old_plane_state, i)
8287 if (plane->type == DRM_PLANE_TYPE_CURSOR)
8288 handle_cursor_update(plane, old_plane_state);
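/*
 * Commit-tail work for one CRTC: every enabled non-cursor plane on it is
 * gathered into a dc_surface_update bundle (scaling info, plane info, flip
 * address, and colour-management data when it changed), with framebuffer
 * fences waited on along the way, so the plane and stream updates for this
 * CRTC can be handed to DC together.
 */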
8291 static void amdgpu_dm_commit_planes(struct drm_atomic_state *state,
8292 struct dc_state *dc_state,
8293 struct drm_device *dev,
8294 struct amdgpu_display_manager *dm,
8295 struct drm_crtc *pcrtc,
8296 bool wait_for_vblank)
8299 uint64_t timestamp_ns;
8300 struct drm_plane *plane;
8301 struct drm_plane_state *old_plane_state, *new_plane_state;
8302 struct amdgpu_crtc *acrtc_attach = to_amdgpu_crtc(pcrtc);
8303 struct drm_crtc_state *new_pcrtc_state =
8304 drm_atomic_get_new_crtc_state(state, pcrtc);
8305 struct dm_crtc_state *acrtc_state = to_dm_crtc_state(new_pcrtc_state);
8306 struct dm_crtc_state *dm_old_crtc_state =
8307 to_dm_crtc_state(drm_atomic_get_old_crtc_state(state, pcrtc));
8308 int planes_count = 0, vpos, hpos;
8310 unsigned long flags;
8311 struct amdgpu_bo *abo;
8312 uint32_t target_vblank, last_flip_vblank;
8313 bool vrr_active = amdgpu_dm_vrr_active(acrtc_state);
8314 bool pflip_present = false;
8316 struct dc_surface_update surface_updates[MAX_SURFACES];
8317 struct dc_plane_info plane_infos[MAX_SURFACES];
8318 struct dc_scaling_info scaling_infos[MAX_SURFACES];
8319 struct dc_flip_addrs flip_addrs[MAX_SURFACES];
8320 struct dc_stream_update stream_update;
8323 bundle = kzalloc(sizeof(*bundle), GFP_KERNEL);
8326 dm_error("Failed to allocate update bundle\n");
8331 * Disable the cursor first if we're disabling all the planes.
8332 * It'll remain on the screen after the planes are re-enabled if we don't.
8335 if (acrtc_state->active_planes == 0)
8336 amdgpu_dm_commit_cursors(state);
8338 /* update planes when needed */
8339 for_each_oldnew_plane_in_state(state, plane, old_plane_state, new_plane_state, i) {
8340 struct drm_crtc *crtc = new_plane_state->crtc;
8341 struct drm_crtc_state *new_crtc_state;
8342 struct drm_framebuffer *fb = new_plane_state->fb;
8343 struct amdgpu_framebuffer *afb = (struct amdgpu_framebuffer *)fb;
8344 bool plane_needs_flip;
8345 struct dc_plane_state *dc_plane;
8346 struct dm_plane_state *dm_new_plane_state = to_dm_plane_state(new_plane_state);
8348 /* Cursor plane is handled after stream updates */
8349 if (plane->type == DRM_PLANE_TYPE_CURSOR)
8352 if (!fb || !crtc || pcrtc != crtc)
8355 new_crtc_state = drm_atomic_get_new_crtc_state(state, crtc);
8356 if (!new_crtc_state->active)
8359 dc_plane = dm_new_plane_state->dc_state;
8361 bundle->surface_updates[planes_count].surface = dc_plane;
8362 if (new_pcrtc_state->color_mgmt_changed) {
8363 bundle->surface_updates[planes_count].gamma = dc_plane->gamma_correction;
8364 bundle->surface_updates[planes_count].in_transfer_func = dc_plane->in_transfer_func;
8365 bundle->surface_updates[planes_count].gamut_remap_matrix = &dc_plane->gamut_remap_matrix;
8368 fill_dc_scaling_info(new_plane_state,
8369 &bundle->scaling_infos[planes_count]);
8371 bundle->surface_updates[planes_count].scaling_info =
8372 &bundle->scaling_infos[planes_count];
8374 plane_needs_flip = old_plane_state->fb && new_plane_state->fb;
8376 pflip_present = pflip_present || plane_needs_flip;
8378 if (!plane_needs_flip) {
8383 abo = gem_to_amdgpu_bo(fb->obj[0]);
8386 * Wait for all fences on this FB. Do limited wait to avoid
8387 * deadlock during GPU reset when this fence will not signal
8388 * but we hold reservation lock for the BO.
8390 r = dma_resv_wait_timeout_rcu(abo->tbo.base.resv, true,
8392 msecs_to_jiffies(5000));
8393 if (unlikely(r <= 0))
8394 DRM_ERROR("Waiting for fences timed out!");
8396 fill_dc_plane_info_and_addr(
8397 dm->adev, new_plane_state,
8399 &bundle->plane_infos[planes_count],
8400 &bundle->flip_addrs[planes_count].address,
8401 afb->tmz_surface, false);
8403 DRM_DEBUG_ATOMIC("plane: id=%d dcc_en=%d\n",
8404 new_plane_state->plane->index,
8405 bundle->plane_infos[planes_count].dcc.enable);
8407 bundle->surface_updates[planes_count].plane_info =
8408 &bundle->plane_infos[planes_count];
8411 * Only allow immediate flips for fast updates that don't
8412 * change FB pitch, DCC state, rotation or mirroring.
8414 bundle->flip_addrs[planes_count].flip_immediate =
8415 crtc->state->async_flip &&
8416 acrtc_state->update_type == UPDATE_TYPE_FAST;
8418 timestamp_ns = ktime_get_ns();
8419 bundle->flip_addrs[planes_count].flip_timestamp_in_us = div_u64(timestamp_ns, 1000);
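/*
 * For example, a ktime_get_ns() value of 1,234,567,890 ns yields a
 * flip_timestamp_in_us of 1,234,567 us after the div_u64() above.
 */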
8420 bundle->surface_updates[planes_count].flip_addr = &bundle->flip_addrs[planes_count];
8421 bundle->surface_updates[planes_count].surface = dc_plane;
8423 if (!bundle->surface_updates[planes_count].surface) {
8424 DRM_ERROR("No surface for CRTC: id=%d\n",
8425 acrtc_attach->crtc_id);
8429 if (plane == pcrtc->primary)
8430 update_freesync_state_on_stream(
8433 acrtc_state->stream,
8435 bundle->flip_addrs[planes_count].flip_timestamp_in_us);
8437 DRM_DEBUG_ATOMIC("%s Flipping to hi: 0x%x, low: 0x%x\n",
8439 bundle->flip_addrs[planes_count].address.grph.addr.high_part,
8440 bundle->flip_addrs[planes_count].address.grph.addr.low_part);
8446 if (pflip_present) {
8448 /* Use old throttling in non-vrr fixed refresh rate mode
8449 * to keep flip scheduling based on target vblank counts
8450 * working in a backwards compatible way, e.g., for
8451 * clients using the GLX_OML_sync_control extension or
8452 * DRI3/Present extension with defined target_msc.
8454 last_flip_vblank = amdgpu_get_vblank_counter_kms(pcrtc);
8457 /* For variable refresh rate mode only:
8458 * Get vblank of last completed flip to avoid > 1 vrr
8459 * flips per video frame by use of throttling, but allow
8460 * flip programming anywhere in the possibly large
8461 * variable vrr vblank interval for fine-grained flip
8462 * timing control and more opportunity to avoid stutter
8463 * on late submission of flips.
8465 spin_lock_irqsave(&pcrtc->dev->event_lock, flags);
8466 last_flip_vblank = acrtc_attach->dm_irq_params.last_flip_vblank;
8467 spin_unlock_irqrestore(&pcrtc->dev->event_lock, flags);
8470 target_vblank = last_flip_vblank + wait_for_vblank;
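/*
 * Illustrative example: if the last completed flip was seen at vblank
 * count 1000 and wait_for_vblank is true, target_vblank becomes 1001 and
 * the loop below sleeps in ~1 ms steps while the CRTC is still inside a
 * vblank period and the vblank counter has not yet reached 1001.
 */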
8473 * Wait until we're out of the vertical blank period before the one
8474 * targeted by the flip
8476 while ((acrtc_attach->enabled &&
8477 (amdgpu_display_get_crtc_scanoutpos(dm->ddev, acrtc_attach->crtc_id,
8478 0, &vpos, &hpos, NULL,
8479 NULL, &pcrtc->hwmode)
8480 & (DRM_SCANOUTPOS_VALID | DRM_SCANOUTPOS_IN_VBLANK)) ==
8481 (DRM_SCANOUTPOS_VALID | DRM_SCANOUTPOS_IN_VBLANK) &&
8482 (int)(target_vblank -
8483 amdgpu_get_vblank_counter_kms(pcrtc)) > 0)) {
8484 usleep_range(1000, 1100);
8488 * Prepare the flip event for the pageflip interrupt to handle.
8490 * This only works in the case where we've already turned on the
8491 * appropriate hardware blocks (e.g. HUBP), so in the transition case
8492 * from 0 -> n planes we have to skip a hardware generated event
8493 * and rely on sending it from software.
8495 if (acrtc_attach->base.state->event &&
8496 acrtc_state->active_planes > 0) {
8497 drm_crtc_vblank_get(pcrtc);
8499 spin_lock_irqsave(&pcrtc->dev->event_lock, flags);
8501 WARN_ON(acrtc_attach->pflip_status != AMDGPU_FLIP_NONE);
8502 prepare_flip_isr(acrtc_attach);
8504 spin_unlock_irqrestore(&pcrtc->dev->event_lock, flags);
8507 if (acrtc_state->stream) {
8508 if (acrtc_state->freesync_vrr_info_changed)
8509 bundle->stream_update.vrr_infopacket =
8510 &acrtc_state->stream->vrr_infopacket;
8514 /* Update the planes if changed or disable if we don't have any. */
8515 if ((planes_count || acrtc_state->active_planes == 0) &&
8516 acrtc_state->stream) {
8517 bundle->stream_update.stream = acrtc_state->stream;
8518 if (new_pcrtc_state->mode_changed) {
8519 bundle->stream_update.src = acrtc_state->stream->src;
8520 bundle->stream_update.dst = acrtc_state->stream->dst;
8523 if (new_pcrtc_state->color_mgmt_changed) {
8525 * TODO: This isn't fully correct since we've actually
8526 * already modified the stream in place.
8528 bundle->stream_update.gamut_remap =
8529 &acrtc_state->stream->gamut_remap_matrix;
8530 bundle->stream_update.output_csc_transform =
8531 &acrtc_state->stream->csc_color_matrix;
8532 bundle->stream_update.out_transfer_func =
8533 acrtc_state->stream->out_transfer_func;
8536 acrtc_state->stream->abm_level = acrtc_state->abm_level;
8537 if (acrtc_state->abm_level != dm_old_crtc_state->abm_level)
8538 bundle->stream_update.abm_level = &acrtc_state->abm_level;
8541 * If FreeSync state on the stream has changed then we need to
8542 * re-adjust the min/max bounds now that DC doesn't handle this
8543 * as part of commit.
8545 if (is_dc_timing_adjust_needed(dm_old_crtc_state, acrtc_state)) {
8546 spin_lock_irqsave(&pcrtc->dev->event_lock, flags);
8547 dc_stream_adjust_vmin_vmax(
8548 dm->dc, acrtc_state->stream,
8549 &acrtc_attach->dm_irq_params.vrr_params.adjust);
8550 spin_unlock_irqrestore(&pcrtc->dev->event_lock, flags);
8552 mutex_lock(&dm->dc_lock);
8553 if ((acrtc_state->update_type > UPDATE_TYPE_FAST) &&
8554 acrtc_state->stream->link->psr_settings.psr_allow_active)
8555 amdgpu_dm_psr_disable(acrtc_state->stream);
8557 dc_commit_updates_for_stream(dm->dc,
8558 bundle->surface_updates,
8560 acrtc_state->stream,
8561 &bundle->stream_update,
8565 * Enable or disable the interrupts on the backend.
8567 * Most pipes are put into power gating when unused.
8569 * When power gating is enabled on a pipe we lose the
8570 * interrupt enablement state when power gating is disabled.
8572 * So we need to update the IRQ control state in hardware
8573 * whenever the pipe turns on (since it could be previously
8574 * power gated) or off (since some pipes can't be power gated on some ASICs).
8577 if (dm_old_crtc_state->active_planes != acrtc_state->active_planes)
8578 dm_update_pflip_irq_state(drm_to_adev(dev),
8581 if ((acrtc_state->update_type > UPDATE_TYPE_FAST) &&
8582 acrtc_state->stream->link->psr_settings.psr_version != DC_PSR_VERSION_UNSUPPORTED &&
8583 !acrtc_state->stream->link->psr_settings.psr_feature_enabled)
8584 amdgpu_dm_link_setup_psr(acrtc_state->stream);
8585 else if ((acrtc_state->update_type == UPDATE_TYPE_FAST) &&
8586 acrtc_state->stream->link->psr_settings.psr_feature_enabled &&
8587 !acrtc_state->stream->link->psr_settings.psr_allow_active) {
8588 amdgpu_dm_psr_enable(acrtc_state->stream);
8591 mutex_unlock(&dm->dc_lock);
8595 * Update cursor state *after* programming all the planes.
8596 * This avoids redundant programming in the case where we're going
8597 * to be disabling a single plane - those pipes are being disabled.
8599 if (acrtc_state->active_planes)
8600 amdgpu_dm_commit_cursors(state);
8606 static void amdgpu_dm_commit_audio(struct drm_device *dev,
8607 struct drm_atomic_state *state)
8609 struct amdgpu_device *adev = drm_to_adev(dev);
8610 struct amdgpu_dm_connector *aconnector;
8611 struct drm_connector *connector;
8612 struct drm_connector_state *old_con_state, *new_con_state;
8613 struct drm_crtc_state *new_crtc_state;
8614 struct dm_crtc_state *new_dm_crtc_state;
8615 const struct dc_stream_status *status;
8618 /* Notify device removals. */
8619 for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) {
8620 if (old_con_state->crtc != new_con_state->crtc) {
8621 /* CRTC changes require notification. */
8625 if (!new_con_state->crtc)
8628 new_crtc_state = drm_atomic_get_new_crtc_state(
8629 state, new_con_state->crtc);
8631 if (!new_crtc_state)
8634 if (!drm_atomic_crtc_needs_modeset(new_crtc_state))
8638 aconnector = to_amdgpu_dm_connector(connector);
8640 mutex_lock(&adev->dm.audio_lock);
8641 inst = aconnector->audio_inst;
8642 aconnector->audio_inst = -1;
8643 mutex_unlock(&adev->dm.audio_lock);
8645 amdgpu_dm_audio_eld_notify(adev, inst);
8648 /* Notify audio device additions. */
8649 for_each_new_connector_in_state(state, connector, new_con_state, i) {
8650 if (!new_con_state->crtc)
8653 new_crtc_state = drm_atomic_get_new_crtc_state(
8654 state, new_con_state->crtc);
8656 if (!new_crtc_state)
8659 if (!drm_atomic_crtc_needs_modeset(new_crtc_state))
8662 new_dm_crtc_state = to_dm_crtc_state(new_crtc_state);
8663 if (!new_dm_crtc_state->stream)
8666 status = dc_stream_get_status(new_dm_crtc_state->stream);
8670 aconnector = to_amdgpu_dm_connector(connector);
8672 mutex_lock(&adev->dm.audio_lock);
8673 inst = status->audio_inst;
8674 aconnector->audio_inst = inst;
8675 mutex_unlock(&adev->dm.audio_lock);
8677 amdgpu_dm_audio_eld_notify(adev, inst);
8682 * amdgpu_dm_crtc_copy_transient_flags - copy mirrored flags from DRM to DC
8683 * @crtc_state: the DRM CRTC state
8684 * @stream_state: the DC stream state.
8686 * Copy the mirrored transient state flags from DRM to DC. It is used to bring
8687 * a dc_stream_state's flags in sync with a drm_crtc_state's flags.
8689 static void amdgpu_dm_crtc_copy_transient_flags(struct drm_crtc_state *crtc_state,
8690 struct dc_stream_state *stream_state)
8692 stream_state->mode_changed = drm_atomic_crtc_needs_modeset(crtc_state);
8696 * amdgpu_dm_atomic_commit_tail() - AMDgpu DM's commit tail implementation.
8697 * @state: The atomic state to commit
8699 * This will tell DC to commit the constructed DC state from atomic_check,
8700 * programming the hardware. Any failure here implies a hardware failure, since
8701 * atomic check should have filtered anything non-kosher.
8703 static void amdgpu_dm_atomic_commit_tail(struct drm_atomic_state *state)
8705 struct drm_device *dev = state->dev;
8706 struct amdgpu_device *adev = drm_to_adev(dev);
8707 struct amdgpu_display_manager *dm = &adev->dm;
8708 struct dm_atomic_state *dm_state;
8709 struct dc_state *dc_state = NULL, *dc_state_temp = NULL;
8711 struct drm_crtc *crtc;
8712 struct drm_crtc_state *old_crtc_state, *new_crtc_state;
8713 unsigned long flags;
8714 bool wait_for_vblank = true;
8715 struct drm_connector *connector;
8716 struct drm_connector_state *old_con_state, *new_con_state;
8717 struct dm_crtc_state *dm_old_crtc_state, *dm_new_crtc_state;
8718 int crtc_disable_count = 0;
8719 bool mode_set_reset_required = false;
8721 trace_amdgpu_dm_atomic_commit_tail_begin(state);
8723 drm_atomic_helper_update_legacy_modeset_state(dev, state);
8725 dm_state = dm_atomic_get_new_state(state);
8726 if (dm_state && dm_state->context) {
8727 dc_state = dm_state->context;
8729 /* No state changes, retain current state. */
8730 dc_state_temp = dc_create_state(dm->dc);
8731 ASSERT(dc_state_temp);
8732 dc_state = dc_state_temp;
8733 dc_resource_state_copy_construct_current(dm->dc, dc_state);
8736 for_each_oldnew_crtc_in_state (state, crtc, old_crtc_state,
8737 new_crtc_state, i) {
8738 struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
8740 dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
8742 if (old_crtc_state->active &&
8743 (!new_crtc_state->active ||
8744 drm_atomic_crtc_needs_modeset(new_crtc_state))) {
8745 manage_dm_interrupts(adev, acrtc, false);
8746 dc_stream_release(dm_old_crtc_state->stream);
8750 drm_atomic_helper_calc_timestamping_constants(state);
8752 /* update changed items */
8753 for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
8754 struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
8756 dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
8757 dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
8760 "amdgpu_crtc id:%d crtc_state_flags: enable:%d, active:%d, "
8761 "planes_changed:%d, mode_changed:%d,active_changed:%d,"
8762 "connectors_changed:%d\n",
8764 new_crtc_state->enable,
8765 new_crtc_state->active,
8766 new_crtc_state->planes_changed,
8767 new_crtc_state->mode_changed,
8768 new_crtc_state->active_changed,
8769 new_crtc_state->connectors_changed);
8771 /* Disable cursor if disabling crtc */
8772 if (old_crtc_state->active && !new_crtc_state->active) {
8773 struct dc_cursor_position position;
8775 memset(&position, 0, sizeof(position));
8776 mutex_lock(&dm->dc_lock);
8777 dc_stream_set_cursor_position(dm_old_crtc_state->stream, &position);
8778 mutex_unlock(&dm->dc_lock);
8781 /* Copy all transient state flags into dc state */
8782 if (dm_new_crtc_state->stream) {
8783 amdgpu_dm_crtc_copy_transient_flags(&dm_new_crtc_state->base,
8784 dm_new_crtc_state->stream);
8787 /* handles headless hotplug case, updating new_state and
8788 * aconnector as needed
8791 if (modeset_required(new_crtc_state, dm_new_crtc_state->stream, dm_old_crtc_state->stream)) {
8793 DRM_DEBUG_ATOMIC("Atomic commit: SET crtc id %d: [%p]\n", acrtc->crtc_id, acrtc);
8795 if (!dm_new_crtc_state->stream) {
8797 * This can happen because of issues with delivery
8798 * of userspace notifications: userspace tries to set
8799 * a mode on a display which is in fact disconnected.
8800 * dc_sink is NULL on the aconnector in that case and
8801 * we expect a mode reset to come soon.
8804 * It can also happen when an unplug is done during
8805 * the resume sequence.
8807 * In either case we want to pretend we still have a
8808 * sink to keep the pipe running, so that the hw state
8809 * stays consistent with the sw state.
8811 DRM_DEBUG_DRIVER("%s: Failed to create new stream for crtc %d\n",
8812 __func__, acrtc->base.base.id);
8816 if (dm_old_crtc_state->stream)
8817 remove_stream(adev, acrtc, dm_old_crtc_state->stream);
8819 pm_runtime_get_noresume(dev->dev);
8821 acrtc->enabled = true;
8822 acrtc->hw_mode = new_crtc_state->mode;
8823 crtc->hwmode = new_crtc_state->mode;
8824 mode_set_reset_required = true;
8825 } else if (modereset_required(new_crtc_state)) {
8826 DRM_DEBUG_ATOMIC("Atomic commit: RESET. crtc id %d:[%p]\n", acrtc->crtc_id, acrtc);
8827 /* i.e. reset mode */
8828 if (dm_old_crtc_state->stream)
8829 remove_stream(adev, acrtc, dm_old_crtc_state->stream);
8831 mode_set_reset_required = true;
8833 } /* for_each_crtc_in_state() */
8836 /* if there was a mode set or reset, disable eDP PSR */
8837 if (mode_set_reset_required)
8838 amdgpu_dm_psr_disable_all(dm);
8840 dm_enable_per_frame_crtc_master_sync(dc_state);
8841 mutex_lock(&dm->dc_lock);
8842 WARN_ON(!dc_commit_state(dm->dc, dc_state));
8843 #if defined(CONFIG_DRM_AMD_DC_DCN)
8844 /* Allow idle optimization when vblank count is 0 for display off */
8845 if (dm->active_vblank_irq_count == 0)
8846 dc_allow_idle_optimizations(dm->dc, true);
8848 mutex_unlock(&dm->dc_lock);
8851 for_each_new_crtc_in_state(state, crtc, new_crtc_state, i) {
8852 struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
8854 dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
8856 if (dm_new_crtc_state->stream != NULL) {
8857 const struct dc_stream_status *status =
8858 dc_stream_get_status(dm_new_crtc_state->stream);
8861 status = dc_stream_get_status_from_state(dc_state,
8862 dm_new_crtc_state->stream);
8864 DC_ERR("got no status for stream %p on acrtc%p\n", dm_new_crtc_state->stream, acrtc);
8866 acrtc->otg_inst = status->primary_otg_inst;
8869 #ifdef CONFIG_DRM_AMD_DC_HDCP
8870 for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) {
8871 struct dm_connector_state *dm_new_con_state = to_dm_connector_state(new_con_state);
8872 struct amdgpu_crtc *acrtc = to_amdgpu_crtc(dm_new_con_state->base.crtc);
8873 struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
8875 new_crtc_state = NULL;
8878 new_crtc_state = drm_atomic_get_new_crtc_state(state, &acrtc->base);
8880 dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
8882 if (dm_new_crtc_state && dm_new_crtc_state->stream == NULL &&
8883 connector->state->content_protection == DRM_MODE_CONTENT_PROTECTION_ENABLED) {
8884 hdcp_reset_display(adev->dm.hdcp_workqueue, aconnector->dc_link->link_index);
8885 new_con_state->content_protection = DRM_MODE_CONTENT_PROTECTION_DESIRED;
8886 dm_new_con_state->update_hdcp = true;
8890 if (is_content_protection_different(new_con_state, old_con_state, connector, adev->dm.hdcp_workqueue))
8891 hdcp_update_display(
8892 adev->dm.hdcp_workqueue, aconnector->dc_link->link_index, aconnector,
8893 new_con_state->hdcp_content_type,
8894 new_con_state->content_protection == DRM_MODE_CONTENT_PROTECTION_DESIRED);
8898 /* Handle connector state changes */
8899 for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) {
8900 struct dm_connector_state *dm_new_con_state = to_dm_connector_state(new_con_state);
8901 struct dm_connector_state *dm_old_con_state = to_dm_connector_state(old_con_state);
8902 struct amdgpu_crtc *acrtc = to_amdgpu_crtc(dm_new_con_state->base.crtc);
8903 struct dc_surface_update dummy_updates[MAX_SURFACES];
8904 struct dc_stream_update stream_update;
8905 struct dc_info_packet hdr_packet;
8906 struct dc_stream_status *status = NULL;
8907 bool abm_changed, hdr_changed, scaling_changed;
8909 memset(&dummy_updates, 0, sizeof(dummy_updates));
8910 memset(&stream_update, 0, sizeof(stream_update));
8913 new_crtc_state = drm_atomic_get_new_crtc_state(state, &acrtc->base);
8914 old_crtc_state = drm_atomic_get_old_crtc_state(state, &acrtc->base);
8917 /* Skip any modesets/resets */
8918 if (!acrtc || drm_atomic_crtc_needs_modeset(new_crtc_state))
8921 dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
8922 dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
8924 scaling_changed = is_scaling_state_different(dm_new_con_state,
8927 abm_changed = dm_new_crtc_state->abm_level !=
8928 dm_old_crtc_state->abm_level;
8931 is_hdr_metadata_different(old_con_state, new_con_state);
8933 if (!scaling_changed && !abm_changed && !hdr_changed)
8936 stream_update.stream = dm_new_crtc_state->stream;
8937 if (scaling_changed) {
8938 update_stream_scaling_settings(&dm_new_con_state->base.crtc->mode,
8939 dm_new_con_state, dm_new_crtc_state->stream);
8941 stream_update.src = dm_new_crtc_state->stream->src;
8942 stream_update.dst = dm_new_crtc_state->stream->dst;
8946 dm_new_crtc_state->stream->abm_level = dm_new_crtc_state->abm_level;
8948 stream_update.abm_level = &dm_new_crtc_state->abm_level;
8952 fill_hdr_info_packet(new_con_state, &hdr_packet);
8953 stream_update.hdr_static_metadata = &hdr_packet;
8956 status = dc_stream_get_status(dm_new_crtc_state->stream);
8958 WARN_ON(!status->plane_count);
8961 * TODO: DC refuses to perform stream updates without a dc_surface_update.
8962 * Here we create an empty update on each plane.
8963 * To fix this, DC should permit updating only stream properties.
8965 for (j = 0; j < status->plane_count; j++)
8966 dummy_updates[j].surface = status->plane_states[0];
8969 mutex_lock(&dm->dc_lock);
8970 dc_commit_updates_for_stream(dm->dc,
8972 status->plane_count,
8973 dm_new_crtc_state->stream,
8976 mutex_unlock(&dm->dc_lock);
8979 /* Count number of newly disabled CRTCs for dropping PM refs later. */
8980 for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state,
8981 new_crtc_state, i) {
8982 if (old_crtc_state->active && !new_crtc_state->active)
8983 crtc_disable_count++;
8985 dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
8986 dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
8988 /* For freesync config update on crtc state and params for irq */
8989 update_stream_irq_parameters(dm, dm_new_crtc_state);
8991 /* Handle vrr on->off / off->on transitions */
8992 amdgpu_dm_handle_vrr_transition(dm_old_crtc_state,
8997 * Enable interrupts for CRTCs that are newly enabled or went through
8998 * a modeset. It was intentionally deferred until after the front end
8999 * state was modified to wait until the OTG was on and so the IRQ
9000 * handlers didn't access stale or invalid state.
9002 for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
9003 struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
9004 #ifdef CONFIG_DEBUG_FS
9005 bool configure_crc = false;
9006 enum amdgpu_dm_pipe_crc_source cur_crc_src;
9008 dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
9010 if (new_crtc_state->active &&
9011 (!old_crtc_state->active ||
9012 drm_atomic_crtc_needs_modeset(new_crtc_state))) {
9013 dc_stream_retain(dm_new_crtc_state->stream);
9014 acrtc->dm_irq_params.stream = dm_new_crtc_state->stream;
9015 manage_dm_interrupts(adev, acrtc, true);
9017 #ifdef CONFIG_DEBUG_FS
9019 * Frontend may have changed so reapply the CRC capture
9020 * settings for the stream.
9022 dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
9023 spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);
9024 cur_crc_src = acrtc->dm_irq_params.crc_src;
9025 spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
9027 if (amdgpu_dm_is_valid_crc_source(cur_crc_src)) {
9028 configure_crc = true;
9029 #if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
9030 if (amdgpu_dm_crc_window_is_activated(crtc))
9031 configure_crc = false;
9036 amdgpu_dm_crtc_configure_crc_source(
9037 crtc, dm_new_crtc_state, cur_crc_src);
9042 for_each_new_crtc_in_state(state, crtc, new_crtc_state, j)
9043 if (new_crtc_state->async_flip)
9044 wait_for_vblank = false;
9046 /* update planes when needed, per crtc */
9047 for_each_new_crtc_in_state(state, crtc, new_crtc_state, j) {
9048 dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
9050 if (dm_new_crtc_state->stream)
9051 amdgpu_dm_commit_planes(state, dc_state, dev,
9052 dm, crtc, wait_for_vblank);
9055 /* Update audio instances for each connector. */
9056 amdgpu_dm_commit_audio(dev, state);
9059 * Send a vblank event for any CRTC whose event was not handled in the
9060 * flip path, and mark it consumed for drm_atomic_helper_commit_hw_done().
9062 spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);
9063 for_each_new_crtc_in_state(state, crtc, new_crtc_state, i) {
9065 if (new_crtc_state->event)
9066 drm_send_event_locked(dev, &new_crtc_state->event->base);
9068 new_crtc_state->event = NULL;
9070 spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
9072 /* Signal HW programming completion */
9073 drm_atomic_helper_commit_hw_done(state);
9075 if (wait_for_vblank)
9076 drm_atomic_helper_wait_for_flip_done(dev, state);
9078 drm_atomic_helper_cleanup_planes(dev, state);
9080 /* return the stolen vga memory back to VRAM */
9081 if (!adev->mman.keep_stolen_vga_memory)
9082 amdgpu_bo_free_kernel(&adev->mman.stolen_vga_memory, NULL, NULL);
9083 amdgpu_bo_free_kernel(&adev->mman.stolen_extended_memory, NULL, NULL);
9086 * Finally, drop a runtime PM reference for each newly disabled CRTC,
9087 * so we can put the GPU into runtime suspend if we're not driving any screens anymore.
9090 for (i = 0; i < crtc_disable_count; i++)
9091 pm_runtime_put_autosuspend(dev->dev);
9092 pm_runtime_mark_last_busy(dev->dev);
9095 dc_release_state(dc_state_temp);
9099 static int dm_force_atomic_commit(struct drm_connector *connector)
9102 struct drm_device *ddev = connector->dev;
9103 struct drm_atomic_state *state = drm_atomic_state_alloc(ddev);
9104 struct amdgpu_crtc *disconnected_acrtc = to_amdgpu_crtc(connector->encoder->crtc);
9105 struct drm_plane *plane = disconnected_acrtc->base.primary;
9106 struct drm_connector_state *conn_state;
9107 struct drm_crtc_state *crtc_state;
9108 struct drm_plane_state *plane_state;
9113 state->acquire_ctx = ddev->mode_config.acquire_ctx;
9115 /* Construct an atomic state to restore previous display setting */
9118 * Attach connectors to drm_atomic_state
9120 conn_state = drm_atomic_get_connector_state(state, connector);
9122 ret = PTR_ERR_OR_ZERO(conn_state);
9126 /* Attach crtc to drm_atomic_state*/
9127 crtc_state = drm_atomic_get_crtc_state(state, &disconnected_acrtc->base);
9129 ret = PTR_ERR_OR_ZERO(crtc_state);
9133 /* force a restore */
9134 crtc_state->mode_changed = true;
9136 /* Attach plane to drm_atomic_state */
9137 plane_state = drm_atomic_get_plane_state(state, plane);
9139 ret = PTR_ERR_OR_ZERO(plane_state);
9143 /* Call commit internally with the state we just constructed */
9144 ret = drm_atomic_commit(state);
9147 drm_atomic_state_put(state);
9149 DRM_ERROR("Restoring old state failed with %i\n", ret);
9155 * This function handles all cases when a set mode does not come upon hotplug.
9156 * This includes when a display is unplugged and then plugged back into the
9157 * same port, and when running without usermode desktop manager support.
9159 void dm_restore_drm_connector_state(struct drm_device *dev,
9160 struct drm_connector *connector)
9162 struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
9163 struct amdgpu_crtc *disconnected_acrtc;
9164 struct dm_crtc_state *acrtc_state;
9166 if (!aconnector->dc_sink || !connector->state || !connector->encoder)
9169 disconnected_acrtc = to_amdgpu_crtc(connector->encoder->crtc);
9170 if (!disconnected_acrtc)
9173 acrtc_state = to_dm_crtc_state(disconnected_acrtc->base.state);
9174 if (!acrtc_state->stream)
9178 * If the previous sink is not released and is different from the current
9179 * one, we deduce that we are in a state where we cannot rely on a usermode
9180 * call to turn on the display, so we do it here.
9182 if (acrtc_state->stream->sink != aconnector->dc_sink)
9183 dm_force_atomic_commit(&aconnector->base);
9187 * Grabs all modesetting locks to serialize against any blocking commits,
9188 * and waits for the completion of all non-blocking commits.
9190 static int do_aquire_global_lock(struct drm_device *dev,
9191 struct drm_atomic_state *state)
9193 struct drm_crtc *crtc;
9194 struct drm_crtc_commit *commit;
9198 * Adding all modeset locks to acquire_ctx ensures that, when the framework
9199 * releases it, the extra locks we take here get released as well.
9202 ret = drm_modeset_lock_all_ctx(dev, state->acquire_ctx);
9206 list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
9207 spin_lock(&crtc->commit_lock);
9208 commit = list_first_entry_or_null(&crtc->commit_list,
9209 struct drm_crtc_commit, commit_entry);
9211 drm_crtc_commit_get(commit);
9212 spin_unlock(&crtc->commit_lock);
9218 * Make sure all pending HW programming has completed and all page flips are done.
9221 ret = wait_for_completion_interruptible_timeout(&commit->hw_done, 10*HZ);
9224 ret = wait_for_completion_interruptible_timeout(
9225 &commit->flip_done, 10*HZ);
9228 DRM_ERROR("[CRTC:%d:%s] hw_done or flip_done "
9229 "timed out\n", crtc->base.id, crtc->name);
9231 drm_crtc_commit_put(commit);
9234 return ret < 0 ? ret : 0;
9237 static void get_freesync_config_for_crtc(
9238 struct dm_crtc_state *new_crtc_state,
9239 struct dm_connector_state *new_con_state)
9241 struct mod_freesync_config config = {0};
9242 struct amdgpu_dm_connector *aconnector =
9243 to_amdgpu_dm_connector(new_con_state->base.connector);
9244 struct drm_display_mode *mode = &new_crtc_state->base.mode;
9245 int vrefresh = drm_mode_vrefresh(mode);
9246 bool fs_vid_mode = false;
9248 new_crtc_state->vrr_supported = new_con_state->freesync_capable &&
9249 vrefresh >= aconnector->min_vfreq &&
9250 vrefresh <= aconnector->max_vfreq;
9252 if (new_crtc_state->vrr_supported) {
9253 new_crtc_state->stream->ignore_msa_timing_param = true;
9254 fs_vid_mode = new_crtc_state->freesync_config.state == VRR_STATE_ACTIVE_FIXED;
9256 config.min_refresh_in_uhz = aconnector->min_vfreq * 1000000;
9257 config.max_refresh_in_uhz = aconnector->max_vfreq * 1000000;
9258 config.vsif_supported = true;
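/*
 * For example, a panel reporting min_vfreq = 48 and max_vfreq = 144
 * ends up with min_refresh_in_uhz = 48,000,000 and
 * max_refresh_in_uhz = 144,000,000 (refresh rates are kept in uHz).
 */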
9262 config.state = VRR_STATE_ACTIVE_FIXED;
9263 config.fixed_refresh_in_uhz = new_crtc_state->freesync_config.fixed_refresh_in_uhz;
9265 } else if (new_crtc_state->base.vrr_enabled) {
9266 config.state = VRR_STATE_ACTIVE_VARIABLE;
9268 config.state = VRR_STATE_INACTIVE;
9272 new_crtc_state->freesync_config = config;
9275 static void reset_freesync_config_for_crtc(
9276 struct dm_crtc_state *new_crtc_state)
9278 new_crtc_state->vrr_supported = false;
9280 memset(&new_crtc_state->vrr_infopacket, 0,
9281 sizeof(new_crtc_state->vrr_infopacket));
9285 is_timing_unchanged_for_freesync(struct drm_crtc_state *old_crtc_state,
9286 struct drm_crtc_state *new_crtc_state)
9288 struct drm_display_mode old_mode, new_mode;
9290 if (!old_crtc_state || !new_crtc_state)
9293 old_mode = old_crtc_state->mode;
9294 new_mode = new_crtc_state->mode;
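/*
 * The check below treats the timing as unchanged for freesync purposes
 * when only the vertical parameters that define the front porch differ
 * (vtotal, vsync_start, vsync_end) while the vsync width and every
 * horizontal parameter stay the same - e.g. a freesync video mode derived
 * from the base mode by stretching the vertical front porch.
 */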
9296 if (old_mode.clock == new_mode.clock &&
9297 old_mode.hdisplay == new_mode.hdisplay &&
9298 old_mode.vdisplay == new_mode.vdisplay &&
9299 old_mode.htotal == new_mode.htotal &&
9300 old_mode.vtotal != new_mode.vtotal &&
9301 old_mode.hsync_start == new_mode.hsync_start &&
9302 old_mode.vsync_start != new_mode.vsync_start &&
9303 old_mode.hsync_end == new_mode.hsync_end &&
9304 old_mode.vsync_end != new_mode.vsync_end &&
9305 old_mode.hskew == new_mode.hskew &&
9306 old_mode.vscan == new_mode.vscan &&
9307 (old_mode.vsync_end - old_mode.vsync_start) ==
9308 (new_mode.vsync_end - new_mode.vsync_start))
9314 static void set_freesync_fixed_config(struct dm_crtc_state *dm_new_crtc_state) {
9315 uint64_t num, den, res;
9316 struct drm_crtc_state *new_crtc_state = &dm_new_crtc_state->base;
9318 dm_new_crtc_state->freesync_config.state = VRR_STATE_ACTIVE_FIXED;
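/*
 * Worked example (hypothetical mode): for a 1920x1080@60 CEA timing with
 * mode.clock = 148500 kHz, htotal = 2200 and vtotal = 1125, the math below
 * gives (148500 * 1000 * 1000000) / (2200 * 1125) = 60,000,000 uHz, i.e.
 * a fixed refresh rate of 60 Hz.
 */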
9320 num = (unsigned long long)new_crtc_state->mode.clock * 1000 * 1000000;
9321 den = (unsigned long long)new_crtc_state->mode.htotal *
9322 (unsigned long long)new_crtc_state->mode.vtotal;
9324 res = div_u64(num, den);
9325 dm_new_crtc_state->freesync_config.fixed_refresh_in_uhz = res;
9328 static int dm_update_crtc_state(struct amdgpu_display_manager *dm,
9329 struct drm_atomic_state *state,
9330 struct drm_crtc *crtc,
9331 struct drm_crtc_state *old_crtc_state,
9332 struct drm_crtc_state *new_crtc_state,
9334 bool *lock_and_validation_needed)
9336 struct dm_atomic_state *dm_state = NULL;
9337 struct dm_crtc_state *dm_old_crtc_state, *dm_new_crtc_state;
9338 struct dc_stream_state *new_stream;
9342 * TODO Move this code into dm_crtc_atomic_check once we get rid of dc_validation_set
9343 * update changed items
9345 struct amdgpu_crtc *acrtc = NULL;
9346 struct amdgpu_dm_connector *aconnector = NULL;
9347 struct drm_connector_state *drm_new_conn_state = NULL, *drm_old_conn_state = NULL;
9348 struct dm_connector_state *dm_new_conn_state = NULL, *dm_old_conn_state = NULL;
9352 dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
9353 dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
9354 acrtc = to_amdgpu_crtc(crtc);
9355 aconnector = amdgpu_dm_find_first_crtc_matching_connector(state, crtc);
9357 /* TODO This hack should go away */
9358 if (aconnector && enable) {
9359 /* Make sure fake sink is created in plug-in scenario */
9360 drm_new_conn_state = drm_atomic_get_new_connector_state(state,
9362 drm_old_conn_state = drm_atomic_get_old_connector_state(state,
9365 if (IS_ERR(drm_new_conn_state)) {
9366 ret = PTR_ERR_OR_ZERO(drm_new_conn_state);
9370 dm_new_conn_state = to_dm_connector_state(drm_new_conn_state);
9371 dm_old_conn_state = to_dm_connector_state(drm_old_conn_state);
9373 if (!drm_atomic_crtc_needs_modeset(new_crtc_state))
9376 new_stream = create_validate_stream_for_sink(aconnector,
9377 &new_crtc_state->mode,
9379 dm_old_crtc_state->stream);
9382 * We can have no stream on ACTION_SET if a display was disconnected
9383 * during S3. In this case it is not an error: the OS will be updated
9384 * after detection and will do the right thing on the next atomic
9385 * commit.
9389 DRM_DEBUG_DRIVER("%s: Failed to create new stream for crtc %d\n",
9390 __func__, acrtc->base.base.id);
9396 * TODO: Check VSDB bits to decide whether this should
9397 * be enabled or not.
9399 new_stream->triggered_crtc_reset.enabled =
9400 dm->force_timing_sync;
9402 dm_new_crtc_state->abm_level = dm_new_conn_state->abm_level;
9404 ret = fill_hdr_info_packet(drm_new_conn_state,
9405 &new_stream->hdr_static_metadata);
9410 * If we already removed the old stream from the context
9411 * (and set the new stream to NULL) then we can't reuse
9412 * the old stream even if the stream and scaling are unchanged.
9413 * We'll hit the BUG_ON and black screen.
9415 * TODO: Refactor this function to allow this check to work
9416 * in all conditions.
9418 if (amdgpu_freesync_vid_mode &&
9419 dm_new_crtc_state->stream &&
9420 is_timing_unchanged_for_freesync(new_crtc_state, old_crtc_state))
9423 if (dm_new_crtc_state->stream &&
9424 dc_is_stream_unchanged(new_stream, dm_old_crtc_state->stream) &&
9425 dc_is_stream_scaling_unchanged(new_stream, dm_old_crtc_state->stream)) {
9426 new_crtc_state->mode_changed = false;
9427 DRM_DEBUG_DRIVER("Mode change not required, setting mode_changed to %d",
9428 new_crtc_state->mode_changed);
9432 /* mode_changed flag may get updated above, need to check again */
9433 if (!drm_atomic_crtc_needs_modeset(new_crtc_state))
9437 "amdgpu_crtc id:%d crtc_state_flags: enable:%d, active:%d, "
9438 "planes_changed:%d, mode_changed:%d,active_changed:%d,"
9439 "connectors_changed:%d\n",
9441 new_crtc_state->enable,
9442 new_crtc_state->active,
9443 new_crtc_state->planes_changed,
9444 new_crtc_state->mode_changed,
9445 new_crtc_state->active_changed,
9446 new_crtc_state->connectors_changed);
9448 /* Remove stream for any changed/disabled CRTC */
9451 if (!dm_old_crtc_state->stream)
9454 if (amdgpu_freesync_vid_mode && dm_new_crtc_state->stream &&
9455 is_timing_unchanged_for_freesync(new_crtc_state,
9457 new_crtc_state->mode_changed = false;
9459 "Mode change not required for front porch change, "
9460 "setting mode_changed to %d",
9461 new_crtc_state->mode_changed);
9463 set_freesync_fixed_config(dm_new_crtc_state);
9466 } else if (amdgpu_freesync_vid_mode && aconnector &&
9467 is_freesync_video_mode(&new_crtc_state->mode,
9469 set_freesync_fixed_config(dm_new_crtc_state);
9472 ret = dm_atomic_get_state(state, &dm_state);
9476 DRM_DEBUG_DRIVER("Disabling DRM crtc: %d\n",
9479 /* i.e. reset mode */
9480 if (dc_remove_stream_from_ctx(
9483 dm_old_crtc_state->stream) != DC_OK) {
9488 dc_stream_release(dm_old_crtc_state->stream);
9489 dm_new_crtc_state->stream = NULL;
9491 reset_freesync_config_for_crtc(dm_new_crtc_state);
9493 *lock_and_validation_needed = true;
9495 } else {/* Add stream for any updated/enabled CRTC */
9497 * Quick fix to prevent a NULL pointer dereference on new_stream when MST
9498 * connectors added in chained mode are not found in the existing crtc_state.
9499 * TODO: need to dig out the root cause of that
9501 if (!aconnector || (!aconnector->dc_sink && aconnector->mst_port))
9504 if (modereset_required(new_crtc_state))
9507 if (modeset_required(new_crtc_state, new_stream,
9508 dm_old_crtc_state->stream)) {
9510 WARN_ON(dm_new_crtc_state->stream);
9512 ret = dm_atomic_get_state(state, &dm_state);
9516 dm_new_crtc_state->stream = new_stream;
9518 dc_stream_retain(new_stream);
9520 DRM_DEBUG_ATOMIC("Enabling DRM crtc: %d\n",
9523 if (dc_add_stream_to_ctx(
9526 dm_new_crtc_state->stream) != DC_OK) {
9531 *lock_and_validation_needed = true;
9536 /* Release extra reference */
9538 dc_stream_release(new_stream);
9541 * We want to do dc stream updates that do not require a
9542 * full modeset below.
9544 if (!(enable && aconnector && new_crtc_state->active))
9547 * Given the above conditions, the dc state cannot be NULL because:
9548 * 1. We're in the process of enabling a CRTC (its stream has just been
9549 *    added to the dc context, or is already on the context),
9550 * 2. It has a valid connector attached, and
9551 * 3. It is currently active and enabled.
9552 * => The dc stream state currently exists.
9554 BUG_ON(dm_new_crtc_state->stream == NULL);
9556 /* Scaling or underscan settings */
9557 if (is_scaling_state_different(dm_old_conn_state, dm_new_conn_state))
9558 update_stream_scaling_settings(
9559 &new_crtc_state->mode, dm_new_conn_state, dm_new_crtc_state->stream);
9562 dm_new_crtc_state->abm_level = dm_new_conn_state->abm_level;
9565 * Color management settings. We also update color properties
9566 * when a modeset is needed, to ensure they get reprogrammed.
9568 if (dm_new_crtc_state->base.color_mgmt_changed ||
9569 drm_atomic_crtc_needs_modeset(new_crtc_state)) {
9570 ret = amdgpu_dm_update_crtc_color_mgmt(dm_new_crtc_state);
9575 /* Update Freesync settings. */
9576 get_freesync_config_for_crtc(dm_new_crtc_state,
9583 dc_stream_release(new_stream);
9587 static bool should_reset_plane(struct drm_atomic_state *state,
9588 struct drm_plane *plane,
9589 struct drm_plane_state *old_plane_state,
9590 struct drm_plane_state *new_plane_state)
9592 struct drm_plane *other;
9593 struct drm_plane_state *old_other_state, *new_other_state;
9594 struct drm_crtc_state *new_crtc_state;
9598 * TODO: Remove this hack once the checks below are sufficient
9599 * to determine when we need to reset all the planes on a CRTC.
9602 if (state->allow_modeset)
9605 /* Exit early if we know that we're adding or removing the plane. */
9606 if (old_plane_state->crtc != new_plane_state->crtc)
9609 /* old crtc == new_crtc == NULL, plane not in context. */
9610 if (!new_plane_state->crtc)
9614 drm_atomic_get_new_crtc_state(state, new_plane_state->crtc);
9616 if (!new_crtc_state)
9619 /* CRTC Degamma changes currently require us to recreate planes. */
9620 if (new_crtc_state->color_mgmt_changed)
9623 if (drm_atomic_crtc_needs_modeset(new_crtc_state))
9627 * If there are any new primary or overlay planes being added or
9628 * removed then the z-order can potentially change. To ensure
9629 * correct z-order and pipe acquisition the current DC architecture
9630 * requires us to remove and recreate all existing planes.
9632 * TODO: Come up with a more elegant solution for this.
9634 for_each_oldnew_plane_in_state(state, other, old_other_state, new_other_state, i) {
9635 struct amdgpu_framebuffer *old_afb, *new_afb;
9636 if (other->type == DRM_PLANE_TYPE_CURSOR)
9639 if (old_other_state->crtc != new_plane_state->crtc &&
9640 new_other_state->crtc != new_plane_state->crtc)
9643 if (old_other_state->crtc != new_other_state->crtc)
9646 /* Src/dst size and scaling updates. */
9647 if (old_other_state->src_w != new_other_state->src_w ||
9648 old_other_state->src_h != new_other_state->src_h ||
9649 old_other_state->crtc_w != new_other_state->crtc_w ||
9650 old_other_state->crtc_h != new_other_state->crtc_h)
9653 /* Rotation / mirroring updates. */
9654 if (old_other_state->rotation != new_other_state->rotation)
9657 /* Blending updates. */
9658 if (old_other_state->pixel_blend_mode !=
9659 new_other_state->pixel_blend_mode)
9662 /* Alpha updates. */
9663 if (old_other_state->alpha != new_other_state->alpha)
9666 /* Colorspace changes. */
9667 if (old_other_state->color_range != new_other_state->color_range ||
9668 old_other_state->color_encoding != new_other_state->color_encoding)
9671 /* Framebuffer checks fall at the end. */
9672 if (!old_other_state->fb || !new_other_state->fb)
9675 /* Pixel format changes can require bandwidth updates. */
9676 if (old_other_state->fb->format != new_other_state->fb->format)
9679 old_afb = (struct amdgpu_framebuffer *)old_other_state->fb;
9680 new_afb = (struct amdgpu_framebuffer *)new_other_state->fb;
9682 /* Tiling and DCC changes also require bandwidth updates. */
9683 if (old_afb->tiling_flags != new_afb->tiling_flags ||
9684 old_afb->base.modifier != new_afb->base.modifier)
9691 static int dm_check_cursor_fb(struct amdgpu_crtc *new_acrtc,
9692 struct drm_plane_state *new_plane_state,
9693 struct drm_framebuffer *fb)
9695 struct amdgpu_device *adev = drm_to_adev(new_acrtc->base.dev);
9696 struct amdgpu_framebuffer *afb = to_amdgpu_framebuffer(fb);
9700 if (fb->width > new_acrtc->max_cursor_width ||
9701 fb->height > new_acrtc->max_cursor_height) {
9702 DRM_DEBUG_ATOMIC("Bad cursor FB size %dx%d\n",
9703 new_plane_state->fb->width,
9704 new_plane_state->fb->height);
9707 if (new_plane_state->src_w != fb->width << 16 ||
9708 new_plane_state->src_h != fb->height << 16) {
9709 DRM_DEBUG_ATOMIC("Cropping not supported for cursor plane\n");
9713 /* Pitch in pixels */
9714 pitch = fb->pitches[0] / fb->format->cpp[0];
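/*
 * For example, a 64-pixel-wide ARGB8888 cursor FB typically has
 * pitches[0] = 256 bytes and cpp[0] = 4, giving a pitch of 64 pixels here.
 */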
9716 if (fb->width != pitch) {
9717 DRM_DEBUG_ATOMIC("Cursor FB width %d doesn't match pitch %d",
9726 /* FB pitch is supported by cursor plane */
9729 DRM_DEBUG_ATOMIC("Bad cursor FB pitch %d px\n", pitch);
9733 /* Core DRM takes care of checking FB modifiers, so we only need to
9734 * check tiling flags when the FB doesn't have a modifier. */
9735 if (!(fb->flags & DRM_MODE_FB_MODIFIERS)) {
9736 if (adev->family < AMDGPU_FAMILY_AI) {
9737 linear = AMDGPU_TILING_GET(afb->tiling_flags, ARRAY_MODE) != DC_ARRAY_2D_TILED_THIN1 &&
9738 AMDGPU_TILING_GET(afb->tiling_flags, ARRAY_MODE) != DC_ARRAY_1D_TILED_THIN1 &&
9739 AMDGPU_TILING_GET(afb->tiling_flags, MICRO_TILE_MODE) == 0;
9741 linear = AMDGPU_TILING_GET(afb->tiling_flags, SWIZZLE_MODE) == 0;
9744 DRM_DEBUG_ATOMIC("Cursor FB not linear");
9752 static int dm_update_plane_state(struct dc *dc,
9753 struct drm_atomic_state *state,
9754 struct drm_plane *plane,
9755 struct drm_plane_state *old_plane_state,
9756 struct drm_plane_state *new_plane_state,
9758 bool *lock_and_validation_needed)
9761 struct dm_atomic_state *dm_state = NULL;
9762 struct drm_crtc *new_plane_crtc, *old_plane_crtc;
9763 struct drm_crtc_state *old_crtc_state, *new_crtc_state;
9764 struct dm_crtc_state *dm_new_crtc_state, *dm_old_crtc_state;
9765 struct dm_plane_state *dm_new_plane_state, *dm_old_plane_state;
9766 struct amdgpu_crtc *new_acrtc;
9771 new_plane_crtc = new_plane_state->crtc;
9772 old_plane_crtc = old_plane_state->crtc;
9773 dm_new_plane_state = to_dm_plane_state(new_plane_state);
9774 dm_old_plane_state = to_dm_plane_state(old_plane_state);
9776 if (plane->type == DRM_PLANE_TYPE_CURSOR) {
9777 if (!enable || !new_plane_crtc ||
9778 drm_atomic_plane_disabling(plane->state, new_plane_state))
9781 new_acrtc = to_amdgpu_crtc(new_plane_crtc);
9783 if (new_plane_state->src_x != 0 || new_plane_state->src_y != 0) {
9784 DRM_DEBUG_ATOMIC("Cropping not supported for cursor plane\n");
9788 if (new_plane_state->fb) {
9789 ret = dm_check_cursor_fb(new_acrtc, new_plane_state,
9790 new_plane_state->fb);
9798 needs_reset = should_reset_plane(state, plane, old_plane_state,
9801 /* Remove any changed/removed planes */
9806 if (!old_plane_crtc)
9809 old_crtc_state = drm_atomic_get_old_crtc_state(
9810 state, old_plane_crtc);
9811 dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
9813 if (!dm_old_crtc_state->stream)
9816 DRM_DEBUG_ATOMIC("Disabling DRM plane: %d on DRM crtc %d\n",
9817 plane->base.id, old_plane_crtc->base.id);
9819 ret = dm_atomic_get_state(state, &dm_state);
9823 if (!dc_remove_plane_from_context(
9825 dm_old_crtc_state->stream,
9826 dm_old_plane_state->dc_state,
9827 dm_state->context)) {
9833 dc_plane_state_release(dm_old_plane_state->dc_state);
9834 dm_new_plane_state->dc_state = NULL;
9836 *lock_and_validation_needed = true;
9838 } else { /* Add new planes */
9839 struct dc_plane_state *dc_new_plane_state;
9841 if (drm_atomic_plane_disabling(plane->state, new_plane_state))
9844 if (!new_plane_crtc)
9847 new_crtc_state = drm_atomic_get_new_crtc_state(state, new_plane_crtc);
9848 dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
9850 if (!dm_new_crtc_state->stream)
9856 ret = dm_plane_helper_check_state(new_plane_state, new_crtc_state);
9860 WARN_ON(dm_new_plane_state->dc_state);
9862 dc_new_plane_state = dc_create_plane_state(dc);
9863 if (!dc_new_plane_state)
9866 DRM_DEBUG_ATOMIC("Enabling DRM plane: %d on DRM crtc %d\n",
9867 plane->base.id, new_plane_crtc->base.id);
9869 ret = fill_dc_plane_attributes(
9870 drm_to_adev(new_plane_crtc->dev),
9875 dc_plane_state_release(dc_new_plane_state);
9879 ret = dm_atomic_get_state(state, &dm_state);
9881 dc_plane_state_release(dc_new_plane_state);
9886 * Any atomic check errors that occur after this will
9887 * not need a release. The plane state will be attached
9888 * to the stream, and therefore part of the atomic
9889 * state. It'll be released when the atomic state is cleaned up.
9892 if (!dc_add_plane_to_context(
9894 dm_new_crtc_state->stream,
9896 dm_state->context)) {
9898 dc_plane_state_release(dc_new_plane_state);
9902 dm_new_plane_state->dc_state = dc_new_plane_state;
9904 /* Tell DC to do a full surface update every time there
9905 * is a plane change. Inefficient, but works for now.
9907 dm_new_plane_state->dc_state->update_flags.bits.full_update = 1;
9909 *lock_and_validation_needed = true;
9916 static int dm_check_crtc_cursor(struct drm_atomic_state *state,
9917 struct drm_crtc *crtc,
9918 struct drm_crtc_state *new_crtc_state)
9920 struct drm_plane_state *new_cursor_state, *new_primary_state;
9921 int cursor_scale_w, cursor_scale_h, primary_scale_w, primary_scale_h;
9923 /* On DCE and DCN there is no dedicated hardware cursor plane. We get a
9924 * cursor per pipe but it's going to inherit the scaling and
9925 * positioning from the underlying pipe. Check that the cursor plane's
9926 * scaling matches the primary plane's. */
9928 new_cursor_state = drm_atomic_get_new_plane_state(state, crtc->cursor);
9929 new_primary_state = drm_atomic_get_new_plane_state(state, crtc->primary);
9930 if (!new_cursor_state || !new_primary_state ||
9931 !new_cursor_state->fb || !new_primary_state->fb) {
9935 cursor_scale_w = new_cursor_state->crtc_w * 1000 /
9936 (new_cursor_state->src_w >> 16);
9937 cursor_scale_h = new_cursor_state->crtc_h * 1000 /
9938 (new_cursor_state->src_h >> 16);
9940 primary_scale_w = new_primary_state->crtc_w * 1000 /
9941 (new_primary_state->src_w >> 16);
9942 primary_scale_h = new_primary_state->crtc_h * 1000 /
9943 (new_primary_state->src_h >> 16);
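/*
 * Example with hypothetical values: a 64x64 cursor shown at 64x64 gives
 * cursor_scale_w/h = 64 * 1000 / 64 = 1000, and a 1920x1080 primary
 * scanned out 1:1 gives primary_scale_w/h = 1000 as well, so the check
 * below passes; scaling the primary up to 3840x2160 would make its scale
 * 2000 and the cursor update would be rejected.
 */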
9945 if (cursor_scale_w != primary_scale_w ||
9946 cursor_scale_h != primary_scale_h) {
9947 DRM_DEBUG_ATOMIC("Cursor plane scaling doesn't match primary plane\n");
9954 #if defined(CONFIG_DRM_AMD_DC_DCN)
9955 static int add_affected_mst_dsc_crtcs(struct drm_atomic_state *state, struct drm_crtc *crtc)
9957 struct drm_connector *connector;
9958 struct drm_connector_state *conn_state;
9959 struct amdgpu_dm_connector *aconnector = NULL;
9961 for_each_new_connector_in_state(state, connector, conn_state, i) {
9962 if (conn_state->crtc != crtc)
9965 aconnector = to_amdgpu_dm_connector(connector);
9966 if (!aconnector->port || !aconnector->mst_port)
9975 return drm_dp_mst_add_affected_dsc_crtcs(state, &aconnector->mst_port->mst_mgr);
9979 static int validate_overlay(struct drm_atomic_state *state)
9982 struct drm_plane *plane;
9983 struct drm_plane_state *old_plane_state, *new_plane_state;
9984 struct drm_plane_state *primary_state, *overlay_state = NULL;
9986 /* Check if primary plane is contained inside overlay */
9987 for_each_oldnew_plane_in_state_reverse(state, plane, old_plane_state, new_plane_state, i) {
9988 if (plane->type == DRM_PLANE_TYPE_OVERLAY) {
9989 if (drm_atomic_plane_disabling(plane->state, new_plane_state))
9992 overlay_state = new_plane_state;
9997 /* check if we're making changes to the overlay plane */
10001 /* check if overlay plane is enabled */
10002 if (!overlay_state->crtc)
10005 /* find the primary plane for the CRTC that the overlay is enabled on */
10006 primary_state = drm_atomic_get_plane_state(state, overlay_state->crtc->primary);
10007 if (IS_ERR(primary_state))
10008 return PTR_ERR(primary_state);
10010 /* check if primary plane is enabled */
10011 if (!primary_state->crtc)
10014 /* Perform the bounds check to ensure the overlay plane covers the primary */
10015 if (primary_state->crtc_x < overlay_state->crtc_x ||
10016 primary_state->crtc_y < overlay_state->crtc_y ||
10017 primary_state->crtc_x + primary_state->crtc_w > overlay_state->crtc_x + overlay_state->crtc_w ||
10018 primary_state->crtc_y + primary_state->crtc_h > overlay_state->crtc_y + overlay_state->crtc_h) {
10019 DRM_DEBUG_ATOMIC("Overlay plane is enabled with hardware cursor but does not fully cover primary plane\n");
10027 * amdgpu_dm_atomic_check() - Atomic check implementation for AMDgpu DM.
10028 * @dev: The DRM device
10029 * @state: The atomic state to commit
10031 * Validate that the given atomic state is programmable by DC into hardware.
10032 * This involves constructing a &struct dc_state reflecting the new hardware
10033 * state we wish to commit, then querying DC to see if it is programmable. It's
10034 * important not to modify the existing DC state. Otherwise, atomic_check
10035 * may unexpectedly commit hardware changes.
10037 * When validating the DC state, it's important that the right locks are
10038 * acquired. For the full-update case, which removes/adds/updates streams on
10039 * one CRTC while flipping on another CRTC, acquiring the global lock
10040 * guarantees that any such full-update commit will wait for the completion of
10041 * any outstanding flip using DRM's synchronization events.
10043 * Note that DM adds the affected connectors for all CRTCs in state, when that
10044 * might not seem necessary. This is because DC stream creation requires the
10045 * DC sink, which is tied to the DRM connector state. Cleaning this up should
10046 * be possible but non-trivial - a possible TODO item.
10048 * Return: 0 on success, negative error code if validation failed.
10050 static int amdgpu_dm_atomic_check(struct drm_device *dev,
10051 struct drm_atomic_state *state)
10053 struct amdgpu_device *adev = drm_to_adev(dev);
10054 struct dm_atomic_state *dm_state = NULL;
10055 struct dc *dc = adev->dm.dc;
10056 struct drm_connector *connector;
10057 struct drm_connector_state *old_con_state, *new_con_state;
10058 struct drm_crtc *crtc;
10059 struct drm_crtc_state *old_crtc_state, *new_crtc_state;
10060 struct drm_plane *plane;
10061 struct drm_plane_state *old_plane_state, *new_plane_state;
10062 enum dc_status status;
10064 bool lock_and_validation_needed = false;
10065 struct dm_crtc_state *dm_old_crtc_state;
10067 trace_amdgpu_dm_atomic_check_begin(state);
10069 ret = drm_atomic_helper_check_modeset(dev, state);
10073 /* Check connector changes */
10074 for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) {
10075 struct dm_connector_state *dm_old_con_state = to_dm_connector_state(old_con_state);
10076 struct dm_connector_state *dm_new_con_state = to_dm_connector_state(new_con_state);
10078 /* Skip connectors that are disabled or part of modeset already. */
10079 if (!old_con_state->crtc && !new_con_state->crtc)
10082 if (!new_con_state->crtc)
10085 new_crtc_state = drm_atomic_get_crtc_state(state, new_con_state->crtc);
10086 if (IS_ERR(new_crtc_state)) {
10087 ret = PTR_ERR(new_crtc_state);
10091 if (dm_old_con_state->abm_level !=
10092 dm_new_con_state->abm_level)
10093 new_crtc_state->connectors_changed = true;
10096 #if defined(CONFIG_DRM_AMD_DC_DCN)
10097 if (dc_resource_is_dsc_encoding_supported(dc)) {
10098 for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
10099 if (drm_atomic_crtc_needs_modeset(new_crtc_state)) {
10100 ret = add_affected_mst_dsc_crtcs(state, crtc);
10107 for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
10108 dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
10110 if (!drm_atomic_crtc_needs_modeset(new_crtc_state) &&
10111 !new_crtc_state->color_mgmt_changed &&
10112 old_crtc_state->vrr_enabled == new_crtc_state->vrr_enabled &&
10113 dm_old_crtc_state->dsc_force_changed == false)
10116 if (!new_crtc_state->enable)
10119 ret = drm_atomic_add_affected_connectors(state, crtc);
10123 ret = drm_atomic_add_affected_planes(state, crtc);
10127 if (dm_old_crtc_state->dsc_force_changed)
10128 new_crtc_state->mode_changed = true;
10132 * Add all primary and overlay planes on the CRTC to the state
10133 * whenever a plane is enabled to maintain correct z-ordering
10134 * and to enable fast surface updates.
10136 drm_for_each_crtc(crtc, dev) {
10137 bool modified = false;
10139 for_each_oldnew_plane_in_state(state, plane, old_plane_state, new_plane_state, i) {
10140 if (plane->type == DRM_PLANE_TYPE_CURSOR)
10143 if (new_plane_state->crtc == crtc ||
10144 old_plane_state->crtc == crtc) {
10153 drm_for_each_plane_mask(plane, state->dev, crtc->state->plane_mask) {
10154 if (plane->type == DRM_PLANE_TYPE_CURSOR)
10158 drm_atomic_get_plane_state(state, plane);
10160 if (IS_ERR(new_plane_state)) {
10161 ret = PTR_ERR(new_plane_state);
10167 /* Remove existing planes if they are modified */
10168 for_each_oldnew_plane_in_state_reverse(state, plane, old_plane_state, new_plane_state, i) {
10169 ret = dm_update_plane_state(dc, state, plane,
10173 &lock_and_validation_needed);
10178 /* Disable all crtcs which require disable */
10179 for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
10180 ret = dm_update_crtc_state(&adev->dm, state, crtc,
10184 &lock_and_validation_needed);
10189 /* Enable all crtcs which require enable */
10190 for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
10191 ret = dm_update_crtc_state(&adev->dm, state, crtc,
10195 &lock_and_validation_needed);
10200 ret = validate_overlay(state);
10204 /* Add new/modified planes */
10205 for_each_oldnew_plane_in_state_reverse(state, plane, old_plane_state, new_plane_state, i) {
10206 ret = dm_update_plane_state(dc, state, plane,
10210 &lock_and_validation_needed);
10215 /* Run this here since we want to validate the streams we created */
10216 ret = drm_atomic_helper_check_planes(dev, state);
10220 /* Check cursor planes scaling */
10221 for_each_new_crtc_in_state(state, crtc, new_crtc_state, i) {
10222 ret = dm_check_crtc_cursor(state, crtc, new_crtc_state);
10227 if (state->legacy_cursor_update) {
10229 * This is a fast cursor update coming from the plane update
10230 * helper; check if it can be done asynchronously for better performance.
10233 state->async_update =
10234 !drm_atomic_helper_async_check(dev, state);
10237 * Skip the remaining global validation if this is an async
10238 * update. Cursor updates can be done without affecting
10239 * state or bandwidth calcs and this avoids the performance
10240 * penalty of locking the private state object and
10241 * allocating a new dc_state.
10243 if (state->async_update)
10247 /* Check scaling and underscan changes*/
10248 /* TODO Removed scaling changes validation due to inability to commit
10249 * new stream into context w/o causing full reset. Need to
10250 * decide how to handle.
10252 for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) {
10253 struct dm_connector_state *dm_old_con_state = to_dm_connector_state(old_con_state);
10254 struct dm_connector_state *dm_new_con_state = to_dm_connector_state(new_con_state);
10255 struct amdgpu_crtc *acrtc = to_amdgpu_crtc(dm_new_con_state->base.crtc);
10257 /* Skip any modesets/resets */
10258 if (!acrtc || drm_atomic_crtc_needs_modeset(
10259 drm_atomic_get_new_crtc_state(state, &acrtc->base)))
10262 /* Skip anything that is not a scaling or underscan change */
10263 if (!is_scaling_state_different(dm_new_con_state, dm_old_con_state))
10266 lock_and_validation_needed = true;
10270 * Streams and planes are reset when there are changes that affect
10271 * bandwidth. Anything that affects bandwidth needs to go through
10272 * DC global validation to ensure that the configuration can be applied to hardware.
10275 * We have to currently stall out here in atomic_check for outstanding
10276 * commits to finish in this case because our IRQ handlers reference
10277 * DRM state directly - we can end up disabling interrupts too early if we don't.
10280 * TODO: Remove this stall and drop DM state private objects.
10282 if (lock_and_validation_needed) {
10283 ret = dm_atomic_get_state(state, &dm_state);
10287 ret = do_aquire_global_lock(dev, state);
10291 #if defined(CONFIG_DRM_AMD_DC_DCN)
10292 if (!compute_mst_dsc_configs_for_state(state, dm_state->context))
10295 ret = dm_update_mst_vcpi_slots_for_dsc(state, dm_state->context);
10301 * Perform validation of MST topology in the state:
10302 * We need to perform MST atomic check before calling
10303 * dc_validate_global_state(), or there is a chance
10304 * to get stuck in an infinite loop and hang eventually.
10306 ret = drm_dp_mst_atomic_check(state);
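/*
 * drm_dp_mst_atomic_check() verifies the time-slot/PBN bandwidth accounting
 * for every MST topology touched by this state, per the ordering note above.
 */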
10309 status = dc_validate_global_state(dc, dm_state->context, false);
10310 if (status != DC_OK) {
10311 DC_LOG_WARNING("DC global validation failure: %s (%d)",
10312 dc_status_to_str(status), status);
10318 * The commit is a fast update. Fast updates shouldn't change
10319 * the DC context, affect global validation, and can have their
10320 * commit work done in parallel with other commits not touching
10321 * the same resource. If we have a new DC context as part of
10322 * the DM atomic state from validation we need to free it and
10323 * retain the existing one instead.
10325 * Furthermore, since the DM atomic state only contains the DC
10326 * context and can safely be annulled, we can free the state
10327 * and clear the associated private object now to free
10328 * some memory and avoid a possible use-after-free later.
10331 for (i = 0; i < state->num_private_objs; i++) {
10332 struct drm_private_obj *obj = state->private_objs[i].ptr;
10334 if (obj->funcs == adev->dm.atomic_obj.funcs) {
10335 int j = state->num_private_objs-1;
10337 dm_atomic_destroy_state(obj,
10338 state->private_objs[i].state);
10340 /* If i is not at the end of the array then the
10341 * last element needs to be moved to where i was
10342 * before the array can safely be truncated.
10345 state->private_objs[i] =
10346 state->private_objs[j];
10348 state->private_objs[j].ptr = NULL;
10349 state->private_objs[j].state = NULL;
10350 state->private_objs[j].old_state = NULL;
10351 state->private_objs[j].new_state = NULL;
10353 state->num_private_objs = j;
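/*
 * The removal above swaps the entry with the last element and shrinks the
 * array; the ordering of the private object list is not relied upon, so this
 * keeps the removal O(1).
 */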
10359 /* Store the overall update type for use later in atomic check. */
10360 for_each_new_crtc_in_state (state, crtc, new_crtc_state, i) {
10361 struct dm_crtc_state *dm_new_crtc_state =
10362 to_dm_crtc_state(new_crtc_state);
10364 dm_new_crtc_state->update_type = lock_and_validation_needed ?
10369 /* Must be success */
10372 trace_amdgpu_dm_atomic_check_finish(state, ret);
10377 if (ret == -EDEADLK)
10378 DRM_DEBUG_DRIVER("Atomic check stopped to avoid deadlock.\n");
10379 else if (ret == -EINTR || ret == -EAGAIN || ret == -ERESTARTSYS)
10380 DRM_DEBUG_DRIVER("Atomic check stopped due to signal.\n");
10382 DRM_DEBUG_DRIVER("Atomic check failed with err: %d \n", ret);
10384 trace_amdgpu_dm_atomic_check_finish(state, ret);
10389 static bool is_dp_capable_without_timing_msa(struct dc *dc,
10390 struct amdgpu_dm_connector *amdgpu_dm_connector)
10393 bool capable = false;
10395 if (amdgpu_dm_connector->dc_link &&
10396 dm_helpers_dp_read_dpcd(
10398 amdgpu_dm_connector->dc_link,
10399 DP_DOWN_STREAM_PORT_COUNT,
10401 sizeof(dpcd_data))) {
10402 capable = (dpcd_data & DP_MSA_TIMING_PAR_IGNORED) ? true : false;
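/*
 * DP_MSA_TIMING_PAR_IGNORED is bit 6 of DP_DOWN_STREAM_PORT_COUNT
 * (DPCD 0x007). A sink that can ignore MSA timing parameters is the
 * prerequisite for FreeSync over DP, so the caller only consults the EDID
 * range descriptor when this bit is set.
 */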
10408 static bool parse_edid_cea(struct amdgpu_dm_connector *aconnector,
10409 uint8_t *edid_ext, int len,
10410 struct amdgpu_hdmi_vsdb_info *vsdb_info)
10413 struct amdgpu_device *adev = drm_to_adev(aconnector->base.dev);
10414 struct dc *dc = adev->dm.dc;
10416 /* send extension block to DMCU for parsing */
10417 for (i = 0; i < len; i += 8) {
10421 /* send 8 bytes at a time */
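/*
 * Handshake with the parser firmware: after each 8-byte chunk an ACK with
 * the next expected offset is read back via dc_edid_parser_recv_cea_ack();
 * once the final chunk has been sent, the parsed AMD VSDB result is fetched
 * with dc_edid_parser_recv_amd_vsdb() instead.
 */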
10422 if (!dc_edid_parser_send_cea(dc, i, len, &edid_ext[i], 8))
10426 /* EDID block transfer completed, expect result */
10427 int version, min_rate, max_rate;
10429 res = dc_edid_parser_recv_amd_vsdb(dc, &version, &min_rate, &max_rate);
10431 /* amd vsdb found */
10432 vsdb_info->freesync_supported = 1;
10433 vsdb_info->amd_vsdb_version = version;
10434 vsdb_info->min_refresh_rate_hz = min_rate;
10435 vsdb_info->max_refresh_rate_hz = max_rate;
10443 res = dc_edid_parser_recv_cea_ack(dc, &offset);
10451 static int parse_hdmi_amd_vsdb(struct amdgpu_dm_connector *aconnector,
10452 struct edid *edid, struct amdgpu_hdmi_vsdb_info *vsdb_info)
10454 uint8_t *edid_ext = NULL;
10456 bool valid_vsdb_found = false;
10458 /*----- drm_find_cea_extension() -----*/
10459 /* No EDID or EDID extensions */
10460 if (edid == NULL || edid->extensions == 0)
10463 /* Find CEA extension */
10464 for (i = 0; i < edid->extensions; i++) {
10465 edid_ext = (uint8_t *)edid + EDID_LENGTH * (i + 1);
10466 if (edid_ext[0] == CEA_EXT)
10470 if (i == edid->extensions)
10473 /*----- cea_db_offsets() -----*/
10474 if (edid_ext[0] != CEA_EXT)
10477 valid_vsdb_found = parse_edid_cea(aconnector, edid_ext, EDID_LENGTH, vsdb_info);
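/*
 * On success the index of the CEA extension block that carried the AMD VSDB
 * is returned; -ENODEV means there was no EDID, no CEA extension, or no
 * usable VSDB.
 */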
10479 return valid_vsdb_found ? i : -ENODEV;
10482 void amdgpu_dm_update_freesync_caps(struct drm_connector *connector,
10486 struct detailed_timing *timing;
10487 struct detailed_non_pixel *data;
10488 struct detailed_data_monitor_range *range;
10489 struct amdgpu_dm_connector *amdgpu_dm_connector =
10490 to_amdgpu_dm_connector(connector);
10491 struct dm_connector_state *dm_con_state = NULL;
10493 struct drm_device *dev = connector->dev;
10494 struct amdgpu_device *adev = drm_to_adev(dev);
10495 bool freesync_capable = false;
10496 struct amdgpu_hdmi_vsdb_info vsdb_info = {0};
10498 if (!connector->state) {
10499 DRM_ERROR("%s - Connector has no state", __func__);
10504 dm_con_state = to_dm_connector_state(connector->state);
10506 amdgpu_dm_connector->min_vfreq = 0;
10507 amdgpu_dm_connector->max_vfreq = 0;
10508 amdgpu_dm_connector->pixel_clock_mhz = 0;
10513 dm_con_state = to_dm_connector_state(connector->state);
10515 if (!amdgpu_dm_connector->dc_sink) {
10516 DRM_ERROR("dc_sink NULL, could not add free_sync module.\n");
10519 if (!adev->dm.freesync_module)
10523 if (amdgpu_dm_connector->dc_sink->sink_signal == SIGNAL_TYPE_DISPLAY_PORT
10524 || amdgpu_dm_connector->dc_sink->sink_signal == SIGNAL_TYPE_EDP) {
10525 bool edid_check_required = false;
10528 edid_check_required = is_dp_capable_without_timing_msa(
10530 amdgpu_dm_connector);
10533 if (edid_check_required && (edid->version > 1 ||
10534 (edid->version == 1 && edid->revision > 1))) {
10535 for (i = 0; i < 4; i++) {
10537 timing = &edid->detailed_timings[i];
10538 data = &timing->data.other_data;
10539 range = &data->data.range;
10541 * Check if monitor has continuous frequency mode
10543 if (data->type != EDID_DETAIL_MONITOR_RANGE)
10546 * Check for flag range limits only. If flag == 1 then
10547 * no additional timing information provided.
10548 * Default GTF, GTF Secondary curve and CVT are not supported.
10551 if (range->flags != 1)
10554 amdgpu_dm_connector->min_vfreq = range->min_vfreq;
10555 amdgpu_dm_connector->max_vfreq = range->max_vfreq;
10556 amdgpu_dm_connector->pixel_clock_mhz =
10557 range->pixel_clock_mhz * 10;
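/*
 * The EDID range-limits descriptor stores the maximum pixel clock in
 * 10 MHz units, hence the multiplication by 10 above to get MHz.
 */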
10559 connector->display_info.monitor_range.min_vfreq = range->min_vfreq;
10560 connector->display_info.monitor_range.max_vfreq = range->max_vfreq;
10565 if (amdgpu_dm_connector->max_vfreq -
10566 amdgpu_dm_connector->min_vfreq > 10) {
10568 freesync_capable = true;
10571 } else if (edid && amdgpu_dm_connector->dc_sink->sink_signal == SIGNAL_TYPE_HDMI_TYPE_A) {
10572 i = parse_hdmi_amd_vsdb(amdgpu_dm_connector, edid, &vsdb_info);
10573 if (i >= 0 && vsdb_info.freesync_supported) {
10574 timing = &edid->detailed_timings[i];
10575 data = &timing->data.other_data;
10577 amdgpu_dm_connector->min_vfreq = vsdb_info.min_refresh_rate_hz;
10578 amdgpu_dm_connector->max_vfreq = vsdb_info.max_refresh_rate_hz;
10579 if (amdgpu_dm_connector->max_vfreq - amdgpu_dm_connector->min_vfreq > 10)
10580 freesync_capable = true;
10582 connector->display_info.monitor_range.min_vfreq = vsdb_info.min_refresh_rate_hz;
10583 connector->display_info.monitor_range.max_vfreq = vsdb_info.max_refresh_rate_hz;
10589 dm_con_state->freesync_capable = freesync_capable;
10591 if (connector->vrr_capable_property)
10592 drm_connector_set_vrr_capable_property(connector,
10596 static void amdgpu_dm_set_psr_caps(struct dc_link *link)
10598 uint8_t dpcd_data[EDP_PSR_RECEIVER_CAP_SIZE];
10600 if (!(link->connector_signal & SIGNAL_TYPE_EDP))
10602 if (link->type == dc_connection_none)
10604 if (dm_helpers_dp_read_dpcd(NULL, link, DP_PSR_SUPPORT,
10605 dpcd_data, sizeof(dpcd_data))) {
10606 link->dpcd_caps.psr_caps.psr_version = dpcd_data[0];
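/*
 * dpcd_data[0] is the PSR support/version field (DP_PSR_SUPPORT, DPCD
 * 0x070): 0 means PSR is not supported, 1 is PSR1 and larger values are
 * PSR2 variants; DC only distinguishes "unsupported" from "version 1" here.
 */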
10608 if (dpcd_data[0] == 0) {
10609 link->psr_settings.psr_version = DC_PSR_VERSION_UNSUPPORTED;
10610 link->psr_settings.psr_feature_enabled = false;
10612 link->psr_settings.psr_version = DC_PSR_VERSION_1;
10613 link->psr_settings.psr_feature_enabled = true;
10616 DRM_INFO("PSR support:%d\n", link->psr_settings.psr_feature_enabled);
10621 * amdgpu_dm_link_setup_psr() - configure psr link
10622 * @stream: stream state
10624 * Return: true if success
10626 static bool amdgpu_dm_link_setup_psr(struct dc_stream_state *stream)
10628 struct dc_link *link = NULL;
10629 struct psr_config psr_config = {0};
10630 struct psr_context psr_context = {0};
10633 if (stream == NULL)
10636 link = stream->link;
10638 psr_config.psr_version = link->dpcd_caps.psr_caps.psr_version;
10640 if (psr_config.psr_version > 0) {
10641 psr_config.psr_exit_link_training_required = 0x1;
10642 psr_config.psr_frame_capture_indication_req = 0;
10643 psr_config.psr_rfb_setup_time = 0x37;
10644 psr_config.psr_sdp_transmit_line_num_deadline = 0x20;
10645 psr_config.allow_smu_optimizations = 0x0;
10647 ret = dc_link_setup_psr(link, stream, &psr_config, &psr_context);
10650 DRM_DEBUG_DRIVER("PSR link: %d\n", link->psr_settings.psr_feature_enabled);
10656 * amdgpu_dm_psr_enable() - enable psr f/w
10657 * @stream: stream state
10659 * Return: true if success
10661 bool amdgpu_dm_psr_enable(struct dc_stream_state *stream)
10663 struct dc_link *link = stream->link;
10664 unsigned int vsync_rate_hz = 0;
10665 struct dc_static_screen_params params = {0};
10666 /* Calculate number of static frames before generating interrupt to enter PSR. */
10669 // Init fail-safe of 2 static frames
10670 unsigned int num_frames_static = 2;
10672 DRM_DEBUG_DRIVER("Enabling psr...\n");
10674 vsync_rate_hz = div64_u64(div64_u64((
10675 stream->timing.pix_clk_100hz * 100),
10676 stream->timing.v_total),
10677 stream->timing.h_total);
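/*
 * Illustrative numbers (not from a real mode): a 148.5 MHz pixel clock with
 * h_total = 2200 and v_total = 1125 gives 148500000 / 1125 / 2200 = 60 Hz.
 */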
10680 * Calculate number of frames such that at least 30 ms of time has passed.
10683 if (vsync_rate_hz != 0) {
10684 unsigned int frame_time_microsec = 1000000 / vsync_rate_hz;
10685 num_frames_static = (30000 / frame_time_microsec) + 1;
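/*
 * Example: at 60 Hz the frame time is 16666 us, so 30000 / 16666 + 1 = 2
 * static frames; the "+ 1" rounds up so that at least 30 ms elapses before
 * the static-screen event is raised.
 */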
10688 params.triggers.cursor_update = true;
10689 params.triggers.overlay_update = true;
10690 params.triggers.surface_update = true;
10691 params.num_frames = num_frames_static;
10693 dc_stream_set_static_screen_params(link->ctx->dc,
10697 return dc_link_set_psr_allow_active(link, true, false, false);
10701 * amdgpu_dm_psr_disable() - disable psr f/w
10702 * @stream: stream state
10704 * Return: true if success
10706 static bool amdgpu_dm_psr_disable(struct dc_stream_state *stream)
10709 DRM_DEBUG_DRIVER("Disabling psr...\n");
10711 return dc_link_set_psr_allow_active(stream->link, false, true, false);
10715 * amdgpu_dm_psr_disable_all() - disable psr f/w if psr is enabled on any stream
10716 * @dm: amdgpu display manager device
10718 * Return: true if success
10720 static bool amdgpu_dm_psr_disable_all(struct amdgpu_display_manager *dm)
10722 DRM_DEBUG_DRIVER("Disabling psr if psr is enabled on any stream\n");
10723 return dc_set_psr_allow_active(dm->dc, false);
10726 void amdgpu_dm_trigger_timing_sync(struct drm_device *dev)
10728 struct amdgpu_device *adev = drm_to_adev(dev);
10729 struct dc *dc = adev->dm.dc;
10732 mutex_lock(&adev->dm.dc_lock);
10733 if (dc->current_state) {
10734 for (i = 0; i < dc->current_state->stream_count; ++i)
10735 dc->current_state->streams[i]
10736 ->triggered_crtc_reset.enabled =
10737 adev->dm.force_timing_sync;
10739 dm_enable_per_frame_crtc_master_sync(dc->current_state);
10740 dc_trigger_sync(dc, dc->current_state);
10742 mutex_unlock(&adev->dm.dc_lock);
10745 void dm_write_reg_func(const struct dc_context *ctx, uint32_t address,
10746 uint32_t value, const char *func_name)
10748 #ifdef DM_CHECK_ADDR_0
10749 if (address == 0) {
10750 DC_ERR("invalid register write. address = 0");
10754 cgs_write_register(ctx->cgs_device, address, value);
10755 trace_amdgpu_dc_wreg(&ctx->perf_trace->write_count, address, value);
10758 uint32_t dm_read_reg_func(const struct dc_context *ctx, uint32_t address,
10759 const char *func_name)
10762 #ifdef DM_CHECK_ADDR_0
10763 if (address == 0) {
10764 DC_ERR("invalid register read; address = 0\n");
10769 if (ctx->dmub_srv &&
10770 ctx->dmub_srv->reg_helper_offload.gather_in_progress &&
10771 !ctx->dmub_srv->reg_helper_offload.should_burst_write) {
10776 value = cgs_read_register(ctx->cgs_device, address);
10778 trace_amdgpu_dc_rreg(&ctx->perf_trace->read_count, address, value);
10783 int amdgpu_dm_process_dmub_aux_transfer_sync(struct dc_context *ctx, unsigned int linkIndex,
10784 struct aux_payload *payload, enum aux_return_code_type *operation_result)
10786 struct amdgpu_device *adev = ctx->driver_context;
10789 dc_process_dmub_aux_transfer_async(ctx->dc, linkIndex, payload);
10790 ret = wait_for_completion_interruptible_timeout(&adev->dm.dmub_aux_transfer_done, 10*HZ);
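/*
 * The AUX request is handed to the DMUB firmware asynchronously; the
 * completion is signalled from the DMUB notification path, and the 10 * HZ
 * timeout (10 seconds) is mapped to AUX_RET_ERROR_TIMEOUT below.
 */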
10792 *operation_result = AUX_RET_ERROR_TIMEOUT;
10795 *operation_result = (enum aux_return_code_type)adev->dm.dmub_notify->result;
10797 if (adev->dm.dmub_notify->result == AUX_RET_SUCCESS) {
10798 (*payload->reply) = adev->dm.dmub_notify->aux_reply.command;
10800 // For the read case, copy data to the payload
10801 if (!payload->write && adev->dm.dmub_notify->aux_reply.length &&
10802 (*payload->reply == AUX_TRANSACTION_REPLY_AUX_ACK))
10803 memcpy(payload->data, adev->dm.dmub_notify->aux_reply.data,
10804 adev->dm.dmub_notify->aux_reply.length);
10807 return adev->dm.dmub_notify->aux_reply.length;