2 * Copyright 2015 Advanced Micro Devices, Inc.
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
26 /* The caprices of the preprocessor require that this be declared right here */
27 #define CREATE_TRACE_POINTS
29 #include "dm_services_types.h"
31 #include "dc_link_dp.h"
32 #include "dc/inc/core_types.h"
33 #include "dal_asic_id.h"
34 #include "dmub/dmub_srv.h"
35 #include "dc/inc/hw/dmcu.h"
36 #include "dc/inc/hw/abm.h"
37 #include "dc/dc_dmub_srv.h"
38 #include "dc/dc_edid_parser.h"
39 #include "dc/dc_stat.h"
40 #include "amdgpu_dm_trace.h"
44 #include "amdgpu_display.h"
45 #include "amdgpu_ucode.h"
47 #include "amdgpu_dm.h"
48 #ifdef CONFIG_DRM_AMD_DC_HDCP
49 #include "amdgpu_dm_hdcp.h"
50 #include <drm/drm_hdcp.h>
52 #include "amdgpu_pm.h"
54 #include "amd_shared.h"
55 #include "amdgpu_dm_irq.h"
56 #include "dm_helpers.h"
57 #include "amdgpu_dm_mst_types.h"
58 #if defined(CONFIG_DEBUG_FS)
59 #include "amdgpu_dm_debugfs.h"
61 #include "amdgpu_dm_psr.h"
63 #include "ivsrcid/ivsrcid_vislands30.h"
65 #include "i2caux_interface.h"
66 #include <linux/module.h>
67 #include <linux/moduleparam.h>
68 #include <linux/types.h>
69 #include <linux/pm_runtime.h>
70 #include <linux/pci.h>
71 #include <linux/firmware.h>
72 #include <linux/component.h>
73 #include <linux/dmi.h>
75 #include <drm/drm_atomic.h>
76 #include <drm/drm_atomic_uapi.h>
77 #include <drm/drm_atomic_helper.h>
78 #include <drm/drm_dp_mst_helper.h>
79 #include <drm/drm_fb_helper.h>
80 #include <drm/drm_fourcc.h>
81 #include <drm/drm_edid.h>
82 #include <drm/drm_vblank.h>
83 #include <drm/drm_audio_component.h>
85 #if defined(CONFIG_DRM_AMD_DC_DCN)
86 #include "ivsrcid/dcn/irqsrcs_dcn_1_0.h"
88 #include "dcn/dcn_1_0_offset.h"
89 #include "dcn/dcn_1_0_sh_mask.h"
90 #include "soc15_hw_ip.h"
91 #include "vega10_ip_offset.h"
93 #include "soc15_common.h"
96 #include "modules/inc/mod_freesync.h"
97 #include "modules/power/power_helpers.h"
98 #include "modules/inc/mod_info_packet.h"
100 #define FIRMWARE_RENOIR_DMUB "amdgpu/renoir_dmcub.bin"
101 MODULE_FIRMWARE(FIRMWARE_RENOIR_DMUB);
102 #define FIRMWARE_SIENNA_CICHLID_DMUB "amdgpu/sienna_cichlid_dmcub.bin"
103 MODULE_FIRMWARE(FIRMWARE_SIENNA_CICHLID_DMUB);
104 #define FIRMWARE_NAVY_FLOUNDER_DMUB "amdgpu/navy_flounder_dmcub.bin"
105 MODULE_FIRMWARE(FIRMWARE_NAVY_FLOUNDER_DMUB);
106 #define FIRMWARE_GREEN_SARDINE_DMUB "amdgpu/green_sardine_dmcub.bin"
107 MODULE_FIRMWARE(FIRMWARE_GREEN_SARDINE_DMUB);
108 #define FIRMWARE_VANGOGH_DMUB "amdgpu/vangogh_dmcub.bin"
109 MODULE_FIRMWARE(FIRMWARE_VANGOGH_DMUB);
110 #define FIRMWARE_DIMGREY_CAVEFISH_DMUB "amdgpu/dimgrey_cavefish_dmcub.bin"
111 MODULE_FIRMWARE(FIRMWARE_DIMGREY_CAVEFISH_DMUB);
112 #define FIRMWARE_BEIGE_GOBY_DMUB "amdgpu/beige_goby_dmcub.bin"
113 MODULE_FIRMWARE(FIRMWARE_BEIGE_GOBY_DMUB);
114 #define FIRMWARE_YELLOW_CARP_DMUB "amdgpu/yellow_carp_dmcub.bin"
115 MODULE_FIRMWARE(FIRMWARE_YELLOW_CARP_DMUB);
117 #define FIRMWARE_RAVEN_DMCU "amdgpu/raven_dmcu.bin"
118 MODULE_FIRMWARE(FIRMWARE_RAVEN_DMCU);
120 #define FIRMWARE_NAVI12_DMCU "amdgpu/navi12_dmcu.bin"
121 MODULE_FIRMWARE(FIRMWARE_NAVI12_DMCU);
123 /* Number of bytes in PSP header for firmware. */
124 #define PSP_HEADER_BYTES 0x100
126 /* Number of bytes in PSP footer for firmware. */
127 #define PSP_FOOTER_BYTES 0x100
/**
 * DOC: overview
 *
 * The AMDgpu display manager, **amdgpu_dm** (or even simpler,
 * **dm**) sits between DRM and DC. It acts as a liaison, converting DRM
 * requests into DC requests, and DC responses into DRM responses.
 *
 * The root control structure is &struct amdgpu_display_manager.
 */
139 /* basic init/fini API */
140 static int amdgpu_dm_init(struct amdgpu_device *adev);
141 static void amdgpu_dm_fini(struct amdgpu_device *adev);
142 static bool is_freesync_video_mode(const struct drm_display_mode *mode, struct amdgpu_dm_connector *aconnector);
144 static enum drm_mode_subconnector get_subconnector_type(struct dc_link *link)
146 switch (link->dpcd_caps.dongle_type) {
147 case DISPLAY_DONGLE_NONE:
148 return DRM_MODE_SUBCONNECTOR_Native;
149 case DISPLAY_DONGLE_DP_VGA_CONVERTER:
150 return DRM_MODE_SUBCONNECTOR_VGA;
151 case DISPLAY_DONGLE_DP_DVI_CONVERTER:
152 case DISPLAY_DONGLE_DP_DVI_DONGLE:
153 return DRM_MODE_SUBCONNECTOR_DVID;
154 case DISPLAY_DONGLE_DP_HDMI_CONVERTER:
155 case DISPLAY_DONGLE_DP_HDMI_DONGLE:
156 return DRM_MODE_SUBCONNECTOR_HDMIA;
157 case DISPLAY_DONGLE_DP_HDMI_MISMATCHED_DONGLE:
159 return DRM_MODE_SUBCONNECTOR_Unknown;
163 static void update_subconnector_property(struct amdgpu_dm_connector *aconnector)
165 struct dc_link *link = aconnector->dc_link;
166 struct drm_connector *connector = &aconnector->base;
167 enum drm_mode_subconnector subconnector = DRM_MODE_SUBCONNECTOR_Unknown;
169 if (connector->connector_type != DRM_MODE_CONNECTOR_DisplayPort)
172 if (aconnector->dc_sink)
173 subconnector = get_subconnector_type(link);
175 drm_object_property_set_value(&connector->base,
176 connector->dev->mode_config.dp_subconnector_property,
/*
 * initializes drm_device display related structures, based on the information
 * provided by DAL. The drm structures are: drm_crtc, drm_connector,
 * drm_encoder, drm_mode_config
 *
 * Returns 0 on success
 */
static int amdgpu_dm_initialize_drm_device(struct amdgpu_device *adev);
/* removes and deallocates the drm structures, created by the above function */
static void amdgpu_dm_destroy_drm_device(struct amdgpu_display_manager *dm);
191 static int amdgpu_dm_plane_init(struct amdgpu_display_manager *dm,
192 struct drm_plane *plane,
193 unsigned long possible_crtcs,
194 const struct dc_plane_cap *plane_cap);
195 static int amdgpu_dm_crtc_init(struct amdgpu_display_manager *dm,
196 struct drm_plane *plane,
197 uint32_t link_index);
198 static int amdgpu_dm_connector_init(struct amdgpu_display_manager *dm,
199 struct amdgpu_dm_connector *amdgpu_dm_connector,
201 struct amdgpu_encoder *amdgpu_encoder);
202 static int amdgpu_dm_encoder_init(struct drm_device *dev,
203 struct amdgpu_encoder *aencoder,
204 uint32_t link_index);
206 static int amdgpu_dm_connector_get_modes(struct drm_connector *connector);
208 static void amdgpu_dm_atomic_commit_tail(struct drm_atomic_state *state);
210 static int amdgpu_dm_atomic_check(struct drm_device *dev,
211 struct drm_atomic_state *state);
213 static void handle_cursor_update(struct drm_plane *plane,
214 struct drm_plane_state *old_plane_state);
216 static const struct drm_format_info *
217 amd_get_format_info(const struct drm_mode_fb_cmd2 *cmd);
219 static void handle_hpd_irq_helper(struct amdgpu_dm_connector *aconnector);
222 is_timing_unchanged_for_freesync(struct drm_crtc_state *old_crtc_state,
223 struct drm_crtc_state *new_crtc_state);
225 * dm_vblank_get_counter
228 * Get counter for number of vertical blanks
231 * struct amdgpu_device *adev - [in] desired amdgpu device
232 * int disp_idx - [in] which CRTC to get the counter from
235 * Counter for vertical blanks
237 static u32 dm_vblank_get_counter(struct amdgpu_device *adev, int crtc)
239 if (crtc >= adev->mode_info.num_crtc)
242 struct amdgpu_crtc *acrtc = adev->mode_info.crtcs[crtc];
244 if (acrtc->dm_irq_params.stream == NULL) {
245 DRM_ERROR("dc_stream_state is NULL for crtc '%d'!\n",
250 return dc_stream_get_vblank_counter(acrtc->dm_irq_params.stream);
254 static int dm_crtc_get_scanoutpos(struct amdgpu_device *adev, int crtc,
255 u32 *vbl, u32 *position)
257 uint32_t v_blank_start, v_blank_end, h_position, v_position;
259 if ((crtc < 0) || (crtc >= adev->mode_info.num_crtc))
262 struct amdgpu_crtc *acrtc = adev->mode_info.crtcs[crtc];
264 if (acrtc->dm_irq_params.stream == NULL) {
265 DRM_ERROR("dc_stream_state is NULL for crtc '%d'!\n",
271 * TODO rework base driver to use values directly.
272 * for now parse it back into reg-format
274 dc_stream_get_scanoutpos(acrtc->dm_irq_params.stream,
280 *position = v_position | (h_position << 16);
281 *vbl = v_blank_start | (v_blank_end << 16);
287 static bool dm_is_idle(void *handle)
/* amd_ip_funcs hook: nothing to wait for; always succeeds. */
static int dm_wait_for_idle(void *handle)
{
	/* XXX todo */
	return 0;
}
299 static bool dm_check_soft_reset(void *handle)
/* amd_ip_funcs hook: soft reset is a no-op for DM. */
static int dm_soft_reset(void *handle)
{
	/* XXX todo */
	return 0;
}
310 static struct amdgpu_crtc *
311 get_crtc_by_otg_inst(struct amdgpu_device *adev,
314 struct drm_device *dev = adev_to_drm(adev);
315 struct drm_crtc *crtc;
316 struct amdgpu_crtc *amdgpu_crtc;
318 if (WARN_ON(otg_inst == -1))
319 return adev->mode_info.crtcs[0];
321 list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
322 amdgpu_crtc = to_amdgpu_crtc(crtc);
324 if (amdgpu_crtc->otg_inst == otg_inst)
331 static inline bool amdgpu_dm_vrr_active_irq(struct amdgpu_crtc *acrtc)
333 return acrtc->dm_irq_params.freesync_config.state ==
334 VRR_STATE_ACTIVE_VARIABLE ||
335 acrtc->dm_irq_params.freesync_config.state ==
336 VRR_STATE_ACTIVE_FIXED;
339 static inline bool amdgpu_dm_vrr_active(struct dm_crtc_state *dm_state)
341 return dm_state->freesync_config.state == VRR_STATE_ACTIVE_VARIABLE ||
342 dm_state->freesync_config.state == VRR_STATE_ACTIVE_FIXED;
345 static inline bool is_dc_timing_adjust_needed(struct dm_crtc_state *old_state,
346 struct dm_crtc_state *new_state)
348 if (new_state->freesync_config.state == VRR_STATE_ACTIVE_FIXED)
350 else if (amdgpu_dm_vrr_active(old_state) != amdgpu_dm_vrr_active(new_state))
357 * dm_pflip_high_irq() - Handle pageflip interrupt
358 * @interrupt_params: ignored
360 * Handles the pageflip interrupt by notifying all interested parties
361 * that the pageflip has been completed.
363 static void dm_pflip_high_irq(void *interrupt_params)
365 struct amdgpu_crtc *amdgpu_crtc;
366 struct common_irq_params *irq_params = interrupt_params;
367 struct amdgpu_device *adev = irq_params->adev;
369 struct drm_pending_vblank_event *e;
370 uint32_t vpos, hpos, v_blank_start, v_blank_end;
373 amdgpu_crtc = get_crtc_by_otg_inst(adev, irq_params->irq_src - IRQ_TYPE_PFLIP);
375 /* IRQ could occur when in initial stage */
376 /* TODO work and BO cleanup */
377 if (amdgpu_crtc == NULL) {
378 DC_LOG_PFLIP("CRTC is null, returning.\n");
382 spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);
384 if (amdgpu_crtc->pflip_status != AMDGPU_FLIP_SUBMITTED){
385 DC_LOG_PFLIP("amdgpu_crtc->pflip_status = %d !=AMDGPU_FLIP_SUBMITTED(%d) on crtc:%d[%p] \n",
386 amdgpu_crtc->pflip_status,
387 AMDGPU_FLIP_SUBMITTED,
388 amdgpu_crtc->crtc_id,
390 spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
394 /* page flip completed. */
395 e = amdgpu_crtc->event;
396 amdgpu_crtc->event = NULL;
400 vrr_active = amdgpu_dm_vrr_active_irq(amdgpu_crtc);
402 /* Fixed refresh rate, or VRR scanout position outside front-porch? */
404 !dc_stream_get_scanoutpos(amdgpu_crtc->dm_irq_params.stream, &v_blank_start,
405 &v_blank_end, &hpos, &vpos) ||
406 (vpos < v_blank_start)) {
407 /* Update to correct count and vblank timestamp if racing with
408 * vblank irq. This also updates to the correct vblank timestamp
409 * even in VRR mode, as scanout is past the front-porch atm.
411 drm_crtc_accurate_vblank_count(&amdgpu_crtc->base);
413 /* Wake up userspace by sending the pageflip event with proper
414 * count and timestamp of vblank of flip completion.
417 drm_crtc_send_vblank_event(&amdgpu_crtc->base, e);
419 /* Event sent, so done with vblank for this flip */
420 drm_crtc_vblank_put(&amdgpu_crtc->base);
423 /* VRR active and inside front-porch: vblank count and
424 * timestamp for pageflip event will only be up to date after
425 * drm_crtc_handle_vblank() has been executed from late vblank
426 * irq handler after start of back-porch (vline 0). We queue the
427 * pageflip event for send-out by drm_crtc_handle_vblank() with
428 * updated timestamp and count, once it runs after us.
430 * We need to open-code this instead of using the helper
431 * drm_crtc_arm_vblank_event(), as that helper would
432 * call drm_crtc_accurate_vblank_count(), which we must
433 * not call in VRR mode while we are in front-porch!
436 /* sequence will be replaced by real count during send-out. */
437 e->sequence = drm_crtc_vblank_count(&amdgpu_crtc->base);
438 e->pipe = amdgpu_crtc->crtc_id;
440 list_add_tail(&e->base.link, &adev_to_drm(adev)->vblank_event_list);
444 /* Keep track of vblank of this flip for flip throttling. We use the
445 * cooked hw counter, as that one incremented at start of this vblank
446 * of pageflip completion, so last_flip_vblank is the forbidden count
447 * for queueing new pageflips if vsync + VRR is enabled.
449 amdgpu_crtc->dm_irq_params.last_flip_vblank =
450 amdgpu_get_vblank_counter_kms(&amdgpu_crtc->base);
452 amdgpu_crtc->pflip_status = AMDGPU_FLIP_NONE;
453 spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
455 DC_LOG_PFLIP("crtc:%d[%p], pflip_stat:AMDGPU_FLIP_NONE, vrr[%d]-fp %d\n",
456 amdgpu_crtc->crtc_id, amdgpu_crtc,
457 vrr_active, (int) !e);
460 static void dm_vupdate_high_irq(void *interrupt_params)
462 struct common_irq_params *irq_params = interrupt_params;
463 struct amdgpu_device *adev = irq_params->adev;
464 struct amdgpu_crtc *acrtc;
465 struct drm_device *drm_dev;
466 struct drm_vblank_crtc *vblank;
467 ktime_t frame_duration_ns, previous_timestamp;
471 acrtc = get_crtc_by_otg_inst(adev, irq_params->irq_src - IRQ_TYPE_VUPDATE);
474 vrr_active = amdgpu_dm_vrr_active_irq(acrtc);
475 drm_dev = acrtc->base.dev;
476 vblank = &drm_dev->vblank[acrtc->base.index];
477 previous_timestamp = atomic64_read(&irq_params->previous_timestamp);
478 frame_duration_ns = vblank->time - previous_timestamp;
480 if (frame_duration_ns > 0) {
481 trace_amdgpu_refresh_rate_track(acrtc->base.index,
483 ktime_divns(NSEC_PER_SEC, frame_duration_ns));
484 atomic64_set(&irq_params->previous_timestamp, vblank->time);
487 DC_LOG_VBLANK("crtc:%d, vupdate-vrr:%d\n",
491 /* Core vblank handling is done here after end of front-porch in
492 * vrr mode, as vblank timestamping will give valid results
493 * while now done after front-porch. This will also deliver
494 * page-flip completion events that have been queued to us
495 * if a pageflip happened inside front-porch.
498 drm_crtc_handle_vblank(&acrtc->base);
500 /* BTR processing for pre-DCE12 ASICs */
501 if (acrtc->dm_irq_params.stream &&
502 adev->family < AMDGPU_FAMILY_AI) {
503 spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);
504 mod_freesync_handle_v_update(
505 adev->dm.freesync_module,
506 acrtc->dm_irq_params.stream,
507 &acrtc->dm_irq_params.vrr_params);
509 dc_stream_adjust_vmin_vmax(
511 acrtc->dm_irq_params.stream,
512 &acrtc->dm_irq_params.vrr_params.adjust);
513 spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
520 * dm_crtc_high_irq() - Handles CRTC interrupt
521 * @interrupt_params: used for determining the CRTC instance
523 * Handles the CRTC/VSYNC interrupt by notfying DRM's VBLANK
526 static void dm_crtc_high_irq(void *interrupt_params)
528 struct common_irq_params *irq_params = interrupt_params;
529 struct amdgpu_device *adev = irq_params->adev;
530 struct amdgpu_crtc *acrtc;
534 acrtc = get_crtc_by_otg_inst(adev, irq_params->irq_src - IRQ_TYPE_VBLANK);
538 vrr_active = amdgpu_dm_vrr_active_irq(acrtc);
540 DC_LOG_VBLANK("crtc:%d, vupdate-vrr:%d, planes:%d\n", acrtc->crtc_id,
541 vrr_active, acrtc->dm_irq_params.active_planes);
544 * Core vblank handling at start of front-porch is only possible
545 * in non-vrr mode, as only there vblank timestamping will give
546 * valid results while done in front-porch. Otherwise defer it
547 * to dm_vupdate_high_irq after end of front-porch.
550 drm_crtc_handle_vblank(&acrtc->base);
553 * Following stuff must happen at start of vblank, for crc
554 * computation and below-the-range btr support in vrr mode.
556 amdgpu_dm_crtc_handle_crc_irq(&acrtc->base);
558 /* BTR updates need to happen before VUPDATE on Vega and above. */
559 if (adev->family < AMDGPU_FAMILY_AI)
562 spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);
564 if (acrtc->dm_irq_params.stream &&
565 acrtc->dm_irq_params.vrr_params.supported &&
566 acrtc->dm_irq_params.freesync_config.state ==
567 VRR_STATE_ACTIVE_VARIABLE) {
568 mod_freesync_handle_v_update(adev->dm.freesync_module,
569 acrtc->dm_irq_params.stream,
570 &acrtc->dm_irq_params.vrr_params);
572 dc_stream_adjust_vmin_vmax(adev->dm.dc, acrtc->dm_irq_params.stream,
573 &acrtc->dm_irq_params.vrr_params.adjust);
577 * If there aren't any active_planes then DCH HUBP may be clock-gated.
578 * In that case, pageflip completion interrupts won't fire and pageflip
579 * completion events won't get delivered. Prevent this by sending
580 * pending pageflip events from here if a flip is still pending.
582 * If any planes are enabled, use dm_pflip_high_irq() instead, to
583 * avoid race conditions between flip programming and completion,
584 * which could cause too early flip completion events.
586 if (adev->family >= AMDGPU_FAMILY_RV &&
587 acrtc->pflip_status == AMDGPU_FLIP_SUBMITTED &&
588 acrtc->dm_irq_params.active_planes == 0) {
590 drm_crtc_send_vblank_event(&acrtc->base, acrtc->event);
592 drm_crtc_vblank_put(&acrtc->base);
594 acrtc->pflip_status = AMDGPU_FLIP_NONE;
597 spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
600 #if defined(CONFIG_DRM_AMD_DC_DCN)
601 #if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
603 * dm_dcn_vertical_interrupt0_high_irq() - Handles OTG Vertical interrupt0 for
604 * DCN generation ASICs
605 * @interrupt_params: interrupt parameters
607 * Used to set crc window/read out crc value at vertical line 0 position
609 static void dm_dcn_vertical_interrupt0_high_irq(void *interrupt_params)
611 struct common_irq_params *irq_params = interrupt_params;
612 struct amdgpu_device *adev = irq_params->adev;
613 struct amdgpu_crtc *acrtc;
615 acrtc = get_crtc_by_otg_inst(adev, irq_params->irq_src - IRQ_TYPE_VLINE0);
620 amdgpu_dm_crtc_handle_crc_window_irq(&acrtc->base);
625 * dmub_aux_setconfig_reply_callback - Callback for AUX or SET_CONFIG command.
626 * @adev: amdgpu_device pointer
627 * @notify: dmub notification structure
629 * Dmub AUX or SET_CONFIG command completion processing callback
630 * Copies dmub notification to DM which is to be read by AUX command.
631 * issuing thread and also signals the event to wake up the thread.
633 void dmub_aux_setconfig_callback(struct amdgpu_device *adev, struct dmub_notification *notify)
635 if (adev->dm.dmub_notify)
636 memcpy(adev->dm.dmub_notify, notify, sizeof(struct dmub_notification));
637 if (notify->type == DMUB_NOTIFICATION_AUX_REPLY)
638 complete(&adev->dm.dmub_aux_transfer_done);
642 * dmub_hpd_callback - DMUB HPD interrupt processing callback.
643 * @adev: amdgpu_device pointer
644 * @notify: dmub notification structure
646 * Dmub Hpd interrupt processing callback. Gets displayindex through the
647 * ink index and calls helper to do the processing.
649 void dmub_hpd_callback(struct amdgpu_device *adev, struct dmub_notification *notify)
651 struct amdgpu_dm_connector *aconnector;
652 struct drm_connector *connector;
653 struct drm_connector_list_iter iter;
654 struct dc_link *link;
655 uint8_t link_index = 0;
656 struct drm_device *dev;
661 if (notify == NULL) {
662 DRM_ERROR("DMUB HPD callback notification was NULL");
666 if (notify->link_index > adev->dm.dc->link_count) {
667 DRM_ERROR("DMUB HPD index (%u)is abnormal", notify->link_index);
671 link_index = notify->link_index;
672 link = adev->dm.dc->links[link_index];
675 drm_connector_list_iter_begin(dev, &iter);
676 drm_for_each_connector_iter(connector, &iter) {
677 aconnector = to_amdgpu_dm_connector(connector);
678 if (link && aconnector->dc_link == link) {
679 DRM_INFO("DMUB HPD callback: link_index=%u\n", link_index);
680 handle_hpd_irq_helper(aconnector);
684 drm_connector_list_iter_end(&iter);
689 * register_dmub_notify_callback - Sets callback for DMUB notify
690 * @adev: amdgpu_device pointer
691 * @type: Type of dmub notification
692 * @callback: Dmub interrupt callback function
693 * @dmub_int_thread_offload: offload indicator
695 * API to register a dmub callback handler for a dmub notification
696 * Also sets indicator whether callback processing to be offloaded.
697 * to dmub interrupt handling thread
698 * Return: true if successfully registered, false if there is existing registration
700 bool register_dmub_notify_callback(struct amdgpu_device *adev, enum dmub_notification_type type,
701 dmub_notify_interrupt_callback_t callback, bool dmub_int_thread_offload)
703 if (callback != NULL && type < ARRAY_SIZE(adev->dm.dmub_thread_offload)) {
704 adev->dm.dmub_callback[type] = callback;
705 adev->dm.dmub_thread_offload[type] = dmub_int_thread_offload;
712 static void dm_handle_hpd_work(struct work_struct *work)
714 struct dmub_hpd_work *dmub_hpd_wrk;
716 dmub_hpd_wrk = container_of(work, struct dmub_hpd_work, handle_hpd_work);
718 if (!dmub_hpd_wrk->dmub_notify) {
719 DRM_ERROR("dmub_hpd_wrk dmub_notify is NULL");
723 if (dmub_hpd_wrk->dmub_notify->type < ARRAY_SIZE(dmub_hpd_wrk->adev->dm.dmub_callback)) {
724 dmub_hpd_wrk->adev->dm.dmub_callback[dmub_hpd_wrk->dmub_notify->type](dmub_hpd_wrk->adev,
725 dmub_hpd_wrk->dmub_notify);
731 #define DMUB_TRACE_MAX_READ 64
733 * dm_dmub_outbox1_low_irq() - Handles Outbox interrupt
734 * @interrupt_params: used for determining the Outbox instance
736 * Handles the Outbox Interrupt
739 static void dm_dmub_outbox1_low_irq(void *interrupt_params)
741 struct dmub_notification notify;
742 struct common_irq_params *irq_params = interrupt_params;
743 struct amdgpu_device *adev = irq_params->adev;
744 struct amdgpu_display_manager *dm = &adev->dm;
745 struct dmcub_trace_buf_entry entry = { 0 };
747 struct dmub_hpd_work *dmub_hpd_wrk;
749 if (dc_enable_dmub_notifications(adev->dm.dc)) {
750 dmub_hpd_wrk = kzalloc(sizeof(*dmub_hpd_wrk), GFP_ATOMIC);
752 DRM_ERROR("Failed to allocate dmub_hpd_wrk");
755 INIT_WORK(&dmub_hpd_wrk->handle_hpd_work, dm_handle_hpd_work);
757 if (irq_params->irq_src == DC_IRQ_SOURCE_DMCUB_OUTBOX) {
759 dc_stat_get_dmub_notification(adev->dm.dc, ¬ify);
760 if (notify.type >= ARRAY_SIZE(dm->dmub_thread_offload)) {
761 DRM_ERROR("DM: notify type %d larger than the array size %zu!", notify.type,
762 ARRAY_SIZE(dm->dmub_thread_offload));
765 if (dm->dmub_thread_offload[notify.type] == true) {
766 dmub_hpd_wrk->dmub_notify = ¬ify;
767 dmub_hpd_wrk->adev = adev;
768 queue_work(adev->dm.delayed_hpd_wq, &dmub_hpd_wrk->handle_hpd_work);
770 dm->dmub_callback[notify.type](adev, ¬ify);
773 } while (notify.pending_notification);
776 DRM_ERROR("DM: Failed to receive correct outbox IRQ !");
782 if (dc_dmub_srv_get_dmub_outbox0_msg(dm->dc, &entry)) {
783 trace_amdgpu_dmub_trace_high_irq(entry.trace_code, entry.tick_count,
784 entry.param0, entry.param1);
786 DRM_DEBUG_DRIVER("trace_code:%u, tick_count:%u, param0:%u, param1:%u\n",
787 entry.trace_code, entry.tick_count, entry.param0, entry.param1);
793 } while (count <= DMUB_TRACE_MAX_READ);
795 ASSERT(count <= DMUB_TRACE_MAX_READ);
799 static int dm_set_clockgating_state(void *handle,
800 enum amd_clockgating_state state)
805 static int dm_set_powergating_state(void *handle,
806 enum amd_powergating_state state)
/* Prototypes of private functions */
static int dm_early_init(void *handle);
814 /* Allocate memory for FBC compressed data */
815 static void amdgpu_dm_fbc_init(struct drm_connector *connector)
817 struct drm_device *dev = connector->dev;
818 struct amdgpu_device *adev = drm_to_adev(dev);
819 struct dm_compressor_info *compressor = &adev->dm.compressor;
820 struct amdgpu_dm_connector *aconn = to_amdgpu_dm_connector(connector);
821 struct drm_display_mode *mode;
822 unsigned long max_size = 0;
824 if (adev->dm.dc->fbc_compressor == NULL)
827 if (aconn->dc_link->connector_signal != SIGNAL_TYPE_EDP)
830 if (compressor->bo_ptr)
834 list_for_each_entry(mode, &connector->modes, head) {
835 if (max_size < mode->htotal * mode->vtotal)
836 max_size = mode->htotal * mode->vtotal;
840 int r = amdgpu_bo_create_kernel(adev, max_size * 4, PAGE_SIZE,
841 AMDGPU_GEM_DOMAIN_GTT, &compressor->bo_ptr,
842 &compressor->gpu_addr, &compressor->cpu_addr);
845 DRM_ERROR("DM: Failed to initialize FBC\n");
847 adev->dm.dc->ctx->fbc_gpu_addr = compressor->gpu_addr;
848 DRM_INFO("DM: FBC alloc %lu\n", max_size*4);
855 static int amdgpu_dm_audio_component_get_eld(struct device *kdev, int port,
856 int pipe, bool *enabled,
857 unsigned char *buf, int max_bytes)
859 struct drm_device *dev = dev_get_drvdata(kdev);
860 struct amdgpu_device *adev = drm_to_adev(dev);
861 struct drm_connector *connector;
862 struct drm_connector_list_iter conn_iter;
863 struct amdgpu_dm_connector *aconnector;
868 mutex_lock(&adev->dm.audio_lock);
870 drm_connector_list_iter_begin(dev, &conn_iter);
871 drm_for_each_connector_iter(connector, &conn_iter) {
872 aconnector = to_amdgpu_dm_connector(connector);
873 if (aconnector->audio_inst != port)
877 ret = drm_eld_size(connector->eld);
878 memcpy(buf, connector->eld, min(max_bytes, ret));
882 drm_connector_list_iter_end(&conn_iter);
884 mutex_unlock(&adev->dm.audio_lock);
886 DRM_DEBUG_KMS("Get ELD : idx=%d ret=%d en=%d\n", port, ret, *enabled);
891 static const struct drm_audio_component_ops amdgpu_dm_audio_component_ops = {
892 .get_eld = amdgpu_dm_audio_component_get_eld,
895 static int amdgpu_dm_audio_component_bind(struct device *kdev,
896 struct device *hda_kdev, void *data)
898 struct drm_device *dev = dev_get_drvdata(kdev);
899 struct amdgpu_device *adev = drm_to_adev(dev);
900 struct drm_audio_component *acomp = data;
902 acomp->ops = &amdgpu_dm_audio_component_ops;
904 adev->dm.audio_component = acomp;
909 static void amdgpu_dm_audio_component_unbind(struct device *kdev,
910 struct device *hda_kdev, void *data)
912 struct drm_device *dev = dev_get_drvdata(kdev);
913 struct amdgpu_device *adev = drm_to_adev(dev);
914 struct drm_audio_component *acomp = data;
918 adev->dm.audio_component = NULL;
921 static const struct component_ops amdgpu_dm_audio_component_bind_ops = {
922 .bind = amdgpu_dm_audio_component_bind,
923 .unbind = amdgpu_dm_audio_component_unbind,
926 static int amdgpu_dm_audio_init(struct amdgpu_device *adev)
933 adev->mode_info.audio.enabled = true;
935 adev->mode_info.audio.num_pins = adev->dm.dc->res_pool->audio_count;
937 for (i = 0; i < adev->mode_info.audio.num_pins; i++) {
938 adev->mode_info.audio.pin[i].channels = -1;
939 adev->mode_info.audio.pin[i].rate = -1;
940 adev->mode_info.audio.pin[i].bits_per_sample = -1;
941 adev->mode_info.audio.pin[i].status_bits = 0;
942 adev->mode_info.audio.pin[i].category_code = 0;
943 adev->mode_info.audio.pin[i].connected = false;
944 adev->mode_info.audio.pin[i].id =
945 adev->dm.dc->res_pool->audios[i]->inst;
946 adev->mode_info.audio.pin[i].offset = 0;
949 ret = component_add(adev->dev, &amdgpu_dm_audio_component_bind_ops);
953 adev->dm.audio_registered = true;
958 static void amdgpu_dm_audio_fini(struct amdgpu_device *adev)
963 if (!adev->mode_info.audio.enabled)
966 if (adev->dm.audio_registered) {
967 component_del(adev->dev, &amdgpu_dm_audio_component_bind_ops);
968 adev->dm.audio_registered = false;
971 /* TODO: Disable audio? */
973 adev->mode_info.audio.enabled = false;
976 static void amdgpu_dm_audio_eld_notify(struct amdgpu_device *adev, int pin)
978 struct drm_audio_component *acomp = adev->dm.audio_component;
980 if (acomp && acomp->audio_ops && acomp->audio_ops->pin_eld_notify) {
981 DRM_DEBUG_KMS("Notify ELD: %d\n", pin);
983 acomp->audio_ops->pin_eld_notify(acomp->audio_ops->audio_ptr,
988 static int dm_dmub_hw_init(struct amdgpu_device *adev)
990 const struct dmcub_firmware_header_v1_0 *hdr;
991 struct dmub_srv *dmub_srv = adev->dm.dmub_srv;
992 struct dmub_srv_fb_info *fb_info = adev->dm.dmub_fb_info;
993 const struct firmware *dmub_fw = adev->dm.dmub_fw;
994 struct dmcu *dmcu = adev->dm.dc->res_pool->dmcu;
995 struct abm *abm = adev->dm.dc->res_pool->abm;
996 struct dmub_srv_hw_params hw_params;
997 enum dmub_status status;
998 const unsigned char *fw_inst_const, *fw_bss_data;
999 uint32_t i, fw_inst_const_size, fw_bss_data_size;
1000 bool has_hw_support;
1003 /* DMUB isn't supported on the ASIC. */
1007 DRM_ERROR("No framebuffer info for DMUB service.\n");
1012 /* Firmware required for DMUB support. */
1013 DRM_ERROR("No firmware provided for DMUB.\n");
1017 status = dmub_srv_has_hw_support(dmub_srv, &has_hw_support);
1018 if (status != DMUB_STATUS_OK) {
1019 DRM_ERROR("Error checking HW support for DMUB: %d\n", status);
1023 if (!has_hw_support) {
1024 DRM_INFO("DMUB unsupported on ASIC\n");
1028 /* Reset DMCUB if it was previously running - before we overwrite its memory. */
1029 status = dmub_srv_hw_reset(dmub_srv);
1030 if (status != DMUB_STATUS_OK)
1031 DRM_WARN("Error resetting DMUB HW: %d\n", status);
1033 hdr = (const struct dmcub_firmware_header_v1_0 *)dmub_fw->data;
1035 fw_inst_const = dmub_fw->data +
1036 le32_to_cpu(hdr->header.ucode_array_offset_bytes) +
1039 fw_bss_data = dmub_fw->data +
1040 le32_to_cpu(hdr->header.ucode_array_offset_bytes) +
1041 le32_to_cpu(hdr->inst_const_bytes);
1043 /* Copy firmware and bios info into FB memory. */
1044 fw_inst_const_size = le32_to_cpu(hdr->inst_const_bytes) -
1045 PSP_HEADER_BYTES - PSP_FOOTER_BYTES;
1047 fw_bss_data_size = le32_to_cpu(hdr->bss_data_bytes);
1049 /* if adev->firmware.load_type == AMDGPU_FW_LOAD_PSP,
1050 * amdgpu_ucode_init_single_fw will load dmub firmware
1051 * fw_inst_const part to cw0; otherwise, the firmware back door load
1052 * will be done by dm_dmub_hw_init
1054 if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP) {
1055 memcpy(fb_info->fb[DMUB_WINDOW_0_INST_CONST].cpu_addr, fw_inst_const,
1056 fw_inst_const_size);
1059 if (fw_bss_data_size)
1060 memcpy(fb_info->fb[DMUB_WINDOW_2_BSS_DATA].cpu_addr,
1061 fw_bss_data, fw_bss_data_size);
1063 /* Copy firmware bios info into FB memory. */
1064 memcpy(fb_info->fb[DMUB_WINDOW_3_VBIOS].cpu_addr, adev->bios,
1067 /* Reset regions that need to be reset. */
1068 memset(fb_info->fb[DMUB_WINDOW_4_MAILBOX].cpu_addr, 0,
1069 fb_info->fb[DMUB_WINDOW_4_MAILBOX].size);
1071 memset(fb_info->fb[DMUB_WINDOW_5_TRACEBUFF].cpu_addr, 0,
1072 fb_info->fb[DMUB_WINDOW_5_TRACEBUFF].size);
1074 memset(fb_info->fb[DMUB_WINDOW_6_FW_STATE].cpu_addr, 0,
1075 fb_info->fb[DMUB_WINDOW_6_FW_STATE].size);
1077 /* Initialize hardware. */
1078 memset(&hw_params, 0, sizeof(hw_params));
1079 hw_params.fb_base = adev->gmc.fb_start;
1080 hw_params.fb_offset = adev->gmc.aper_base;
1082 /* backdoor load firmware and trigger dmub running */
1083 if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP)
1084 hw_params.load_inst_const = true;
1087 hw_params.psp_version = dmcu->psp_version;
1089 for (i = 0; i < fb_info->num_fb; ++i)
1090 hw_params.fb[i] = &fb_info->fb[i];
1092 status = dmub_srv_hw_init(dmub_srv, &hw_params);
1093 if (status != DMUB_STATUS_OK) {
1094 DRM_ERROR("Error initializing DMUB HW: %d\n", status);
1098 /* Wait for firmware load to finish. */
1099 status = dmub_srv_wait_for_auto_load(dmub_srv, 100000);
1100 if (status != DMUB_STATUS_OK)
1101 DRM_WARN("Wait for DMUB auto-load failed: %d\n", status);
1103 /* Init DMCU and ABM if available. */
1105 dmcu->funcs->dmcu_init(dmcu);
1106 abm->dmcu_is_running = dmcu->funcs->is_dmcu_initialized(dmcu);
1109 if (!adev->dm.dc->ctx->dmub_srv)
1110 adev->dm.dc->ctx->dmub_srv = dc_dmub_srv_create(adev->dm.dc, dmub_srv);
1111 if (!adev->dm.dc->ctx->dmub_srv) {
1112 DRM_ERROR("Couldn't allocate DC DMUB server!\n");
1116 DRM_INFO("DMUB hardware initialized: version=0x%08X\n",
1117 adev->dm.dmcub_fw_version);
/*
 * dm_dmub_hw_resume() - bring DMUB firmware back up after suspend.
 *
 * If the DMUB service reports hardware already initialized, only wait for
 * the firmware auto-load to complete; otherwise fall back to the full
 * dm_dmub_hw_init() sequence. Bails out early when the ASIC has no DMUB.
 */
static void dm_dmub_hw_resume(struct amdgpu_device *adev)
	struct dmub_srv *dmub_srv = adev->dm.dmub_srv;
	enum dmub_status status;

	/* DMUB isn't supported on the ASIC. */

	status = dmub_srv_is_hw_init(dmub_srv, &init);
	if (status != DMUB_STATUS_OK)
		/* Non-fatal: the query failing does not stop the resume path. */
		DRM_WARN("DMUB hardware init check failed: %d\n", status);

	if (status == DMUB_STATUS_OK && init) {
		/* Wait for firmware load to finish. */
		status = dmub_srv_wait_for_auto_load(dmub_srv, 100000);
		if (status != DMUB_STATUS_OK)
			DRM_WARN("Wait for DMUB auto-load failed: %d\n", status);
		/* Perform the full hardware initialization. */
		dm_dmub_hw_init(adev);
1148 #if defined(CONFIG_DRM_AMD_DC_DCN)
/*
 * mmhub_read_system_context() - build a DC physical address space config
 * from the GMC/MMHUB view of the system: framebuffer aperture, AGP
 * aperture bounds, and the GART page table location.
 */
static void mmhub_read_system_context(struct amdgpu_device *adev, struct dc_phy_addr_space_config *pa_config)
	uint32_t logical_addr_low;
	uint32_t logical_addr_high;
	uint32_t agp_base, agp_bot, agp_top;
	PHYSICAL_ADDRESS_LOC page_table_start, page_table_end, page_table_base;

	memset(pa_config, 0, sizeof(*pa_config));

	/* Aperture bounds are carried in 256KB units (>> 18). */
	logical_addr_low = min(adev->gmc.fb_start, adev->gmc.agp_start) >> 18;
	pt_base = amdgpu_gmc_pd_addr(adev->gart.bo);

	if (adev->apu_flags & AMD_APU_IS_RAVEN2)
		/*
		 * Raven2 has a HW issue that it is unable to use the vram which
		 * is out of MC_VM_SYSTEM_APERTURE_HIGH_ADDR. So here is the
		 * workaround that increase system aperture high address (add 1)
		 * to get rid of the VM fault and hardware hang.
		 */
		logical_addr_high = max((adev->gmc.fb_end >> 18) + 0x1, adev->gmc.agp_end >> 18);
		logical_addr_high = max(adev->gmc.fb_end, adev->gmc.agp_end) >> 18;

	/* AGP aperture values are carried in 16MB units (>> 24). */
	agp_bot = adev->gmc.agp_start >> 24;
	agp_top = adev->gmc.agp_end >> 24;

	/* GART page table addresses split into low/high register halves. */
	page_table_start.high_part = (u32)(adev->gmc.gart_start >> 44) & 0xF;
	page_table_start.low_part = (u32)(adev->gmc.gart_start >> 12);
	page_table_end.high_part = (u32)(adev->gmc.gart_end >> 44) & 0xF;
	page_table_end.low_part = (u32)(adev->gmc.gart_end >> 12);
	page_table_base.high_part = upper_32_bits(pt_base) & 0xF;
	page_table_base.low_part = lower_32_bits(pt_base);

	pa_config->system_aperture.start_addr = (uint64_t)logical_addr_low << 18;
	pa_config->system_aperture.end_addr = (uint64_t)logical_addr_high << 18;

	pa_config->system_aperture.agp_base = (uint64_t)agp_base << 24 ;
	pa_config->system_aperture.agp_bot = (uint64_t)agp_bot << 24;
	pa_config->system_aperture.agp_top = (uint64_t)agp_top << 24;

	pa_config->system_aperture.fb_base = adev->gmc.fb_start;
	pa_config->system_aperture.fb_offset = adev->gmc.aper_base;
	pa_config->system_aperture.fb_top = adev->gmc.fb_end;

	pa_config->gart_config.page_table_start_addr = page_table_start.quad_part << 12;
	pa_config->gart_config.page_table_end_addr = page_table_end.quad_part << 12;
	pa_config->gart_config.page_table_base_addr = page_table_base.quad_part;

	pa_config->is_hvm_enabled = 0;
1204 #if defined(CONFIG_DRM_AMD_DC_DCN)
/*
 * vblank_control_worker() - deferred vblank enable/disable handling.
 *
 * Maintains the count of CRTCs with vblank IRQs active, allows DC idle
 * optimizations (MALL) only when that count is zero, and arbitrates PSR
 * entry/exit against the OS vblank requirement. Runs under dc_lock.
 */
static void vblank_control_worker(struct work_struct *work)
	struct vblank_control_work *vblank_work =
		container_of(work, struct vblank_control_work, work);
	struct amdgpu_display_manager *dm = vblank_work->dm;

	mutex_lock(&dm->dc_lock);

	if (vblank_work->enable)
		dm->active_vblank_irq_count++;
	else if(dm->active_vblank_irq_count)
		dm->active_vblank_irq_count--;

	dc_allow_idle_optimizations(dm->dc, dm->active_vblank_irq_count == 0);

	DRM_DEBUG_KMS("Allow idle optimizations (MALL): %d\n", dm->active_vblank_irq_count == 0);

	/* Control PSR based on vblank requirements from OS */
	if (vblank_work->stream && vblank_work->stream->link) {
		if (vblank_work->enable) {
			/* Vblank active: PSR must not be. */
			if (vblank_work->stream->link->psr_settings.psr_allow_active)
				amdgpu_dm_psr_disable(vblank_work->stream);
		} else if (vblank_work->stream->link->psr_settings.psr_feature_enabled &&
			   !vblank_work->stream->link->psr_settings.psr_allow_active &&
			   vblank_work->acrtc->dm_irq_params.allow_psr_entry) {
			amdgpu_dm_psr_enable(vblank_work->stream);

	mutex_unlock(&dm->dc_lock);

	/* Drop the stream reference; presumably taken by the queuing side — verify against caller. */
	dc_stream_release(vblank_work->stream);
/*
 * dm_handle_hpd_rx_offload_work() - HPD-RX IRQ handling deferred to
 * process context. Re-detects the sink, then services DP automated-test
 * requests or recovers from a detected link loss. Frees its work item.
 */
static void dm_handle_hpd_rx_offload_work(struct work_struct *work)
	struct hpd_rx_irq_offload_work *offload_work;
	struct amdgpu_dm_connector *aconnector;
	struct dc_link *dc_link;
	struct amdgpu_device *adev;
	enum dc_connection_type new_connection_type = dc_connection_none;
	unsigned long flags;

	offload_work = container_of(work, struct hpd_rx_irq_offload_work, work);
	aconnector = offload_work->offload_wq->aconnector;

		DRM_ERROR("Can't retrieve aconnector in hpd_rx_irq_offload_work");

	adev = drm_to_adev(aconnector->base.dev);
	dc_link = aconnector->dc_link;

	/* Sink detection is serialized against other HPD handling. */
	mutex_lock(&aconnector->hpd_lock);
	if (!dc_link_detect_sink(dc_link, &new_connection_type))
		DRM_ERROR("KMS: Failed to detect connector\n");
	mutex_unlock(&aconnector->hpd_lock);

	/* Nothing to service when no sink is present or a GPU reset is in flight. */
	if (new_connection_type == dc_connection_none)

	if (amdgpu_in_reset(adev))

	mutex_lock(&adev->dm.dc_lock);
	if (offload_work->data.bytes.device_service_irq.bits.AUTOMATED_TEST)
		dc_link_dp_handle_automated_test(dc_link);
	else if ((dc_link->connector_signal != SIGNAL_TYPE_EDP) &&
			hpd_rx_irq_check_link_loss_status(dc_link, &offload_work->data) &&
			dc_link_dp_allow_hpd_rx_irq(dc_link)) {
		/* Recover the link, then clear the in-progress flag under the offload lock. */
		dc_link_dp_handle_link_loss(dc_link);
		spin_lock_irqsave(&offload_work->offload_wq->offload_lock, flags);
		offload_work->offload_wq->is_handling_link_loss = false;
		spin_unlock_irqrestore(&offload_work->offload_wq->offload_lock, flags);
	mutex_unlock(&adev->dm.dc_lock);

	kfree(offload_work);
/*
 * hpd_rx_irq_create_workqueue() - allocate one HPD-RX offload queue per link.
 *
 * Returns the array of per-link offload work queues, or NULL on allocation
 * failure. The caller owns the array and its workqueues (released in
 * amdgpu_dm_fini()).
 */
static struct hpd_rx_irq_offload_work_queue *hpd_rx_irq_create_workqueue(struct dc *dc)
	int max_caps = dc->caps.max_links;

	struct hpd_rx_irq_offload_work_queue *hpd_rx_offload_wq = NULL;

	hpd_rx_offload_wq = kcalloc(max_caps, sizeof(*hpd_rx_offload_wq), GFP_KERNEL);

	if (!hpd_rx_offload_wq)

	for (i = 0; i < max_caps; i++) {
		/* Single-threaded so per-link events are handled in order. */
		hpd_rx_offload_wq[i].wq =
			create_singlethread_workqueue("amdgpu_dm_hpd_rx_offload_wq");

		if (hpd_rx_offload_wq[i].wq == NULL) {
			DRM_ERROR("create amdgpu_dm_hpd_rx_offload_wq fail!");

		spin_lock_init(&hpd_rx_offload_wq[i].offload_lock);

	return hpd_rx_offload_wq;
/* PCI vendor/device/subsystem/revision tuple identifying a quirked board. */
struct amdgpu_stutter_quirk {

/* Boards on which memory stutter mode must be disabled. */
static const struct amdgpu_stutter_quirk amdgpu_stutter_quirk_list[] = {
	/* https://bugzilla.kernel.org/show_bug.cgi?id=214417 */
	{ 0x1002, 0x15dd, 0x1002, 0x15dd, 0xc8 },

/*
 * dm_should_disable_stutter() - check @pdev against the quirk list above.
 * Callers treat a true result as "disable stutter" (see amdgpu_dm_init()).
 */
static bool dm_should_disable_stutter(struct pci_dev *pdev)
	const struct amdgpu_stutter_quirk *p = amdgpu_stutter_quirk_list;

	/* Iterate until the zero chip_device sentinel entry. */
	while (p && p->chip_device != 0) {
		if (pdev->vendor == p->chip_vendor &&
		    pdev->device == p->chip_device &&
		    pdev->subsystem_vendor == p->subsys_vendor &&
		    pdev->subsystem_device == p->subsys_device &&
		    pdev->revision == p->revision) {
/*
 * Platforms needing the AUX HPD-disconnect quirk, matched by DMI strings
 * (consumed by retrieve_dmi_info() below).
 */
static const struct dmi_system_id hpd_disconnect_quirk_table[] = {
		DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
		DMI_MATCH(DMI_PRODUCT_NAME, "Precision 3660"),
		DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
		DMI_MATCH(DMI_PRODUCT_NAME, "Precision 3260"),
		DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
		DMI_MATCH(DMI_PRODUCT_NAME, "Precision 3460"),

/* Set DM quirk flags based on DMI platform identification. */
static void retrieve_dmi_info(struct amdgpu_display_manager *dm)
	const struct dmi_system_id *dmi_id;

	dm->aux_hpd_discon_quirk = false;

	dmi_id = dmi_first_match(hpd_disconnect_quirk_table);
		dm->aux_hpd_discon_quirk = true;
		DRM_INFO("aux_hpd_discon_quirk attached\n");
/*
 * amdgpu_dm_init() - create and wire up the display manager.
 *
 * Builds dc_init_data from the adev, creates the DC instance, applies
 * feature/debug mask overrides, initializes DMUB hardware, freesync,
 * color management, HDCP, DMUB notifications and the DRM device state.
 * On failure, unwinds through amdgpu_dm_fini().
 */
static int amdgpu_dm_init(struct amdgpu_device *adev)
	struct dc_init_data init_data;
#ifdef CONFIG_DRM_AMD_DC_HDCP
	struct dc_callback_init init_params;

	adev->dm.ddev = adev_to_drm(adev);
	adev->dm.adev = adev;

	/* Zero all the fields */
	memset(&init_data, 0, sizeof(init_data));
#ifdef CONFIG_DRM_AMD_DC_HDCP
	memset(&init_params, 0, sizeof(init_params));

	mutex_init(&adev->dm.dc_lock);
	mutex_init(&adev->dm.audio_lock);
#if defined(CONFIG_DRM_AMD_DC_DCN)
	spin_lock_init(&adev->dm.vblank_lock);

	if(amdgpu_dm_irq_init(adev)) {
		DRM_ERROR("amdgpu: failed to initialize DM IRQ support.\n");

	/* Describe the ASIC to DC. */
	init_data.asic_id.chip_family = adev->family;

	init_data.asic_id.pci_revision_id = adev->pdev->revision;
	init_data.asic_id.hw_internal_rev = adev->external_rev_id;
	init_data.asic_id.chip_id = adev->pdev->device;

	init_data.asic_id.vram_width = adev->gmc.vram_width;
	/* TODO: initialize init_data.asic_id.vram_type here!!!! */
	init_data.asic_id.atombios_base_address =
		adev->mode_info.atom_context->bios;

	init_data.driver = adev;

	adev->dm.cgs_device = amdgpu_cgs_create_device(adev);

	if (!adev->dm.cgs_device) {
		DRM_ERROR("amdgpu: failed to create cgs device.\n");

	init_data.cgs_device = adev->dm.cgs_device;

	init_data.dce_environment = DCE_ENV_PRODUCTION_DRV;

	switch (adev->asic_type) {
		init_data.flags.gpu_vm_support = true;
		/* Gate DMCU on firmware versions known to support it. */
		switch (adev->dm.dmcub_fw_version) {
		case 0: /* development */
		case 0x1: /* linux-firmware.git hash 6d9f399 */
		case 0x01000000: /* linux-firmware.git hash 9a0b0f4 */
			init_data.flags.disable_dmcu = false;
			init_data.flags.disable_dmcu = true;
	case CHIP_YELLOW_CARP:
		init_data.flags.gpu_vm_support = true;

	/* Translate module feature mask bits into DC init flags. */
	if (amdgpu_dc_feature_mask & DC_FBC_MASK)
		init_data.flags.fbc_support = true;

	if (amdgpu_dc_feature_mask & DC_MULTI_MON_PP_MCLK_SWITCH_MASK)
		init_data.flags.multi_mon_pp_mclk_switch = true;

	if (amdgpu_dc_feature_mask & DC_DISABLE_FRACTIONAL_PWM_MASK)
		init_data.flags.disable_fractional_pwm = true;

	if (amdgpu_dc_feature_mask & DC_EDP_NO_POWER_SEQUENCING)
		init_data.flags.edp_no_power_sequencing = true;

	init_data.flags.power_down_display_on_boot = true;

	INIT_LIST_HEAD(&adev->dm.da_list);

	retrieve_dmi_info(&adev->dm);

	/* Display Core create. */
	adev->dm.dc = dc_create(&init_data);

		DRM_INFO("Display Core initialized with v%s!\n", DC_VER);
		DRM_INFO("Display Core failed to initialize with v%s!\n", DC_VER);

	/* Apply debug-mask overrides onto the freshly created DC. */
	if (amdgpu_dc_debug_mask & DC_DISABLE_PIPE_SPLIT) {
		adev->dm.dc->debug.force_single_disp_pipe_split = false;
		adev->dm.dc->debug.pipe_split_policy = MPC_SPLIT_AVOID;

	if (adev->asic_type != CHIP_CARRIZO && adev->asic_type != CHIP_STONEY)
		adev->dm.dc->debug.disable_stutter = amdgpu_pp_feature_mask & PP_STUTTER_MODE ? false : true;
	if (dm_should_disable_stutter(adev->pdev))
		adev->dm.dc->debug.disable_stutter = true;

	if (amdgpu_dc_debug_mask & DC_DISABLE_STUTTER)
		adev->dm.dc->debug.disable_stutter = true;

	if (amdgpu_dc_debug_mask & DC_DISABLE_DSC)
		adev->dm.dc->debug.disable_dsc = true;

	if (amdgpu_dc_debug_mask & DC_DISABLE_CLOCK_GATING)
		adev->dm.dc->debug.disable_clock_gate = true;

	r = dm_dmub_hw_init(adev);
		DRM_ERROR("DMUB interface failed to initialize: status=%d\n", r);

	dc_hardware_init(adev->dm.dc);

	adev->dm.hpd_rx_offload_wq = hpd_rx_irq_create_workqueue(adev->dm.dc);
	if (!adev->dm.hpd_rx_offload_wq) {
		DRM_ERROR("amdgpu: failed to create hpd rx offload workqueue.\n");

#if defined(CONFIG_DRM_AMD_DC_DCN)
	if ((adev->flags & AMD_IS_APU) && (adev->asic_type >= CHIP_CARRIZO)) {
		struct dc_phy_addr_space_config pa_config;

		mmhub_read_system_context(adev, &pa_config);

		// Call the DC init_memory func
		dc_setup_system_context(adev->dm.dc, &pa_config);

	adev->dm.freesync_module = mod_freesync_create(adev->dm.dc);
	if (!adev->dm.freesync_module) {
			"amdgpu: failed to initialize freesync_module.\n");
		DRM_DEBUG_DRIVER("amdgpu: freesync_module init done %p.\n",
				adev->dm.freesync_module);

	amdgpu_dm_init_color_mod();

#if defined(CONFIG_DRM_AMD_DC_DCN)
	if (adev->dm.dc->caps.max_links > 0) {
		adev->dm.vblank_control_workqueue =
			create_singlethread_workqueue("dm_vblank_control_workqueue");
		if (!adev->dm.vblank_control_workqueue)
			DRM_ERROR("amdgpu: failed to initialize vblank_workqueue.\n");

#ifdef CONFIG_DRM_AMD_DC_HDCP
	if (adev->dm.dc->caps.max_links > 0 && adev->asic_type >= CHIP_RAVEN) {
		adev->dm.hdcp_workqueue = hdcp_create_workqueue(adev, &init_params.cp_psp, adev->dm.dc);

		if (!adev->dm.hdcp_workqueue)
			DRM_ERROR("amdgpu: failed to initialize hdcp_workqueue.\n");
			DRM_DEBUG_DRIVER("amdgpu: hdcp_workqueue init done %p.\n", adev->dm.hdcp_workqueue);

		dc_init_callbacks(adev->dm.dc, &init_params);
#if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
	adev->dm.crc_rd_wrk = amdgpu_dm_crtc_secure_display_create_work();
	/* DMUB notification support: AUX transfer and HPD callbacks. */
	if (dc_enable_dmub_notifications(adev->dm.dc)) {
		init_completion(&adev->dm.dmub_aux_transfer_done);
		adev->dm.dmub_notify = kzalloc(sizeof(struct dmub_notification), GFP_KERNEL);
		if (!adev->dm.dmub_notify) {
			DRM_INFO("amdgpu: fail to allocate adev->dm.dmub_notify");

		adev->dm.delayed_hpd_wq = create_singlethread_workqueue("amdgpu_dm_hpd_wq");
		if (!adev->dm.delayed_hpd_wq) {
			DRM_ERROR("amdgpu: failed to create hpd offload workqueue.\n");

		amdgpu_dm_outbox_init(adev);
#if defined(CONFIG_DRM_AMD_DC_DCN)
		if (!register_dmub_notify_callback(adev, DMUB_NOTIFICATION_AUX_REPLY,
			dmub_aux_setconfig_callback, false)) {
			DRM_ERROR("amdgpu: fail to register dmub aux callback");
		if (!register_dmub_notify_callback(adev, DMUB_NOTIFICATION_HPD, dmub_hpd_callback, true)) {
			DRM_ERROR("amdgpu: fail to register dmub hpd callback");

	if (amdgpu_dm_initialize_drm_device(adev)) {
			"amdgpu: failed to initialize sw for display support.\n");

	/* create fake encoders for MST */
	dm_dp_create_fake_mst_encoders(adev);

	/* TODO: Add_display_info? */

	/* TODO use dynamic cursor width */
	adev_to_drm(adev)->mode_config.cursor_width = adev->dm.dc->caps.max_cursor_size;
	adev_to_drm(adev)->mode_config.cursor_height = adev->dm.dc->caps.max_cursor_size;

	if (drm_vblank_init(adev_to_drm(adev), adev->dm.display_indexes_num)) {
			"amdgpu: failed to initialize sw for display support.\n");

	DRM_DEBUG_DRIVER("KMS initialized.\n");

	/* Error path: tear down whatever was brought up above. */
	amdgpu_dm_fini(adev);
/* Early teardown hook: only display audio needs to go down before full fini. */
static int amdgpu_dm_early_fini(void *handle)
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	amdgpu_dm_audio_fini(adev);
/*
 * amdgpu_dm_fini() - tear down everything amdgpu_dm_init() created.
 *
 * Most steps check their own pointer first, so this is also used as the
 * unwind path for a partially completed amdgpu_dm_init().
 */
static void amdgpu_dm_fini(struct amdgpu_device *adev)
#if defined(CONFIG_DRM_AMD_DC_DCN)
	if (adev->dm.vblank_control_workqueue) {
		destroy_workqueue(adev->dm.vblank_control_workqueue);
		adev->dm.vblank_control_workqueue = NULL;

	/* Clean up the fake MST encoders created at init. */
	for (i = 0; i < adev->dm.display_indexes_num; i++) {
		drm_encoder_cleanup(&adev->dm.mst_encoders[i].base);

	amdgpu_dm_destroy_drm_device(&adev->dm);

#if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
	if (adev->dm.crc_rd_wrk) {
		flush_work(&adev->dm.crc_rd_wrk->notify_ta_work);
		kfree(adev->dm.crc_rd_wrk);
		adev->dm.crc_rd_wrk = NULL;
#ifdef CONFIG_DRM_AMD_DC_HDCP
	if (adev->dm.hdcp_workqueue) {
		hdcp_destroy(&adev->dev->kobj, adev->dm.hdcp_workqueue);
		adev->dm.hdcp_workqueue = NULL;

		dc_deinit_callbacks(adev->dm.dc);

		dc_dmub_srv_destroy(&adev->dm.dc->ctx->dmub_srv);

	if (dc_enable_dmub_notifications(adev->dm.dc)) {
		kfree(adev->dm.dmub_notify);
		adev->dm.dmub_notify = NULL;
		destroy_workqueue(adev->dm.delayed_hpd_wq);
		adev->dm.delayed_hpd_wq = NULL;

	if (adev->dm.dmub_bo)
		amdgpu_bo_free_kernel(&adev->dm.dmub_bo,
				      &adev->dm.dmub_bo_gpu_addr,
				      &adev->dm.dmub_bo_cpu_addr);

	/* DC Destroy TODO: Replace destroy DAL */
		dc_destroy(&adev->dm.dc);
	/*
	 * TODO: pageflip, vlank interrupt
	 *
	 * amdgpu_dm_irq_fini(adev);
	 */

	if (adev->dm.cgs_device) {
		amdgpu_cgs_destroy_device(adev->dm.cgs_device);
		adev->dm.cgs_device = NULL;

	if (adev->dm.freesync_module) {
		mod_freesync_destroy(adev->dm.freesync_module);
		adev->dm.freesync_module = NULL;

	if (adev->dm.hpd_rx_offload_wq) {
		for (i = 0; i < adev->dm.dc->caps.max_links; i++) {
			if (adev->dm.hpd_rx_offload_wq[i].wq) {
				destroy_workqueue(adev->dm.hpd_rx_offload_wq[i].wq);
				adev->dm.hpd_rx_offload_wq[i].wq = NULL;

		kfree(adev->dm.hpd_rx_offload_wq);
		adev->dm.hpd_rx_offload_wq = NULL;

	mutex_destroy(&adev->dm.audio_lock);
	mutex_destroy(&adev->dm.dc_lock);
/*
 * load_dmcu_fw() - request DMCU firmware and register it for PSP loading.
 *
 * Picks the firmware file by ASIC, then registers the firmware's ERAM and
 * interrupt-vector regions with the ucode loader. DMCU firmware is
 * optional: a missing file is logged at debug level, not treated as error.
 */
static int load_dmcu_fw(struct amdgpu_device *adev)
	const char *fw_name_dmcu = NULL;

	const struct dmcu_firmware_header_v1_0 *hdr;

	switch(adev->asic_type) {
#if defined(CONFIG_DRM_AMD_DC_SI)
	case CHIP_POLARIS11:
	case CHIP_POLARIS10:
	case CHIP_POLARIS12:
	case CHIP_SIENNA_CICHLID:
	case CHIP_NAVY_FLOUNDER:
	case CHIP_DIMGREY_CAVEFISH:
	case CHIP_BEIGE_GOBY:
	case CHIP_YELLOW_CARP:
		fw_name_dmcu = FIRMWARE_NAVI12_DMCU;
		/* Raven family: Picasso and Raven2 share the Raven DMCU image. */
		if (ASICREV_IS_PICASSO(adev->external_rev_id))
			fw_name_dmcu = FIRMWARE_RAVEN_DMCU;
		else if (ASICREV_IS_RAVEN2(adev->external_rev_id))
			fw_name_dmcu = FIRMWARE_RAVEN_DMCU;
		DRM_ERROR("Unsupported ASIC type: 0x%X\n", adev->asic_type);

	if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP) {
		DRM_DEBUG_KMS("dm: DMCU firmware not supported on direct or SMU loading\n");

	r = request_firmware_direct(&adev->dm.fw_dmcu, fw_name_dmcu, adev->dev);
		/* DMCU firmware is not necessary, so don't raise a fuss if it's missing */
		DRM_DEBUG_KMS("dm: DMCU firmware not found\n");
		adev->dm.fw_dmcu = NULL;

		dev_err(adev->dev, "amdgpu_dm: Can't load firmware \"%s\"\n",

	r = amdgpu_ucode_validate(adev->dm.fw_dmcu);
		dev_err(adev->dev, "amdgpu_dm: Can't validate firmware \"%s\"\n",
		release_firmware(adev->dm.fw_dmcu);
		adev->dm.fw_dmcu = NULL;

	hdr = (const struct dmcu_firmware_header_v1_0 *)adev->dm.fw_dmcu->data;
	/* ERAM region: the ucode payload minus the interrupt-vector bytes. */
	adev->firmware.ucode[AMDGPU_UCODE_ID_DMCU_ERAM].ucode_id = AMDGPU_UCODE_ID_DMCU_ERAM;
	adev->firmware.ucode[AMDGPU_UCODE_ID_DMCU_ERAM].fw = adev->dm.fw_dmcu;
	adev->firmware.fw_size +=
		ALIGN(le32_to_cpu(hdr->header.ucode_size_bytes) - le32_to_cpu(hdr->intv_size_bytes), PAGE_SIZE);

	/* Interrupt-vector region. */
	adev->firmware.ucode[AMDGPU_UCODE_ID_DMCU_INTV].ucode_id = AMDGPU_UCODE_ID_DMCU_INTV;
	adev->firmware.ucode[AMDGPU_UCODE_ID_DMCU_INTV].fw = adev->dm.fw_dmcu;
	adev->firmware.fw_size +=
		ALIGN(le32_to_cpu(hdr->intv_size_bytes), PAGE_SIZE);

	adev->dm.dmcu_fw_version = le32_to_cpu(hdr->header.ucode_version);

	DRM_DEBUG_KMS("PSP loading DMCU firmware\n");
/* DMUB register-read callback: routes through the DC register helpers. */
static uint32_t amdgpu_dm_dmub_reg_read(void *ctx, uint32_t address)
	struct amdgpu_device *adev = ctx;

	return dm_read_reg(adev->dm.dc->ctx, address);
/* DMUB register-write callback: routes through the DC register helpers. */
static void amdgpu_dm_dmub_reg_write(void *ctx, uint32_t address,
	struct amdgpu_device *adev = ctx;

	return dm_write_reg(adev->dm.dc->ctx, address, value);
1834 static int dm_dmub_sw_init(struct amdgpu_device *adev)
1836 struct dmub_srv_create_params create_params;
1837 struct dmub_srv_region_params region_params;
1838 struct dmub_srv_region_info region_info;
1839 struct dmub_srv_fb_params fb_params;
1840 struct dmub_srv_fb_info *fb_info;
1841 struct dmub_srv *dmub_srv;
1842 const struct dmcub_firmware_header_v1_0 *hdr;
1843 const char *fw_name_dmub;
1844 enum dmub_asic dmub_asic;
1845 enum dmub_status status;
1848 switch (adev->asic_type) {
1850 dmub_asic = DMUB_ASIC_DCN21;
1851 fw_name_dmub = FIRMWARE_RENOIR_DMUB;
1852 if (ASICREV_IS_GREEN_SARDINE(adev->external_rev_id))
1853 fw_name_dmub = FIRMWARE_GREEN_SARDINE_DMUB;
1855 case CHIP_SIENNA_CICHLID:
1856 dmub_asic = DMUB_ASIC_DCN30;
1857 fw_name_dmub = FIRMWARE_SIENNA_CICHLID_DMUB;
1859 case CHIP_NAVY_FLOUNDER:
1860 dmub_asic = DMUB_ASIC_DCN30;
1861 fw_name_dmub = FIRMWARE_NAVY_FLOUNDER_DMUB;
1864 dmub_asic = DMUB_ASIC_DCN301;
1865 fw_name_dmub = FIRMWARE_VANGOGH_DMUB;
1867 case CHIP_DIMGREY_CAVEFISH:
1868 dmub_asic = DMUB_ASIC_DCN302;
1869 fw_name_dmub = FIRMWARE_DIMGREY_CAVEFISH_DMUB;
1871 case CHIP_BEIGE_GOBY:
1872 dmub_asic = DMUB_ASIC_DCN303;
1873 fw_name_dmub = FIRMWARE_BEIGE_GOBY_DMUB;
1875 case CHIP_YELLOW_CARP:
1876 dmub_asic = DMUB_ASIC_DCN31;
1877 fw_name_dmub = FIRMWARE_YELLOW_CARP_DMUB;
1881 /* ASIC doesn't support DMUB. */
1885 r = request_firmware_direct(&adev->dm.dmub_fw, fw_name_dmub, adev->dev);
1887 DRM_ERROR("DMUB firmware loading failed: %d\n", r);
1891 r = amdgpu_ucode_validate(adev->dm.dmub_fw);
1893 DRM_ERROR("Couldn't validate DMUB firmware: %d\n", r);
1897 hdr = (const struct dmcub_firmware_header_v1_0 *)adev->dm.dmub_fw->data;
1898 adev->dm.dmcub_fw_version = le32_to_cpu(hdr->header.ucode_version);
1900 if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) {
1901 adev->firmware.ucode[AMDGPU_UCODE_ID_DMCUB].ucode_id =
1902 AMDGPU_UCODE_ID_DMCUB;
1903 adev->firmware.ucode[AMDGPU_UCODE_ID_DMCUB].fw =
1905 adev->firmware.fw_size +=
1906 ALIGN(le32_to_cpu(hdr->inst_const_bytes), PAGE_SIZE);
1908 DRM_INFO("Loading DMUB firmware via PSP: version=0x%08X\n",
1909 adev->dm.dmcub_fw_version);
1913 adev->dm.dmub_srv = kzalloc(sizeof(*adev->dm.dmub_srv), GFP_KERNEL);
1914 dmub_srv = adev->dm.dmub_srv;
1917 DRM_ERROR("Failed to allocate DMUB service!\n");
1921 memset(&create_params, 0, sizeof(create_params));
1922 create_params.user_ctx = adev;
1923 create_params.funcs.reg_read = amdgpu_dm_dmub_reg_read;
1924 create_params.funcs.reg_write = amdgpu_dm_dmub_reg_write;
1925 create_params.asic = dmub_asic;
1927 /* Create the DMUB service. */
1928 status = dmub_srv_create(dmub_srv, &create_params);
1929 if (status != DMUB_STATUS_OK) {
1930 DRM_ERROR("Error creating DMUB service: %d\n", status);
1934 /* Calculate the size of all the regions for the DMUB service. */
1935 memset(®ion_params, 0, sizeof(region_params));
1937 region_params.inst_const_size = le32_to_cpu(hdr->inst_const_bytes) -
1938 PSP_HEADER_BYTES - PSP_FOOTER_BYTES;
1939 region_params.bss_data_size = le32_to_cpu(hdr->bss_data_bytes);
1940 region_params.vbios_size = adev->bios_size;
1941 region_params.fw_bss_data = region_params.bss_data_size ?
1942 adev->dm.dmub_fw->data +
1943 le32_to_cpu(hdr->header.ucode_array_offset_bytes) +
1944 le32_to_cpu(hdr->inst_const_bytes) : NULL;
1945 region_params.fw_inst_const =
1946 adev->dm.dmub_fw->data +
1947 le32_to_cpu(hdr->header.ucode_array_offset_bytes) +
1950 status = dmub_srv_calc_region_info(dmub_srv, ®ion_params,
1953 if (status != DMUB_STATUS_OK) {
1954 DRM_ERROR("Error calculating DMUB region info: %d\n", status);
1959 * Allocate a framebuffer based on the total size of all the regions.
1960 * TODO: Move this into GART.
1962 r = amdgpu_bo_create_kernel(adev, region_info.fb_size, PAGE_SIZE,
1963 AMDGPU_GEM_DOMAIN_VRAM, &adev->dm.dmub_bo,
1964 &adev->dm.dmub_bo_gpu_addr,
1965 &adev->dm.dmub_bo_cpu_addr);
1969 /* Rebase the regions on the framebuffer address. */
1970 memset(&fb_params, 0, sizeof(fb_params));
1971 fb_params.cpu_addr = adev->dm.dmub_bo_cpu_addr;
1972 fb_params.gpu_addr = adev->dm.dmub_bo_gpu_addr;
1973 fb_params.region_info = ®ion_info;
1975 adev->dm.dmub_fb_info =
1976 kzalloc(sizeof(*adev->dm.dmub_fb_info), GFP_KERNEL);
1977 fb_info = adev->dm.dmub_fb_info;
1981 "Failed to allocate framebuffer info for DMUB service!\n");
1985 status = dmub_srv_calc_fb_info(dmub_srv, &fb_params, fb_info);
1986 if (status != DMUB_STATUS_OK) {
1987 DRM_ERROR("Error calculating DMUB FB info: %d\n", status);
/* sw_init hook: bring up DMUB software state, then fetch DMCU firmware. */
static int dm_sw_init(void *handle)
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	r = dm_dmub_sw_init(adev);

	return load_dmcu_fw(adev);
/* sw_fini hook: release DMUB service state and loaded firmware images. */
static int dm_sw_fini(void *handle)
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	kfree(adev->dm.dmub_fb_info);
	adev->dm.dmub_fb_info = NULL;

	if (adev->dm.dmub_srv) {
		dmub_srv_destroy(adev->dm.dmub_srv);
		adev->dm.dmub_srv = NULL;

	/* release_firmware() tolerates NULL, so no guards are needed here. */
	release_firmware(adev->dm.dmub_fw);
	adev->dm.dmub_fw = NULL;

	release_firmware(adev->dm.fw_dmcu);
	adev->dm.fw_dmcu = NULL;
/*
 * detect_mst_link_for_all_connectors() - start MST topology management on
 * every connector currently seen as an MST branch. A connector whose
 * topology manager fails to start is downgraded to a single (SST) link.
 */
static int detect_mst_link_for_all_connectors(struct drm_device *dev)
	struct amdgpu_dm_connector *aconnector;
	struct drm_connector *connector;
	struct drm_connector_list_iter iter;

	drm_connector_list_iter_begin(dev, &iter);
	drm_for_each_connector_iter(connector, &iter) {
		aconnector = to_amdgpu_dm_connector(connector);
		if (aconnector->dc_link->type == dc_connection_mst_branch &&
		    aconnector->mst_mgr.aux) {
			DRM_DEBUG_DRIVER("DM_MST: starting TM on aconnector: %p [id: %d]\n",
					 aconnector->base.base.id);

			ret = drm_dp_mst_topology_mgr_set_mst(&aconnector->mst_mgr, true);
				DRM_ERROR("DM_MST: Failed to start MST\n");
				/* Fall back to treating the link as SST. */
				aconnector->dc_link->type =
					dc_connection_single;
	drm_connector_list_iter_end(&iter);
/*
 * dm_late_init() - late init hook.
 *
 * Programs ABM backlight ramping/LUT parameters into DMCU IRAM (or into
 * the DMCUB ABM config per eDP link when ABM lives on DMCUB), then kicks
 * off MST link detection on all connectors.
 */
static int dm_late_init(void *handle)
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	struct dmcu_iram_parameters params;
	unsigned int linear_lut[16];

	struct dmcu *dmcu = NULL;

	dmcu = adev->dm.dc->res_pool->dmcu;

	/* Identity backlight LUT: 16 evenly spaced points over 0..0xFFFF. */
	for (i = 0; i < 16; i++)
		linear_lut[i] = 0xFFFF * i / 15;

	params.backlight_ramping_override = false;
	params.backlight_ramping_start = 0xCCCC;
	params.backlight_ramping_reduction = 0xCCCCCCCC;
	params.backlight_lut_array_size = 16;
	params.backlight_lut_array = linear_lut;

	/* Min backlight level after ABM reduction, Don't allow below 1%
	 * 0xFFFF x 0.01 = 0x28F
	 */
	params.min_abm_backlight = 0x28F;
	/* In the case where abm is implemented on dmcub,
	 * dmcu object will be null.
	 * ABM 2.4 and up are implemented on dmcub.
	 */
		if (!dmcu_load_iram(dmcu, params))
	} else if (adev->dm.dc->ctx->dmub_srv) {
		struct dc_link *edp_links[MAX_NUM_EDP];

		get_edp_links(adev->dm.dc, edp_links, &edp_num);
		for (i = 0; i < edp_num; i++) {
			if (!dmub_init_abm_config(adev->dm.dc->res_pool, params, i))

	return detect_mst_link_for_all_connectors(adev_to_drm(adev));
/*
 * s3_handle_mst() - suspend or resume all MST topology managers.
 * @suspend: true going into S3, false on resume.
 *
 * On resume, a topology that fails to come back is torn down and a
 * hotplug event is fired so userspace re-probes the connectors.
 */
static void s3_handle_mst(struct drm_device *dev, bool suspend)
	struct amdgpu_dm_connector *aconnector;
	struct drm_connector *connector;
	struct drm_connector_list_iter iter;
	struct drm_dp_mst_topology_mgr *mgr;

	bool need_hotplug = false;

	drm_connector_list_iter_begin(dev, &iter);
	drm_for_each_connector_iter(connector, &iter) {
		aconnector = to_amdgpu_dm_connector(connector);
		/* Only handle top-level MST branch connectors (skip ports). */
		if (aconnector->dc_link->type != dc_connection_mst_branch ||
		    aconnector->mst_port)

		mgr = &aconnector->mst_mgr;

			drm_dp_mst_topology_mgr_suspend(mgr);
			ret = drm_dp_mst_topology_mgr_resume(mgr, true);
				drm_dp_mst_topology_mgr_set_mst(mgr, false);
				need_hotplug = true;
	drm_connector_list_iter_end(&iter);

		drm_kms_helper_hotplug_event(dev);
/*
 * amdgpu_dm_smu_write_watermarks_table() - push the DCN watermark table
 * to the SMU; see the flow description below for why this only applies
 * to Navi10/12/14 and not Renoir.
 */
static int amdgpu_dm_smu_write_watermarks_table(struct amdgpu_device *adev)
	struct smu_context *smu = &adev->smu;

	if (!is_support_sw_smu(adev))

	/* This interface is for dGPU Navi1x.Linux dc-pplib interface depends
	 * on window driver dc implementation.
	 * For Navi1x, clock settings of dcn watermarks are fixed. the settings
	 * should be passed to smu during boot up and resume from s3.
	 * boot up: dc calculate dcn watermark clock settings within dc_create,
	 * dcn20_resource_construct
	 * then call pplib functions below to pass the settings to smu:
	 * smu_set_watermarks_for_clock_ranges
	 * smu_set_watermarks_table
	 * navi10_set_watermarks_table
	 * smu_write_watermarks_table
	 *
	 * For Renoir, clock settings of dcn watermark are also fixed values.
	 * dc has implemented different flow for window driver:
	 * dc_hardware_init / dc_set_power_state
	 *
	 * smu_set_watermarks_for_clock_ranges
	 * renoir_set_watermarks_table
	 * smu_write_watermarks_table
	 *
	 * dc_hardware_init -> amdgpu_dm_init
	 * dc_set_power_state --> dm_resume
	 *
	 * therefore, this function apply to navi10/12/14 but not Renoir
	 */
	switch(adev->asic_type) {

	ret = smu_write_watermarks_table(smu);
		DRM_ERROR("Failed to update WMTABLE!\n");
/**
 * dm_hw_init() - Initialize DC device
 * @handle: The base driver device containing the amdgpu_dm device.
 *
 * Initialize the &struct amdgpu_display_manager device. This involves calling
 * the initializers of each DM component, then populating the struct with them.
 *
 * Although the function implies hardware initialization, both hardware and
 * software are initialized here. Splitting them out to their relevant init
 * hooks is a future TODO item.
 *
 * Some notable things that are initialized here:
 *
 * - Display Core, both software and hardware
 * - DC modules that we need (freesync and color management)
 * - DRM software states
 * - Interrupt sources and handlers
 *
 * - Debug FS entries, if enabled
 */
static int dm_hw_init(void *handle)
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	/* Create DAL display manager */
	amdgpu_dm_init(adev);
	amdgpu_dm_hpd_init(adev);
/**
 * dm_hw_fini() - Teardown DC device
 * @handle: The base driver device containing the amdgpu_dm device.
 *
 * Teardown components within &struct amdgpu_display_manager that require
 * cleanup. This involves cleaning up the DRM device, DC, and any modules that
 * were loaded. Also flush IRQ workqueues and disable them.
 */
static int dm_hw_fini(void *handle)
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	amdgpu_dm_hpd_fini(adev);

	amdgpu_dm_irq_fini(adev);
	amdgpu_dm_fini(adev);
2243 static int dm_enable_vblank(struct drm_crtc *crtc);
2244 static void dm_disable_vblank(struct drm_crtc *crtc);
/*
 * dm_gpureset_toggle_interrupts() - enable/disable pflip and vblank IRQs
 * for every stream in @state that still has planes. Used around GPU reset
 * to quiesce and restore display interrupts.
 */
static void dm_gpureset_toggle_interrupts(struct amdgpu_device *adev,
					  struct dc_state *state, bool enable)
	enum dc_irq_source irq_source;
	struct amdgpu_crtc *acrtc;

	for (i = 0; i < state->stream_count; i++) {
		acrtc = get_crtc_by_otg_inst(
				adev, state->stream_status[i].primary_otg_inst);

		/* Only streams that still drive planes are toggled. */
		if (acrtc && state->stream_status[i].plane_count != 0) {
			irq_source = IRQ_TYPE_PFLIP + acrtc->otg_inst;
			rc = dc_interrupt_set(adev->dm.dc, irq_source, enable) ? 0 : -EBUSY;
			DRM_DEBUG_VBL("crtc %d - vupdate irq %sabling: r=%d\n",
				      acrtc->crtc_id, enable ? "en" : "dis", rc);
				DRM_WARN("Failed to %s pflip interrupts\n",
					 enable ? "enable" : "disable");

				rc = dm_enable_vblank(&acrtc->base);
					DRM_WARN("Failed to enable vblank interrupts\n");

				dm_disable_vblank(&acrtc->base);
/*
 * amdgpu_dm_commit_zero_streams() - commit a DC state with every stream
 * (and its planes) removed, blanking all displays. Used on suspend and
 * around GPU reset. Returns the dc_status of the last failed/last step.
 */
static enum dc_status amdgpu_dm_commit_zero_streams(struct dc *dc)
	struct dc_state *context = NULL;
	enum dc_status res = DC_ERROR_UNEXPECTED;

	struct dc_stream_state *del_streams[MAX_PIPES];
	int del_streams_count = 0;

	memset(del_streams, 0, sizeof(del_streams));

	context = dc_create_state(dc);
	if (context == NULL)
		goto context_alloc_fail;

	/* Work on a copy of the current state. */
	dc_resource_state_copy_construct_current(dc, context);

	/* First remove from context all streams */
	for (i = 0; i < context->stream_count; i++) {
		struct dc_stream_state *stream = context->streams[i];

		del_streams[del_streams_count++] = stream;

	/* Remove all planes for removed streams and then remove the streams */
	for (i = 0; i < del_streams_count; i++) {
		if (!dc_rem_all_planes_for_stream(dc, del_streams[i], context)) {
			res = DC_FAIL_DETACH_SURFACES;

		res = dc_remove_stream_from_ctx(dc, context, del_streams[i]);

	res = dc_validate_global_state(dc, context, false);
		DRM_ERROR("%s:resource validation failed, dc_status:%d\n", __func__, res);

	res = dc_commit_state(dc, context);

	dc_release_state(context);
2332 static void hpd_rx_irq_work_suspend(struct amdgpu_display_manager *dm)
2336 if (dm->hpd_rx_offload_wq) {
2337 for (i = 0; i < dm->dc->caps.max_links; i++)
2338 flush_workqueue(dm->hpd_rx_offload_wq[i].wq);
2342 static int dm_suspend(void *handle)
2344 struct amdgpu_device *adev = handle;
2345 struct amdgpu_display_manager *dm = &adev->dm;
2348 if (amdgpu_in_reset(adev)) {
2349 mutex_lock(&dm->dc_lock);
2351 #if defined(CONFIG_DRM_AMD_DC_DCN)
2352 dc_allow_idle_optimizations(adev->dm.dc, false);
2355 dm->cached_dc_state = dc_copy_state(dm->dc->current_state);
2357 dm_gpureset_toggle_interrupts(adev, dm->cached_dc_state, false);
2359 amdgpu_dm_commit_zero_streams(dm->dc);
2361 amdgpu_dm_irq_suspend(adev);
2363 hpd_rx_irq_work_suspend(dm);
2368 WARN_ON(adev->dm.cached_state);
2369 adev->dm.cached_state = drm_atomic_helper_suspend(adev_to_drm(adev));
2371 s3_handle_mst(adev_to_drm(adev), true);
2373 amdgpu_dm_irq_suspend(adev);
2375 hpd_rx_irq_work_suspend(dm);
2377 dc_set_power_state(dm->dc, DC_ACPI_CM_POWER_STATE_D3);
2382 static struct amdgpu_dm_connector *
2383 amdgpu_dm_find_first_crtc_matching_connector(struct drm_atomic_state *state,
2384 struct drm_crtc *crtc)
2387 struct drm_connector_state *new_con_state;
2388 struct drm_connector *connector;
2389 struct drm_crtc *crtc_from_state;
2391 for_each_new_connector_in_state(state, connector, new_con_state, i) {
2392 crtc_from_state = new_con_state->crtc;
2394 if (crtc_from_state == crtc)
2395 return to_amdgpu_dm_connector(connector);
2401 static void emulated_link_detect(struct dc_link *link)
2403 struct dc_sink_init_data sink_init_data = { 0 };
2404 struct display_sink_capability sink_caps = { 0 };
2405 enum dc_edid_status edid_status;
2406 struct dc_context *dc_ctx = link->ctx;
2407 struct dc_sink *sink = NULL;
2408 struct dc_sink *prev_sink = NULL;
2410 link->type = dc_connection_none;
2411 prev_sink = link->local_sink;
2414 dc_sink_release(prev_sink);
2416 switch (link->connector_signal) {
2417 case SIGNAL_TYPE_HDMI_TYPE_A: {
2418 sink_caps.transaction_type = DDC_TRANSACTION_TYPE_I2C;
2419 sink_caps.signal = SIGNAL_TYPE_HDMI_TYPE_A;
2423 case SIGNAL_TYPE_DVI_SINGLE_LINK: {
2424 sink_caps.transaction_type = DDC_TRANSACTION_TYPE_I2C;
2425 sink_caps.signal = SIGNAL_TYPE_DVI_SINGLE_LINK;
2429 case SIGNAL_TYPE_DVI_DUAL_LINK: {
2430 sink_caps.transaction_type = DDC_TRANSACTION_TYPE_I2C;
2431 sink_caps.signal = SIGNAL_TYPE_DVI_DUAL_LINK;
2435 case SIGNAL_TYPE_LVDS: {
2436 sink_caps.transaction_type = DDC_TRANSACTION_TYPE_I2C;
2437 sink_caps.signal = SIGNAL_TYPE_LVDS;
2441 case SIGNAL_TYPE_EDP: {
2442 sink_caps.transaction_type =
2443 DDC_TRANSACTION_TYPE_I2C_OVER_AUX;
2444 sink_caps.signal = SIGNAL_TYPE_EDP;
2448 case SIGNAL_TYPE_DISPLAY_PORT: {
2449 sink_caps.transaction_type =
2450 DDC_TRANSACTION_TYPE_I2C_OVER_AUX;
2451 sink_caps.signal = SIGNAL_TYPE_VIRTUAL;
2456 DC_ERROR("Invalid connector type! signal:%d\n",
2457 link->connector_signal);
2461 sink_init_data.link = link;
2462 sink_init_data.sink_signal = sink_caps.signal;
2464 sink = dc_sink_create(&sink_init_data);
2466 DC_ERROR("Failed to create sink!\n");
2470 /* dc_sink_create returns a new reference */
2471 link->local_sink = sink;
2473 edid_status = dm_helpers_read_local_edid(
2478 if (edid_status != EDID_OK)
2479 DC_ERROR("Failed to read EDID");
2483 static void dm_gpureset_commit_state(struct dc_state *dc_state,
2484 struct amdgpu_display_manager *dm)
2487 struct dc_surface_update surface_updates[MAX_SURFACES];
2488 struct dc_plane_info plane_infos[MAX_SURFACES];
2489 struct dc_scaling_info scaling_infos[MAX_SURFACES];
2490 struct dc_flip_addrs flip_addrs[MAX_SURFACES];
2491 struct dc_stream_update stream_update;
2495 bundle = kzalloc(sizeof(*bundle), GFP_KERNEL);
2498 dm_error("Failed to allocate update bundle\n");
2502 for (k = 0; k < dc_state->stream_count; k++) {
2503 bundle->stream_update.stream = dc_state->streams[k];
2505 for (m = 0; m < dc_state->stream_status->plane_count; m++) {
2506 bundle->surface_updates[m].surface =
2507 dc_state->stream_status->plane_states[m];
2508 bundle->surface_updates[m].surface->force_full_update =
2511 dc_commit_updates_for_stream(
2512 dm->dc, bundle->surface_updates,
2513 dc_state->stream_status->plane_count,
2514 dc_state->streams[k], &bundle->stream_update, dc_state);
2523 static void dm_set_dpms_off(struct dc_link *link)
2525 struct dc_stream_state *stream_state;
2526 struct amdgpu_dm_connector *aconnector = link->priv;
2527 struct amdgpu_device *adev = drm_to_adev(aconnector->base.dev);
2528 struct dc_stream_update stream_update;
2529 bool dpms_off = true;
2531 memset(&stream_update, 0, sizeof(stream_update));
2532 stream_update.dpms_off = &dpms_off;
2534 mutex_lock(&adev->dm.dc_lock);
2535 stream_state = dc_stream_find_from_link(link);
2537 if (stream_state == NULL) {
2538 DRM_DEBUG_DRIVER("Error finding stream state associated with link!\n");
2539 mutex_unlock(&adev->dm.dc_lock);
2543 stream_update.stream = stream_state;
2544 dc_commit_updates_for_stream(stream_state->ctx->dc, NULL, 0,
2545 stream_state, &stream_update,
2546 stream_state->ctx->dc->current_state);
2547 mutex_unlock(&adev->dm.dc_lock);
2550 static int dm_resume(void *handle)
2552 struct amdgpu_device *adev = handle;
2553 struct drm_device *ddev = adev_to_drm(adev);
2554 struct amdgpu_display_manager *dm = &adev->dm;
2555 struct amdgpu_dm_connector *aconnector;
2556 struct drm_connector *connector;
2557 struct drm_connector_list_iter iter;
2558 struct drm_crtc *crtc;
2559 struct drm_crtc_state *new_crtc_state;
2560 struct dm_crtc_state *dm_new_crtc_state;
2561 struct drm_plane *plane;
2562 struct drm_plane_state *new_plane_state;
2563 struct dm_plane_state *dm_new_plane_state;
2564 struct dm_atomic_state *dm_state = to_dm_atomic_state(dm->atomic_obj.state);
2565 enum dc_connection_type new_connection_type = dc_connection_none;
2566 struct dc_state *dc_state;
2569 if (amdgpu_in_reset(adev)) {
2570 dc_state = dm->cached_dc_state;
2572 if (dc_enable_dmub_notifications(adev->dm.dc))
2573 amdgpu_dm_outbox_init(adev);
2575 r = dm_dmub_hw_init(adev);
2577 DRM_ERROR("DMUB interface failed to initialize: status=%d\n", r);
2579 dc_set_power_state(dm->dc, DC_ACPI_CM_POWER_STATE_D0);
2582 amdgpu_dm_irq_resume_early(adev);
2584 for (i = 0; i < dc_state->stream_count; i++) {
2585 dc_state->streams[i]->mode_changed = true;
2586 for (j = 0; j < dc_state->stream_status[i].plane_count; j++) {
2587 dc_state->stream_status[i].plane_states[j]->update_flags.raw
2591 #if defined(CONFIG_DRM_AMD_DC_DCN)
2593 * Resource allocation happens for link encoders for newer ASIC in
2594 * dc_validate_global_state, so we need to revalidate it.
2596 * This shouldn't fail (it passed once before), so warn if it does.
2598 WARN_ON(dc_validate_global_state(dm->dc, dc_state, false) != DC_OK);
2601 WARN_ON(!dc_commit_state(dm->dc, dc_state));
2603 dm_gpureset_commit_state(dm->cached_dc_state, dm);
2605 dm_gpureset_toggle_interrupts(adev, dm->cached_dc_state, true);
2607 dc_release_state(dm->cached_dc_state);
2608 dm->cached_dc_state = NULL;
2610 amdgpu_dm_irq_resume_late(adev);
2612 mutex_unlock(&dm->dc_lock);
2616 /* Recreate dc_state - DC invalidates it when setting power state to S3. */
2617 dc_release_state(dm_state->context);
2618 dm_state->context = dc_create_state(dm->dc);
2619 /* TODO: Remove dc_state->dccg, use dc->dccg directly. */
2620 dc_resource_state_construct(dm->dc, dm_state->context);
2622 /* Re-enable outbox interrupts for DPIA. */
2623 if (dc_enable_dmub_notifications(adev->dm.dc))
2624 amdgpu_dm_outbox_init(adev);
2626 /* Before powering on DC we need to re-initialize DMUB. */
2627 dm_dmub_hw_resume(adev);
2629 /* power on hardware */
2630 dc_set_power_state(dm->dc, DC_ACPI_CM_POWER_STATE_D0);
2632 /* program HPD filter */
2636 * early enable HPD Rx IRQ, should be done before set mode as short
2637 * pulse interrupts are used for MST
2639 amdgpu_dm_irq_resume_early(adev);
2641 /* On resume we need to rewrite the MSTM control bits to enable MST*/
2642 s3_handle_mst(ddev, false);
2645 drm_connector_list_iter_begin(ddev, &iter);
2646 drm_for_each_connector_iter(connector, &iter) {
2647 aconnector = to_amdgpu_dm_connector(connector);
2650 * this is the case when traversing through already created
2651 * MST connectors, should be skipped
2653 if (aconnector->dc_link &&
2654 aconnector->dc_link->type == dc_connection_mst_branch)
2657 mutex_lock(&aconnector->hpd_lock);
2658 if (!dc_link_detect_sink(aconnector->dc_link, &new_connection_type))
2659 DRM_ERROR("KMS: Failed to detect connector\n");
2661 if (aconnector->base.force && new_connection_type == dc_connection_none)
2662 emulated_link_detect(aconnector->dc_link);
2664 dc_link_detect(aconnector->dc_link, DETECT_REASON_HPD);
2666 if (aconnector->fake_enable && aconnector->dc_link->local_sink)
2667 aconnector->fake_enable = false;
2669 if (aconnector->dc_sink)
2670 dc_sink_release(aconnector->dc_sink);
2671 aconnector->dc_sink = NULL;
2672 amdgpu_dm_update_connector_after_detect(aconnector);
2673 mutex_unlock(&aconnector->hpd_lock);
2675 drm_connector_list_iter_end(&iter);
2677 /* Force mode set in atomic commit */
2678 for_each_new_crtc_in_state(dm->cached_state, crtc, new_crtc_state, i)
2679 new_crtc_state->active_changed = true;
2682 * atomic_check is expected to create the dc states. We need to release
2683 * them here, since they were duplicated as part of the suspend
2686 for_each_new_crtc_in_state(dm->cached_state, crtc, new_crtc_state, i) {
2687 dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
2688 if (dm_new_crtc_state->stream) {
2689 WARN_ON(kref_read(&dm_new_crtc_state->stream->refcount) > 1);
2690 dc_stream_release(dm_new_crtc_state->stream);
2691 dm_new_crtc_state->stream = NULL;
2695 for_each_new_plane_in_state(dm->cached_state, plane, new_plane_state, i) {
2696 dm_new_plane_state = to_dm_plane_state(new_plane_state);
2697 if (dm_new_plane_state->dc_state) {
2698 WARN_ON(kref_read(&dm_new_plane_state->dc_state->refcount) > 1);
2699 dc_plane_state_release(dm_new_plane_state->dc_state);
2700 dm_new_plane_state->dc_state = NULL;
2704 drm_atomic_helper_resume(ddev, dm->cached_state);
2706 dm->cached_state = NULL;
2708 amdgpu_dm_irq_resume_late(adev);
2710 amdgpu_dm_smu_write_watermarks_table(adev);
/*
 * DM (and consequently DC) is registered in the amdgpu base driver as an IP
 * block. When CONFIG_DRM_AMD_DC is enabled, the DM device IP block is added to
 * the base driver's device list to be initialized and torn down accordingly.
 *
 * The functions to do so are provided as hooks in &struct amd_ip_funcs.
 */
2725 static const struct amd_ip_funcs amdgpu_dm_funcs = {
2727 .early_init = dm_early_init,
2728 .late_init = dm_late_init,
2729 .sw_init = dm_sw_init,
2730 .sw_fini = dm_sw_fini,
2731 .early_fini = amdgpu_dm_early_fini,
2732 .hw_init = dm_hw_init,
2733 .hw_fini = dm_hw_fini,
2734 .suspend = dm_suspend,
2735 .resume = dm_resume,
2736 .is_idle = dm_is_idle,
2737 .wait_for_idle = dm_wait_for_idle,
2738 .check_soft_reset = dm_check_soft_reset,
2739 .soft_reset = dm_soft_reset,
2740 .set_clockgating_state = dm_set_clockgating_state,
2741 .set_powergating_state = dm_set_powergating_state,
2744 const struct amdgpu_ip_block_version dm_ip_block =
2746 .type = AMD_IP_BLOCK_TYPE_DCE,
2750 .funcs = &amdgpu_dm_funcs,
2760 static const struct drm_mode_config_funcs amdgpu_dm_mode_funcs = {
2761 .fb_create = amdgpu_display_user_framebuffer_create,
2762 .get_format_info = amd_get_format_info,
2763 .output_poll_changed = drm_fb_helper_output_poll_changed,
2764 .atomic_check = amdgpu_dm_atomic_check,
2765 .atomic_commit = drm_atomic_helper_commit,
2768 static struct drm_mode_config_helper_funcs amdgpu_dm_mode_config_helperfuncs = {
2769 .atomic_commit_tail = amdgpu_dm_atomic_commit_tail
2772 static void update_connector_ext_caps(struct amdgpu_dm_connector *aconnector)
2774 u32 max_avg, min_cll, max, min, q, r;
2775 struct amdgpu_dm_backlight_caps *caps;
2776 struct amdgpu_display_manager *dm;
2777 struct drm_connector *conn_base;
2778 struct amdgpu_device *adev;
2779 struct dc_link *link = NULL;
2780 static const u8 pre_computed_values[] = {
2781 50, 51, 52, 53, 55, 56, 57, 58, 59, 61, 62, 63, 65, 66, 68, 69,
2782 71, 72, 74, 75, 77, 79, 81, 82, 84, 86, 88, 90, 92, 94, 96, 98};
2785 if (!aconnector || !aconnector->dc_link)
2788 link = aconnector->dc_link;
2789 if (link->connector_signal != SIGNAL_TYPE_EDP)
2792 conn_base = &aconnector->base;
2793 adev = drm_to_adev(conn_base->dev);
2795 for (i = 0; i < dm->num_of_edps; i++) {
2796 if (link == dm->backlight_link[i])
2799 if (i >= dm->num_of_edps)
2801 caps = &dm->backlight_caps[i];
2802 caps->ext_caps = &aconnector->dc_link->dpcd_sink_ext_caps;
2803 caps->aux_support = false;
2804 max_avg = conn_base->hdr_sink_metadata.hdmi_type1.max_fall;
2805 min_cll = conn_base->hdr_sink_metadata.hdmi_type1.min_cll;
2807 if (caps->ext_caps->bits.oled == 1 /*||
2808 caps->ext_caps->bits.sdr_aux_backlight_control == 1 ||
2809 caps->ext_caps->bits.hdr_aux_backlight_control == 1*/)
2810 caps->aux_support = true;
2812 if (amdgpu_backlight == 0)
2813 caps->aux_support = false;
2814 else if (amdgpu_backlight == 1)
2815 caps->aux_support = true;
2817 /* From the specification (CTA-861-G), for calculating the maximum
2818 * luminance we need to use:
2819 * Luminance = 50*2**(CV/32)
2820 * Where CV is a one-byte value.
2821 * For calculating this expression we may need float point precision;
2822 * to avoid this complexity level, we take advantage that CV is divided
2823 * by a constant. From the Euclids division algorithm, we know that CV
2824 * can be written as: CV = 32*q + r. Next, we replace CV in the
2825 * Luminance expression and get 50*(2**q)*(2**(r/32)), hence we just
2826 * need to pre-compute the value of r/32. For pre-computing the values
2827 * We just used the following Ruby line:
2828 * (0...32).each {|cv| puts (50*2**(cv/32.0)).round}
2829 * The results of the above expressions can be verified at
2830 * pre_computed_values.
2834 max = (1 << q) * pre_computed_values[r];
2836 // min luminance: maxLum * (CV/255)^2 / 100
2837 q = DIV_ROUND_CLOSEST(min_cll, 255);
2838 min = max * DIV_ROUND_CLOSEST((q * q), 100);
2840 caps->aux_max_input_signal = max;
2841 caps->aux_min_input_signal = min;
2844 void amdgpu_dm_update_connector_after_detect(
2845 struct amdgpu_dm_connector *aconnector)
2847 struct drm_connector *connector = &aconnector->base;
2848 struct drm_device *dev = connector->dev;
2849 struct dc_sink *sink;
2851 /* MST handled by drm_mst framework */
2852 if (aconnector->mst_mgr.mst_state == true)
2855 sink = aconnector->dc_link->local_sink;
2857 dc_sink_retain(sink);
2860 * Edid mgmt connector gets first update only in mode_valid hook and then
2861 * the connector sink is set to either fake or physical sink depends on link status.
2862 * Skip if already done during boot.
2864 if (aconnector->base.force != DRM_FORCE_UNSPECIFIED
2865 && aconnector->dc_em_sink) {
2868 * For S3 resume with headless use eml_sink to fake stream
2869 * because on resume connector->sink is set to NULL
2871 mutex_lock(&dev->mode_config.mutex);
2874 if (aconnector->dc_sink) {
2875 amdgpu_dm_update_freesync_caps(connector, NULL);
2877 * retain and release below are used to
2878 * bump up refcount for sink because the link doesn't point
2879 * to it anymore after disconnect, so on next crtc to connector
2880 * reshuffle by UMD we will get into unwanted dc_sink release
2882 dc_sink_release(aconnector->dc_sink);
2884 aconnector->dc_sink = sink;
2885 dc_sink_retain(aconnector->dc_sink);
2886 amdgpu_dm_update_freesync_caps(connector,
2889 amdgpu_dm_update_freesync_caps(connector, NULL);
2890 if (!aconnector->dc_sink) {
2891 aconnector->dc_sink = aconnector->dc_em_sink;
2892 dc_sink_retain(aconnector->dc_sink);
2896 mutex_unlock(&dev->mode_config.mutex);
2899 dc_sink_release(sink);
2904 * TODO: temporary guard to look for proper fix
2905 * if this sink is MST sink, we should not do anything
2907 if (sink && sink->sink_signal == SIGNAL_TYPE_DISPLAY_PORT_MST) {
2908 dc_sink_release(sink);
2912 if (aconnector->dc_sink == sink) {
2914 * We got a DP short pulse (Link Loss, DP CTS, etc...).
2917 DRM_DEBUG_DRIVER("DCHPD: connector_id=%d: dc_sink didn't change.\n",
2918 aconnector->connector_id);
2920 dc_sink_release(sink);
2924 DRM_DEBUG_DRIVER("DCHPD: connector_id=%d: Old sink=%p New sink=%p\n",
2925 aconnector->connector_id, aconnector->dc_sink, sink);
2927 mutex_lock(&dev->mode_config.mutex);
2930 * 1. Update status of the drm connector
2931 * 2. Send an event and let userspace tell us what to do
2935 * TODO: check if we still need the S3 mode update workaround.
2936 * If yes, put it here.
2938 if (aconnector->dc_sink) {
2939 amdgpu_dm_update_freesync_caps(connector, NULL);
2940 dc_sink_release(aconnector->dc_sink);
2943 aconnector->dc_sink = sink;
2944 dc_sink_retain(aconnector->dc_sink);
2945 if (sink->dc_edid.length == 0) {
2946 aconnector->edid = NULL;
2947 if (aconnector->dc_link->aux_mode) {
2948 drm_dp_cec_unset_edid(
2949 &aconnector->dm_dp_aux.aux);
2953 (struct edid *)sink->dc_edid.raw_edid;
2955 drm_connector_update_edid_property(connector,
2957 if (aconnector->dc_link->aux_mode)
2958 drm_dp_cec_set_edid(&aconnector->dm_dp_aux.aux,
2962 amdgpu_dm_update_freesync_caps(connector, aconnector->edid);
2963 update_connector_ext_caps(aconnector);
2965 drm_dp_cec_unset_edid(&aconnector->dm_dp_aux.aux);
2966 amdgpu_dm_update_freesync_caps(connector, NULL);
2967 drm_connector_update_edid_property(connector, NULL);
2968 aconnector->num_modes = 0;
2969 dc_sink_release(aconnector->dc_sink);
2970 aconnector->dc_sink = NULL;
2971 aconnector->edid = NULL;
2972 #ifdef CONFIG_DRM_AMD_DC_HDCP
2973 /* Set CP to DESIRED if it was ENABLED, so we can re-enable it again on hotplug */
2974 if (connector->state->content_protection == DRM_MODE_CONTENT_PROTECTION_ENABLED)
2975 connector->state->content_protection = DRM_MODE_CONTENT_PROTECTION_DESIRED;
2979 mutex_unlock(&dev->mode_config.mutex);
2981 update_subconnector_property(aconnector);
2984 dc_sink_release(sink);
2987 static void handle_hpd_irq_helper(struct amdgpu_dm_connector *aconnector)
2989 struct drm_connector *connector = &aconnector->base;
2990 struct drm_device *dev = connector->dev;
2991 enum dc_connection_type new_connection_type = dc_connection_none;
2992 struct amdgpu_device *adev = drm_to_adev(dev);
2993 #ifdef CONFIG_DRM_AMD_DC_HDCP
2994 struct dm_connector_state *dm_con_state = to_dm_connector_state(connector->state);
2997 if (adev->dm.disable_hpd_irq)
3001 * In case of failure or MST no need to update connector status or notify the OS
3002 * since (for MST case) MST does this in its own context.
3004 mutex_lock(&aconnector->hpd_lock);
3006 #ifdef CONFIG_DRM_AMD_DC_HDCP
3007 if (adev->dm.hdcp_workqueue) {
3008 hdcp_reset_display(adev->dm.hdcp_workqueue, aconnector->dc_link->link_index);
3009 dm_con_state->update_hdcp = true;
3012 if (aconnector->fake_enable)
3013 aconnector->fake_enable = false;
3015 if (!dc_link_detect_sink(aconnector->dc_link, &new_connection_type))
3016 DRM_ERROR("KMS: Failed to detect connector\n");
3018 if (aconnector->base.force && new_connection_type == dc_connection_none) {
3019 emulated_link_detect(aconnector->dc_link);
3022 drm_modeset_lock_all(dev);
3023 dm_restore_drm_connector_state(dev, connector);
3024 drm_modeset_unlock_all(dev);
3026 if (aconnector->base.force == DRM_FORCE_UNSPECIFIED)
3027 drm_kms_helper_hotplug_event(dev);
3029 } else if (dc_link_detect(aconnector->dc_link, DETECT_REASON_HPD)) {
3030 if (new_connection_type == dc_connection_none &&
3031 aconnector->dc_link->type == dc_connection_none)
3032 dm_set_dpms_off(aconnector->dc_link);
3034 amdgpu_dm_update_connector_after_detect(aconnector);
3036 drm_modeset_lock_all(dev);
3037 dm_restore_drm_connector_state(dev, connector);
3038 drm_modeset_unlock_all(dev);
3040 if (aconnector->base.force == DRM_FORCE_UNSPECIFIED)
3041 drm_kms_helper_hotplug_event(dev);
3043 mutex_unlock(&aconnector->hpd_lock);
/* IRQ callback (void * signature required by the registration API). */
static void handle_hpd_irq(void *param)
{
	struct amdgpu_dm_connector *aconnector = (struct amdgpu_dm_connector *)param;

	handle_hpd_irq_helper(aconnector);
}
3055 static void dm_handle_mst_sideband_msg(struct amdgpu_dm_connector *aconnector)
3057 uint8_t esi[DP_PSR_ERROR_STATUS - DP_SINK_COUNT_ESI] = { 0 };
3059 bool new_irq_handled = false;
3061 int dpcd_bytes_to_read;
3063 const int max_process_count = 30;
3064 int process_count = 0;
3066 const struct dc_link_status *link_status = dc_link_get_status(aconnector->dc_link);
3068 if (link_status->dpcd_caps->dpcd_rev.raw < 0x12) {
3069 dpcd_bytes_to_read = DP_LANE0_1_STATUS - DP_SINK_COUNT;
3070 /* DPCD 0x200 - 0x201 for downstream IRQ */
3071 dpcd_addr = DP_SINK_COUNT;
3073 dpcd_bytes_to_read = DP_PSR_ERROR_STATUS - DP_SINK_COUNT_ESI;
3074 /* DPCD 0x2002 - 0x2005 for downstream IRQ */
3075 dpcd_addr = DP_SINK_COUNT_ESI;
3078 dret = drm_dp_dpcd_read(
3079 &aconnector->dm_dp_aux.aux,
3082 dpcd_bytes_to_read);
3084 while (dret == dpcd_bytes_to_read &&
3085 process_count < max_process_count) {
3091 DRM_DEBUG_DRIVER("ESI %02x %02x %02x\n", esi[0], esi[1], esi[2]);
3092 /* handle HPD short pulse irq */
3093 if (aconnector->mst_mgr.mst_state)
3095 &aconnector->mst_mgr,
3099 if (new_irq_handled) {
3100 /* ACK at DPCD to notify down stream */
3101 const int ack_dpcd_bytes_to_write =
3102 dpcd_bytes_to_read - 1;
3104 for (retry = 0; retry < 3; retry++) {
3107 wret = drm_dp_dpcd_write(
3108 &aconnector->dm_dp_aux.aux,
3111 ack_dpcd_bytes_to_write);
3112 if (wret == ack_dpcd_bytes_to_write)
3116 /* check if there is new irq to be handled */
3117 dret = drm_dp_dpcd_read(
3118 &aconnector->dm_dp_aux.aux,
3121 dpcd_bytes_to_read);
3123 new_irq_handled = false;
3129 if (process_count == max_process_count)
3130 DRM_DEBUG_DRIVER("Loop exceeded max iterations\n");
3133 static void schedule_hpd_rx_offload_work(struct hpd_rx_irq_offload_work_queue *offload_wq,
3134 union hpd_irq_data hpd_irq_data)
3136 struct hpd_rx_irq_offload_work *offload_work =
3137 kzalloc(sizeof(*offload_work), GFP_KERNEL);
3139 if (!offload_work) {
3140 DRM_ERROR("Failed to allocate hpd_rx_irq_offload_work.\n");
3144 INIT_WORK(&offload_work->work, dm_handle_hpd_rx_offload_work);
3145 offload_work->data = hpd_irq_data;
3146 offload_work->offload_wq = offload_wq;
3148 queue_work(offload_wq->wq, &offload_work->work);
3149 DRM_DEBUG_KMS("queue work to handle hpd_rx offload work");
3152 static void handle_hpd_rx_irq(void *param)
3154 struct amdgpu_dm_connector *aconnector = (struct amdgpu_dm_connector *)param;
3155 struct drm_connector *connector = &aconnector->base;
3156 struct drm_device *dev = connector->dev;
3157 struct dc_link *dc_link = aconnector->dc_link;
3158 bool is_mst_root_connector = aconnector->mst_mgr.mst_state;
3159 bool result = false;
3160 enum dc_connection_type new_connection_type = dc_connection_none;
3161 struct amdgpu_device *adev = drm_to_adev(dev);
3162 union hpd_irq_data hpd_irq_data;
3163 bool link_loss = false;
3164 bool has_left_work = false;
3165 int idx = aconnector->base.index;
3166 struct hpd_rx_irq_offload_work_queue *offload_wq = &adev->dm.hpd_rx_offload_wq[idx];
3168 memset(&hpd_irq_data, 0, sizeof(hpd_irq_data));
3170 if (adev->dm.disable_hpd_irq)
3174 * TODO:Temporary add mutex to protect hpd interrupt not have a gpio
3175 * conflict, after implement i2c helper, this mutex should be
3178 mutex_lock(&aconnector->hpd_lock);
3180 result = dc_link_handle_hpd_rx_irq(dc_link, &hpd_irq_data,
3181 &link_loss, true, &has_left_work);
3186 if (hpd_irq_data.bytes.device_service_irq.bits.AUTOMATED_TEST) {
3187 schedule_hpd_rx_offload_work(offload_wq, hpd_irq_data);
3191 if (dc_link_dp_allow_hpd_rx_irq(dc_link)) {
3192 if (hpd_irq_data.bytes.device_service_irq.bits.UP_REQ_MSG_RDY ||
3193 hpd_irq_data.bytes.device_service_irq.bits.DOWN_REP_MSG_RDY) {
3194 dm_handle_mst_sideband_msg(aconnector);
3201 spin_lock(&offload_wq->offload_lock);
3202 skip = offload_wq->is_handling_link_loss;
3205 offload_wq->is_handling_link_loss = true;
3207 spin_unlock(&offload_wq->offload_lock);
3210 schedule_hpd_rx_offload_work(offload_wq, hpd_irq_data);
3217 if (result && !is_mst_root_connector) {
3218 /* Downstream Port status changed. */
3219 if (!dc_link_detect_sink(dc_link, &new_connection_type))
3220 DRM_ERROR("KMS: Failed to detect connector\n");
3222 if (aconnector->base.force && new_connection_type == dc_connection_none) {
3223 emulated_link_detect(dc_link);
3225 if (aconnector->fake_enable)
3226 aconnector->fake_enable = false;
3228 amdgpu_dm_update_connector_after_detect(aconnector);
3231 drm_modeset_lock_all(dev);
3232 dm_restore_drm_connector_state(dev, connector);
3233 drm_modeset_unlock_all(dev);
3235 drm_kms_helper_hotplug_event(dev);
3236 } else if (dc_link_detect(dc_link, DETECT_REASON_HPDRX)) {
3238 if (aconnector->fake_enable)
3239 aconnector->fake_enable = false;
3241 amdgpu_dm_update_connector_after_detect(aconnector);
3244 drm_modeset_lock_all(dev);
3245 dm_restore_drm_connector_state(dev, connector);
3246 drm_modeset_unlock_all(dev);
3248 drm_kms_helper_hotplug_event(dev);
3251 #ifdef CONFIG_DRM_AMD_DC_HDCP
3252 if (hpd_irq_data.bytes.device_service_irq.bits.CP_IRQ) {
3253 if (adev->dm.hdcp_workqueue)
3254 hdcp_handle_cpirq(adev->dm.hdcp_workqueue, aconnector->base.index);
3258 if (dc_link->type != dc_connection_mst_branch)
3259 drm_dp_cec_irq(&aconnector->dm_dp_aux.aux);
3261 mutex_unlock(&aconnector->hpd_lock);
3264 static void register_hpd_handlers(struct amdgpu_device *adev)
3266 struct drm_device *dev = adev_to_drm(adev);
3267 struct drm_connector *connector;
3268 struct amdgpu_dm_connector *aconnector;
3269 const struct dc_link *dc_link;
3270 struct dc_interrupt_params int_params = {0};
3272 int_params.requested_polarity = INTERRUPT_POLARITY_DEFAULT;
3273 int_params.current_polarity = INTERRUPT_POLARITY_DEFAULT;
3275 list_for_each_entry(connector,
3276 &dev->mode_config.connector_list, head) {
3278 aconnector = to_amdgpu_dm_connector(connector);
3279 dc_link = aconnector->dc_link;
3281 if (DC_IRQ_SOURCE_INVALID != dc_link->irq_source_hpd) {
3282 int_params.int_context = INTERRUPT_LOW_IRQ_CONTEXT;
3283 int_params.irq_source = dc_link->irq_source_hpd;
3285 amdgpu_dm_irq_register_interrupt(adev, &int_params,
3287 (void *) aconnector);
3290 if (DC_IRQ_SOURCE_INVALID != dc_link->irq_source_hpd_rx) {
3292 /* Also register for DP short pulse (hpd_rx). */
3293 int_params.int_context = INTERRUPT_LOW_IRQ_CONTEXT;
3294 int_params.irq_source = dc_link->irq_source_hpd_rx;
3296 amdgpu_dm_irq_register_interrupt(adev, &int_params,
3298 (void *) aconnector);
3300 if (adev->dm.hpd_rx_offload_wq)
3301 adev->dm.hpd_rx_offload_wq[connector->index].aconnector =
#if defined(CONFIG_DRM_AMD_DC_SI)
/* Register IRQ sources and initialize IRQ callbacks (DCE6 / SI ASICs). */
static int dce60_register_irq_handlers(struct amdgpu_device *adev)
{
	struct dc *dc = adev->dm.dc;
	struct common_irq_params *c_irq_params;
	struct dc_interrupt_params int_params = {0};
	int r;
	int i;
	unsigned int client_id = AMDGPU_IRQ_CLIENTID_LEGACY;

	int_params.requested_polarity = INTERRUPT_POLARITY_DEFAULT;
	int_params.current_polarity = INTERRUPT_POLARITY_DEFAULT;

	/*
	 * Actions of amdgpu_irq_add_id():
	 * 1. Register a set() function with base driver.
	 *    Base driver will call set() function to enable/disable an
	 *    interrupt in DC hardware.
	 * 2. Register amdgpu_dm_irq_handler().
	 *    Base driver will call amdgpu_dm_irq_handler() for ALL interrupts
	 *    coming from DC hardware.
	 *    amdgpu_dm_irq_handler() will re-direct the interrupt to DC
	 *    for acknowledging and handling.
	 */

	/* Use VBLANK interrupt */
	for (i = 0; i < adev->mode_info.num_crtc; i++) {
		/* DCE6 vblank source IDs are 1-based per CRTC. */
		r = amdgpu_irq_add_id(adev, client_id, i + 1, &adev->crtc_irq);
		if (r) {
			DRM_ERROR("Failed to add crtc irq id!\n");
			return r;
		}

		int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
		int_params.irq_source =
			dc_interrupt_to_irq_source(dc, i + 1, 0);

		c_irq_params = &adev->dm.vblank_params[int_params.irq_source - DC_IRQ_SOURCE_VBLANK1];

		c_irq_params->adev = adev;
		c_irq_params->irq_src = int_params.irq_source;

		amdgpu_dm_irq_register_interrupt(adev, &int_params,
				dm_crtc_high_irq, c_irq_params);
	}

	/* Use GRPH_PFLIP interrupt */
	for (i = VISLANDS30_IV_SRCID_D1_GRPH_PFLIP;
	     i <= VISLANDS30_IV_SRCID_D6_GRPH_PFLIP; i += 2) {
		r = amdgpu_irq_add_id(adev, client_id, i, &adev->pageflip_irq);
		if (r) {
			DRM_ERROR("Failed to add page flip irq id!\n");
			return r;
		}

		int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
		int_params.irq_source =
			dc_interrupt_to_irq_source(dc, i, 0);

		c_irq_params = &adev->dm.pflip_params[int_params.irq_source - DC_IRQ_SOURCE_PFLIP_FIRST];

		c_irq_params->adev = adev;
		c_irq_params->irq_src = int_params.irq_source;

		amdgpu_dm_irq_register_interrupt(adev, &int_params,
				dm_pflip_high_irq, c_irq_params);
	}

	/* HPD */
	r = amdgpu_irq_add_id(adev, client_id,
			VISLANDS30_IV_SRCID_HOTPLUG_DETECT_A, &adev->hpd_irq);
	if (r) {
		DRM_ERROR("Failed to add hpd irq id!\n");
		return r;
	}

	register_hpd_handlers(adev);

	return 0;
}
#endif
3390 /* Register IRQ sources and initialize IRQ callbacks */
3391 static int dce110_register_irq_handlers(struct amdgpu_device *adev)
3393 struct dc *dc = adev->dm.dc;
3394 struct common_irq_params *c_irq_params;
3395 struct dc_interrupt_params int_params = {0};
3398 unsigned client_id = AMDGPU_IRQ_CLIENTID_LEGACY;
3400 if (adev->asic_type >= CHIP_VEGA10)
3401 client_id = SOC15_IH_CLIENTID_DCE;
3403 int_params.requested_polarity = INTERRUPT_POLARITY_DEFAULT;
3404 int_params.current_polarity = INTERRUPT_POLARITY_DEFAULT;
3407 * Actions of amdgpu_irq_add_id():
3408 * 1. Register a set() function with base driver.
3409 * Base driver will call set() function to enable/disable an
3410 * interrupt in DC hardware.
3411 * 2. Register amdgpu_dm_irq_handler().
3412 * Base driver will call amdgpu_dm_irq_handler() for ALL interrupts
3413 * coming from DC hardware.
3414 * amdgpu_dm_irq_handler() will re-direct the interrupt to DC
3415 * for acknowledging and handling. */
3417 /* Use VBLANK interrupt */
3418 for (i = VISLANDS30_IV_SRCID_D1_VERTICAL_INTERRUPT0; i <= VISLANDS30_IV_SRCID_D6_VERTICAL_INTERRUPT0; i++) {
3419 r = amdgpu_irq_add_id(adev, client_id, i, &adev->crtc_irq);
3421 DRM_ERROR("Failed to add crtc irq id!\n");
3425 int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
3426 int_params.irq_source =
3427 dc_interrupt_to_irq_source(dc, i, 0);
3429 c_irq_params = &adev->dm.vblank_params[int_params.irq_source - DC_IRQ_SOURCE_VBLANK1];
3431 c_irq_params->adev = adev;
3432 c_irq_params->irq_src = int_params.irq_source;
3434 amdgpu_dm_irq_register_interrupt(adev, &int_params,
3435 dm_crtc_high_irq, c_irq_params);
3438 /* Use VUPDATE interrupt */
3439 for (i = VISLANDS30_IV_SRCID_D1_V_UPDATE_INT; i <= VISLANDS30_IV_SRCID_D6_V_UPDATE_INT; i += 2) {
3440 r = amdgpu_irq_add_id(adev, client_id, i, &adev->vupdate_irq);
3442 DRM_ERROR("Failed to add vupdate irq id!\n");
3446 int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
3447 int_params.irq_source =
3448 dc_interrupt_to_irq_source(dc, i, 0);
3450 c_irq_params = &adev->dm.vupdate_params[int_params.irq_source - DC_IRQ_SOURCE_VUPDATE1];
3452 c_irq_params->adev = adev;
3453 c_irq_params->irq_src = int_params.irq_source;
3455 amdgpu_dm_irq_register_interrupt(adev, &int_params,
3456 dm_vupdate_high_irq, c_irq_params);
3459 /* Use GRPH_PFLIP interrupt */
3460 for (i = VISLANDS30_IV_SRCID_D1_GRPH_PFLIP;
3461 i <= VISLANDS30_IV_SRCID_D6_GRPH_PFLIP; i += 2) {
3462 r = amdgpu_irq_add_id(adev, client_id, i, &adev->pageflip_irq);
3464 DRM_ERROR("Failed to add page flip irq id!\n");
3468 int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
3469 int_params.irq_source =
3470 dc_interrupt_to_irq_source(dc, i, 0);
3472 c_irq_params = &adev->dm.pflip_params[int_params.irq_source - DC_IRQ_SOURCE_PFLIP_FIRST];
3474 c_irq_params->adev = adev;
3475 c_irq_params->irq_src = int_params.irq_source;
3477 amdgpu_dm_irq_register_interrupt(adev, &int_params,
3478 dm_pflip_high_irq, c_irq_params);
3483 r = amdgpu_irq_add_id(adev, client_id,
3484 VISLANDS30_IV_SRCID_HOTPLUG_DETECT_A, &adev->hpd_irq);
3486 DRM_ERROR("Failed to add hpd irq id!\n");
3490 register_hpd_handlers(adev);
#if defined(CONFIG_DRM_AMD_DC_DCN)
/*
 * Register IRQ sources and initialize IRQ callbacks for DCN hardware:
 * VSTARTUP (vblank), optional OTG vertical-line0 (secure display),
 * VUPDATE_NO_LOCK, GRPH_PFLIP and HPD interrupts are routed through
 * the DM interrupt machinery.
 */
static int dcn10_register_irq_handlers(struct amdgpu_device *adev)
	struct dc *dc = adev->dm.dc;
	struct common_irq_params *c_irq_params;
	struct dc_interrupt_params int_params = {0};
#if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
	/* OTG vertical-line0 interrupt source IDs, indexed by CRTC number. */
	static const unsigned int vrtl_int_srcid[] = {
		DCN_1_0__SRCID__OTG1_VERTICAL_INTERRUPT0_CONTROL,
		DCN_1_0__SRCID__OTG2_VERTICAL_INTERRUPT0_CONTROL,
		DCN_1_0__SRCID__OTG3_VERTICAL_INTERRUPT0_CONTROL,
		DCN_1_0__SRCID__OTG4_VERTICAL_INTERRUPT0_CONTROL,
		DCN_1_0__SRCID__OTG5_VERTICAL_INTERRUPT0_CONTROL,
		DCN_1_0__SRCID__OTG6_VERTICAL_INTERRUPT0_CONTROL

	int_params.requested_polarity = INTERRUPT_POLARITY_DEFAULT;
	int_params.current_polarity = INTERRUPT_POLARITY_DEFAULT;

	/*
	 * Actions of amdgpu_irq_add_id():
	 * 1. Register a set() function with base driver.
	 *    Base driver will call set() function to enable/disable an
	 *    interrupt in DC hardware.
	 * 2. Register amdgpu_dm_irq_handler().
	 *    Base driver will call amdgpu_dm_irq_handler() for ALL interrupts
	 *    coming from DC hardware.
	 *    amdgpu_dm_irq_handler() will re-direct the interrupt to DC
	 *    for acknowledging and handling.
	 */

	/* Use VSTARTUP interrupt */
	for (i = DCN_1_0__SRCID__DC_D1_OTG_VSTARTUP;
	     i <= DCN_1_0__SRCID__DC_D1_OTG_VSTARTUP + adev->mode_info.num_crtc - 1;
		r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_DCE, i, &adev->crtc_irq);
			DRM_ERROR("Failed to add crtc irq id!\n");

		int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
		int_params.irq_source =
			dc_interrupt_to_irq_source(dc, i, 0);

		/* Per-CRTC vblank parameter slot, keyed by DC irq source. */
		c_irq_params = &adev->dm.vblank_params[int_params.irq_source - DC_IRQ_SOURCE_VBLANK1];

		c_irq_params->adev = adev;
		c_irq_params->irq_src = int_params.irq_source;

		amdgpu_dm_irq_register_interrupt(
			adev, &int_params, dm_crtc_high_irq, c_irq_params);

	/* Use otg vertical line interrupt */
#if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
	for (i = 0; i <= adev->mode_info.num_crtc - 1; i++) {
		r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_DCE,
				vrtl_int_srcid[i], &adev->vline0_irq);
			DRM_ERROR("Failed to add vline0 irq id!\n");

		int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
		int_params.irq_source =
			dc_interrupt_to_irq_source(dc, vrtl_int_srcid[i], 0);

		if (int_params.irq_source == DC_IRQ_SOURCE_INVALID) {
			DRM_ERROR("Failed to register vline0 irq %d!\n", vrtl_int_srcid[i]);

		c_irq_params = &adev->dm.vline0_params[int_params.irq_source
					- DC_IRQ_SOURCE_DC1_VLINE0];

		c_irq_params->adev = adev;
		c_irq_params->irq_src = int_params.irq_source;

		amdgpu_dm_irq_register_interrupt(adev, &int_params,
				dm_dcn_vertical_interrupt0_high_irq, c_irq_params);

	/* Use VUPDATE_NO_LOCK interrupt on DCN, which seems to correspond to
	 * the regular VUPDATE interrupt on DCE. We want DC_IRQ_SOURCE_VUPDATEx
	 * to trigger at end of each vblank, regardless of state of the lock,
	 * matching DCE behaviour.
	 */
	for (i = DCN_1_0__SRCID__OTG0_IHC_V_UPDATE_NO_LOCK_INTERRUPT;
	     i <= DCN_1_0__SRCID__OTG0_IHC_V_UPDATE_NO_LOCK_INTERRUPT + adev->mode_info.num_crtc - 1;
		r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_DCE, i, &adev->vupdate_irq);
			DRM_ERROR("Failed to add vupdate irq id!\n");

		int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
		int_params.irq_source =
			dc_interrupt_to_irq_source(dc, i, 0);

		c_irq_params = &adev->dm.vupdate_params[int_params.irq_source - DC_IRQ_SOURCE_VUPDATE1];

		c_irq_params->adev = adev;
		c_irq_params->irq_src = int_params.irq_source;

		amdgpu_dm_irq_register_interrupt(adev, &int_params,
				dm_vupdate_high_irq, c_irq_params);

	/* Use GRPH_PFLIP interrupt */
	for (i = DCN_1_0__SRCID__HUBP0_FLIP_INTERRUPT;
	     i <= DCN_1_0__SRCID__HUBP0_FLIP_INTERRUPT + dc->caps.max_otg_num - 1;
		r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_DCE, i, &adev->pageflip_irq);
			DRM_ERROR("Failed to add page flip irq id!\n");

		int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
		int_params.irq_source =
			dc_interrupt_to_irq_source(dc, i, 0);

		c_irq_params = &adev->dm.pflip_params[int_params.irq_source - DC_IRQ_SOURCE_PFLIP_FIRST];

		c_irq_params->adev = adev;
		c_irq_params->irq_src = int_params.irq_source;

		amdgpu_dm_irq_register_interrupt(adev, &int_params,
				dm_pflip_high_irq, c_irq_params);

	/* HPD hotplug interrupt */
	r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_DCE, DCN_1_0__SRCID__DC_HPD1_INT,
		DRM_ERROR("Failed to add hpd irq id!\n");

	register_hpd_handlers(adev);
/*
 * Register Outbox IRQ sources and initialize IRQ callbacks.
 * Routes the DMCUB outbox low-priority "ready" interrupt through the
 * DM IRQ machinery (low-IRQ context) when a DMUB service exists.
 */
static int register_outbox_irq_handlers(struct amdgpu_device *adev)
	struct dc *dc = adev->dm.dc;
	struct common_irq_params *c_irq_params;
	struct dc_interrupt_params int_params = {0};

	int_params.requested_polarity = INTERRUPT_POLARITY_DEFAULT;
	int_params.current_polarity = INTERRUPT_POLARITY_DEFAULT;

	r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_DCE, DCN_1_0__SRCID__DMCUB_OUTBOX_LOW_PRIORITY_READY_INT,
			&adev->dmub_outbox_irq);
		DRM_ERROR("Failed to add outbox irq id!\n");

	/* Only hook up the handler when a DMUB service is present. */
	if (dc->ctx->dmub_srv) {
		i = DCN_1_0__SRCID__DMCUB_OUTBOX_LOW_PRIORITY_READY_INT;
		int_params.int_context = INTERRUPT_LOW_IRQ_CONTEXT;
		int_params.irq_source =
			dc_interrupt_to_irq_source(dc, i, 0);

		c_irq_params = &adev->dm.dmub_outbox_params[0];

		c_irq_params->adev = adev;
		c_irq_params->irq_src = int_params.irq_source;

		amdgpu_dm_irq_register_interrupt(adev, &int_params,
				dm_dmub_outbox1_low_irq, c_irq_params);
3687 * Acquires the lock for the atomic state object and returns
3688 * the new atomic state.
3690 * This should only be called during atomic check.
3692 static int dm_atomic_get_state(struct drm_atomic_state *state,
3693 struct dm_atomic_state **dm_state)
3695 struct drm_device *dev = state->dev;
3696 struct amdgpu_device *adev = drm_to_adev(dev);
3697 struct amdgpu_display_manager *dm = &adev->dm;
3698 struct drm_private_state *priv_state;
3703 priv_state = drm_atomic_get_private_obj_state(state, &dm->atomic_obj);
3704 if (IS_ERR(priv_state))
3705 return PTR_ERR(priv_state);
3707 *dm_state = to_dm_atomic_state(priv_state);
3712 static struct dm_atomic_state *
3713 dm_atomic_get_new_state(struct drm_atomic_state *state)
3715 struct drm_device *dev = state->dev;
3716 struct amdgpu_device *adev = drm_to_adev(dev);
3717 struct amdgpu_display_manager *dm = &adev->dm;
3718 struct drm_private_obj *obj;
3719 struct drm_private_state *new_obj_state;
3722 for_each_new_private_obj_in_state(state, obj, new_obj_state, i) {
3723 if (obj->funcs == dm->atomic_obj.funcs)
3724 return to_dm_atomic_state(new_obj_state);
3730 static struct drm_private_state *
3731 dm_atomic_duplicate_state(struct drm_private_obj *obj)
3733 struct dm_atomic_state *old_state, *new_state;
3735 new_state = kzalloc(sizeof(*new_state), GFP_KERNEL);
3739 __drm_atomic_helper_private_obj_duplicate_state(obj, &new_state->base);
3741 old_state = to_dm_atomic_state(obj->state);
3743 if (old_state && old_state->context)
3744 new_state->context = dc_copy_state(old_state->context);
3746 if (!new_state->context) {
3751 return &new_state->base;
3754 static void dm_atomic_destroy_state(struct drm_private_obj *obj,
3755 struct drm_private_state *state)
3757 struct dm_atomic_state *dm_state = to_dm_atomic_state(state);
3759 if (dm_state && dm_state->context)
3760 dc_release_state(dm_state->context);
3765 static struct drm_private_state_funcs dm_atomic_state_funcs = {
3766 .atomic_duplicate_state = dm_atomic_duplicate_state,
3767 .atomic_destroy_state = dm_atomic_destroy_state,
/*
 * Configure DRM mode_config limits and hooks for the device, then seed
 * the DM private atomic object with a copy of DC's current resource
 * state. Error paths release the DC state.
 */
static int amdgpu_dm_mode_config_init(struct amdgpu_device *adev)
	struct dm_atomic_state *state;

	adev->mode_info.mode_config_initialized = true;

	adev_to_drm(adev)->mode_config.funcs = (void *)&amdgpu_dm_mode_funcs;
	adev_to_drm(adev)->mode_config.helper_private = &amdgpu_dm_mode_config_helperfuncs;

	/* Maximum framebuffer dimensions accepted by the driver. */
	adev_to_drm(adev)->mode_config.max_width = 16384;
	adev_to_drm(adev)->mode_config.max_height = 16384;

	adev_to_drm(adev)->mode_config.preferred_depth = 24;
	adev_to_drm(adev)->mode_config.prefer_shadow = 1;
	/* indicates support for immediate flip */
	adev_to_drm(adev)->mode_config.async_page_flip = true;

	adev_to_drm(adev)->mode_config.fb_base = adev->gmc.aper_base;

	state = kzalloc(sizeof(*state), GFP_KERNEL);

	/* Snapshot DC's current state as the baseline atomic state. */
	state->context = dc_create_state(adev->dm.dc);
	if (!state->context) {

	dc_resource_state_copy_construct_current(adev->dm.dc, state->context);

	drm_atomic_private_obj_init(adev_to_drm(adev),
				    &adev->dm.atomic_obj,
				    &dm_atomic_state_funcs);

	r = amdgpu_display_modeset_create_props(adev);
		dc_release_state(state->context);

	r = amdgpu_dm_audio_init(adev);
		dc_release_state(state->context);
3824 #define AMDGPU_DM_DEFAULT_MIN_BACKLIGHT 12
3825 #define AMDGPU_DM_DEFAULT_MAX_BACKLIGHT 255
3826 #define AUX_BL_DEFAULT_TRANSITION_TIME_MS 50
3828 #if defined(CONFIG_BACKLIGHT_CLASS_DEVICE) ||\
3829 defined(CONFIG_BACKLIGHT_CLASS_DEVICE_MODULE)
/*
 * Populate dm->backlight_caps[bl_idx] from ACPI-reported backlight
 * limits, falling back to the driver defaults when ACPI reports no
 * valid caps (or when ACPI support is compiled out). Results are
 * cached via the caps_valid flag.
 */
static void amdgpu_dm_update_backlight_caps(struct amdgpu_display_manager *dm,
#if defined(CONFIG_ACPI)
	struct amdgpu_dm_backlight_caps caps;

	memset(&caps, 0, sizeof(caps));

	/* Caps are cached; nothing to do once marked valid. */
	if (dm->backlight_caps[bl_idx].caps_valid)

	amdgpu_acpi_get_backlight_caps(&caps);
	if (caps.caps_valid) {
		dm->backlight_caps[bl_idx].caps_valid = true;
		if (caps.aux_support)
		dm->backlight_caps[bl_idx].min_input_signal = caps.min_input_signal;
		dm->backlight_caps[bl_idx].max_input_signal = caps.max_input_signal;
		/* No valid ACPI caps: fall back to driver defaults. */
		dm->backlight_caps[bl_idx].min_input_signal =
				AMDGPU_DM_DEFAULT_MIN_BACKLIGHT;
		dm->backlight_caps[bl_idx].max_input_signal =
				AMDGPU_DM_DEFAULT_MAX_BACKLIGHT;

	if (dm->backlight_caps[bl_idx].aux_support)

	/* Non-ACPI builds always use the driver defaults. */
	dm->backlight_caps[bl_idx].min_input_signal = AMDGPU_DM_DEFAULT_MIN_BACKLIGHT;
	dm->backlight_caps[bl_idx].max_input_signal = AMDGPU_DM_DEFAULT_MAX_BACKLIGHT;
/*
 * Translate the firmware backlight caps into a [min, max] brightness
 * range in the units DC expects: millinits for AUX-controlled panels,
 * 16-bit PWM duty otherwise.
 */
static int get_brightness_range(const struct amdgpu_dm_backlight_caps *caps,
				unsigned *min, unsigned *max)

	if (caps->aux_support) {
		// Firmware limits are in nits, DC API wants millinits.
		*max = 1000 * caps->aux_max_input_signal;
		*min = 1000 * caps->aux_min_input_signal;
		// Firmware limits are 8-bit, PWM control is 16-bit.
		*max = 0x101 * caps->max_input_signal;
		*min = 0x101 * caps->min_input_signal;
/*
 * Map a user brightness value (0..AMDGPU_MAX_BL_LEVEL) onto the
 * hardware range reported by get_brightness_range().
 */
static u32 convert_brightness_from_user(const struct amdgpu_dm_backlight_caps *caps,
					uint32_t brightness)

	if (!get_brightness_range(caps, &min, &max))

	// Rescale 0..255 to min..max
	return min + DIV_ROUND_CLOSEST((max - min) * brightness,
				       AMDGPU_MAX_BL_LEVEL);
/*
 * Inverse of convert_brightness_from_user(): map a hardware brightness
 * value back onto the user-visible 0..AMDGPU_MAX_BL_LEVEL scale.
 */
static u32 convert_brightness_to_user(const struct amdgpu_dm_backlight_caps *caps,
				      uint32_t brightness)

	if (!get_brightness_range(caps, &min, &max))

	/* Clamp values below the hardware minimum. */
	if (brightness < min)

	// Rescale min..max to 0..255
	return DIV_ROUND_CLOSEST(AMDGPU_MAX_BL_LEVEL * (brightness - min),
/*
 * Program the backlight for the eDP at @bl_idx to @user_brightness:
 * via AUX (in nits) when the panel supports it, otherwise via the DC
 * PWM path. The requested level is cached in dm->brightness[] and
 * mirrored into dm->actual_brightness[] once applied.
 */
static void amdgpu_dm_backlight_set_level(struct amdgpu_display_manager *dm,
					  u32 user_brightness)
	struct amdgpu_dm_backlight_caps caps;
	struct dc_link *link;

	amdgpu_dm_update_backlight_caps(dm, bl_idx);
	caps = dm->backlight_caps[bl_idx];

	dm->brightness[bl_idx] = user_brightness;
	/* Convert from the user scale to hardware units. */
	brightness = convert_brightness_from_user(&caps, dm->brightness[bl_idx]);
	link = (struct dc_link *)dm->backlight_link[bl_idx];

	/* Change brightness based on AUX property */
	if (caps.aux_support) {
		rc = dc_link_set_backlight_level_nits(link, true, brightness,
						      AUX_BL_DEFAULT_TRANSITION_TIME_MS);
			DRM_DEBUG("DM: Failed to update backlight via AUX on eDP[%d]\n", bl_idx);
		rc = dc_link_set_backlight_level(link, brightness, 0);
			DRM_DEBUG("DM: Failed to update backlight on eDP[%d]\n", bl_idx);

	dm->actual_brightness[bl_idx] = user_brightness;
/*
 * backlight_ops.update_status hook: find which eDP slot this backlight
 * device belongs to and push the requested brightness to it.
 */
static int amdgpu_dm_backlight_update_status(struct backlight_device *bd)
	struct amdgpu_display_manager *dm = bl_get_data(bd);

	/* Locate the eDP index owning this backlight device. */
	for (i = 0; i < dm->num_of_edps; i++) {
		if (bd == dm->backlight_dev[i])

	if (i >= AMDGPU_DM_MAX_NUM_EDP)

	amdgpu_dm_backlight_set_level(dm, i, bd->props.brightness);
/*
 * Read the current backlight level for the eDP at @bl_idx and convert
 * it to the user 0..AMDGPU_MAX_BL_LEVEL scale. Falls back to the
 * cached dm->brightness[] value when the hardware query fails.
 */
static u32 amdgpu_dm_backlight_get_level(struct amdgpu_display_manager *dm,
	struct amdgpu_dm_backlight_caps caps;
	struct dc_link *link = (struct dc_link *)dm->backlight_link[bl_idx];

	amdgpu_dm_update_backlight_caps(dm, bl_idx);
	caps = dm->backlight_caps[bl_idx];

	if (caps.aux_support) {
		/* AUX panels report average/peak luminance in nits. */
		rc = dc_link_get_backlight_level_nits(link, &avg, &peak);
			return dm->brightness[bl_idx];
		return convert_brightness_to_user(&caps, avg);
		int ret = dc_link_get_backlight_level(link);

		if (ret == DC_ERROR_UNEXPECTED)
			return dm->brightness[bl_idx];
		return convert_brightness_to_user(&caps, ret);
/*
 * backlight_ops.get_brightness hook: map the backlight device back to
 * its eDP index and query the current level.
 */
static int amdgpu_dm_backlight_get_brightness(struct backlight_device *bd)
	struct amdgpu_display_manager *dm = bl_get_data(bd);

	/* Locate the eDP index owning this backlight device. */
	for (i = 0; i < dm->num_of_edps; i++) {
		if (bd == dm->backlight_dev[i])

	if (i >= AMDGPU_DM_MAX_NUM_EDP)

	return amdgpu_dm_backlight_get_level(dm, i);
3998 static const struct backlight_ops amdgpu_dm_backlight_ops = {
3999 .options = BL_CORE_SUSPENDRESUME,
4000 .get_brightness = amdgpu_dm_backlight_get_brightness,
4001 .update_status = amdgpu_dm_backlight_update_status,
/*
 * Register a backlight class device ("amdgpu_blN") for the next free
 * eDP slot (dm->num_of_edps). Registration failure is logged but not
 * treated as fatal.
 */
amdgpu_dm_register_backlight_device(struct amdgpu_display_manager *dm)
	struct backlight_properties props = { 0 };

	amdgpu_dm_update_backlight_caps(dm, dm->num_of_edps);
	/* Start at full brightness. */
	dm->brightness[dm->num_of_edps] = AMDGPU_MAX_BL_LEVEL;

	props.max_brightness = AMDGPU_MAX_BL_LEVEL;
	props.brightness = AMDGPU_MAX_BL_LEVEL;
	props.type = BACKLIGHT_RAW;

	/* Device name is derived from the primary minor plus eDP index. */
	snprintf(bl_name, sizeof(bl_name), "amdgpu_bl%d",
		 adev_to_drm(dm->adev)->primary->index + dm->num_of_edps);

	dm->backlight_dev[dm->num_of_edps] = backlight_device_register(bl_name,
								       adev_to_drm(dm->adev)->dev,
								       &amdgpu_dm_backlight_ops,

	if (IS_ERR(dm->backlight_dev[dm->num_of_edps]))
		DRM_ERROR("DM: Backlight registration failed!\n");
		DRM_DEBUG_DRIVER("DM: Registered Backlight device: %s\n", bl_name);
/*
 * Allocate and initialize one DRM plane of @plane_type with the DC
 * capabilities in @plane_cap, recording it in mode_info->planes[] when
 * @mode_info is supplied (callers pass NULL for overlay planes).
 * Returns 0 on success.
 */
static int initialize_plane(struct amdgpu_display_manager *dm,
			    struct amdgpu_mode_info *mode_info, int plane_id,
			    enum drm_plane_type plane_type,
			    const struct dc_plane_cap *plane_cap)
	struct drm_plane *plane;
	unsigned long possible_crtcs;

	plane = kzalloc(sizeof(struct drm_plane), GFP_KERNEL);
		DRM_ERROR("KMS: Failed to allocate plane\n");
	plane->type = plane_type;

	/*
	 * HACK: IGT tests expect that the primary plane for a CRTC
	 * can only have one possible CRTC. Only expose support for
	 * any CRTC if they're not going to be used as a primary plane
	 * for a CRTC - like overlay or underlay planes.
	 */
	possible_crtcs = 1 << plane_id;
	if (plane_id >= dm->dc->caps.max_streams)
		possible_crtcs = 0xff;

	ret = amdgpu_dm_plane_init(dm, plane, possible_crtcs, plane_cap);

		DRM_ERROR("KMS: Failed to initialize plane\n");

	/* Track the plane for CRTC binding when mode_info is supplied. */
	mode_info->planes[plane_id] = plane;
/*
 * Hook up backlight control for @link if it drives a connected
 * eDP/LVDS panel; otherwise do nothing.
 */
static void register_backlight_device(struct amdgpu_display_manager *dm,
				      struct dc_link *link)
#if defined(CONFIG_BACKLIGHT_CLASS_DEVICE) ||\
	defined(CONFIG_BACKLIGHT_CLASS_DEVICE_MODULE)

	if ((link->connector_signal & (SIGNAL_TYPE_EDP | SIGNAL_TYPE_LVDS)) &&
	    link->type != dc_connection_none) {
		/*
		 * Even if registration fails, we should continue with
		 * DM initialization because not having a backlight control
		 * is better than a black screen.
		 */
		if (!dm->backlight_dev[dm->num_of_edps])
			amdgpu_dm_register_backlight_device(dm);

		if (dm->backlight_dev[dm->num_of_edps]) {
			dm->backlight_link[dm->num_of_edps] = link;
/*
 * In this architecture, the association
 * connector -> encoder -> crtc
 * is not really required. The crtc and connector will hold the
 * display_index as an abstraction to use with DAL component.
 *
 * Creates planes, CRTCs, encoders and connectors, performs initial
 * link detection, and registers the per-ASIC interrupt handlers.
 *
 * Returns 0 on success
 */
static int amdgpu_dm_initialize_drm_device(struct amdgpu_device *adev)
	struct amdgpu_display_manager *dm = &adev->dm;
	struct amdgpu_dm_connector *aconnector = NULL;
	struct amdgpu_encoder *aencoder = NULL;
	struct amdgpu_mode_info *mode_info = &adev->mode_info;
	int32_t primary_planes;
	enum dc_connection_type new_connection_type = dc_connection_none;
	const struct dc_plane_cap *plane;

	dm->display_indexes_num = dm->dc->caps.max_streams;
	/* Update the actual used number of crtc */
	adev->mode_info.num_crtc = adev->dm.display_indexes_num;

	link_cnt = dm->dc->caps.max_links;
	if (amdgpu_dm_mode_config_init(dm->adev)) {
		DRM_ERROR("DM: Failed to initialize mode config\n");

	/* There is one primary plane per CRTC */
	primary_planes = dm->dc->caps.max_streams;
	ASSERT(primary_planes <= AMDGPU_MAX_PLANES);

	/*
	 * Initialize primary planes, implicit planes for legacy IOCTLS.
	 * Order is reversed to match iteration order in atomic check.
	 */
	for (i = (primary_planes - 1); i >= 0; i--) {
		plane = &dm->dc->caps.planes[i];

		if (initialize_plane(dm, mode_info, i,
				     DRM_PLANE_TYPE_PRIMARY, plane)) {
			DRM_ERROR("KMS: Failed to initialize primary plane\n");

	/*
	 * Initialize overlay planes, index starting after primary planes.
	 * These planes have a higher DRM index than the primary planes since
	 * they should be considered as having a higher z-order.
	 * Order is reversed to match iteration order in atomic check.
	 *
	 * Only support DCN for now, and only expose one so we don't encourage
	 * userspace to use up all the pipes.
	 */
	for (i = 0; i < dm->dc->caps.max_planes; ++i) {
		struct dc_plane_cap *plane = &dm->dc->caps.planes[i];

		/* Overlay must be a universal DCN plane that blends both ways. */
		if (plane->type != DC_PLANE_TYPE_DCN_UNIVERSAL)

		if (!plane->blends_with_above || !plane->blends_with_below)

		if (!plane->pixel_format_support.argb8888)

		if (initialize_plane(dm, NULL, primary_planes + i,
				     DRM_PLANE_TYPE_OVERLAY, plane)) {
			DRM_ERROR("KMS: Failed to initialize overlay plane\n");

		/* Only create one overlay plane. */

	for (i = 0; i < dm->dc->caps.max_streams; i++)
		if (amdgpu_dm_crtc_init(dm, mode_info->planes[i], i)) {
			DRM_ERROR("KMS: Failed to initialize crtc\n");

#if defined(CONFIG_DRM_AMD_DC_DCN)
	/* Use Outbox interrupt */
	switch (adev->asic_type) {
	case CHIP_SIENNA_CICHLID:
	case CHIP_NAVY_FLOUNDER:
	case CHIP_YELLOW_CARP:
		if (register_outbox_irq_handlers(dm->adev)) {
			DRM_ERROR("DM: Failed to initialize IRQ\n");
		DRM_DEBUG_KMS("Unsupported ASIC type for outbox: 0x%X\n", adev->asic_type);

	/* loops over all connectors on the board */
	for (i = 0; i < link_cnt; i++) {
		struct dc_link *link = NULL;

		if (i > AMDGPU_DM_MAX_DISPLAY_INDEX) {
				"KMS: Cannot support more than %d display indexes\n",
				AMDGPU_DM_MAX_DISPLAY_INDEX);

		aconnector = kzalloc(sizeof(*aconnector), GFP_KERNEL);

		aencoder = kzalloc(sizeof(*aencoder), GFP_KERNEL);

		if (amdgpu_dm_encoder_init(dm->ddev, aencoder, i)) {
			DRM_ERROR("KMS: Failed to initialize encoder\n");

		if (amdgpu_dm_connector_init(dm, aconnector, i, aencoder)) {
			DRM_ERROR("KMS: Failed to initialize connector\n");

		link = dc_get_link_at_index(dm->dc, i);

		if (!dc_link_detect_sink(link, &new_connection_type))
			DRM_ERROR("KMS: Failed to detect connector\n");

		/* Forced connectors with nothing attached get an emulated sink. */
		if (aconnector->base.force && new_connection_type == dc_connection_none) {
			emulated_link_detect(link);
			amdgpu_dm_update_connector_after_detect(aconnector);

		} else if (dc_link_detect(link, DETECT_REASON_BOOT)) {
			amdgpu_dm_update_connector_after_detect(aconnector);
			register_backlight_device(dm, link);

			if (dm->num_of_edps)
				update_connector_ext_caps(aconnector);
			if (amdgpu_dc_feature_mask & DC_PSR_MASK)
				amdgpu_dm_set_psr_caps(link);

			/* TODO: Fix vblank control helpers to delay PSR entry to allow this when
			 * PSR is also supported.
			 */
			if (link->psr_settings.psr_feature_enabled)
				adev_to_drm(adev)->vblank_disable_immediate = false;

	/* Software is initialized. Now we can register interrupt handlers. */
	switch (adev->asic_type) {
#if defined(CONFIG_DRM_AMD_DC_SI)
		if (dce60_register_irq_handlers(dm->adev)) {
			DRM_ERROR("DM: Failed to initialize IRQ\n");
	case CHIP_POLARIS11:
	case CHIP_POLARIS10:
	case CHIP_POLARIS12:
		if (dce110_register_irq_handlers(dm->adev)) {
			DRM_ERROR("DM: Failed to initialize IRQ\n");
#if defined(CONFIG_DRM_AMD_DC_DCN)
	case CHIP_SIENNA_CICHLID:
	case CHIP_NAVY_FLOUNDER:
	case CHIP_DIMGREY_CAVEFISH:
	case CHIP_BEIGE_GOBY:
	case CHIP_YELLOW_CARP:
		if (dcn10_register_irq_handlers(dm->adev)) {
			DRM_ERROR("DM: Failed to initialize IRQ\n");
		DRM_ERROR("Unsupported ASIC type: 0x%X\n", adev->asic_type);
4323 static void amdgpu_dm_destroy_drm_device(struct amdgpu_display_manager *dm)
4325 drm_atomic_private_obj_fini(&dm->atomic_obj);
4329 /******************************************************************************
4330 * amdgpu_display_funcs functions
4331 *****************************************************************************/
/**
 * dm_bandwidth_update - program display watermarks
 *
 * @adev: amdgpu_device pointer
 *
 * Calculate and program the display watermarks and line buffer allocation.
 */
static void dm_bandwidth_update(struct amdgpu_device *adev)
{
	/* TODO: implement later */
}
4345 static const struct amdgpu_display_funcs dm_display_funcs = {
4346 .bandwidth_update = dm_bandwidth_update, /* called unconditionally */
4347 .vblank_get_counter = dm_vblank_get_counter,/* called unconditionally */
4348 .backlight_set_level = NULL, /* never called for DC */
4349 .backlight_get_level = NULL, /* never called for DC */
4350 .hpd_sense = NULL,/* called unconditionally */
4351 .hpd_set_polarity = NULL, /* called unconditionally */
4352 .hpd_get_gpio_reg = NULL, /* VBIOS parsing. DAL does it. */
4353 .page_flip_get_scanoutpos =
4354 dm_crtc_get_scanoutpos,/* called unconditionally */
4355 .add_encoder = NULL, /* VBIOS parsing. DAL does it. */
4356 .add_connector = NULL, /* VBIOS parsing. DAL does it. */
#if defined(CONFIG_DEBUG_KERNEL_DC)

/*
 * Debug-only sysfs store for "s3_debug": parses an integer from the
 * user buffer and triggers a KMS hotplug event to re-probe outputs.
 * NOTE(review): presumably the parsed value selects suspend vs. resume
 * of the display IP — the dispatch is not visible here; confirm.
 */
static ssize_t s3_debug_store(struct device *device,
			      struct device_attribute *attr,
	struct drm_device *drm_dev = dev_get_drvdata(device);
	struct amdgpu_device *adev = drm_to_adev(drm_dev);

	ret = kstrtoint(buf, 0, &s3_state);

	drm_kms_helper_hotplug_event(adev_to_drm(adev));

	/* Report full consumption on success, 0 on parse failure. */
	return ret == 0 ? count : 0;

DEVICE_ATTR_WO(s3_debug);
/*
 * IP-block early_init hook: record per-ASIC display resource counts
 * (CRTCs, HPD lines, DIG encoders), install the DM IRQ and display
 * function tables, and (debug builds) create the s3_debug sysfs file.
 */
static int dm_early_init(void *handle)
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	/* Per-ASIC CRTC/HPD/DIG counts. */
	switch (adev->asic_type) {
#if defined(CONFIG_DRM_AMD_DC_SI)
		adev->mode_info.num_crtc = 6;
		adev->mode_info.num_hpd = 6;
		adev->mode_info.num_dig = 6;
		adev->mode_info.num_crtc = 2;
		adev->mode_info.num_hpd = 2;
		adev->mode_info.num_dig = 2;
		adev->mode_info.num_crtc = 6;
		adev->mode_info.num_hpd = 6;
		adev->mode_info.num_dig = 6;
		adev->mode_info.num_crtc = 4;
		adev->mode_info.num_hpd = 6;
		adev->mode_info.num_dig = 7;
		adev->mode_info.num_crtc = 2;
		adev->mode_info.num_hpd = 6;
		adev->mode_info.num_dig = 6;
		adev->mode_info.num_crtc = 6;
		adev->mode_info.num_hpd = 6;
		adev->mode_info.num_dig = 7;
		adev->mode_info.num_crtc = 3;
		adev->mode_info.num_hpd = 6;
		adev->mode_info.num_dig = 9;
		adev->mode_info.num_crtc = 2;
		adev->mode_info.num_hpd = 6;
		adev->mode_info.num_dig = 9;
	case CHIP_POLARIS11:
	case CHIP_POLARIS12:
		adev->mode_info.num_crtc = 5;
		adev->mode_info.num_hpd = 5;
		adev->mode_info.num_dig = 5;
	case CHIP_POLARIS10:
		adev->mode_info.num_crtc = 6;
		adev->mode_info.num_hpd = 6;
		adev->mode_info.num_dig = 6;
		adev->mode_info.num_crtc = 6;
		adev->mode_info.num_hpd = 6;
		adev->mode_info.num_dig = 6;
#if defined(CONFIG_DRM_AMD_DC_DCN)
		adev->mode_info.num_crtc = 4;
		adev->mode_info.num_hpd = 4;
		adev->mode_info.num_dig = 4;
	case CHIP_SIENNA_CICHLID:
	case CHIP_NAVY_FLOUNDER:
		adev->mode_info.num_crtc = 6;
		adev->mode_info.num_hpd = 6;
		adev->mode_info.num_dig = 6;
	case CHIP_YELLOW_CARP:
		adev->mode_info.num_crtc = 4;
		adev->mode_info.num_hpd = 4;
		adev->mode_info.num_dig = 4;
	case CHIP_DIMGREY_CAVEFISH:
		adev->mode_info.num_crtc = 5;
		adev->mode_info.num_hpd = 5;
		adev->mode_info.num_dig = 5;
	case CHIP_BEIGE_GOBY:
		adev->mode_info.num_crtc = 2;
		adev->mode_info.num_hpd = 2;
		adev->mode_info.num_dig = 2;
		DRM_ERROR("Unsupported ASIC type: 0x%X\n", adev->asic_type);

	amdgpu_dm_set_irq_funcs(adev);

	if (adev->mode_info.funcs == NULL)
		adev->mode_info.funcs = &dm_display_funcs;

	/*
	 * Note: Do NOT change adev->audio_endpt_rreg and
	 * adev->audio_endpt_wreg because they are initialised in
	 * amdgpu_device_init()
	 */
#if defined(CONFIG_DEBUG_KERNEL_DC)
			adev_to_drm(adev)->dev,
			&dev_attr_s3_debug);
4516 static bool modeset_required(struct drm_crtc_state *crtc_state,
4517 struct dc_stream_state *new_stream,
4518 struct dc_stream_state *old_stream)
4520 return crtc_state->active && drm_atomic_crtc_needs_modeset(crtc_state);
4523 static bool modereset_required(struct drm_crtc_state *crtc_state)
4525 return !crtc_state->active && drm_atomic_crtc_needs_modeset(crtc_state);
/*
 * drm_encoder_funcs.destroy hook: release DRM core state for the
 * encoder and free the containing allocation (encoders are kzalloc'd
 * in amdgpu_dm_initialize_drm_device(); without the kfree they leak).
 */
static void amdgpu_dm_encoder_destroy(struct drm_encoder *encoder)
{
	drm_encoder_cleanup(encoder);
	kfree(encoder);
}
4534 static const struct drm_encoder_funcs amdgpu_dm_encoder_funcs = {
4535 .destroy = amdgpu_dm_encoder_destroy,
/*
 * Look up the minimum downscale / maximum upscale factors (in 1/1000
 * units) that DC supports for @fb's pixel format. A cap value of 1
 * means "no scaling" and is normalized to 1000 (scale factor 1.0).
 */
static void get_min_max_dc_plane_scaling(struct drm_device *dev,
					 struct drm_framebuffer *fb,
					 int *min_downscale, int *max_upscale)
	struct amdgpu_device *adev = drm_to_adev(dev);
	struct dc *dc = adev->dm.dc;
	/* Caps for all supported planes are the same on DCE and DCN 1 - 3 */
	struct dc_plane_cap *plane_cap = &dc->caps.planes[0];

	switch (fb->format->format) {
	case DRM_FORMAT_P010:
	case DRM_FORMAT_NV12:
	case DRM_FORMAT_NV21:
		/* Video formats use the nv12 scaling caps. */
		*max_upscale = plane_cap->max_upscale_factor.nv12;
		*min_downscale = plane_cap->max_downscale_factor.nv12;

	case DRM_FORMAT_XRGB16161616F:
	case DRM_FORMAT_ARGB16161616F:
	case DRM_FORMAT_XBGR16161616F:
	case DRM_FORMAT_ABGR16161616F:
		/* 16-bit float formats use the fp16 scaling caps. */
		*max_upscale = plane_cap->max_upscale_factor.fp16;
		*min_downscale = plane_cap->max_downscale_factor.fp16;

		/* Everything else falls back to the argb8888 caps. */
		*max_upscale = plane_cap->max_upscale_factor.argb8888;
		*min_downscale = plane_cap->max_downscale_factor.argb8888;

	/*
	 * A factor of 1 in the plane_cap means to not allow scaling, ie. use a
	 * scaling factor of 1.0 == 1000 units.
	 */
	if (*max_upscale == 1)
		*max_upscale = 1000;

	if (*min_downscale == 1)
		*min_downscale = 1000;
/*
 * Convert a DRM plane state's src/dst rectangles into a DC
 * dc_scaling_info and validate the implied scaling factors against the
 * per-format plane caps. Returns 0 when the state is acceptable.
 */
static int fill_dc_scaling_info(const struct drm_plane_state *state,
				struct dc_scaling_info *scaling_info)
	int scale_w, scale_h, min_downscale, max_upscale;

	memset(scaling_info, 0, sizeof(*scaling_info));

	/* Source is fixed 16.16 but we ignore mantissa for now... */
	scaling_info->src_rect.x = state->src_x >> 16;
	scaling_info->src_rect.y = state->src_y >> 16;

	/*
	 * For reasons we don't (yet) fully understand a non-zero
	 * src_y coordinate into an NV12 buffer can cause a
	 * system hang. To avoid hangs (and maybe be overly cautious)
	 * let's reject both non-zero src_x and src_y.
	 *
	 * We currently know of only one use-case to reproduce a
	 * scenario with non-zero src_x and src_y for NV12, which
	 * is to gesture the YouTube Android app into full screen
	 */
		state->fb->format->format == DRM_FORMAT_NV12 &&
	    (scaling_info->src_rect.x != 0 ||
	     scaling_info->src_rect.y != 0))

	/* Degenerate (zero-sized) source rects are rejected. */
	scaling_info->src_rect.width = state->src_w >> 16;
	if (scaling_info->src_rect.width == 0)

	scaling_info->src_rect.height = state->src_h >> 16;
	if (scaling_info->src_rect.height == 0)

	scaling_info->dst_rect.x = state->crtc_x;
	scaling_info->dst_rect.y = state->crtc_y;

	if (state->crtc_w == 0)

	scaling_info->dst_rect.width = state->crtc_w;

	if (state->crtc_h == 0)

	scaling_info->dst_rect.height = state->crtc_h;

	/* DRM doesn't specify clipping on destination output. */
	scaling_info->clip_rect = scaling_info->dst_rect;

	/* Validate scaling per-format with DC plane caps */
	if (state->plane && state->plane->dev && state->fb) {
		get_min_max_dc_plane_scaling(state->plane->dev, state->fb,
					     &min_downscale, &max_upscale);
		/* Default range when there is no fb/plane to query caps from. */
		min_downscale = 250;
		max_upscale = 16000;

	/* Scale factors are in 1/1000 units: 1000 == 1.0. */
	scale_w = scaling_info->dst_rect.width * 1000 /
			scaling_info->src_rect.width;

	if (scale_w < min_downscale || scale_w > max_upscale)

	scale_h = scaling_info->dst_rect.height * 1000 /
			scaling_info->src_rect.height;

	if (scale_h < min_downscale || scale_h > max_upscale)

	/*
	 * The "scaling_quality" can be ignored for now, quality = 0 has DC
	 * assume reasonable defaults based on the format.
	 */
/*
 * Decode GFX8-style AMDGPU_TILING flags into DC's dc_tiling_info:
 * bank geometry, tile split, array mode and pipe config.
 */
fill_gfx8_tiling_info_from_flags(union dc_tiling_info *tiling_info,
				 uint64_t tiling_flags)
	/* Fill GFX8 params */
	if (AMDGPU_TILING_GET(tiling_flags, ARRAY_MODE) == DC_ARRAY_2D_TILED_THIN1) {
		unsigned int bankw, bankh, mtaspect, tile_split, num_banks;

		bankw = AMDGPU_TILING_GET(tiling_flags, BANK_WIDTH);
		bankh = AMDGPU_TILING_GET(tiling_flags, BANK_HEIGHT);
		mtaspect = AMDGPU_TILING_GET(tiling_flags, MACRO_TILE_ASPECT);
		tile_split = AMDGPU_TILING_GET(tiling_flags, TILE_SPLIT);
		num_banks = AMDGPU_TILING_GET(tiling_flags, NUM_BANKS);

		/* XXX fix me for VI */
		tiling_info->gfx8.num_banks = num_banks;
		tiling_info->gfx8.array_mode =
				DC_ARRAY_2D_TILED_THIN1;
		tiling_info->gfx8.tile_split = tile_split;
		tiling_info->gfx8.bank_width = bankw;
		tiling_info->gfx8.bank_height = bankh;
		tiling_info->gfx8.tile_aspect = mtaspect;
		tiling_info->gfx8.tile_mode =
				DC_ADDR_SURF_MICRO_TILING_DISPLAY;
	} else if (AMDGPU_TILING_GET(tiling_flags, ARRAY_MODE)
			== DC_ARRAY_1D_TILED_THIN1) {
		tiling_info->gfx8.array_mode = DC_ARRAY_1D_TILED_THIN1;

	/* Pipe config is decoded regardless of array mode. */
	tiling_info->gfx8.pipe_config =
			AMDGPU_TILING_GET(tiling_flags, PIPE_CONFIG);
/*
 * Seed the GFX9+ tiling info from the device's gb_addr_config fields:
 * pipe, bank and shader-engine topology that DC needs for surface
 * addressing.
 */
fill_gfx9_tiling_info_from_device(const struct amdgpu_device *adev,
				  union dc_tiling_info *tiling_info)
	tiling_info->gfx9.num_pipes =
		adev->gfx.config.gb_addr_config_fields.num_pipes;
	tiling_info->gfx9.num_banks =
		adev->gfx.config.gb_addr_config_fields.num_banks;
	tiling_info->gfx9.pipe_interleave =
		adev->gfx.config.gb_addr_config_fields.pipe_interleave_size;
	tiling_info->gfx9.num_shader_engines =
		adev->gfx.config.gb_addr_config_fields.num_se;
	tiling_info->gfx9.max_compressed_frags =
		adev->gfx.config.gb_addr_config_fields.max_compress_frags;
	tiling_info->gfx9.num_rb_per_se =
		adev->gfx.config.gb_addr_config_fields.num_rb_per_se;
	tiling_info->gfx9.shaderEnable = 1;
	/* num_pkrs is only programmed for these ASICs. */
	if (adev->asic_type == CHIP_SIENNA_CICHLID ||
	    adev->asic_type == CHIP_NAVY_FLOUNDER ||
	    adev->asic_type == CHIP_DIMGREY_CAVEFISH ||
	    adev->asic_type == CHIP_BEIGE_GOBY ||
	    adev->asic_type == CHIP_YELLOW_CARP ||
	    adev->asic_type == CHIP_VANGOGH)
		tiling_info->gfx9.num_pkrs = adev->gfx.config.gb_addr_config_fields.num_pkrs;
4723 validate_dcc(struct amdgpu_device *adev,
4724 const enum surface_pixel_format format,
4725 const enum dc_rotation_angle rotation,
4726 const union dc_tiling_info *tiling_info,
4727 const struct dc_plane_dcc_param *dcc,
4728 const struct dc_plane_address *address,
4729 const struct plane_size *plane_size)
4731 struct dc *dc = adev->dm.dc;
4732 struct dc_dcc_surface_param input;
4733 struct dc_surface_dcc_cap output;
4735 memset(&input, 0, sizeof(input));
4736 memset(&output, 0, sizeof(output));
4741 if (format >= SURFACE_PIXEL_FORMAT_VIDEO_BEGIN ||
4742 !dc->cap_funcs.get_dcc_compression_cap)
4745 input.format = format;
4746 input.surface_size.width = plane_size->surface_size.width;
4747 input.surface_size.height = plane_size->surface_size.height;
4748 input.swizzle_mode = tiling_info->gfx9.swizzle;
4750 if (rotation == ROTATION_ANGLE_0 || rotation == ROTATION_ANGLE_180)
4751 input.scan = SCAN_DIRECTION_HORIZONTAL;
4752 else if (rotation == ROTATION_ANGLE_90 || rotation == ROTATION_ANGLE_270)
4753 input.scan = SCAN_DIRECTION_VERTICAL;
4755 if (!dc->cap_funcs.get_dcc_compression_cap(dc, &input, &output))
4758 if (!output.capable)
4761 if (dcc->independent_64b_blks == 0 &&
4762 output.grph.rgb.independent_64b_blks != 0)
4769 modifier_has_dcc(uint64_t modifier)
4771 return IS_AMD_FMT_MOD(modifier) && AMD_FMT_MOD_GET(DCC, modifier);
4775 modifier_gfx9_swizzle_mode(uint64_t modifier)
4777 if (modifier == DRM_FORMAT_MOD_LINEAR)
4780 return AMD_FMT_MOD_GET(TILE, modifier);
4783 static const struct drm_format_info *
4784 amd_get_format_info(const struct drm_mode_fb_cmd2 *cmd)
4786 return amdgpu_lookup_format_info(cmd->pixel_format, cmd->modifier[0]);
4790 fill_gfx9_tiling_info_from_modifier(const struct amdgpu_device *adev,
4791 union dc_tiling_info *tiling_info,
4794 unsigned int mod_bank_xor_bits = AMD_FMT_MOD_GET(BANK_XOR_BITS, modifier);
4795 unsigned int mod_pipe_xor_bits = AMD_FMT_MOD_GET(PIPE_XOR_BITS, modifier);
4796 unsigned int pkrs_log2 = AMD_FMT_MOD_GET(PACKERS, modifier);
4797 unsigned int pipes_log2 = min(4u, mod_pipe_xor_bits);
4799 fill_gfx9_tiling_info_from_device(adev, tiling_info);
4801 if (!IS_AMD_FMT_MOD(modifier))
4804 tiling_info->gfx9.num_pipes = 1u << pipes_log2;
4805 tiling_info->gfx9.num_shader_engines = 1u << (mod_pipe_xor_bits - pipes_log2);
4807 if (adev->family >= AMDGPU_FAMILY_NV) {
4808 tiling_info->gfx9.num_pkrs = 1u << pkrs_log2;
4810 tiling_info->gfx9.num_banks = 1u << mod_bank_xor_bits;
4812 /* for DCC we know it isn't rb aligned, so rb_per_se doesn't matter. */
/*
 * Micro-tile ordering encoded in the low two bits of a GFX9+ swizzle mode
 * (callers mask with "& 3"): Z (depth), S (standard), D (display), R (rotated).
 */
enum dm_micro_swizzle {
	MICRO_SWIZZLE_Z = 0,
	MICRO_SWIZZLE_S = 1,
	MICRO_SWIZZLE_D = 2,
	MICRO_SWIZZLE_R = 3
};
4823 static bool dm_plane_format_mod_supported(struct drm_plane *plane,
4827 struct amdgpu_device *adev = drm_to_adev(plane->dev);
4828 const struct drm_format_info *info = drm_format_info(format);
4831 enum dm_micro_swizzle microtile = modifier_gfx9_swizzle_mode(modifier) & 3;
4837 * We always have to allow these modifiers:
4838 * 1. Core DRM checks for LINEAR support if userspace does not provide modifiers.
4839 * 2. Not passing any modifiers is the same as explicitly passing INVALID.
4841 if (modifier == DRM_FORMAT_MOD_LINEAR ||
4842 modifier == DRM_FORMAT_MOD_INVALID) {
4846 /* Check that the modifier is on the list of the plane's supported modifiers. */
4847 for (i = 0; i < plane->modifier_count; i++) {
4848 if (modifier == plane->modifiers[i])
4851 if (i == plane->modifier_count)
4855 * For D swizzle the canonical modifier depends on the bpp, so check
4858 if (AMD_FMT_MOD_GET(TILE_VERSION, modifier) == AMD_FMT_MOD_TILE_VER_GFX9 &&
4859 adev->family >= AMDGPU_FAMILY_NV) {
4860 if (microtile == MICRO_SWIZZLE_D && info->cpp[0] == 4)
4864 if (adev->family >= AMDGPU_FAMILY_RV && microtile == MICRO_SWIZZLE_D &&
4868 if (modifier_has_dcc(modifier)) {
4869 /* Per radeonsi comments 16/64 bpp are more complicated. */
4870 if (info->cpp[0] != 4)
4872 /* We support multi-planar formats, but not when combined with
4873 * additional DCC metadata planes. */
4874 if (info->num_planes > 1)
4882 add_modifier(uint64_t **mods, uint64_t *size, uint64_t *cap, uint64_t mod)
4887 if (*cap - *size < 1) {
4888 uint64_t new_cap = *cap * 2;
4889 uint64_t *new_mods = kmalloc(new_cap * sizeof(uint64_t), GFP_KERNEL);
4897 memcpy(new_mods, *mods, sizeof(uint64_t) * *size);
4903 (*mods)[*size] = mod;
4908 add_gfx9_modifiers(const struct amdgpu_device *adev,
4909 uint64_t **mods, uint64_t *size, uint64_t *capacity)
4911 int pipes = ilog2(adev->gfx.config.gb_addr_config_fields.num_pipes);
4912 int pipe_xor_bits = min(8, pipes +
4913 ilog2(adev->gfx.config.gb_addr_config_fields.num_se));
4914 int bank_xor_bits = min(8 - pipe_xor_bits,
4915 ilog2(adev->gfx.config.gb_addr_config_fields.num_banks));
4916 int rb = ilog2(adev->gfx.config.gb_addr_config_fields.num_se) +
4917 ilog2(adev->gfx.config.gb_addr_config_fields.num_rb_per_se);
4920 if (adev->family == AMDGPU_FAMILY_RV) {
4921 /* Raven2 and later */
4922 bool has_constant_encode = adev->asic_type > CHIP_RAVEN || adev->external_rev_id >= 0x81;
4925 * No _D DCC swizzles yet because we only allow 32bpp, which
4926 * doesn't support _D on DCN
4929 if (has_constant_encode) {
4930 add_modifier(mods, size, capacity, AMD_FMT_MOD |
4931 AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S_X) |
4932 AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9) |
4933 AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
4934 AMD_FMT_MOD_SET(BANK_XOR_BITS, bank_xor_bits) |
4935 AMD_FMT_MOD_SET(DCC, 1) |
4936 AMD_FMT_MOD_SET(DCC_INDEPENDENT_64B, 1) |
4937 AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_64B) |
4938 AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 1));
4941 add_modifier(mods, size, capacity, AMD_FMT_MOD |
4942 AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S_X) |
4943 AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9) |
4944 AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
4945 AMD_FMT_MOD_SET(BANK_XOR_BITS, bank_xor_bits) |
4946 AMD_FMT_MOD_SET(DCC, 1) |
4947 AMD_FMT_MOD_SET(DCC_INDEPENDENT_64B, 1) |
4948 AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_64B) |
4949 AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 0));
4951 if (has_constant_encode) {
4952 add_modifier(mods, size, capacity, AMD_FMT_MOD |
4953 AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S_X) |
4954 AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9) |
4955 AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
4956 AMD_FMT_MOD_SET(BANK_XOR_BITS, bank_xor_bits) |
4957 AMD_FMT_MOD_SET(DCC, 1) |
4958 AMD_FMT_MOD_SET(DCC_RETILE, 1) |
4959 AMD_FMT_MOD_SET(DCC_INDEPENDENT_64B, 1) |
4960 AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_64B) |
4962 AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 1) |
4963 AMD_FMT_MOD_SET(RB, rb) |
4964 AMD_FMT_MOD_SET(PIPE, pipes));
4967 add_modifier(mods, size, capacity, AMD_FMT_MOD |
4968 AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S_X) |
4969 AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9) |
4970 AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
4971 AMD_FMT_MOD_SET(BANK_XOR_BITS, bank_xor_bits) |
4972 AMD_FMT_MOD_SET(DCC, 1) |
4973 AMD_FMT_MOD_SET(DCC_RETILE, 1) |
4974 AMD_FMT_MOD_SET(DCC_INDEPENDENT_64B, 1) |
4975 AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_64B) |
4976 AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 0) |
4977 AMD_FMT_MOD_SET(RB, rb) |
4978 AMD_FMT_MOD_SET(PIPE, pipes));
4982 * Only supported for 64bpp on Raven, will be filtered on format in
4983 * dm_plane_format_mod_supported.
4985 add_modifier(mods, size, capacity, AMD_FMT_MOD |
4986 AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_D_X) |
4987 AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9) |
4988 AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
4989 AMD_FMT_MOD_SET(BANK_XOR_BITS, bank_xor_bits));
4991 if (adev->family == AMDGPU_FAMILY_RV) {
4992 add_modifier(mods, size, capacity, AMD_FMT_MOD |
4993 AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S_X) |
4994 AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9) |
4995 AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
4996 AMD_FMT_MOD_SET(BANK_XOR_BITS, bank_xor_bits));
5000 * Only supported for 64bpp on Raven, will be filtered on format in
5001 * dm_plane_format_mod_supported.
5003 add_modifier(mods, size, capacity, AMD_FMT_MOD |
5004 AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_D) |
5005 AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9));
5007 if (adev->family == AMDGPU_FAMILY_RV) {
5008 add_modifier(mods, size, capacity, AMD_FMT_MOD |
5009 AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S) |
5010 AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9));
5015 add_gfx10_1_modifiers(const struct amdgpu_device *adev,
5016 uint64_t **mods, uint64_t *size, uint64_t *capacity)
5018 int pipe_xor_bits = ilog2(adev->gfx.config.gb_addr_config_fields.num_pipes);
5020 add_modifier(mods, size, capacity, AMD_FMT_MOD |
5021 AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_R_X) |
5022 AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10) |
5023 AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
5024 AMD_FMT_MOD_SET(DCC, 1) |
5025 AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 1) |
5026 AMD_FMT_MOD_SET(DCC_INDEPENDENT_64B, 1) |
5027 AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_64B));
5029 add_modifier(mods, size, capacity, AMD_FMT_MOD |
5030 AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_R_X) |
5031 AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10) |
5032 AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
5033 AMD_FMT_MOD_SET(DCC, 1) |
5034 AMD_FMT_MOD_SET(DCC_RETILE, 1) |
5035 AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 1) |
5036 AMD_FMT_MOD_SET(DCC_INDEPENDENT_64B, 1) |
5037 AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_64B));
5039 add_modifier(mods, size, capacity, AMD_FMT_MOD |
5040 AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_R_X) |
5041 AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10) |
5042 AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits));
5044 add_modifier(mods, size, capacity, AMD_FMT_MOD |
5045 AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S_X) |
5046 AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10) |
5047 AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits));
5050 /* Only supported for 64bpp, will be filtered in dm_plane_format_mod_supported */
5051 add_modifier(mods, size, capacity, AMD_FMT_MOD |
5052 AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_D) |
5053 AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9));
5055 add_modifier(mods, size, capacity, AMD_FMT_MOD |
5056 AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S) |
5057 AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9));
5061 add_gfx10_3_modifiers(const struct amdgpu_device *adev,
5062 uint64_t **mods, uint64_t *size, uint64_t *capacity)
5064 int pipe_xor_bits = ilog2(adev->gfx.config.gb_addr_config_fields.num_pipes);
5065 int pkrs = ilog2(adev->gfx.config.gb_addr_config_fields.num_pkrs);
5067 add_modifier(mods, size, capacity, AMD_FMT_MOD |
5068 AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_R_X) |
5069 AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10_RBPLUS) |
5070 AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
5071 AMD_FMT_MOD_SET(PACKERS, pkrs) |
5072 AMD_FMT_MOD_SET(DCC, 1) |
5073 AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 1) |
5074 AMD_FMT_MOD_SET(DCC_INDEPENDENT_64B, 1) |
5075 AMD_FMT_MOD_SET(DCC_INDEPENDENT_128B, 1) |
5076 AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_64B));
5078 add_modifier(mods, size, capacity, AMD_FMT_MOD |
5079 AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_R_X) |
5080 AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10_RBPLUS) |
5081 AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
5082 AMD_FMT_MOD_SET(PACKERS, pkrs) |
5083 AMD_FMT_MOD_SET(DCC, 1) |
5084 AMD_FMT_MOD_SET(DCC_RETILE, 1) |
5085 AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 1) |
5086 AMD_FMT_MOD_SET(DCC_INDEPENDENT_64B, 1) |
5087 AMD_FMT_MOD_SET(DCC_INDEPENDENT_128B, 1) |
5088 AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_64B));
5090 add_modifier(mods, size, capacity, AMD_FMT_MOD |
5091 AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_R_X) |
5092 AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10_RBPLUS) |
5093 AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
5094 AMD_FMT_MOD_SET(PACKERS, pkrs));
5096 add_modifier(mods, size, capacity, AMD_FMT_MOD |
5097 AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S_X) |
5098 AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10_RBPLUS) |
5099 AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
5100 AMD_FMT_MOD_SET(PACKERS, pkrs));
5102 /* Only supported for 64bpp, will be filtered in dm_plane_format_mod_supported */
5103 add_modifier(mods, size, capacity, AMD_FMT_MOD |
5104 AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_D) |
5105 AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9));
5107 add_modifier(mods, size, capacity, AMD_FMT_MOD |
5108 AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S) |
5109 AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9));
5113 get_plane_modifiers(const struct amdgpu_device *adev, unsigned int plane_type, uint64_t **mods)
5115 uint64_t size = 0, capacity = 128;
5118 /* We have not hooked up any pre-GFX9 modifiers. */
5119 if (adev->family < AMDGPU_FAMILY_AI)
5122 *mods = kmalloc(capacity * sizeof(uint64_t), GFP_KERNEL);
5124 if (plane_type == DRM_PLANE_TYPE_CURSOR) {
5125 add_modifier(mods, &size, &capacity, DRM_FORMAT_MOD_LINEAR);
5126 add_modifier(mods, &size, &capacity, DRM_FORMAT_MOD_INVALID);
5127 return *mods ? 0 : -ENOMEM;
5130 switch (adev->family) {
5131 case AMDGPU_FAMILY_AI:
5132 case AMDGPU_FAMILY_RV:
5133 add_gfx9_modifiers(adev, mods, &size, &capacity);
5135 case AMDGPU_FAMILY_NV:
5136 case AMDGPU_FAMILY_VGH:
5137 case AMDGPU_FAMILY_YC:
5138 if (adev->asic_type >= CHIP_SIENNA_CICHLID)
5139 add_gfx10_3_modifiers(adev, mods, &size, &capacity);
5141 add_gfx10_1_modifiers(adev, mods, &size, &capacity);
5145 add_modifier(mods, &size, &capacity, DRM_FORMAT_MOD_LINEAR);
5147 /* INVALID marks the end of the list. */
5148 add_modifier(mods, &size, &capacity, DRM_FORMAT_MOD_INVALID);
5157 fill_gfx9_plane_attributes_from_modifiers(struct amdgpu_device *adev,
5158 const struct amdgpu_framebuffer *afb,
5159 const enum surface_pixel_format format,
5160 const enum dc_rotation_angle rotation,
5161 const struct plane_size *plane_size,
5162 union dc_tiling_info *tiling_info,
5163 struct dc_plane_dcc_param *dcc,
5164 struct dc_plane_address *address,
5165 const bool force_disable_dcc)
5167 const uint64_t modifier = afb->base.modifier;
5170 fill_gfx9_tiling_info_from_modifier(adev, tiling_info, modifier);
5171 tiling_info->gfx9.swizzle = modifier_gfx9_swizzle_mode(modifier);
5173 if (modifier_has_dcc(modifier) && !force_disable_dcc) {
5174 uint64_t dcc_address = afb->address + afb->base.offsets[1];
5177 dcc->meta_pitch = afb->base.pitches[1];
5178 dcc->independent_64b_blks = AMD_FMT_MOD_GET(DCC_INDEPENDENT_64B, modifier);
5180 address->grph.meta_addr.low_part = lower_32_bits(dcc_address);
5181 address->grph.meta_addr.high_part = upper_32_bits(dcc_address);
5184 ret = validate_dcc(adev, format, rotation, tiling_info, dcc, address, plane_size);
5186 drm_dbg_kms(adev_to_drm(adev), "validate_dcc: returned error: %d\n", ret);
5192 fill_plane_buffer_attributes(struct amdgpu_device *adev,
5193 const struct amdgpu_framebuffer *afb,
5194 const enum surface_pixel_format format,
5195 const enum dc_rotation_angle rotation,
5196 const uint64_t tiling_flags,
5197 union dc_tiling_info *tiling_info,
5198 struct plane_size *plane_size,
5199 struct dc_plane_dcc_param *dcc,
5200 struct dc_plane_address *address,
5202 bool force_disable_dcc)
5204 const struct drm_framebuffer *fb = &afb->base;
5207 memset(tiling_info, 0, sizeof(*tiling_info));
5208 memset(plane_size, 0, sizeof(*plane_size));
5209 memset(dcc, 0, sizeof(*dcc));
5210 memset(address, 0, sizeof(*address));
5212 address->tmz_surface = tmz_surface;
5214 if (format < SURFACE_PIXEL_FORMAT_VIDEO_BEGIN) {
5215 uint64_t addr = afb->address + fb->offsets[0];
5217 plane_size->surface_size.x = 0;
5218 plane_size->surface_size.y = 0;
5219 plane_size->surface_size.width = fb->width;
5220 plane_size->surface_size.height = fb->height;
5221 plane_size->surface_pitch =
5222 fb->pitches[0] / fb->format->cpp[0];
5224 address->type = PLN_ADDR_TYPE_GRAPHICS;
5225 address->grph.addr.low_part = lower_32_bits(addr);
5226 address->grph.addr.high_part = upper_32_bits(addr);
5227 } else if (format < SURFACE_PIXEL_FORMAT_INVALID) {
5228 uint64_t luma_addr = afb->address + fb->offsets[0];
5229 uint64_t chroma_addr = afb->address + fb->offsets[1];
5231 plane_size->surface_size.x = 0;
5232 plane_size->surface_size.y = 0;
5233 plane_size->surface_size.width = fb->width;
5234 plane_size->surface_size.height = fb->height;
5235 plane_size->surface_pitch =
5236 fb->pitches[0] / fb->format->cpp[0];
5238 plane_size->chroma_size.x = 0;
5239 plane_size->chroma_size.y = 0;
5240 /* TODO: set these based on surface format */
5241 plane_size->chroma_size.width = fb->width / 2;
5242 plane_size->chroma_size.height = fb->height / 2;
5244 plane_size->chroma_pitch =
5245 fb->pitches[1] / fb->format->cpp[1];
5247 address->type = PLN_ADDR_TYPE_VIDEO_PROGRESSIVE;
5248 address->video_progressive.luma_addr.low_part =
5249 lower_32_bits(luma_addr);
5250 address->video_progressive.luma_addr.high_part =
5251 upper_32_bits(luma_addr);
5252 address->video_progressive.chroma_addr.low_part =
5253 lower_32_bits(chroma_addr);
5254 address->video_progressive.chroma_addr.high_part =
5255 upper_32_bits(chroma_addr);
5258 if (adev->family >= AMDGPU_FAMILY_AI) {
5259 ret = fill_gfx9_plane_attributes_from_modifiers(adev, afb, format,
5260 rotation, plane_size,
5267 fill_gfx8_tiling_info_from_flags(tiling_info, tiling_flags);
5274 fill_blending_from_plane_state(const struct drm_plane_state *plane_state,
5275 bool *per_pixel_alpha, bool *global_alpha,
5276 int *global_alpha_value)
5278 *per_pixel_alpha = false;
5279 *global_alpha = false;
5280 *global_alpha_value = 0xff;
5282 if (plane_state->plane->type != DRM_PLANE_TYPE_OVERLAY)
5285 if (plane_state->pixel_blend_mode == DRM_MODE_BLEND_PREMULTI) {
5286 static const uint32_t alpha_formats[] = {
5287 DRM_FORMAT_ARGB8888,
5288 DRM_FORMAT_RGBA8888,
5289 DRM_FORMAT_ABGR8888,
5291 uint32_t format = plane_state->fb->format->format;
5294 for (i = 0; i < ARRAY_SIZE(alpha_formats); ++i) {
5295 if (format == alpha_formats[i]) {
5296 *per_pixel_alpha = true;
5302 if (plane_state->alpha < 0xffff) {
5303 *global_alpha = true;
5304 *global_alpha_value = plane_state->alpha >> 8;
5309 fill_plane_color_attributes(const struct drm_plane_state *plane_state,
5310 const enum surface_pixel_format format,
5311 enum dc_color_space *color_space)
5315 *color_space = COLOR_SPACE_SRGB;
5317 /* DRM color properties only affect non-RGB formats. */
5318 if (format < SURFACE_PIXEL_FORMAT_VIDEO_BEGIN)
5321 full_range = (plane_state->color_range == DRM_COLOR_YCBCR_FULL_RANGE);
5323 switch (plane_state->color_encoding) {
5324 case DRM_COLOR_YCBCR_BT601:
5326 *color_space = COLOR_SPACE_YCBCR601;
5328 *color_space = COLOR_SPACE_YCBCR601_LIMITED;
5331 case DRM_COLOR_YCBCR_BT709:
5333 *color_space = COLOR_SPACE_YCBCR709;
5335 *color_space = COLOR_SPACE_YCBCR709_LIMITED;
5338 case DRM_COLOR_YCBCR_BT2020:
5340 *color_space = COLOR_SPACE_2020_YCBCR;
5353 fill_dc_plane_info_and_addr(struct amdgpu_device *adev,
5354 const struct drm_plane_state *plane_state,
5355 const uint64_t tiling_flags,
5356 struct dc_plane_info *plane_info,
5357 struct dc_plane_address *address,
5359 bool force_disable_dcc)
5361 const struct drm_framebuffer *fb = plane_state->fb;
5362 const struct amdgpu_framebuffer *afb =
5363 to_amdgpu_framebuffer(plane_state->fb);
5366 memset(plane_info, 0, sizeof(*plane_info));
5368 switch (fb->format->format) {
5370 plane_info->format =
5371 SURFACE_PIXEL_FORMAT_GRPH_PALETA_256_COLORS;
5373 case DRM_FORMAT_RGB565:
5374 plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_RGB565;
5376 case DRM_FORMAT_XRGB8888:
5377 case DRM_FORMAT_ARGB8888:
5378 plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ARGB8888;
5380 case DRM_FORMAT_XRGB2101010:
5381 case DRM_FORMAT_ARGB2101010:
5382 plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ARGB2101010;
5384 case DRM_FORMAT_XBGR2101010:
5385 case DRM_FORMAT_ABGR2101010:
5386 plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ABGR2101010;
5388 case DRM_FORMAT_XBGR8888:
5389 case DRM_FORMAT_ABGR8888:
5390 plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ABGR8888;
5392 case DRM_FORMAT_NV21:
5393 plane_info->format = SURFACE_PIXEL_FORMAT_VIDEO_420_YCbCr;
5395 case DRM_FORMAT_NV12:
5396 plane_info->format = SURFACE_PIXEL_FORMAT_VIDEO_420_YCrCb;
5398 case DRM_FORMAT_P010:
5399 plane_info->format = SURFACE_PIXEL_FORMAT_VIDEO_420_10bpc_YCrCb;
5401 case DRM_FORMAT_XRGB16161616F:
5402 case DRM_FORMAT_ARGB16161616F:
5403 plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ARGB16161616F;
5405 case DRM_FORMAT_XBGR16161616F:
5406 case DRM_FORMAT_ABGR16161616F:
5407 plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ABGR16161616F;
5409 case DRM_FORMAT_XRGB16161616:
5410 case DRM_FORMAT_ARGB16161616:
5411 plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ARGB16161616;
5413 case DRM_FORMAT_XBGR16161616:
5414 case DRM_FORMAT_ABGR16161616:
5415 plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ABGR16161616;
5419 "Unsupported screen format %p4cc\n",
5420 &fb->format->format);
5424 switch (plane_state->rotation & DRM_MODE_ROTATE_MASK) {
5425 case DRM_MODE_ROTATE_0:
5426 plane_info->rotation = ROTATION_ANGLE_0;
5428 case DRM_MODE_ROTATE_90:
5429 plane_info->rotation = ROTATION_ANGLE_90;
5431 case DRM_MODE_ROTATE_180:
5432 plane_info->rotation = ROTATION_ANGLE_180;
5434 case DRM_MODE_ROTATE_270:
5435 plane_info->rotation = ROTATION_ANGLE_270;
5438 plane_info->rotation = ROTATION_ANGLE_0;
5442 plane_info->visible = true;
5443 plane_info->stereo_format = PLANE_STEREO_FORMAT_NONE;
5445 plane_info->layer_index = 0;
5447 ret = fill_plane_color_attributes(plane_state, plane_info->format,
5448 &plane_info->color_space);
5452 ret = fill_plane_buffer_attributes(adev, afb, plane_info->format,
5453 plane_info->rotation, tiling_flags,
5454 &plane_info->tiling_info,
5455 &plane_info->plane_size,
5456 &plane_info->dcc, address, tmz_surface,
5461 fill_blending_from_plane_state(
5462 plane_state, &plane_info->per_pixel_alpha,
5463 &plane_info->global_alpha, &plane_info->global_alpha_value);
5468 static int fill_dc_plane_attributes(struct amdgpu_device *adev,
5469 struct dc_plane_state *dc_plane_state,
5470 struct drm_plane_state *plane_state,
5471 struct drm_crtc_state *crtc_state)
5473 struct dm_crtc_state *dm_crtc_state = to_dm_crtc_state(crtc_state);
5474 struct amdgpu_framebuffer *afb = (struct amdgpu_framebuffer *)plane_state->fb;
5475 struct dc_scaling_info scaling_info;
5476 struct dc_plane_info plane_info;
5478 bool force_disable_dcc = false;
5480 ret = fill_dc_scaling_info(plane_state, &scaling_info);
5484 dc_plane_state->src_rect = scaling_info.src_rect;
5485 dc_plane_state->dst_rect = scaling_info.dst_rect;
5486 dc_plane_state->clip_rect = scaling_info.clip_rect;
5487 dc_plane_state->scaling_quality = scaling_info.scaling_quality;
5489 force_disable_dcc = adev->asic_type == CHIP_RAVEN && adev->in_suspend;
5490 ret = fill_dc_plane_info_and_addr(adev, plane_state,
5493 &dc_plane_state->address,
5499 dc_plane_state->format = plane_info.format;
5500 dc_plane_state->color_space = plane_info.color_space;
5501 dc_plane_state->format = plane_info.format;
5502 dc_plane_state->plane_size = plane_info.plane_size;
5503 dc_plane_state->rotation = plane_info.rotation;
5504 dc_plane_state->horizontal_mirror = plane_info.horizontal_mirror;
5505 dc_plane_state->stereo_format = plane_info.stereo_format;
5506 dc_plane_state->tiling_info = plane_info.tiling_info;
5507 dc_plane_state->visible = plane_info.visible;
5508 dc_plane_state->per_pixel_alpha = plane_info.per_pixel_alpha;
5509 dc_plane_state->global_alpha = plane_info.global_alpha;
5510 dc_plane_state->global_alpha_value = plane_info.global_alpha_value;
5511 dc_plane_state->dcc = plane_info.dcc;
5512 dc_plane_state->layer_index = plane_info.layer_index; // Always returns 0
5513 dc_plane_state->flip_int_enabled = true;
5516 * Always set input transfer function, since plane state is refreshed
5519 ret = amdgpu_dm_update_plane_color_mgmt(dm_crtc_state, dc_plane_state);
5526 static void update_stream_scaling_settings(const struct drm_display_mode *mode,
5527 const struct dm_connector_state *dm_state,
5528 struct dc_stream_state *stream)
5530 enum amdgpu_rmx_type rmx_type;
5532 struct rect src = { 0 }; /* viewport in composition space*/
5533 struct rect dst = { 0 }; /* stream addressable area */
5535 /* no mode. nothing to be done */
5539 /* Full screen scaling by default */
5540 src.width = mode->hdisplay;
5541 src.height = mode->vdisplay;
5542 dst.width = stream->timing.h_addressable;
5543 dst.height = stream->timing.v_addressable;
5546 rmx_type = dm_state->scaling;
5547 if (rmx_type == RMX_ASPECT || rmx_type == RMX_OFF) {
5548 if (src.width * dst.height <
5549 src.height * dst.width) {
5550 /* height needs less upscaling/more downscaling */
5551 dst.width = src.width *
5552 dst.height / src.height;
5554 /* width needs less upscaling/more downscaling */
5555 dst.height = src.height *
5556 dst.width / src.width;
5558 } else if (rmx_type == RMX_CENTER) {
5562 dst.x = (stream->timing.h_addressable - dst.width) / 2;
5563 dst.y = (stream->timing.v_addressable - dst.height) / 2;
5565 if (dm_state->underscan_enable) {
5566 dst.x += dm_state->underscan_hborder / 2;
5567 dst.y += dm_state->underscan_vborder / 2;
5568 dst.width -= dm_state->underscan_hborder;
5569 dst.height -= dm_state->underscan_vborder;
5576 DRM_DEBUG_KMS("Destination Rectangle x:%d y:%d width:%d height:%d\n",
5577 dst.x, dst.y, dst.width, dst.height);
5581 static enum dc_color_depth
5582 convert_color_depth_from_display_info(const struct drm_connector *connector,
5583 bool is_y420, int requested_bpc)
5590 /* Cap display bpc based on HDMI 2.0 HF-VSDB */
5591 if (connector->display_info.hdmi.y420_dc_modes & DRM_EDID_YCBCR420_DC_48)
5593 else if (connector->display_info.hdmi.y420_dc_modes & DRM_EDID_YCBCR420_DC_36)
5595 else if (connector->display_info.hdmi.y420_dc_modes & DRM_EDID_YCBCR420_DC_30)
5598 bpc = (uint8_t)connector->display_info.bpc;
5599 /* Assume 8 bpc by default if no bpc is specified. */
5600 bpc = bpc ? bpc : 8;
5603 if (requested_bpc > 0) {
5605 * Cap display bpc based on the user requested value.
5607 * The value for state->max_bpc may not correctly updated
5608 * depending on when the connector gets added to the state
5609 * or if this was called outside of atomic check, so it
5610 * can't be used directly.
5612 bpc = min_t(u8, bpc, requested_bpc);
5614 /* Round down to the nearest even number. */
5615 bpc = bpc - (bpc & 1);
5621 * Temporary Work around, DRM doesn't parse color depth for
5622 * EDID revision before 1.4
5623 * TODO: Fix edid parsing
5625 return COLOR_DEPTH_888;
5627 return COLOR_DEPTH_666;
5629 return COLOR_DEPTH_888;
5631 return COLOR_DEPTH_101010;
5633 return COLOR_DEPTH_121212;
5635 return COLOR_DEPTH_141414;
5637 return COLOR_DEPTH_161616;
5639 return COLOR_DEPTH_UNDEFINED;
5643 static enum dc_aspect_ratio
5644 get_aspect_ratio(const struct drm_display_mode *mode_in)
5646 /* 1-1 mapping, since both enums follow the HDMI spec. */
5647 return (enum dc_aspect_ratio) mode_in->picture_aspect_ratio;
5650 static enum dc_color_space
5651 get_output_color_space(const struct dc_crtc_timing *dc_crtc_timing)
5653 enum dc_color_space color_space = COLOR_SPACE_SRGB;
5655 switch (dc_crtc_timing->pixel_encoding) {
5656 case PIXEL_ENCODING_YCBCR422:
5657 case PIXEL_ENCODING_YCBCR444:
5658 case PIXEL_ENCODING_YCBCR420:
5661 * 27030khz is the separation point between HDTV and SDTV
5662 * according to HDMI spec, we use YCbCr709 and YCbCr601
5665 if (dc_crtc_timing->pix_clk_100hz > 270300) {
5666 if (dc_crtc_timing->flags.Y_ONLY)
5668 COLOR_SPACE_YCBCR709_LIMITED;
5670 color_space = COLOR_SPACE_YCBCR709;
5672 if (dc_crtc_timing->flags.Y_ONLY)
5674 COLOR_SPACE_YCBCR601_LIMITED;
5676 color_space = COLOR_SPACE_YCBCR601;
5681 case PIXEL_ENCODING_RGB:
5682 color_space = COLOR_SPACE_SRGB;
5693 static bool adjust_colour_depth_from_display_info(
5694 struct dc_crtc_timing *timing_out,
5695 const struct drm_display_info *info)
5697 enum dc_color_depth depth = timing_out->display_color_depth;
5700 normalized_clk = timing_out->pix_clk_100hz / 10;
5701 /* YCbCr 4:2:0 requires additional adjustment of 1/2 */
5702 if (timing_out->pixel_encoding == PIXEL_ENCODING_YCBCR420)
5703 normalized_clk /= 2;
5704 /* Adjusting pix clock following on HDMI spec based on colour depth */
5706 case COLOR_DEPTH_888:
5708 case COLOR_DEPTH_101010:
5709 normalized_clk = (normalized_clk * 30) / 24;
5711 case COLOR_DEPTH_121212:
5712 normalized_clk = (normalized_clk * 36) / 24;
5714 case COLOR_DEPTH_161616:
5715 normalized_clk = (normalized_clk * 48) / 24;
5718 /* The above depths are the only ones valid for HDMI. */
5721 if (normalized_clk <= info->max_tmds_clock) {
5722 timing_out->display_color_depth = depth;
5725 } while (--depth > COLOR_DEPTH_666);
5729 static void fill_stream_properties_from_drm_display_mode(
5730 struct dc_stream_state *stream,
5731 const struct drm_display_mode *mode_in,
5732 const struct drm_connector *connector,
5733 const struct drm_connector_state *connector_state,
5734 const struct dc_stream_state *old_stream,
5737 struct dc_crtc_timing *timing_out = &stream->timing;
5738 const struct drm_display_info *info = &connector->display_info;
5739 struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
5740 struct hdmi_vendor_infoframe hv_frame;
5741 struct hdmi_avi_infoframe avi_frame;
5743 memset(&hv_frame, 0, sizeof(hv_frame));
5744 memset(&avi_frame, 0, sizeof(avi_frame));
5746 timing_out->h_border_left = 0;
5747 timing_out->h_border_right = 0;
5748 timing_out->v_border_top = 0;
5749 timing_out->v_border_bottom = 0;
5750 /* TODO: un-hardcode */
5751 if (drm_mode_is_420_only(info, mode_in)
5752 && stream->signal == SIGNAL_TYPE_HDMI_TYPE_A)
5753 timing_out->pixel_encoding = PIXEL_ENCODING_YCBCR420;
5754 else if (drm_mode_is_420_also(info, mode_in)
5755 && aconnector->force_yuv420_output)
5756 timing_out->pixel_encoding = PIXEL_ENCODING_YCBCR420;
5757 else if ((connector->display_info.color_formats & DRM_COLOR_FORMAT_YCRCB444)
5758 && stream->signal == SIGNAL_TYPE_HDMI_TYPE_A)
5759 timing_out->pixel_encoding = PIXEL_ENCODING_YCBCR444;
5761 timing_out->pixel_encoding = PIXEL_ENCODING_RGB;
5763 timing_out->timing_3d_format = TIMING_3D_FORMAT_NONE;
5764 timing_out->display_color_depth = convert_color_depth_from_display_info(
5766 (timing_out->pixel_encoding == PIXEL_ENCODING_YCBCR420),
5768 timing_out->scan_type = SCANNING_TYPE_NODATA;
5769 timing_out->hdmi_vic = 0;
5772 timing_out->vic = old_stream->timing.vic;
5773 timing_out->flags.HSYNC_POSITIVE_POLARITY = old_stream->timing.flags.HSYNC_POSITIVE_POLARITY;
5774 timing_out->flags.VSYNC_POSITIVE_POLARITY = old_stream->timing.flags.VSYNC_POSITIVE_POLARITY;
5776 timing_out->vic = drm_match_cea_mode(mode_in);
5777 if (mode_in->flags & DRM_MODE_FLAG_PHSYNC)
5778 timing_out->flags.HSYNC_POSITIVE_POLARITY = 1;
5779 if (mode_in->flags & DRM_MODE_FLAG_PVSYNC)
5780 timing_out->flags.VSYNC_POSITIVE_POLARITY = 1;
5783 if (stream->signal == SIGNAL_TYPE_HDMI_TYPE_A) {
5784 drm_hdmi_avi_infoframe_from_display_mode(&avi_frame, (struct drm_connector *)connector, mode_in);
5785 timing_out->vic = avi_frame.video_code;
5786 drm_hdmi_vendor_infoframe_from_display_mode(&hv_frame, (struct drm_connector *)connector, mode_in);
5787 timing_out->hdmi_vic = hv_frame.vic;
5790 if (is_freesync_video_mode(mode_in, aconnector)) {
5791 timing_out->h_addressable = mode_in->hdisplay;
5792 timing_out->h_total = mode_in->htotal;
5793 timing_out->h_sync_width = mode_in->hsync_end - mode_in->hsync_start;
5794 timing_out->h_front_porch = mode_in->hsync_start - mode_in->hdisplay;
5795 timing_out->v_total = mode_in->vtotal;
5796 timing_out->v_addressable = mode_in->vdisplay;
5797 timing_out->v_front_porch = mode_in->vsync_start - mode_in->vdisplay;
5798 timing_out->v_sync_width = mode_in->vsync_end - mode_in->vsync_start;
5799 timing_out->pix_clk_100hz = mode_in->clock * 10;
5801 timing_out->h_addressable = mode_in->crtc_hdisplay;
5802 timing_out->h_total = mode_in->crtc_htotal;
5803 timing_out->h_sync_width = mode_in->crtc_hsync_end - mode_in->crtc_hsync_start;
5804 timing_out->h_front_porch = mode_in->crtc_hsync_start - mode_in->crtc_hdisplay;
5805 timing_out->v_total = mode_in->crtc_vtotal;
5806 timing_out->v_addressable = mode_in->crtc_vdisplay;
5807 timing_out->v_front_porch = mode_in->crtc_vsync_start - mode_in->crtc_vdisplay;
5808 timing_out->v_sync_width = mode_in->crtc_vsync_end - mode_in->crtc_vsync_start;
5809 timing_out->pix_clk_100hz = mode_in->crtc_clock * 10;
5812 timing_out->aspect_ratio = get_aspect_ratio(mode_in);
5814 stream->output_color_space = get_output_color_space(timing_out);
5816 stream->out_transfer_func->type = TF_TYPE_PREDEFINED;
5817 stream->out_transfer_func->tf = TRANSFER_FUNCTION_SRGB;
5818 if (stream->signal == SIGNAL_TYPE_HDMI_TYPE_A) {
5819 if (!adjust_colour_depth_from_display_info(timing_out, info) &&
5820 drm_mode_is_420_also(info, mode_in) &&
5821 timing_out->pixel_encoding != PIXEL_ENCODING_YCBCR420) {
5822 timing_out->pixel_encoding = PIXEL_ENCODING_YCBCR420;
5823 adjust_colour_depth_from_display_info(timing_out, info);
/*
 * Populate a DC audio_info structure from the sink's parsed EDID caps and the
 * DRM connector's audio/video latency data.
 */
5828 static void fill_audio_info(struct audio_info *audio_info,
5829 const struct drm_connector *drm_connector,
5830 const struct dc_sink *dc_sink)
5833 int cea_revision = 0;
5834 const struct dc_edid_caps *edid_caps = &dc_sink->edid_caps;
5836 audio_info->manufacture_id = edid_caps->manufacturer_id;
5837 audio_info->product_id = edid_caps->product_id;
5839 cea_revision = drm_connector->display_info.cea_rev;
5841 strscpy(audio_info->display_name,
5842 edid_caps->display_name,
5843 AUDIO_INFO_DISPLAY_NAME_SIZE_IN_CHARS);
/* Audio short descriptors are only defined from CEA-861 revision 3 on. */
5845 if (cea_revision >= 3) {
5846 audio_info->mode_count = edid_caps->audio_mode_count;
/* Copy every advertised audio mode (format, channels, rates, sample size). */
5848 for (i = 0; i < audio_info->mode_count; ++i) {
5849 audio_info->modes[i].format_code =
5850 (enum audio_format_code)
5851 (edid_caps->audio_modes[i].format_code);
5852 audio_info->modes[i].channel_count =
5853 edid_caps->audio_modes[i].channel_count;
5854 audio_info->modes[i].sample_rates.all =
5855 edid_caps->audio_modes[i].sample_rate;
5856 audio_info->modes[i].sample_size =
5857 edid_caps->audio_modes[i].sample_size;
5861 audio_info->flags.all = edid_caps->speaker_flags;
5863 /* TODO: We only check for the progressive mode, check for interlace mode too */
5864 if (drm_connector->latency_present[0]) {
5865 audio_info->video_latency = drm_connector->video_latency[0];
5866 audio_info->audio_latency = drm_connector->audio_latency[0];
5869 /* TODO: For DP, video and audio latency should be calculated from DPCD caps */
/*
 * Copy all crtc_* (hardware/programmed) timing fields from @src_mode into
 * @dst_mode, leaving the user-visible mode fields of @dst_mode untouched.
 */
5874 copy_crtc_timing_for_drm_display_mode(const struct drm_display_mode *src_mode,
5875 struct drm_display_mode *dst_mode)
5877 dst_mode->crtc_hdisplay = src_mode->crtc_hdisplay;
5878 dst_mode->crtc_vdisplay = src_mode->crtc_vdisplay;
5879 dst_mode->crtc_clock = src_mode->crtc_clock;
5880 dst_mode->crtc_hblank_start = src_mode->crtc_hblank_start;
5881 dst_mode->crtc_hblank_end = src_mode->crtc_hblank_end;
5882 dst_mode->crtc_hsync_start = src_mode->crtc_hsync_start;
5883 dst_mode->crtc_hsync_end = src_mode->crtc_hsync_end;
5884 dst_mode->crtc_htotal = src_mode->crtc_htotal;
5885 dst_mode->crtc_hskew = src_mode->crtc_hskew;
5886 dst_mode->crtc_vblank_start = src_mode->crtc_vblank_start;
5887 dst_mode->crtc_vblank_end = src_mode->crtc_vblank_end;
5888 dst_mode->crtc_vsync_start = src_mode->crtc_vsync_start;
5889 dst_mode->crtc_vsync_end = src_mode->crtc_vsync_end;
5890 dst_mode->crtc_vtotal = src_mode->crtc_vtotal;
/*
 * Decide which crtc_* timing @drm_mode should be programmed with: when
 * scaling is enabled, or when the requested mode already matches the native
 * mode's clock/htotal/vtotal, adopt the native mode's hardware timing.
 * Otherwise the mode's own timing is left as-is (nothing to patch).
 */
5894 decide_crtc_timing_for_drm_display_mode(struct drm_display_mode *drm_mode,
5895 const struct drm_display_mode *native_mode,
/* NOTE(review): third parameter (scale flag) not visible here — confirm signature. */
5898 if (scale_enabled) {
5899 copy_crtc_timing_for_drm_display_mode(native_mode, drm_mode);
5900 } else if (native_mode->clock == drm_mode->clock &&
5901 native_mode->htotal == drm_mode->htotal &&
5902 native_mode->vtotal == drm_mode->vtotal) {
5903 copy_crtc_timing_for_drm_display_mode(native_mode, drm_mode);
5905 /* no scaling nor amdgpu inserted, no need to patch */
/*
 * Create a virtual (fake) dc_sink on the connector's link; used when no real
 * sink is attached so a stream can still be constructed.
 */
5909 static struct dc_sink *
5910 create_fake_sink(struct amdgpu_dm_connector *aconnector)
5912 struct dc_sink_init_data sink_init_data = { 0 };
5913 struct dc_sink *sink = NULL;
5914 sink_init_data.link = aconnector->dc_link;
5915 sink_init_data.sink_signal = aconnector->dc_link->connector_signal;
5917 sink = dc_sink_create(&sink_init_data);
/* dc_sink_create() failure path — presumably returns NULL here; guard elided from this view. */
5919 DRM_ERROR("Failed to create sink!\n");
/* Mark the sink virtual so DC treats it as a headless/fake target. */
5922 sink->sink_signal = SIGNAL_TYPE_VIRTUAL;
/*
 * Program the multisync CRTC-reset trigger for @stream: fire on the master
 * stream's VSYNC edge (rising for positive polarity, falling otherwise) with
 * a next-pixel delay.
 */
5927 static void set_multisync_trigger_params(
5928 struct dc_stream_state *stream)
5930 struct dc_stream_state *master = NULL;
5932 if (stream->triggered_crtc_reset.enabled) {
5933 master = stream->triggered_crtc_reset.event_source;
5934 stream->triggered_crtc_reset.event =
5935 master->timing.flags.VSYNC_POSITIVE_POLARITY ?
5936 CRTC_EVENT_VSYNC_RISING : CRTC_EVENT_VSYNC_FALLING;
5937 stream->triggered_crtc_reset.delay = TRIGGER_DELAY_NEXT_PIXEL;
/*
 * Pick the multisync master: the enabled stream with the highest refresh
 * rate becomes the CRTC-reset event source for every stream in the set.
 */
5941 static void set_master_stream(struct dc_stream_state *stream_set[],
5944 int j, highest_rfr = 0, master_stream = 0;
5946 for (j = 0; j < stream_count; j++) {
5947 if (stream_set[j] && stream_set[j]->triggered_crtc_reset.enabled) {
5948 int refresh_rate = 0;
/* refresh (Hz) = pixel clock (Hz) / (htotal * vtotal); pix_clk_100hz*100 converts to Hz. */
5950 refresh_rate = (stream_set[j]->timing.pix_clk_100hz*100)/
5951 (stream_set[j]->timing.h_total*stream_set[j]->timing.v_total);
5952 if (refresh_rate > highest_rfr) {
5953 highest_rfr = refresh_rate;
/* Second pass: point every stream's reset trigger at the chosen master. */
5958 for (j = 0; j < stream_count; j++) {
5960 stream_set[j]->triggered_crtc_reset.event_source = stream_set[master_stream];
/*
 * Enable per-frame CRTC master synchronization across all streams in
 * @context. A no-op with fewer than two streams.
 */
5964 static void dm_enable_per_frame_crtc_master_sync(struct dc_state *context)
5967 struct dc_stream_state *stream;
5969 if (context->stream_count < 2)
5971 for (i = 0; i < context->stream_count ; i++) {
5972 if (!context->streams[i])
5975 * TODO: add a function to read AMD VSDB bits and set
5976 * crtc_sync_master.multi_sync_enabled flag
5977 * For now it's set to false
5981 set_master_stream(context->streams, context->stream_count);
5983 for (i = 0; i < context->stream_count ; i++) {
5984 stream = context->streams[i];
5989 set_multisync_trigger_params(stream);
5993 #if defined(CONFIG_DRM_AMD_DC_DCN)
/*
 * Refresh the decoder DSC capabilities for @sink: DSC is assumed off until
 * the sink's DPCD DSC caps (basic + branch decoder) are parsed for DP sinks.
 */
5994 static void update_dsc_caps(struct amdgpu_dm_connector *aconnector,
5995 struct dc_sink *sink, struct dc_stream_state *stream,
5996 struct dsc_dec_dpcd_caps *dsc_caps)
5998 stream->timing.flags.DSC = 0;
5999 dsc_caps->is_dsc_supported = false;
6001 if (aconnector->dc_link && sink->sink_signal == SIGNAL_TYPE_DISPLAY_PORT) {
6002 dc_dsc_parse_dsc_dpcd(aconnector->dc_link->ctx->dc,
6003 aconnector->dc_link->dpcd_caps.dsc_caps.dsc_basic_caps.raw,
6004 aconnector->dc_link->dpcd_caps.dsc_caps.dsc_branch_decoder_caps.raw,
/*
 * Apply the SST DSC policy for @stream: compute a DSC config against the
 * link bandwidth, then honour any debugfs force-enable / slice / bpp
 * overrides from aconnector->dsc_settings.
 */
6009 static void apply_dsc_policy_for_stream(struct amdgpu_dm_connector *aconnector,
6010 struct dc_sink *sink, struct dc_stream_state *stream,
6011 struct dsc_dec_dpcd_caps *dsc_caps)
6013 struct drm_connector *drm_connector = &aconnector->base;
6014 uint32_t link_bandwidth_kbps;
6016 link_bandwidth_kbps = dc_link_bandwidth_kbps(aconnector->dc_link,
6017 dc_link_get_link_cap(aconnector->dc_link));
6018 /* Set DSC policy according to dsc_clock_en */
6019 dc_dsc_policy_set_enable_dsc_when_not_needed(
6020 aconnector->dsc_settings.dsc_force_enable == DSC_CLK_FORCE_ENABLE);
6022 if (aconnector->dc_link && sink->sink_signal == SIGNAL_TYPE_DISPLAY_PORT) {
/* DSC is only enabled when dc_dsc_compute_config() finds a workable config. */
6024 if (dc_dsc_compute_config(aconnector->dc_link->ctx->dc->res_pool->dscs[0],
6026 aconnector->dc_link->ctx->dc->debug.dsc_min_slice_height_override,
6028 link_bandwidth_kbps,
6030 &stream->timing.dsc_cfg)) {
6031 stream->timing.flags.DSC = 1;
6032 DRM_DEBUG_DRIVER("%s: [%s] DSC is selected from SST RX\n", __func__, drm_connector->name);
6036 /* Overwrite the stream flag if DSC is enabled through debugfs */
6037 if (aconnector->dsc_settings.dsc_force_enable == DSC_CLK_FORCE_ENABLE)
6038 stream->timing.flags.DSC = 1;
/* Per-setting overrides only apply once DSC is actually on for the stream. */
6040 if (stream->timing.flags.DSC && aconnector->dsc_settings.dsc_num_slices_h)
6041 stream->timing.dsc_cfg.num_slices_h = aconnector->dsc_settings.dsc_num_slices_h;
6043 if (stream->timing.flags.DSC && aconnector->dsc_settings.dsc_num_slices_v)
6044 stream->timing.dsc_cfg.num_slices_v = aconnector->dsc_settings.dsc_num_slices_v;
6046 if (stream->timing.flags.DSC && aconnector->dsc_settings.dsc_bits_per_pixel)
6047 stream->timing.dsc_cfg.bits_per_pixel = aconnector->dsc_settings.dsc_bits_per_pixel;
6052 * DOC: FreeSync Video
6054 * When a userspace application wants to play a video, the content follows a
6055 * standard format definition that usually specifies the FPS for that format.
6056 * The below list illustrates some video format and the expected FPS,
6059 * - TV/NTSC (23.976 FPS)
6062 * - TV/NTSC (29.97 FPS)
6063 * - TV/NTSC (30 FPS)
6064 * - Cinema HFR (48 FPS)
6066 * - Commonly used (60 FPS)
6067 * - Multiples of 24 (48,72,96 FPS)
6069 * The list of standard video formats is not huge and can be added to the
6070 * connector modeset list beforehand. With that, userspace can leverage
6071 * FreeSync to extend the front porch in order to attain the target refresh
6072 * rate. Such a switch will happen seamlessly, without screen blanking or
6073 * reprogramming of the output in any other way. If the userspace requests a
6074 * modesetting change compatible with FreeSync modes that only differ in the
6075 * refresh rate, DC will skip the full update and avoid blink during the
6076 * transition. For example, the video player can change the modesetting from
6077 * 60Hz to 30Hz for playing TV/NTSC content when it goes full screen without
6078 * causing any display blink. This same concept can be applied to a mode
/*
 * Find the FreeSync video base mode for @aconnector: the mode with the
 * highest refresh rate at the preferred mode's resolution. The result is
 * cached in aconnector->freesync_vid_base and returned on later calls.
 * @use_probed_modes selects probed_modes over the regular mode list.
 */
6081 static struct drm_display_mode *
6082 get_highest_refresh_rate_mode(struct amdgpu_dm_connector *aconnector,
6083 bool use_probed_modes)
6085 struct drm_display_mode *m, *m_pref = NULL;
6086 u16 current_refresh, highest_refresh;
6087 struct list_head *list_head = use_probed_modes ?
6088 &aconnector->base.probed_modes :
6089 &aconnector->base.modes;
/* A non-zero cached clock means the base mode was already computed. */
6091 if (aconnector->freesync_vid_base.clock != 0)
6092 return &aconnector->freesync_vid_base;
6094 /* Find the preferred mode */
6095 list_for_each_entry (m, list_head, head) {
6096 if (m->type & DRM_MODE_TYPE_PREFERRED) {
6103 /* Probably an EDID with no preferred mode. Fallback to first entry */
6104 m_pref = list_first_entry_or_null(
6105 &aconnector->base.modes, struct drm_display_mode, head);
6107 DRM_DEBUG_DRIVER("No preferred mode found in EDID\n");
6112 highest_refresh = drm_mode_vrefresh(m_pref);
6115 * Find the mode with highest refresh rate with same resolution.
6116 * For some monitors, preferred mode is not the mode with highest
6117 * supported refresh rate.
6119 list_for_each_entry (m, list_head, head) {
6120 current_refresh = drm_mode_vrefresh(m);
6122 if (m->hdisplay == m_pref->hdisplay &&
6123 m->vdisplay == m_pref->vdisplay &&
6124 highest_refresh < current_refresh) {
6125 highest_refresh = current_refresh;
/* Cache the winner so subsequent calls return the same base mode. */
6130 aconnector->freesync_vid_base = *m_pref;
/*
 * Report whether @mode is a FreeSync video mode: identical to the highest
 * refresh base mode except for a uniform vertical-front-porch extension
 * (the vtotal delta must match the vsync_start/vsync_end deltas exactly).
 */
6134 static bool is_freesync_video_mode(const struct drm_display_mode *mode,
6135 struct amdgpu_dm_connector *aconnector)
6137 struct drm_display_mode *high_mode;
6140 high_mode = get_highest_refresh_rate_mode(aconnector, false);
6141 if (!high_mode || !mode)
6144 timing_diff = high_mode->vtotal - mode->vtotal;
/* Everything but the vertical porch must match the base mode. */
6146 if (high_mode->clock == 0 || high_mode->clock != mode->clock ||
6147 high_mode->hdisplay != mode->hdisplay ||
6148 high_mode->vdisplay != mode->vdisplay ||
6149 high_mode->hsync_start != mode->hsync_start ||
6150 high_mode->hsync_end != mode->hsync_end ||
6151 high_mode->htotal != mode->htotal ||
6152 high_mode->hskew != mode->hskew ||
6153 high_mode->vscan != mode->vscan ||
6154 high_mode->vsync_start - mode->vsync_start != timing_diff ||
6155 high_mode->vsync_end - mode->vsync_end != timing_diff)
/*
 * Build a dc_stream_state for @aconnector and @drm_mode.
 *
 * Falls back to a fake (virtual) sink when no real sink is attached, fills
 * timing/audio/scaling properties, applies the SST DSC policy on DCN, and
 * sets up VSC SDP / PSR infopackets where supported. @old_stream, when
 * given, lets the timing keep the previous vic/polarities (scaled modes
 * whose refresh rate did not change).
 */
6161 static struct dc_stream_state *
6162 create_stream_for_sink(struct amdgpu_dm_connector *aconnector,
6163 const struct drm_display_mode *drm_mode,
6164 const struct dm_connector_state *dm_state,
6165 const struct dc_stream_state *old_stream,
6168 struct drm_display_mode *preferred_mode = NULL;
6169 struct drm_connector *drm_connector;
6170 const struct drm_connector_state *con_state =
6171 dm_state ? &dm_state->base : NULL;
6172 struct dc_stream_state *stream = NULL;
6173 struct drm_display_mode mode = *drm_mode;
6174 struct drm_display_mode saved_mode;
6175 struct drm_display_mode *freesync_mode = NULL;
6176 bool native_mode_found = false;
6177 bool recalculate_timing = false;
6178 bool scale = dm_state ? (dm_state->scaling != RMX_OFF) : false;
6180 int preferred_refresh = 0;
6181 #if defined(CONFIG_DRM_AMD_DC_DCN)
6182 struct dsc_dec_dpcd_caps dsc_caps;
6184 struct dc_sink *sink = NULL;
6186 memset(&saved_mode, 0, sizeof(saved_mode));
6188 if (aconnector == NULL) {
6189 DRM_ERROR("aconnector is NULL!\n");
6193 drm_connector = &aconnector->base;
/* No physical sink -> synthesize a virtual one so stream creation works. */
6195 if (!aconnector->dc_sink) {
6196 sink = create_fake_sink(aconnector);
6200 sink = aconnector->dc_sink;
6201 dc_sink_retain(sink);
6204 stream = dc_create_stream_for_sink(sink);
6206 if (stream == NULL) {
6207 DRM_ERROR("Failed to create stream for sink!\n");
6211 stream->dm_stream_context = aconnector;
6213 stream->timing.flags.LTE_340MCSC_SCRAMBLE =
6214 drm_connector->display_info.hdmi.scdc.scrambling.low_rates;
6216 list_for_each_entry(preferred_mode, &aconnector->base.modes, head) {
6217 /* Search for preferred mode */
6218 if (preferred_mode->type & DRM_MODE_TYPE_PREFERRED) {
6219 native_mode_found = true;
6223 if (!native_mode_found)
6224 preferred_mode = list_first_entry_or_null(
6225 &aconnector->base.modes,
6226 struct drm_display_mode,
6229 mode_refresh = drm_mode_vrefresh(&mode);
6231 if (preferred_mode == NULL) {
6233 * This may not be an error, the use case is when we have no
6234 * usermode calls to reset and set mode upon hotplug. In this
6235 * case, we call set mode ourselves to restore the previous mode
6236 * and the modelist may not be filled in in time.
6238 DRM_DEBUG_DRIVER("No preferred mode found\n");
/* FreeSync video modes get their timing recomputed from the cached base mode. */
6240 recalculate_timing = amdgpu_freesync_vid_mode &&
6241 is_freesync_video_mode(&mode, aconnector);
6242 if (recalculate_timing) {
6243 freesync_mode = get_highest_refresh_rate_mode(aconnector, false);
6245 mode = *freesync_mode;
6247 decide_crtc_timing_for_drm_display_mode(
6248 &mode, preferred_mode, scale);
6250 preferred_refresh = drm_mode_vrefresh(preferred_mode);
6254 if (recalculate_timing)
6255 drm_mode_set_crtcinfo(&saved_mode, 0);
6257 drm_mode_set_crtcinfo(&mode, 0);
6260 * If scaling is enabled and refresh rate didn't change
6261 * we copy the vic and polarities of the old timings
6263 if (!scale || mode_refresh != preferred_refresh)
6264 fill_stream_properties_from_drm_display_mode(
6265 stream, &mode, &aconnector->base, con_state, NULL,
6268 fill_stream_properties_from_drm_display_mode(
6269 stream, &mode, &aconnector->base, con_state, old_stream,
6272 #if defined(CONFIG_DRM_AMD_DC_DCN)
6273 /* SST DSC determination policy */
6274 update_dsc_caps(aconnector, sink, stream, &dsc_caps);
6275 if (aconnector->dsc_settings.dsc_force_enable != DSC_CLK_FORCE_DISABLE && dsc_caps.is_dsc_supported)
6276 apply_dsc_policy_for_stream(aconnector, sink, stream, &dsc_caps);
6279 update_stream_scaling_settings(&mode, dm_state, stream);
6282 &stream->audio_info,
6286 update_stream_signal(stream, sink);
6288 if (stream->signal == SIGNAL_TYPE_HDMI_TYPE_A)
6289 mod_build_hf_vsif_infopacket(stream, &stream->vsp_infopacket);
6291 if (stream->link->psr_settings.psr_feature_enabled) {
6293 // should decide stream support vsc sdp colorimetry capability
6294 // before building vsc info packet
6296 stream->use_vsc_sdp_for_colorimetry = false;
6297 if (aconnector->dc_sink->sink_signal == SIGNAL_TYPE_DISPLAY_PORT_MST) {
6298 stream->use_vsc_sdp_for_colorimetry =
6299 aconnector->dc_sink->is_vsc_sdp_colorimetry_supported;
6301 if (stream->link->dpcd_caps.dprx_feature.bits.VSC_SDP_COLORIMETRY_SUPPORTED)
6302 stream->use_vsc_sdp_for_colorimetry = true;
6304 mod_build_vsc_infopacket(stream, &stream->vsc_infopacket);
/* Delay PSR entry on this connector after a fresh stream is built. */
6305 aconnector->psr_skip_count = AMDGPU_DM_PSR_ENTRY_DELAY;
/* Drop the reference taken above (retain or fake-sink create). */
6309 dc_sink_release(sink);
/* drm_crtc_funcs.destroy hook: tear down DRM CRTC state (and free the CRTC). */
6314 static void amdgpu_dm_crtc_destroy(struct drm_crtc *crtc)
6316 drm_crtc_cleanup(crtc);
/*
 * drm_crtc_funcs.atomic_destroy_state hook: release the dc_stream reference
 * held by the DM CRTC state, then free the base atomic state.
 */
6320 static void dm_crtc_destroy_state(struct drm_crtc *crtc,
6321 struct drm_crtc_state *state)
6323 struct dm_crtc_state *cur = to_dm_crtc_state(state);
6325 /* TODO Destroy dc_stream objects are stream object is flattened */
6327 dc_stream_release(cur->stream);
6330 __drm_atomic_helper_crtc_destroy_state(state);
/*
 * drm_crtc_funcs.reset hook: drop any existing state and install a fresh
 * zeroed dm_crtc_state (bails with a warning on allocation failure).
 */
6336 static void dm_crtc_reset_state(struct drm_crtc *crtc)
6338 struct dm_crtc_state *state;
6341 dm_crtc_destroy_state(crtc, crtc->state);
6343 state = kzalloc(sizeof(*state), GFP_KERNEL);
6344 if (WARN_ON(!state))
6347 __drm_atomic_helper_crtc_reset(crtc, &state->base);
/*
 * drm_crtc_funcs.atomic_duplicate_state hook: copy the current dm_crtc_state,
 * taking an extra reference on the dc_stream it carries.
 */
6350 static struct drm_crtc_state *
6351 dm_crtc_duplicate_state(struct drm_crtc *crtc)
6353 struct dm_crtc_state *state, *cur;
6355 cur = to_dm_crtc_state(crtc->state);
6357 if (WARN_ON(!crtc->state))
6360 state = kzalloc(sizeof(*state), GFP_KERNEL);
6364 __drm_atomic_helper_crtc_duplicate_state(crtc, &state->base);
6367 state->stream = cur->stream;
6368 dc_stream_retain(state->stream);
/* Carry over DM-specific fields not handled by the DRM helper. */
6371 state->active_planes = cur->active_planes;
6372 state->vrr_infopacket = cur->vrr_infopacket;
6373 state->abm_level = cur->abm_level;
6374 state->vrr_supported = cur->vrr_supported;
6375 state->freesync_config = cur->freesync_config;
6376 state->cm_has_degamma = cur->cm_has_degamma;
6377 state->cm_is_degamma_srgb = cur->cm_is_degamma_srgb;
6378 /* TODO Duplicate dc_stream after objects are stream object is flattened */
6380 return &state->base;
6383 #ifdef CONFIG_DRM_AMD_SECURE_DISPLAY
/* late_register hook (secure display builds): attach CRTC debugfs entries. */
6384 static int amdgpu_dm_crtc_late_register(struct drm_crtc *crtc)
6386 crtc_debugfs_init(crtc);
/*
 * Enable/disable the VUPDATE interrupt for @crtc's OTG instance.
 * Returns 0 on success, -EBUSY when dc_interrupt_set() refuses.
 */
6392 static inline int dm_set_vupdate_irq(struct drm_crtc *crtc, bool enable)
6394 enum dc_irq_source irq_source;
6395 struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
6396 struct amdgpu_device *adev = drm_to_adev(crtc->dev);
/* IRQ sources are laid out per OTG instance, offset from the VUPDATE base. */
6399 irq_source = IRQ_TYPE_VUPDATE + acrtc->otg_inst;
6401 rc = dc_interrupt_set(adev->dm.dc, irq_source, enable) ? 0 : -EBUSY;
6403 DRM_DEBUG_VBL("crtc %d - vupdate irq %sabling: r=%d\n",
6404 acrtc->crtc_id, enable ? "en" : "dis", rc);
/*
 * Enable/disable the VBLANK interrupt for @crtc. In VRR mode the VUPDATE
 * interrupt is toggled alongside it. On DCN, PSR interaction is deferred to
 * the vblank control workqueue (skipped while in GPU reset).
 */
6408 static inline int dm_set_vblank(struct drm_crtc *crtc, bool enable)
6410 enum dc_irq_source irq_source;
6411 struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
6412 struct amdgpu_device *adev = drm_to_adev(crtc->dev);
6413 struct dm_crtc_state *acrtc_state = to_dm_crtc_state(crtc->state);
6414 #if defined(CONFIG_DRM_AMD_DC_DCN)
6415 struct amdgpu_display_manager *dm = &adev->dm;
6416 struct vblank_control_work *work;
6421 /* vblank irq on -> Only need vupdate irq in vrr mode */
6422 if (amdgpu_dm_vrr_active(acrtc_state))
6423 rc = dm_set_vupdate_irq(crtc, true);
6425 /* vblank irq off -> vupdate irq off */
6426 rc = dm_set_vupdate_irq(crtc, false);
6432 irq_source = IRQ_TYPE_VBLANK + acrtc->otg_inst;
6434 if (!dc_interrupt_set(adev->dm.dc, irq_source, enable))
/* Skip the PSR/vblank worker while a GPU reset is in progress. */
6437 if (amdgpu_in_reset(adev))
6440 #if defined(CONFIG_DRM_AMD_DC_DCN)
6441 if (dm->vblank_control_workqueue) {
/* GFP_ATOMIC: this path can run in atomic context. */
6442 work = kzalloc(sizeof(*work), GFP_ATOMIC);
6446 INIT_WORK(&work->work, vblank_control_worker)
;
6448 work->acrtc = acrtc;
6449 work->enable = enable;
/* The worker holds its own stream reference; released by the worker. */
6451 if (acrtc_state->stream) {
6452 dc_stream_retain(acrtc_state->stream);
6453 work->stream = acrtc_state->stream;
6456 queue_work(dm->vblank_control_workqueue, &work->work);
/* drm_crtc_funcs.enable_vblank hook: thin wrapper over dm_set_vblank(). */
6463 static int dm_enable_vblank(struct drm_crtc *crtc)
6465 return dm_set_vblank(crtc, true);
/* drm_crtc_funcs.disable_vblank hook: thin wrapper over dm_set_vblank(). */
6468 static void dm_disable_vblank(struct drm_crtc *crtc)
6470 dm_set_vblank(crtc, false);
6473 /* Implemented only the options currently available for the driver */
6474 static const struct drm_crtc_funcs amdgpu_dm_crtc_funcs = {
6475 .reset = dm_crtc_reset_state,
6476 .destroy = amdgpu_dm_crtc_destroy,
6477 .set_config = drm_atomic_helper_set_config,
6478 .page_flip = drm_atomic_helper_page_flip,
6479 .atomic_duplicate_state = dm_crtc_duplicate_state,
6480 .atomic_destroy_state = dm_crtc_destroy_state,
6481 .set_crc_source = amdgpu_dm_crtc_set_crc_source,
6482 .verify_crc_source = amdgpu_dm_crtc_verify_crc_source,
6483 .get_crc_sources = amdgpu_dm_crtc_get_crc_sources,
6484 .get_vblank_counter = amdgpu_get_vblank_counter_kms,
6485 .enable_vblank = dm_enable_vblank,
6486 .disable_vblank = dm_disable_vblank,
6487 .get_vblank_timestamp = drm_crtc_vblank_helper_get_vblank_timestamp,
6488 #if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
6489 .late_register = amdgpu_dm_crtc_late_register,
/*
 * drm_connector_funcs.detect hook: a connector is "connected" when a
 * dc_sink exists (unforced case) or when userspace forced it on.
 */
6493 static enum drm_connector_status
6494 amdgpu_dm_connector_detect(struct drm_connector *connector, bool force)
6497 struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
6501 * 1. This interface is NOT called in context of HPD irq.
6502 * 2. This interface *is called* in context of user-mode ioctl. Which
6503 * makes it a bad place for *any* MST-related activity.
6506 if (aconnector->base.force == DRM_FORCE_UNSPECIFIED &&
6507 !aconnector->fake_enable)
6508 connected = (aconnector->dc_sink != NULL);
6510 connected = (aconnector->base.force == DRM_FORCE_ON);
6512 update_subconnector_property(aconnector);
6514 return (connected ? connector_status_connected :
6515 connector_status_disconnected);
/*
 * drm_connector_funcs.atomic_set_property hook: map DRM connector
 * properties (scaling mode, underscan borders/enable, ABM level) onto the
 * DM connector state.
 */
6518 int amdgpu_dm_connector_atomic_set_property(struct drm_connector *connector,
6519 struct drm_connector_state *connector_state,
6520 struct drm_property *property,
6523 struct drm_device *dev = connector->dev;
6524 struct amdgpu_device *adev = drm_to_adev(dev);
6525 struct dm_connector_state *dm_old_state =
6526 to_dm_connector_state(connector->state);
6527 struct dm_connector_state *dm_new_state =
6528 to_dm_connector_state(connector_state);
6532 if (property == dev->mode_config.scaling_mode_property) {
6533 enum amdgpu_rmx_type rmx_type;
6536 case DRM_MODE_SCALE_CENTER:
6537 rmx_type = RMX_CENTER;
6539 case DRM_MODE_SCALE_ASPECT:
6540 rmx_type = RMX_ASPECT;
6542 case DRM_MODE_SCALE_FULLSCREEN:
6543 rmx_type = RMX_FULL;
6545 case DRM_MODE_SCALE_NONE:
/* No change requested — nothing to update. */
6551 if (dm_old_state->scaling == rmx_type)
6554 dm_new_state->scaling = rmx_type;
6556 } else if (property == adev->mode_info.underscan_hborder_property) {
6557 dm_new_state->underscan_hborder = val;
6559 } else if (property == adev->mode_info.underscan_vborder_property) {
6560 dm_new_state->underscan_vborder = val;
6562 } else if (property == adev->mode_info.underscan_property) {
6563 dm_new_state->underscan_enable = val;
6565 } else if (property == adev->mode_info.abm_level_property) {
6566 dm_new_state->abm_level = val;
/*
 * drm_connector_funcs.atomic_get_property hook: read back the DM connector
 * state fields as DRM property values (inverse of the set_property hook).
 */
6573 int amdgpu_dm_connector_atomic_get_property(struct drm_connector *connector,
6574 const struct drm_connector_state *state,
6575 struct drm_property *property,
6578 struct drm_device *dev = connector->dev;
6579 struct amdgpu_device *adev = drm_to_adev(dev);
6580 struct dm_connector_state *dm_state =
6581 to_dm_connector_state(state);
6584 if (property == dev->mode_config.scaling_mode_property) {
6585 switch (dm_state->scaling) {
6587 *val = DRM_MODE_SCALE_CENTER;
6590 *val = DRM_MODE_SCALE_ASPECT;
6593 *val = DRM_MODE_SCALE_FULLSCREEN;
6597 *val = DRM_MODE_SCALE_NONE;
6601 } else if (property == adev->mode_info.underscan_hborder_property) {
6602 *val = dm_state->underscan_hborder;
6604 } else if (property == adev->mode_info.underscan_vborder_property) {
6605 *val = dm_state->underscan_vborder;
6607 } else if (property == adev->mode_info.underscan_property) {
6608 *val = dm_state->underscan_enable;
6610 } else if (property == adev->mode_info.abm_level_property) {
6611 *val = dm_state->abm_level;
/* early_unregister hook: drop the DP AUX channel before connector removal. */
6618 static void amdgpu_dm_connector_unregister(struct drm_connector *connector)
6620 struct amdgpu_dm_connector *amdgpu_dm_connector = to_amdgpu_dm_connector(connector);
6622 drm_dp_aux_unregister(&amdgpu_dm_connector->dm_dp_aux.aux);
/*
 * drm_connector_funcs.destroy hook: tear down everything attached to the
 * connector — MST topology manager, eDP backlight device, emulated and real
 * dc_sinks, CEC, the DDC i2c adapter, and the AUX name string.
 */
6625 static void amdgpu_dm_connector_destroy(struct drm_connector *connector)
6627 struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
6628 const struct dc_link *link = aconnector->dc_link;
6629 struct amdgpu_device *adev = drm_to_adev(connector->dev);
6630 struct amdgpu_display_manager *dm = &adev->dm;
6634 * Call only if mst_mgr was initialized before since it's not done
6635 * for all connector types.
6637 if (aconnector->mst_mgr.dev)
6638 drm_dp_mst_topology_mgr_destroy(&aconnector->mst_mgr);
6640 #if defined(CONFIG_BACKLIGHT_CLASS_DEVICE) ||\
6641 defined(CONFIG_BACKLIGHT_CLASS_DEVICE_MODULE)
/* Unregister the backlight device bound to this link, if any. */
6642 for (i = 0; i < dm->num_of_edps; i++) {
6643 if ((link == dm->backlight_link[i]) && dm->backlight_dev[i]) {
6644 backlight_device_unregister(dm->backlight_dev[i]);
6645 dm->backlight_dev[i] = NULL;
6650 if (aconnector->dc_em_sink)
6651 dc_sink_release(aconnector->dc_em_sink);
6652 aconnector->dc_em_sink = NULL;
6653 if (aconnector->dc_sink)
6654 dc_sink_release(aconnector->dc_sink);
6655 aconnector->dc_sink = NULL;
6657 drm_dp_cec_unregister_connector(&aconnector->dm_dp_aux.aux);
6658 drm_connector_unregister(connector);
6659 drm_connector_cleanup(connector);
6660 if (aconnector->i2c) {
6661 i2c_del_adapter(&aconnector->i2c->base);
6662 kfree(aconnector->i2c);
6664 kfree(aconnector->dm_dp_aux.aux.name);
/*
 * drm_connector_funcs.reset hook: free any existing connector state and
 * install default DM connector state (scaling off, underscan off, 8 bpc
 * max, and the module-parameter ABM level on eDP).
 */
6669 void amdgpu_dm_connector_funcs_reset(struct drm_connector *connector)
6671 struct dm_connector_state *state =
6672 to_dm_connector_state(connector->state);
6674 if (connector->state)
6675 __drm_atomic_helper_connector_destroy_state(connector->state);
6679 state = kzalloc(sizeof(*state), GFP_KERNEL);
6682 state->scaling = RMX_OFF;
6683 state->underscan_enable = false;
6684 state->underscan_hborder = 0;
6685 state->underscan_vborder = 0;
6686 state->base.max_requested_bpc = 8;
6687 state->vcpi_slots = 0;
/* ABM (adaptive backlight) only applies to the internal eDP panel. */
6689 if (connector->connector_type == DRM_MODE_CONNECTOR_eDP)
6690 state->abm_level = amdgpu_dm_abm_level;
6692 __drm_atomic_helper_connector_reset(connector, &state->base);
/*
 * atomic_duplicate_state hook: kmemdup the current dm_connector_state and
 * re-copy the DM-specific fields after the DRM helper resets the base.
 */
6696 struct drm_connector_state *
6697 amdgpu_dm_connector_atomic_duplicate_state(struct drm_connector *connector)
6699 struct dm_connector_state *state =
6700 to_dm_connector_state(connector->state);
6702 struct dm_connector_state *new_state =
6703 kmemdup(state, sizeof(*state), GFP_KERNEL);
6708 __drm_atomic_helper_connector_duplicate_state(connector, &new_state->base);
6710 new_state->freesync_capable = state->freesync_capable;
6711 new_state->abm_level = state->abm_level;
6712 new_state->scaling = state->scaling;
6713 new_state->underscan_enable = state->underscan_enable;
6714 new_state->underscan_hborder = state->underscan_hborder;
6715 new_state->underscan_vborder = state->underscan_vborder;
6716 new_state->vcpi_slots = state->vcpi_slots;
6717 new_state->pbn = state->pbn;
6718 return &new_state->base;
/*
 * late_register hook: register the DP AUX channel for DP/eDP connectors
 * and set up connector debugfs entries when enabled.
 */
6722 amdgpu_dm_connector_late_register(struct drm_connector *connector)
6724 struct amdgpu_dm_connector *amdgpu_dm_connector =
6725 to_amdgpu_dm_connector(connector);
6728 if ((connector->connector_type == DRM_MODE_CONNECTOR_DisplayPort) ||
6729 (connector->connector_type == DRM_MODE_CONNECTOR_eDP)) {
6730 amdgpu_dm_connector->dm_dp_aux.aux.dev = connector->kdev;
6731 r = drm_dp_aux_register(&amdgpu_dm_connector->dm_dp_aux.aux);
6736 #if defined(CONFIG_DEBUG_FS)
6737 connector_debugfs_init(amdgpu_dm_connector);
/* Connector function table wiring the DM hooks into the DRM core. */
6743 static const struct drm_connector_funcs amdgpu_dm_connector_funcs = {
6744 .reset = amdgpu_dm_connector_funcs_reset,
6745 .detect = amdgpu_dm_connector_detect,
6746 .fill_modes = drm_helper_probe_single_connector_modes,
6747 .destroy = amdgpu_dm_connector_destroy,
6748 .atomic_duplicate_state = amdgpu_dm_connector_atomic_duplicate_state,
6749 .atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
6750 .atomic_set_property = amdgpu_dm_connector_atomic_set_property,
6751 .atomic_get_property = amdgpu_dm_connector_atomic_get_property,
6752 .late_register = amdgpu_dm_connector_late_register,
6753 .early_unregister = amdgpu_dm_connector_unregister
/* Helper-funcs get_modes hook: forward to the DM mode enumeration. */
6756 static int get_modes(struct drm_connector *connector)
6758 return amdgpu_dm_connector_get_modes(connector);
/*
 * Create an emulated (remote) sink from the connector's EDID property blob.
 * Without an EDID blob the connector is forced OFF. When the connector is
 * forced ON, the emulated sink (or the link's local sink) becomes dc_sink.
 */
6761 static void create_eml_sink(struct amdgpu_dm_connector *aconnector)
6763 struct dc_sink_init_data init_params = {
6764 .link = aconnector->dc_link,
6765 .sink_signal = SIGNAL_TYPE_VIRTUAL
6769 if (!aconnector->base.edid_blob_ptr) {
6770 DRM_ERROR("No EDID firmware found on connector: %s ,forcing to OFF!\n",
6771 aconnector->base.name);
6773 aconnector->base.force = DRM_FORCE_OFF;
6774 aconnector->base.override_edid = false;
6778 edid = (struct edid *) aconnector->base.edid_blob_ptr->data;
6780 aconnector->edid = edid;
/* EDID size is (extension count + 1) blocks of EDID_LENGTH bytes. */
6782 aconnector->dc_em_sink = dc_link_add_remote_sink(
6783 aconnector->dc_link,
6785 (edid->extensions + 1) * EDID_LENGTH,
6788 if (aconnector->base.force == DRM_FORCE_ON) {
6789 aconnector->dc_sink = aconnector->dc_link->local_sink ?
6790 aconnector->dc_link->local_sink :
6791 aconnector->dc_em_sink;
6792 dc_sink_retain(aconnector->dc_sink);
/*
 * Handle forced-EDID management for a connector: seed non-zero verified
 * link caps for DP (needed for an initial modeset on headless boot with
 * force-on), then build the emulated sink from the EDID override.
 */
6796 static void handle_edid_mgmt(struct amdgpu_dm_connector *aconnector)
6798 struct dc_link *link = (struct dc_link *)aconnector->dc_link;
6801 * In case of headless boot with force on for DP managed connector
6802 * Those settings have to be != 0 to get initial modeset
6804 if (link->connector_signal == SIGNAL_TYPE_DISPLAY_PORT) {
6805 link->verified_link_cap.lane_count = LANE_COUNT_FOUR;
6806 link->verified_link_cap.link_rate = LINK_RATE_HIGH2;
6810 aconnector->base.override_edid = true;
6811 create_eml_sink(aconnector);
/*
 * Create a stream for @aconnector and validate it with DC. On validation
 * failure, retry with progressively lower bpc (down to 6); if encoder
 * validation still fails, retry once more forcing YCbCr 4:2:0 output.
 * Returns a validated stream, or NULL on failure.
 */
6814 static struct dc_stream_state *
6815 create_validate_stream_for_sink(struct amdgpu_dm_connector *aconnector,
6816 const struct drm_display_mode *drm_mode,
6817 const struct dm_connector_state *dm_state,
6818 const struct dc_stream_state *old_stream)
6820 struct drm_connector *connector = &aconnector->base;
6821 struct amdgpu_device *adev = drm_to_adev(connector->dev);
6822 struct dc_stream_state *stream;
6823 const struct drm_connector_state *drm_state = dm_state ? &dm_state->base : NULL;
6824 int requested_bpc = drm_state ? drm_state->max_requested_bpc : 8;
6825 enum dc_status dc_result = DC_OK;
6828 stream = create_stream_for_sink(aconnector, drm_mode,
6829 dm_state, old_stream,
6831 if (stream == NULL) {
6832 DRM_ERROR("Failed to create stream for sink!\n");
6836 dc_result = dc_validate_stream(adev->dm.dc, stream);
6838 if (dc_result != DC_OK) {
6839 DRM_DEBUG_KMS("Mode %dx%d (clk %d) failed DC validation with error %d (%s)\n",
6844 dc_status_to_str(dc_result));
6846 dc_stream_release(stream);
6848 requested_bpc -= 2; /* lower bpc to retry validation */
6851 } while (stream == NULL && requested_bpc >= 6);
/* Last resort: recurse once with YCbCr 4:2:0 forced, restoring the flag after. */
6853 if (dc_result == DC_FAIL_ENC_VALIDATE && !aconnector->force_yuv420_output) {
6854 DRM_DEBUG_KMS("Retry forcing YCbCr420 encoding\n");
6856 aconnector->force_yuv420_output = true;
6857 stream = create_validate_stream_for_sink(aconnector, drm_mode,
6858 dm_state, old_stream);
6859 aconnector->force_yuv420_output = false;
/*
 * connector_helper mode_valid hook: reject interlaced/doublescan modes,
 * then accept a mode iff a validated DC stream can be created for it.
 */
6865 enum drm_mode_status amdgpu_dm_connector_mode_valid(struct drm_connector *connector,
6866 struct drm_display_mode *mode)
6868 int result = MODE_ERROR;
6869 struct dc_sink *dc_sink;
6870 /* TODO: Unhardcode stream count */
6871 struct dc_stream_state *stream;
6872 struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
6874 if ((mode->flags & DRM_MODE_FLAG_INTERLACE) ||
6875 (mode->flags & DRM_MODE_FLAG_DBLSCAN))
6879 * Only run this the first time mode_valid is called to initialize
6882 if (aconnector->base.force != DRM_FORCE_UNSPECIFIED &&
6883 !aconnector->dc_em_sink)
6884 handle_edid_mgmt(aconnector);
6886 dc_sink = to_amdgpu_dm_connector(connector)->dc_sink;
6888 if (dc_sink == NULL && aconnector->base.force != DRM_FORCE_ON_DIGITAL &&
6889 aconnector->base.force != DRM_FORCE_ON) {
6890 DRM_ERROR("dc_sink is NULL!\n");
6894 stream = create_validate_stream_for_sink(aconnector, mode, NULL, NULL);
/* Stream was only needed for validation; release it immediately. */
6896 dc_stream_release(stream);
6901 /* TODO: error handling*/
/*
 * Pack the connector state's HDR static metadata into a DC info packet.
 * Uses the DRM HDMI infoframe helpers to build the frame, then rewrites
 * the header bytes for either HDMI infoframe or DP SDP transport.
 * Returns 0 on success (elided error paths presumably return negative).
 */
6905 static int fill_hdr_info_packet(const struct drm_connector_state *state,
6906 struct dc_info_packet *out)
6908 struct hdmi_drm_infoframe frame;
6909 unsigned char buf[30]; /* 26 + 4 */
6913 memset(out, 0, sizeof(*out));
/* No HDR metadata attached: leave the packet zeroed. */
6915 if (!state->hdr_output_metadata)
6918 ret = drm_hdmi_infoframe_set_hdr_metadata(&frame, state);
6922 len = hdmi_drm_infoframe_pack_only(&frame, buf, sizeof(buf));
6926 /* Static metadata is a fixed 26 bytes + 4 byte header. */
6930 /* Prepare the infopacket for DC. */
6931 switch (state->connector->connector_type) {
6932 case DRM_MODE_CONNECTOR_HDMIA:
6933 out->hb0 = 0x87; /* type */
6934 out->hb1 = 0x01; /* version */
6935 out->hb2 = 0x1A; /* length */
6936 out->sb[0] = buf[3]; /* checksum */
6940 case DRM_MODE_CONNECTOR_DisplayPort:
6941 case DRM_MODE_CONNECTOR_eDP:
6942 out->hb0 = 0x00; /* sdp id, zero */
6943 out->hb1 = 0x87; /* type */
6944 out->hb2 = 0x1D; /* payload len - 1 */
6945 out->hb3 = (0x13 << 2); /* sdp version */
6946 out->sb[0] = 0x01; /* version */
6947 out->sb[1] = 0x1A; /* length */
/* Copy the 26 metadata bytes past the 4-byte infoframe header. */
6955 memcpy(&out->sb[i], &buf[4], 26);
6958 print_hex_dump(KERN_DEBUG, "HDR SB:", DUMP_PREFIX_NONE, 16, 1, out->sb,
6959 sizeof(out->sb), false);
/*
 * drm_connector_helper_funcs.atomic_check hook: when the HDR static
 * metadata changes between old and new connector state, validate the
 * new metadata and force a modeset on entry/exit of HDR.
 */
amdgpu_dm_connector_atomic_check(struct drm_connector *conn,
6966 struct drm_atomic_state *state)
6968 struct drm_connector_state *new_con_state =
6969 drm_atomic_get_new_connector_state(state, conn);
6970 struct drm_connector_state *old_con_state =
6971 drm_atomic_get_old_connector_state(state, conn);
6972 struct drm_crtc *crtc = new_con_state->crtc;
6973 struct drm_crtc_state *new_crtc_state;
6976 trace_amdgpu_dm_connector_atomic_check(new_con_state);
6981 if (!drm_connector_atomic_hdr_metadata_equal(old_con_state, new_con_state)) {
6982 struct dc_info_packet hdr_infopacket;
/* Validate the metadata by packing it; result is discarded here. */
6984 ret = fill_hdr_info_packet(new_con_state, &hdr_infopacket);
/* Pull the CRTC state into the atomic commit so we can flag a modeset. */
6988 new_crtc_state = drm_atomic_get_crtc_state(state, crtc);
6989 if (IS_ERR(new_crtc_state))
6990 return PTR_ERR(new_crtc_state);
6993 * DC considers the stream backends changed if the
6994 * static metadata changes. Forcing the modeset also
6995 * gives a simple way for userspace to switch from
6996 * 8bpc to 10bpc when setting the metadata to enter
6999 * Changing the static metadata after it's been
7000 * set is permissible, however. So only force a
7001 * modeset if we're entering or exiting HDR.
7003 new_crtc_state->mode_changed =
7004 !old_con_state->hdr_output_metadata ||
7005 !new_con_state->hdr_output_metadata;
/* Connector helper vtable wired to the hooks defined above. */
7011 static const struct drm_connector_helper_funcs
7012 amdgpu_dm_connector_helper_funcs = {
7014 * If hotplugging a second bigger display in FB Con mode, bigger resolution
7015 * modes will be filtered by drm_mode_validate_size(), and those modes
7016 * are missing after user start lightdm. So we need to renew modes list.
7017 * in get_modes call back, not just return the modes count
7019 .get_modes = get_modes,
7020 .mode_valid = amdgpu_dm_connector_mode_valid,
7021 .atomic_check = amdgpu_dm_connector_atomic_check,
/* Intentionally empty .disable hook (body elided in this chunk). */
7024 static void dm_crtc_helper_disable(struct drm_crtc *crtc)
/*
 * Count the non-cursor planes on @new_crtc_state that will be enabled
 * (i.e. have a framebuffer). Planes with no new state in the commit are
 * assumed enabled because they previously passed validation.
 */
7028 static int count_crtc_active_planes(struct drm_crtc_state *new_crtc_state)
7030 struct drm_atomic_state *state = new_crtc_state->state;
7031 struct drm_plane *plane;
7034 drm_for_each_plane_mask(plane, state->dev, new_crtc_state->plane_mask) {
7035 struct drm_plane_state *new_plane_state;
7037 /* Cursor planes are "fake". */
7038 if (plane->type == DRM_PLANE_TYPE_CURSOR)
7041 new_plane_state = drm_atomic_get_new_plane_state(state, plane);
7043 if (!new_plane_state) {
7045 * The plane is enable on the CRTC and hasn't changed
7046 * state. This means that it previously passed
7047 * validation and is therefore enabled.
7053 /* We need a framebuffer to be considered enabled. */
7054 num_active += (new_plane_state->fb != NULL);
/*
 * Refresh dm_crtc_state->active_planes from the new CRTC state.
 * Zero when the CRTC has no DC stream attached.
 */
7060 static void dm_update_crtc_active_planes(struct drm_crtc *crtc,
7061 struct drm_crtc_state *new_crtc_state)
7063 struct dm_crtc_state *dm_new_crtc_state =
7064 to_dm_crtc_state(new_crtc_state);
7066 dm_new_crtc_state->active_planes = 0;
7068 if (!dm_new_crtc_state->stream)
7071 dm_new_crtc_state->active_planes =
7072 count_crtc_active_planes(new_crtc_state);
/*
 * drm_crtc_helper_funcs.atomic_check hook: updates the active plane
 * count, rejects CRTC enable without the primary plane, and runs DC
 * stream validation when a stream is attached.
 */
7075 static int dm_crtc_helper_atomic_check(struct drm_crtc *crtc,
7076 struct drm_atomic_state *state)
7078 struct drm_crtc_state *crtc_state = drm_atomic_get_new_crtc_state(state,
7080 struct amdgpu_device *adev = drm_to_adev(crtc->dev);
7081 struct dc *dc = adev->dm.dc;
7082 struct dm_crtc_state *dm_crtc_state = to_dm_crtc_state(crtc_state);
7085 trace_amdgpu_dm_crtc_atomic_check(crtc_state);
7087 dm_update_crtc_active_planes(crtc, crtc_state);
/* Modeset requested but no stream to modeset with: driver bug. */
7089 if (WARN_ON(unlikely(!dm_crtc_state->stream &&
7090 modeset_required(crtc_state, NULL, dm_crtc_state->stream)))) {
7095 * We require the primary plane to be enabled whenever the CRTC is, otherwise
7096 * drm_mode_cursor_universal may end up trying to enable the cursor plane while all other
7097 * planes are disabled, which is not supported by the hardware. And there is legacy
7098 * userspace which stops using the HW cursor altogether in response to the resulting EINVAL.
7100 if (crtc_state->enable &&
7101 !(crtc_state->plane_mask & drm_plane_mask(crtc->primary))) {
7102 DRM_DEBUG_ATOMIC("Can't enable a CRTC without enabling the primary plane\n");
7106 /* In some use cases, like reset, no stream is attached */
7107 if (!dm_crtc_state->stream)
7110 if (dc_validate_stream(dc, dm_crtc_state->stream) == DC_OK)
7113 DRM_DEBUG_ATOMIC("Failed DC stream validation\n");
/* .mode_fixup hook: no adjustment performed (body elided in this chunk). */
7117 static bool dm_crtc_helper_mode_fixup(struct drm_crtc *crtc,
7118 const struct drm_display_mode *mode,
7119 struct drm_display_mode *adjusted_mode)
/* CRTC helper vtable. */
7124 static const struct drm_crtc_helper_funcs amdgpu_dm_crtc_helper_funcs = {
7125 .disable = dm_crtc_helper_disable,
7126 .atomic_check = dm_crtc_helper_atomic_check,
7127 .mode_fixup = dm_crtc_helper_mode_fixup,
7128 .get_scanout_position = amdgpu_crtc_get_scanout_position,
/* Intentionally empty encoder .disable hook (body elided in this chunk). */
7131 static void dm_encoder_helper_disable(struct drm_encoder *encoder)
/*
 * Map a DC color depth enum to bits-per-component (6..16).
 * NOTE(review): the per-case return statements are elided in this chunk.
 */
7136 static int convert_dc_color_depth_into_bpc (enum dc_color_depth display_color_depth)
7138 switch (display_color_depth) {
7139 case COLOR_DEPTH_666:
7141 case COLOR_DEPTH_888:
7143 case COLOR_DEPTH_101010:
7145 case COLOR_DEPTH_121212:
7147 case COLOR_DEPTH_141414:
7149 case COLOR_DEPTH_161616:
/*
 * Encoder atomic_check for DP MST connectors: computes the required PBN
 * from the adjusted mode's clock and bpp, then reserves VCPI slots via
 * the MST topology manager. Non-MST connectors bail early (no port).
 */
7157 static int dm_encoder_helper_atomic_check(struct drm_encoder *encoder,
7158 struct drm_crtc_state *crtc_state,
7159 struct drm_connector_state *conn_state)
7161 struct drm_atomic_state *state = crtc_state->state;
7162 struct drm_connector *connector = conn_state->connector;
7163 struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
7164 struct dm_connector_state *dm_new_connector_state = to_dm_connector_state(conn_state);
7165 const struct drm_display_mode *adjusted_mode = &crtc_state->adjusted_mode;
7166 struct drm_dp_mst_topology_mgr *mst_mgr;
7167 struct drm_dp_mst_port *mst_port;
7168 enum dc_color_depth color_depth;
7170 bool is_y420 = false;
7172 if (!aconnector->port || !aconnector->dc_sink)
7175 mst_port = aconnector->port;
7176 mst_mgr = &aconnector->mst_port->mst_mgr;
/* Nothing to recompute unless connectors or the mode changed. */
7178 if (!crtc_state->connectors_changed && !crtc_state->mode_changed)
/* Skip recomputation for duplicated (suspend/resume) states. */
7181 if (!state->duplicated) {
7182 int max_bpc = conn_state->max_requested_bpc;
7183 is_y420 = drm_mode_is_420_also(&connector->display_info, adjusted_mode) &&
7184 aconnector->force_yuv420_output;
7185 color_depth = convert_color_depth_from_display_info(connector,
/* 3 components per pixel. */
7188 bpp = convert_dc_color_depth_into_bpc(color_depth) * 3;
7189 clock = adjusted_mode->clock;
7190 dm_new_connector_state->pbn = drm_dp_calc_pbn_mode(clock, bpp, false);
7192 dm_new_connector_state->vcpi_slots = drm_dp_atomic_find_vcpi_slots(state,
7195 dm_new_connector_state->pbn,
7196 dm_mst_get_pbn_divider(aconnector->dc_link));
7197 if (dm_new_connector_state->vcpi_slots < 0) {
7198 DRM_DEBUG_ATOMIC("failed finding vcpi slots: %d\n", (int)dm_new_connector_state->vcpi_slots);
7199 return dm_new_connector_state->vcpi_slots;
/* Encoder helper vtable (non-static: referenced from other DM files). */
7204 const struct drm_encoder_helper_funcs amdgpu_dm_encoder_helper_funcs = {
7205 .disable = dm_encoder_helper_disable,
7206 .atomic_check = dm_encoder_helper_atomic_check
#if defined(CONFIG_DRM_AMD_DC_DCN)
/*
 * For each MST connector in the atomic state, find its matching DC
 * stream and update the connector's PBN/VCPI allocation using the DSC
 * fairness results in @vars. Streams without DSC enabled only get
 * drm_dp_mst_atomic_enable_dsc() called to record the disabled state.
 */
7210 static int dm_update_mst_vcpi_slots_for_dsc(struct drm_atomic_state *state,
7211 struct dc_state *dc_state,
7212 struct dsc_mst_fairness_vars *vars)
7214 struct dc_stream_state *stream = NULL;
7215 struct drm_connector *connector;
7216 struct drm_connector_state *new_con_state;
7217 struct amdgpu_dm_connector *aconnector;
7218 struct dm_connector_state *dm_conn_state;
7220 int vcpi, pbn_div, pbn = 0;
7222 for_each_new_connector_in_state(state, connector, new_con_state, i) {
7224 aconnector = to_amdgpu_dm_connector(connector);
/* MST connectors only. */
7226 if (!aconnector->port)
7229 if (!new_con_state || !new_con_state->crtc)
7232 dm_conn_state = to_dm_connector_state(new_con_state);
/* Locate the DC stream belonging to this connector. */
7234 for (j = 0; j < dc_state->stream_count; j++) {
7235 stream = dc_state->streams[j];
7239 if ((struct amdgpu_dm_connector*)stream->dm_stream_context == aconnector)
7248 if (stream->timing.flags.DSC != 1) {
7249 drm_dp_mst_atomic_enable_dsc(state,
7257 pbn_div = dm_mst_get_pbn_divider(stream->link);
7258 clock = stream->timing.pix_clk_100hz / 10;
7259 /* pbn is calculated by compute_mst_dsc_configs_for_state*/
7260 for (j = 0; j < dc_state->stream_count; j++) {
7261 if (vars[j].aconnector == aconnector) {
7267 vcpi = drm_dp_mst_atomic_enable_dsc(state,
7274 dm_conn_state->pbn = pbn;
7275 dm_conn_state->vcpi_slots = vcpi;
/*
 * drm_plane_funcs.reset: free any existing plane state and install a
 * fresh zeroed dm_plane_state.
 */
7281 static void dm_drm_plane_reset(struct drm_plane *plane)
7283 struct dm_plane_state *amdgpu_state = NULL;
7286 plane->funcs->atomic_destroy_state(plane, plane->state);
7288 amdgpu_state = kzalloc(sizeof(*amdgpu_state), GFP_KERNEL);
7289 WARN_ON(amdgpu_state == NULL);
7292 __drm_atomic_helper_plane_reset(plane, &amdgpu_state->base);
/*
 * drm_plane_funcs.atomic_duplicate_state: copy the base state and take
 * an extra reference on the shared DC plane state, if any.
 */
7295 static struct drm_plane_state *
7296 dm_drm_plane_duplicate_state(struct drm_plane *plane)
7298 struct dm_plane_state *dm_plane_state, *old_dm_plane_state;
7300 old_dm_plane_state = to_dm_plane_state(plane->state);
7301 dm_plane_state = kzalloc(sizeof(*dm_plane_state), GFP_KERNEL);
7302 if (!dm_plane_state)
7305 __drm_atomic_helper_plane_duplicate_state(plane, &dm_plane_state->base);
7307 if (old_dm_plane_state->dc_state) {
7308 dm_plane_state->dc_state = old_dm_plane_state->dc_state;
/* dc_state is refcounted; matching release is in destroy_state. */
7309 dc_plane_state_retain(dm_plane_state->dc_state);
7312 return &dm_plane_state->base;
/*
 * drm_plane_funcs.atomic_destroy_state: drop the DC plane state
 * reference taken in duplicate_state, then free the base state.
 */
7315 static void dm_drm_plane_destroy_state(struct drm_plane *plane,
7316 struct drm_plane_state *state)
7318 struct dm_plane_state *dm_plane_state = to_dm_plane_state(state);
7320 if (dm_plane_state->dc_state)
7321 dc_plane_state_release(dm_plane_state->dc_state);
7323 drm_atomic_helper_plane_destroy_state(plane, state);
/* Plane funcs vtable; update/disable use the generic atomic helpers. */
7326 static const struct drm_plane_funcs dm_plane_funcs = {
7327 .update_plane = drm_atomic_helper_update_plane,
7328 .disable_plane = drm_atomic_helper_disable_plane,
7329 .destroy = drm_primary_helper_destroy,
7330 .reset = dm_drm_plane_reset,
7331 .atomic_duplicate_state = dm_drm_plane_duplicate_state,
7332 .atomic_destroy_state = dm_drm_plane_destroy_state,
7333 .format_mod_supported = dm_plane_format_mod_supported,
/*
 * .prepare_fb hook: reserve and pin the framebuffer BO (VRAM for
 * cursors, display-supported domains otherwise), map it into GART, and
 * record its GPU address. On newly-created DC plane states, also fill
 * the buffer attributes (tiling, DCC, address) from the pinned BO.
 */
7336 static int dm_plane_helper_prepare_fb(struct drm_plane *plane,
7337 struct drm_plane_state *new_state)
7339 struct amdgpu_framebuffer *afb;
7340 struct drm_gem_object *obj;
7341 struct amdgpu_device *adev;
7342 struct amdgpu_bo *rbo;
7343 struct dm_plane_state *dm_plane_state_new, *dm_plane_state_old;
7344 struct list_head list;
7345 struct ttm_validate_buffer tv;
7346 struct ww_acquire_ctx ticket;
7350 if (!new_state->fb) {
7351 DRM_DEBUG_KMS("No FB bound\n");
7355 afb = to_amdgpu_framebuffer(new_state->fb);
7356 obj = new_state->fb->obj[0];
7357 rbo = gem_to_amdgpu_bo(obj);
7358 adev = amdgpu_ttm_adev(rbo->tbo.bdev);
7359 INIT_LIST_HEAD(&list);
7363 list_add(&tv.head, &list);
7365 r = ttm_eu_reserve_buffers(&ticket, &list, false, NULL);
7367 dev_err(adev->dev, "fail to reserve bo (%d)\n", r);
/* Cursors must live in VRAM; other planes can use any supported domain. */
7371 if (plane->type != DRM_PLANE_TYPE_CURSOR)
7372 domain = amdgpu_display_supported_domains(adev, rbo->flags);
7374 domain = AMDGPU_GEM_DOMAIN_VRAM;
7376 r = amdgpu_bo_pin(rbo, domain);
7377 if (unlikely(r != 0)) {
7378 if (r != -ERESTARTSYS)
7379 DRM_ERROR("Failed to pin framebuffer with error %d\n", r);
7380 ttm_eu_backoff_reservation(&ticket, &list);
7384 r = amdgpu_ttm_alloc_gart(&rbo->tbo);
7385 if (unlikely(r != 0)) {
/* Unwind the pin before backing off the reservation. */
7386 amdgpu_bo_unpin(rbo);
7387 ttm_eu_backoff_reservation(&ticket, &list);
7388 DRM_ERROR("%p bind failed\n", rbo);
7392 ttm_eu_backoff_reservation(&ticket, &list);
7394 afb->address = amdgpu_bo_gpu_offset(rbo);
7399 * We don't do surface updates on planes that have been newly created,
7400 * but we also don't have the afb->address during atomic check.
7402 * Fill in buffer attributes depending on the address here, but only on
7403 * newly created planes since they're not being used by DC yet and this
7404 * won't modify global state.
7406 dm_plane_state_old = to_dm_plane_state(plane->state);
7407 dm_plane_state_new = to_dm_plane_state(new_state);
7409 if (dm_plane_state_new->dc_state &&
7410 dm_plane_state_old->dc_state != dm_plane_state_new->dc_state) {
7411 struct dc_plane_state *plane_state =
7412 dm_plane_state_new->dc_state;
7413 bool force_disable_dcc = !plane_state->dcc.enable;
7415 fill_plane_buffer_attributes(
7416 adev, afb, plane_state->format, plane_state->rotation,
7418 &plane_state->tiling_info, &plane_state->plane_size,
7419 &plane_state->dcc, &plane_state->address,
7420 afb->tmz_surface, force_disable_dcc);
/*
 * .cleanup_fb hook: unpin the framebuffer BO pinned by prepare_fb and
 * drop the reference.
 */
7426 static void dm_plane_helper_cleanup_fb(struct drm_plane *plane,
7427 struct drm_plane_state *old_state)
7429 struct amdgpu_bo *rbo;
7435 rbo = gem_to_amdgpu_bo(old_state->fb->obj[0]);
7436 r = amdgpu_bo_reserve(rbo, false);
7438 DRM_ERROR("failed to reserve rbo before unpin\n");
7442 amdgpu_bo_unpin(rbo);
7443 amdgpu_bo_unreserve(rbo);
7444 amdgpu_bo_unref(&rbo);
/*
 * Validate a plane state's viewport against the CRTC mode and derive
 * the min/max scaling factors from DC plane caps, then delegate to
 * drm_atomic_helper_check_plane_state().
 */
7447 static int dm_plane_helper_check_state(struct drm_plane_state *state,
7448 struct drm_crtc_state *new_crtc_state)
7450 struct drm_framebuffer *fb = state->fb;
7451 int min_downscale, max_upscale;
7453 int max_scale = INT_MAX;
7455 /* Plane enabled? Validate viewport and get scaling factors from plane caps. */
7456 if (fb && state->crtc) {
7457 /* Validate viewport to cover the case when only the position changes */
7458 if (state->plane->type != DRM_PLANE_TYPE_CURSOR) {
7459 int viewport_width = state->crtc_w;
7460 int viewport_height = state->crtc_h;
/* Clip the viewport to the visible portion of the screen. */
7462 if (state->crtc_x < 0)
7463 viewport_width += state->crtc_x;
7464 else if (state->crtc_x + state->crtc_w > new_crtc_state->mode.crtc_hdisplay)
7465 viewport_width = new_crtc_state->mode.crtc_hdisplay - state->crtc_x;
7467 if (state->crtc_y < 0)
7468 viewport_height += state->crtc_y;
7469 else if (state->crtc_y + state->crtc_h > new_crtc_state->mode.crtc_vdisplay)
7470 viewport_height = new_crtc_state->mode.crtc_vdisplay - state->crtc_y;
7472 if (viewport_width < 0 || viewport_height < 0) {
7473 DRM_DEBUG_ATOMIC("Plane completely outside of screen\n");
7475 } else if (viewport_width < MIN_VIEWPORT_SIZE*2) { /* x2 for width is because of pipe-split. */
7476 DRM_DEBUG_ATOMIC("Viewport width %d smaller than %d\n", viewport_width, MIN_VIEWPORT_SIZE*2);
7478 } else if (viewport_height < MIN_VIEWPORT_SIZE) {
7479 DRM_DEBUG_ATOMIC("Viewport height %d smaller than %d\n", viewport_height, MIN_VIEWPORT_SIZE);
7485 /* Get min/max allowed scaling factors from plane caps. */
7486 get_min_max_dc_plane_scaling(state->crtc->dev, fb,
7487 &min_downscale, &max_upscale);
7489 * Convert to drm convention: 16.16 fixed point, instead of dc's
7490 * 1.0 == 1000. Also drm scaling is src/dst instead of dc's
7491 * dst/src, so min_scale = 1.0 / max_upscale, etc.
7493 min_scale = (1000 << 16) / max_upscale;
7494 max_scale = (1000 << 16) / min_downscale;
7497 return drm_atomic_helper_check_plane_state(
7498 state, new_crtc_state, min_scale, max_scale, true, true);
/*
 * drm_plane_helper_funcs.atomic_check: validate viewport/scaling via
 * dm_plane_helper_check_state(), fill the DC scaling info, then run
 * DC plane validation.
 */
7501 static int dm_plane_atomic_check(struct drm_plane *plane,
7502 struct drm_atomic_state *state)
7504 struct drm_plane_state *new_plane_state = drm_atomic_get_new_plane_state(state,
7506 struct amdgpu_device *adev = drm_to_adev(plane->dev);
7507 struct dc *dc = adev->dm.dc;
7508 struct dm_plane_state *dm_plane_state;
7509 struct dc_scaling_info scaling_info;
7510 struct drm_crtc_state *new_crtc_state;
7513 trace_amdgpu_dm_plane_atomic_check(new_plane_state);
7515 dm_plane_state = to_dm_plane_state(new_plane_state);
/* No DC state yet (e.g. plane not on a CRTC): nothing to validate. */
7517 if (!dm_plane_state->dc_state)
7521 drm_atomic_get_new_crtc_state(state,
7522 new_plane_state->crtc);
7523 if (!new_crtc_state)
7526 ret = dm_plane_helper_check_state(new_plane_state, new_crtc_state);
7530 ret = fill_dc_scaling_info(new_plane_state, &scaling_info);
7534 if (dc_validate_plane(dc, dm_plane_state->dc_state) == DC_OK)
/* Async plane updates are permitted only for the cursor plane. */
7540 static int dm_plane_atomic_async_check(struct drm_plane *plane,
7541 struct drm_atomic_state *state)
7543 /* Only support async updates on cursor planes. */
7544 if (plane->type != DRM_PLANE_TYPE_CURSOR)
/*
 * Async (cursor) update: copy the new fb and position/size into the
 * live plane state, then push the cursor update to hardware.
 */
7550 static void dm_plane_atomic_async_update(struct drm_plane *plane,
7551 struct drm_atomic_state *state)
7553 struct drm_plane_state *new_state = drm_atomic_get_new_plane_state(state,
7555 struct drm_plane_state *old_state =
7556 drm_atomic_get_old_plane_state(state, plane);
7558 trace_amdgpu_dm_atomic_update_cursor(new_state);
/* swap() keeps fb refcounting balanced between the two states. */
7560 swap(plane->state->fb, new_state->fb);
7562 plane->state->src_x = new_state->src_x;
7563 plane->state->src_y = new_state->src_y;
7564 plane->state->src_w = new_state->src_w;
7565 plane->state->src_h = new_state->src_h;
7566 plane->state->crtc_x = new_state->crtc_x;
7567 plane->state->crtc_y = new_state->crtc_y;
7568 plane->state->crtc_w = new_state->crtc_w;
7569 plane->state->crtc_h = new_state->crtc_h;
7571 handle_cursor_update(plane, old_state);
/* Plane helper vtable. */
7574 static const struct drm_plane_helper_funcs dm_plane_helper_funcs = {
7575 .prepare_fb = dm_plane_helper_prepare_fb,
7576 .cleanup_fb = dm_plane_helper_cleanup_fb,
7577 .atomic_check = dm_plane_atomic_check,
7578 .atomic_async_check = dm_plane_atomic_async_check,
7579 .atomic_async_update = dm_plane_atomic_async_update
7583 * TODO: these are currently initialized to rgb formats only.
7584 * For future use cases we should either initialize them dynamically based on
7585 * plane capabilities, or initialize this array to all formats, so internal drm
7586 * check will succeed, and let DC implement proper check
/* Pixel formats advertised for primary planes (YUV added per caps later). */
7588 static const uint32_t rgb_formats[] = {
7589 DRM_FORMAT_XRGB8888,
7590 DRM_FORMAT_ARGB8888,
7591 DRM_FORMAT_RGBA8888,
7592 DRM_FORMAT_XRGB2101010,
7593 DRM_FORMAT_XBGR2101010,
7594 DRM_FORMAT_ARGB2101010,
7595 DRM_FORMAT_ABGR2101010,
7596 DRM_FORMAT_XRGB16161616,
7597 DRM_FORMAT_XBGR16161616,
7598 DRM_FORMAT_ARGB16161616,
7599 DRM_FORMAT_ABGR16161616,
7600 DRM_FORMAT_XBGR8888,
7601 DRM_FORMAT_ABGR8888,
/* Pixel formats advertised for overlay planes. */
7605 static const uint32_t overlay_formats[] = {
7606 DRM_FORMAT_XRGB8888,
7607 DRM_FORMAT_ARGB8888,
7608 DRM_FORMAT_RGBA8888,
7609 DRM_FORMAT_XBGR8888,
7610 DRM_FORMAT_ABGR8888,
/* Pixel formats for the cursor plane (entries elided in this chunk). */
7614 static const u32 cursor_formats[] = {
/*
 * Populate @formats (capacity @max_formats) with the pixel formats
 * supported for @plane's type, consulting @plane_cap for optional
 * YUV/FP16 support on primary planes. Returns the number of entries
 * written.
 */
7618 static int get_plane_formats(const struct drm_plane *plane,
7619 const struct dc_plane_cap *plane_cap,
7620 uint32_t *formats, int max_formats)
7622 int i, num_formats = 0;
7625 * TODO: Query support for each group of formats directly from
7626 * DC plane caps. This will require adding more formats to the
7630 switch (plane->type) {
7631 case DRM_PLANE_TYPE_PRIMARY:
7632 for (i = 0; i < ARRAY_SIZE(rgb_formats); ++i) {
7633 if (num_formats >= max_formats)
7636 formats[num_formats++] = rgb_formats[i];
/* Optional YUV/FP16 formats gated on DC plane capabilities. */
7639 if (plane_cap && plane_cap->pixel_format_support.nv12)
7640 formats[num_formats++] = DRM_FORMAT_NV12;
7641 if (plane_cap && plane_cap->pixel_format_support.p010)
7642 formats[num_formats++] = DRM_FORMAT_P010;
7643 if (plane_cap && plane_cap->pixel_format_support.fp16) {
7644 formats[num_formats++] = DRM_FORMAT_XRGB16161616F;
7645 formats[num_formats++] = DRM_FORMAT_ARGB16161616F;
7646 formats[num_formats++] = DRM_FORMAT_XBGR16161616F;
7647 formats[num_formats++] = DRM_FORMAT_ABGR16161616F;
7651 case DRM_PLANE_TYPE_OVERLAY:
7652 for (i = 0; i < ARRAY_SIZE(overlay_formats); ++i) {
7653 if (num_formats >= max_formats)
7656 formats[num_formats++] = overlay_formats[i];
7660 case DRM_PLANE_TYPE_CURSOR:
7661 for (i = 0; i < ARRAY_SIZE(cursor_formats); ++i) {
7662 if (num_formats >= max_formats)
7665 formats[num_formats++] = cursor_formats[i];
/*
 * Initialize a DRM plane: gather formats and modifiers, register the
 * universal plane, and attach alpha/blend, color-encoding and rotation
 * properties as supported by the plane type and DC caps.
 */
7673 static int amdgpu_dm_plane_init(struct amdgpu_display_manager *dm,
7674 struct drm_plane *plane,
7675 unsigned long possible_crtcs,
7676 const struct dc_plane_cap *plane_cap)
7678 uint32_t formats[32];
7681 unsigned int supported_rotations;
7682 uint64_t *modifiers = NULL;
7684 num_formats = get_plane_formats(plane, plane_cap, formats,
7685 ARRAY_SIZE(formats));
7687 res = get_plane_modifiers(dm->adev, plane->type, &modifiers);
7691 res = drm_universal_plane_init(adev_to_drm(dm->adev), plane, possible_crtcs,
7692 &dm_plane_funcs, formats, num_formats,
7693 modifiers, plane->type, NULL);
/* Per-pixel alpha blending only on capable overlay planes. */
7698 if (plane->type == DRM_PLANE_TYPE_OVERLAY &&
7699 plane_cap && plane_cap->per_pixel_alpha) {
7700 unsigned int blend_caps = BIT(DRM_MODE_BLEND_PIXEL_NONE) |
7701 BIT(DRM_MODE_BLEND_PREMULTI);
7703 drm_plane_create_alpha_property(plane);
7704 drm_plane_create_blend_mode_property(plane, blend_caps);
7707 if (plane->type == DRM_PLANE_TYPE_PRIMARY &&
7709 (plane_cap->pixel_format_support.nv12 ||
7710 plane_cap->pixel_format_support.p010)) {
7711 /* This only affects YUV formats. */
7712 drm_plane_create_color_properties(
7714 BIT(DRM_COLOR_YCBCR_BT601) |
7715 BIT(DRM_COLOR_YCBCR_BT709) |
7716 BIT(DRM_COLOR_YCBCR_BT2020),
7717 BIT(DRM_COLOR_YCBCR_LIMITED_RANGE) |
7718 BIT(DRM_COLOR_YCBCR_FULL_RANGE),
7719 DRM_COLOR_YCBCR_BT709, DRM_COLOR_YCBCR_LIMITED_RANGE);
7722 supported_rotations =
7723 DRM_MODE_ROTATE_0 | DRM_MODE_ROTATE_90 |
7724 DRM_MODE_ROTATE_180 | DRM_MODE_ROTATE_270;
/* Rotation is exposed on CIK+ for non-cursor planes. */
7726 if (dm->adev->asic_type >= CHIP_BONAIRE &&
7727 plane->type != DRM_PLANE_TYPE_CURSOR)
7728 drm_plane_create_rotation_property(plane, DRM_MODE_ROTATE_0,
7729 supported_rotations);
7731 drm_plane_helper_add(plane, &dm_plane_helper_funcs);
7733 /* Create (reset) the plane state */
7734 if (plane->funcs->reset)
7735 plane->funcs->reset(plane);
/*
 * Create and register one CRTC with its dedicated cursor plane, attach
 * the helper funcs, reset its state, and enable color management
 * (degamma/CTM/gamma LUTs plus legacy gamma).
 */
7740 static int amdgpu_dm_crtc_init(struct amdgpu_display_manager *dm,
7741 struct drm_plane *plane,
7742 uint32_t crtc_index)
7744 struct amdgpu_crtc *acrtc = NULL;
7745 struct drm_plane *cursor_plane;
7749 cursor_plane = kzalloc(sizeof(*cursor_plane), GFP_KERNEL);
7753 cursor_plane->type = DRM_PLANE_TYPE_CURSOR;
7754 res = amdgpu_dm_plane_init(dm, cursor_plane, 0, NULL);
7756 acrtc = kzalloc(sizeof(struct amdgpu_crtc), GFP_KERNEL);
7760 res = drm_crtc_init_with_planes(
7765 &amdgpu_dm_crtc_funcs, NULL);
7770 drm_crtc_helper_add(&acrtc->base, &amdgpu_dm_crtc_helper_funcs);
7772 /* Create (reset) the plane state */
7773 if (acrtc->base.funcs->reset)
7774 acrtc->base.funcs->reset(&acrtc->base);
7776 acrtc->max_cursor_width = dm->adev->dm.dc->caps.max_cursor_size;
7777 acrtc->max_cursor_height = dm->adev->dm.dc->caps.max_cursor_size;
7779 acrtc->crtc_id = crtc_index;
7780 acrtc->base.enabled = false;
/* No OTG assigned until a stream is committed. */
7781 acrtc->otg_inst = -1;
7783 dm->adev->mode_info.crtcs[crtc_index] = acrtc;
7784 drm_crtc_enable_color_mgmt(&acrtc->base, MAX_COLOR_LUT_ENTRIES,
7785 true, MAX_COLOR_LUT_ENTRIES);
7786 drm_mode_crtc_set_gamma_size(&acrtc->base, MAX_COLOR_LEGACY_LUT_ENTRIES);
/* Error path: free the cursor plane allocated above. */
7792 kfree(cursor_plane);
/*
 * Map a DC signal type to the corresponding DRM connector type;
 * unknown signals map to DRM_MODE_CONNECTOR_Unknown.
 */
7797 static int to_drm_connector_type(enum signal_type st)
7800 case SIGNAL_TYPE_HDMI_TYPE_A:
7801 return DRM_MODE_CONNECTOR_HDMIA;
7802 case SIGNAL_TYPE_EDP:
7803 return DRM_MODE_CONNECTOR_eDP;
7804 case SIGNAL_TYPE_LVDS:
7805 return DRM_MODE_CONNECTOR_LVDS;
7806 case SIGNAL_TYPE_RGB:
7807 return DRM_MODE_CONNECTOR_VGA;
7808 case SIGNAL_TYPE_DISPLAY_PORT:
7809 case SIGNAL_TYPE_DISPLAY_PORT_MST:
7810 return DRM_MODE_CONNECTOR_DisplayPort;
7811 case SIGNAL_TYPE_DVI_DUAL_LINK:
7812 case SIGNAL_TYPE_DVI_SINGLE_LINK:
7813 return DRM_MODE_CONNECTOR_DVID;
7814 case SIGNAL_TYPE_VIRTUAL:
7815 return DRM_MODE_CONNECTOR_VIRTUAL;
7818 return DRM_MODE_CONNECTOR_Unknown;
/* Return the connector's single possible encoder (1:1 mapping in DM). */
7822 static struct drm_encoder *amdgpu_dm_connector_to_encoder(struct drm_connector *connector)
7824 struct drm_encoder *encoder;
7826 /* There is only one encoder per connector */
7827 drm_connector_for_each_possible_encoder(connector, encoder)
/*
 * Cache the connector's preferred probed mode as the encoder's native
 * mode (native_mode.clock stays 0 when no preferred mode exists).
 */
7833 static void amdgpu_dm_get_native_mode(struct drm_connector *connector)
7835 struct drm_encoder *encoder;
7836 struct amdgpu_encoder *amdgpu_encoder;
7838 encoder = amdgpu_dm_connector_to_encoder(connector);
7840 if (encoder == NULL)
7843 amdgpu_encoder = to_amdgpu_encoder(encoder);
7845 amdgpu_encoder->native_mode.clock = 0;
7847 if (!list_empty(&connector->probed_modes)) {
7848 struct drm_display_mode *preferred_mode = NULL;
7850 list_for_each_entry(preferred_mode,
7851 &connector->probed_modes,
7853 if (preferred_mode->type & DRM_MODE_TYPE_PREFERRED)
7854 amdgpu_encoder->native_mode = *preferred_mode;
/*
 * Duplicate the encoder's native mode with the given name and
 * hdisplay/vdisplay overrides; the PREFERRED flag is cleared so the
 * native mode stays preferred. Returns NULL on allocation failure
 * (elided in this chunk).
 */
7862 static struct drm_display_mode *
7863 amdgpu_dm_create_common_mode(struct drm_encoder *encoder,
7865 int hdisplay, int vdisplay)
7867 struct drm_device *dev = encoder->dev;
7868 struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
7869 struct drm_display_mode *mode = NULL;
7870 struct drm_display_mode *native_mode = &amdgpu_encoder->native_mode;
7872 mode = drm_mode_duplicate(dev, native_mode);
7877 mode->hdisplay = hdisplay;
7878 mode->vdisplay = vdisplay;
7879 mode->type &= ~DRM_MODE_TYPE_PREFERRED;
7880 strscpy(mode->name, name, DRM_DISPLAY_MODE_LEN);
/*
 * Add a fixed table of common resolutions (derived from the native
 * mode) to the connector's probed-mode list, skipping resolutions that
 * exceed or equal the native mode or that already exist in the list.
 */
7886 static void amdgpu_dm_connector_add_common_modes(struct drm_encoder *encoder,
7887 struct drm_connector *connector)
7889 struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
7890 struct drm_display_mode *mode = NULL;
7891 struct drm_display_mode *native_mode = &amdgpu_encoder->native_mode;
7892 struct amdgpu_dm_connector *amdgpu_dm_connector =
7893 to_amdgpu_dm_connector(connector);
7897 char name[DRM_DISPLAY_MODE_LEN];
7900 } common_modes[] = {
7901 { "640x480", 640, 480},
7902 { "800x600", 800, 600},
7903 { "1024x768", 1024, 768},
7904 { "1280x720", 1280, 720},
7905 { "1280x800", 1280, 800},
7906 {"1280x1024", 1280, 1024},
7907 { "1440x900", 1440, 900},
7908 {"1680x1050", 1680, 1050},
7909 {"1600x1200", 1600, 1200},
7910 {"1920x1080", 1920, 1080},
7911 {"1920x1200", 1920, 1200}
7914 n = ARRAY_SIZE(common_modes);
7916 for (i = 0; i < n; i++) {
7917 struct drm_display_mode *curmode = NULL;
7918 bool mode_existed = false;
/* Skip modes larger than (or identical to) the native mode. */
7920 if (common_modes[i].w > native_mode->hdisplay ||
7921 common_modes[i].h > native_mode->vdisplay ||
7922 (common_modes[i].w == native_mode->hdisplay &&
7923 common_modes[i].h == native_mode->vdisplay))
/* Skip modes already present in the probed list. */
7926 list_for_each_entry(curmode, &connector->probed_modes, head) {
7927 if (common_modes[i].w == curmode->hdisplay &&
7928 common_modes[i].h == curmode->vdisplay) {
7929 mode_existed = true;
7937 mode = amdgpu_dm_create_common_mode(encoder,
7938 common_modes[i].name, common_modes[i].w,
7943 drm_mode_probed_add(connector, mode);
7944 amdgpu_dm_connector->num_modes++;
/*
 * For internal panels (eDP/LVDS), apply any panel-orientation quirk
 * keyed on the native mode's resolution. Requires a valid native mode.
 */
7948 static void amdgpu_set_panel_orientation(struct drm_connector *connector)
7950 struct drm_encoder *encoder;
7951 struct amdgpu_encoder *amdgpu_encoder;
7952 const struct drm_display_mode *native_mode;
7954 if (connector->connector_type != DRM_MODE_CONNECTOR_eDP &&
7955 connector->connector_type != DRM_MODE_CONNECTOR_LVDS)
7958 encoder = amdgpu_dm_connector_to_encoder(connector);
7962 amdgpu_encoder = to_amdgpu_encoder(encoder);
7964 native_mode = &amdgpu_encoder->native_mode;
7965 if (native_mode->hdisplay == 0 || native_mode->vdisplay == 0)
7968 drm_connector_set_panel_orientation_with_quirk(connector,
7969 DRM_MODE_PANEL_ORIENTATION_UNKNOWN,
7970 native_mode->hdisplay,
7971 native_mode->vdisplay);
/*
 * Rebuild the connector's probed-mode list from an EDID: add EDID
 * modes, sort them, cache the native mode, refresh FreeSync caps and
 * apply the panel-orientation quirk. With no EDID, num_modes is zeroed.
 */
7974 static void amdgpu_dm_connector_ddc_get_modes(struct drm_connector *connector,
7977 struct amdgpu_dm_connector *amdgpu_dm_connector =
7978 to_amdgpu_dm_connector(connector);
7981 /* empty probed_modes */
7982 INIT_LIST_HEAD(&connector->probed_modes);
7983 amdgpu_dm_connector->num_modes =
7984 drm_add_edid_modes(connector, edid);
7986 /* sorting the probed modes before calling function
7987 * amdgpu_dm_get_native_mode() since EDID can have
7988 * more than one preferred mode. The modes that are
7989 * later in the probed mode list could be of higher
7990 * and preferred resolution. For example, 3840x2160
7991 * resolution in base EDID preferred timing and 4096x2160
7992 * preferred resolution in DID extension block later.
7994 drm_mode_sort(&connector->probed_modes);
7995 amdgpu_dm_get_native_mode(connector);
7997 /* Freesync capabilities are reset by calling
7998 * drm_add_edid_modes() and need to be
8001 amdgpu_dm_update_freesync_caps(connector, edid);
8003 amdgpu_set_panel_orientation(connector);
/* No EDID: report zero modes. */
8005 amdgpu_dm_connector->num_modes = 0;
/* Check whether @mode already exists in the connector's probed list. */
8009 static bool is_duplicate_mode(struct amdgpu_dm_connector *aconnector,
8010 struct drm_display_mode *mode)
8012 struct drm_display_mode *m;
8014 list_for_each_entry (m, &aconnector->base.probed_modes, head) {
8015 if (drm_mode_equal(m, mode))
/*
 * Synthesize FreeSync video modes: for each standard refresh rate
 * within the panel's VRR range, duplicate the highest-refresh mode at
 * the preferred resolution and stretch its vtotal to hit the target
 * rate. Returns the number of modes added.
 */
8022 static uint add_fs_modes(struct amdgpu_dm_connector *aconnector)
8024 const struct drm_display_mode *m;
8025 struct drm_display_mode *new_mode;
8027 uint32_t new_modes_count = 0;
8029 /* Standard FPS values
8038 * 60 - Commonly used
8039 * 48,72,96 - Multiples of 24
8041 static const uint32_t common_rates[] = {
8042 23976, 24000, 25000, 29970, 30000,
8043 48000, 50000, 60000, 72000, 96000
8047 * Find mode with highest refresh rate with the same resolution
8048 * as the preferred mode. Some monitors report a preferred mode
8049 * with lower resolution than the highest refresh rate supported.
8052 m = get_highest_refresh_rate_mode(aconnector, true);
8056 for (i = 0; i < ARRAY_SIZE(common_rates); i++) {
8057 uint64_t target_vtotal, target_vtotal_diff;
/* Only synthesize rates at or below the base mode's refresh. */
8060 if (drm_mode_vrefresh(m) * 1000 < common_rates[i])
/* Target must fall inside the panel's VRR range (rates are mHz). */
8063 if (common_rates[i] < aconnector->min_vfreq * 1000 ||
8064 common_rates[i] > aconnector->max_vfreq * 1000)
/* vtotal needed so that clock / (htotal * vtotal) == target rate. */
8067 num = (unsigned long long)m->clock * 1000 * 1000;
8068 den = common_rates[i] * (unsigned long long)m->htotal;
8069 target_vtotal = div_u64(num, den);
8070 target_vtotal_diff = target_vtotal - m->vtotal;
8072 /* Check for illegal modes */
8073 if (m->vsync_start + target_vtotal_diff < m->vdisplay ||
8074 m->vsync_end + target_vtotal_diff < m->vsync_start ||
8075 m->vtotal + target_vtotal_diff < m->vsync_end)
8078 new_mode = drm_mode_duplicate(aconnector->base.dev, m);
8082 new_mode->vtotal += (u16)target_vtotal_diff;
8083 new_mode->vsync_start += (u16)target_vtotal_diff;
8084 new_mode->vsync_end += (u16)target_vtotal_diff;
8085 new_mode->type &= ~DRM_MODE_TYPE_PREFERRED;
8086 new_mode->type |= DRM_MODE_TYPE_DRIVER;
8088 if (!is_duplicate_mode(aconnector, new_mode)) {
8089 drm_mode_probed_add(&aconnector->base, new_mode);
8090 new_modes_count += 1;
8092 drm_mode_destroy(aconnector->base.dev, new_mode);
8095 return new_modes_count;
/*
 * Add FreeSync video modes when the feature is enabled, an EDID is
 * present, and the panel's VRR range spans more than 10 Hz.
 */
8098 static void amdgpu_dm_connector_add_freesync_modes(struct drm_connector *connector,
8101 struct amdgpu_dm_connector *amdgpu_dm_connector =
8102 to_amdgpu_dm_connector(connector);
8104 if (!(amdgpu_freesync_vid_mode && edid))
8107 if (amdgpu_dm_connector->max_vfreq - amdgpu_dm_connector->min_vfreq > 10)
8108 amdgpu_dm_connector->num_modes +=
8109 add_fs_modes(amdgpu_dm_connector);
/*
 * drm_connector_helper_funcs.get_modes: build the mode list from the
 * cached EDID (plus common and FreeSync modes), or fall back to the
 * no-EDID 640x480 set when the EDID is invalid. Returns the mode count.
 */
8112 static int amdgpu_dm_connector_get_modes(struct drm_connector *connector)
8114 struct amdgpu_dm_connector *amdgpu_dm_connector =
8115 to_amdgpu_dm_connector(connector);
8116 struct drm_encoder *encoder;
8117 struct edid *edid = amdgpu_dm_connector->edid;
8119 encoder = amdgpu_dm_connector_to_encoder(connector);
8121 if (!drm_edid_is_valid(edid)) {
8122 amdgpu_dm_connector->num_modes =
8123 drm_add_modes_noedid(connector, 640, 480);
8125 amdgpu_dm_connector_ddc_get_modes(connector, edid);
8126 amdgpu_dm_connector_add_common_modes(encoder, connector);
8127 amdgpu_dm_connector_add_freesync_modes(connector, edid);
8129 amdgpu_dm_fbc_init(connector);
8131 return amdgpu_dm_connector->num_modes;
/*
 * Common initialization for a DM connector: ties it to its dc_link,
 * configures HPD/polling behaviour per connector type and attaches the
 * DRM properties this driver exposes (scaling, underscan, max bpc, ABM,
 * HDR output metadata, VRR capability, content protection).
 */
void amdgpu_dm_connector_init_helper(struct amdgpu_display_manager *dm,
				     struct amdgpu_dm_connector *aconnector,
				     struct dc_link *link,
	struct amdgpu_device *adev = drm_to_adev(dm->ddev);

	/*
	 * Some of the properties below require access to state, like bpc.
	 * Allocate some default initial connector state with our reset helper.
	 */
	if (aconnector->base.funcs->reset)
		aconnector->base.funcs->reset(&aconnector->base);

	aconnector->connector_id = link_index;
	aconnector->dc_link = link;
	aconnector->base.interlace_allowed = false;
	aconnector->base.doublescan_allowed = false;
	aconnector->base.stereo_allowed = false;
	aconnector->base.dpms = DRM_MODE_DPMS_OFF;
	aconnector->hpd.hpd = AMDGPU_HPD_NONE; /* not used */
	aconnector->audio_inst = -1;
	/* Serializes HPD handling against detection/mode enumeration. */
	mutex_init(&aconnector->hpd_lock);

	/*
	 * configure support HPD hot plug connector_>polled default value is 0
	 * which means HPD hot plug not supported
	 */
	switch (connector_type) {
	case DRM_MODE_CONNECTOR_HDMIA:
		aconnector->base.polled = DRM_CONNECTOR_POLL_HPD;
		/* YCbCr 4:2:0 only when the link encoder supports it. */
		aconnector->base.ycbcr_420_allowed =
			link->link_enc->features.hdmi_ycbcr420_supported ? true : false;
	case DRM_MODE_CONNECTOR_DisplayPort:
		aconnector->base.polled = DRM_CONNECTOR_POLL_HPD;
		aconnector->base.ycbcr_420_allowed =
			link->link_enc->features.dp_ycbcr420_supported ? true : false;
	case DRM_MODE_CONNECTOR_DVID:
		aconnector->base.polled = DRM_CONNECTOR_POLL_HPD;

	drm_object_attach_property(&aconnector->base.base,
				dm->ddev->mode_config.scaling_mode_property,
				DRM_MODE_SCALE_NONE);

	drm_object_attach_property(&aconnector->base.base,
				adev->mode_info.underscan_property,
	drm_object_attach_property(&aconnector->base.base,
				adev->mode_info.underscan_hborder_property,
	drm_object_attach_property(&aconnector->base.base,
				adev->mode_info.underscan_vborder_property,

	/* MST connectors get their max bpc property from the MST port. */
	if (!aconnector->mst_port)
		drm_connector_attach_max_bpc_property(&aconnector->base, 8, 16);

	/* This defaults to the max in the range, but we want 8bpc for non-edp. */
	aconnector->base.state->max_bpc = (connector_type == DRM_MODE_CONNECTOR_eDP) ? 16 : 8;
	aconnector->base.state->max_requested_bpc = aconnector->base.state->max_bpc;

	/* ABM level is only exposed on eDP when DMCU or DMUB is available. */
	if (connector_type == DRM_MODE_CONNECTOR_eDP &&
	    (dc_is_dmcu_initialized(adev->dm.dc) || adev->dm.dc->ctx->dmub_srv)) {
		drm_object_attach_property(&aconnector->base.base,
				adev->mode_info.abm_level_property, 0);

	/* HDR metadata / VRR / HDCP apply to HDMI, DP and eDP sinks. */
	if (connector_type == DRM_MODE_CONNECTOR_HDMIA ||
	    connector_type == DRM_MODE_CONNECTOR_DisplayPort ||
	    connector_type == DRM_MODE_CONNECTOR_eDP) {
		drm_connector_attach_hdr_output_metadata_property(&aconnector->base);

		if (!aconnector->mst_port)
			drm_connector_attach_vrr_capable_property(&aconnector->base);

#ifdef CONFIG_DRM_AMD_DC_HDCP
		if (adev->dm.hdcp_workqueue)
			drm_connector_attach_content_protection_property(&aconnector->base, true);
/*
 * i2c_algorithm.master_xfer hook: translate the i2c message array into
 * a DC i2c_command and submit it through the link's DDC service.
 * Returns the number of messages transferred on success, negative errno
 * on failure (per the i2c master_xfer contract).
 */
static int amdgpu_dm_i2c_xfer(struct i2c_adapter *i2c_adap,
			      struct i2c_msg *msgs, int num)
	struct amdgpu_i2c_adapter *i2c = i2c_get_adapdata(i2c_adap);
	struct ddc_service *ddc_service = i2c->ddc_service;
	struct i2c_command cmd;

	/* One DC payload per i2c message. */
	cmd.payloads = kcalloc(num, sizeof(struct i2c_payload), GFP_KERNEL);

	cmd.number_of_payloads = num;
	cmd.engine = I2C_COMMAND_ENGINE_DEFAULT;

	for (i = 0; i < num; i++) {
		/* DC uses a "write" flag; I2C_M_RD marks reads. */
		cmd.payloads[i].write = !(msgs[i].flags & I2C_M_RD);
		cmd.payloads[i].address = msgs[i].addr;
		cmd.payloads[i].length = msgs[i].len;
		cmd.payloads[i].data = msgs[i].buf;

			ddc_service->ctx->dc,
			ddc_service->ddc_pin->hw_info.ddc_channel,

	kfree(cmd.payloads);
8258 static u32 amdgpu_dm_i2c_func(struct i2c_adapter *adap)
8260 return I2C_FUNC_I2C | I2C_FUNC_SMBUS_EMUL;
/* i2c algorithm backed by DC's hardware DDC engine. */
static const struct i2c_algorithm amdgpu_dm_i2c_algo = {
	.master_xfer = amdgpu_dm_i2c_xfer,	/* transfers via dc_submit_i2c */
	.functionality = amdgpu_dm_i2c_func,	/* advertised capabilities */
/*
 * Allocate and initialize an i2c adapter that wraps a DC DDC service,
 * so userspace/DRM can issue i2c transactions over the link's DDC line.
 * The caller owns the returned allocation.
 */
static struct amdgpu_i2c_adapter *
create_i2c(struct ddc_service *ddc_service,
	struct amdgpu_device *adev = ddc_service->ctx->driver_context;
	struct amdgpu_i2c_adapter *i2c;

	i2c = kzalloc(sizeof(struct amdgpu_i2c_adapter), GFP_KERNEL);
	i2c->base.owner = THIS_MODULE;
	i2c->base.class = I2C_CLASS_DDC;
	i2c->base.dev.parent = &adev->pdev->dev;
	i2c->base.algo = &amdgpu_dm_i2c_algo;
	snprintf(i2c->base.name, sizeof(i2c->base.name), "AMDGPU DM i2c hw bus %d", link_index);
	/* Let amdgpu_dm_i2c_xfer() recover this wrapper from the adapter. */
	i2c_set_adapdata(&i2c->base, i2c);
	i2c->ddc_service = ddc_service;
	/* Record which DDC channel this adapter drives. */
	i2c->ddc_service->ddc_pin->hw_info.ddc_channel = link_index;
/*
 * Create and register the DRM connector for a dc_link: i2c adapter,
 * connector object, helper funcs, DM properties and encoder attachment.
 *
 * Note: this function assumes that dc_link_detect() was called for the
 * dc_link which will be represented by this aconnector.
 */
static int amdgpu_dm_connector_init(struct amdgpu_display_manager *dm,
				    struct amdgpu_dm_connector *aconnector,
				    uint32_t link_index,
				    struct amdgpu_encoder *aencoder)
	struct dc *dc = dm->dc;
	struct dc_link *link = dc_get_link_at_index(dc, link_index);
	struct amdgpu_i2c_adapter *i2c;

	/* Let the dc_link find its connector back. */
	link->priv = aconnector;

	DRM_DEBUG_DRIVER("%s()\n", __func__);

	/* The connector owns an i2c adapter bridging to DC's DDC engine. */
	i2c = create_i2c(link->ddc, link->link_index, &res);
		DRM_ERROR("Failed to create i2c adapter data\n");

	aconnector->i2c = i2c;
	res = i2c_add_adapter(&i2c->base);
		DRM_ERROR("Failed to register hw i2c %d\n", link->link_index);

	connector_type = to_drm_connector_type(link->connector_signal);

	res = drm_connector_init_with_ddc(
			&amdgpu_dm_connector_funcs,
		DRM_ERROR("connector_init failed\n");
		aconnector->connector_id = -1;

	drm_connector_helper_add(
		&amdgpu_dm_connector_helper_funcs);

	amdgpu_dm_connector_init_helper(

	drm_connector_attach_encoder(
		&aconnector->base, &aencoder->base);

	/* DP/eDP links additionally need DP/MST-specific setup. */
	if (connector_type == DRM_MODE_CONNECTOR_DisplayPort
		|| connector_type == DRM_MODE_CONNECTOR_eDP)
		amdgpu_dm_initialize_dp_connector(dm, aconnector, link->link_index);

	/* Error path: drop the stale adapter pointer
	 * (NOTE(review): presumably after freeing it — confirm cleanup order).
	 */
	aconnector->i2c = NULL;
/*
 * Build the encoder possible_crtcs bitmask from the number of CRTCs the
 * device exposes.
 */
int amdgpu_dm_get_encoder_crtc_mask(struct amdgpu_device *adev)
	switch (adev->mode_info.num_crtc) {
/*
 * Create and register the DRM encoder for a link and hook up its helper
 * funcs. Returns the drm_encoder_init() result (0 on success).
 */
static int amdgpu_dm_encoder_init(struct drm_device *dev,
				  struct amdgpu_encoder *aencoder,
				  uint32_t link_index)
	struct amdgpu_device *adev = drm_to_adev(dev);

	int res = drm_encoder_init(dev,
				   &amdgpu_dm_encoder_funcs,
				   DRM_MODE_ENCODER_TMDS,
	aencoder->base.possible_crtcs = amdgpu_dm_get_encoder_crtc_mask(adev);

		/* encoder_id records the link on success, -1 on failure. */
		aencoder->encoder_id = link_index;
		aencoder->encoder_id = -1;

	drm_encoder_helper_add(&aencoder->base, &amdgpu_dm_encoder_helper_funcs);
/*
 * Enable or disable the per-CRTC interrupts (vblank, pageflip and — when
 * secure display is built in — additional sources) around CRTC
 * enable/disable transitions.
 */
static void manage_dm_interrupts(struct amdgpu_device *adev,
				 struct amdgpu_crtc *acrtc,
	/*
	 * We have no guarantee that the frontend index maps to the same
	 * backend index - some even map to more than one.
	 *
	 * TODO: Use a different interrupt or check DC itself for the mapping.
	 */
		amdgpu_display_crtc_idx_to_irq_type(
		/* Enable path: turn vblank handling back on for the CRTC. */
		drm_crtc_vblank_on(&acrtc->base);
			&adev->pageflip_irq,
#if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
#if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
			&adev->pageflip_irq,
		/* Disable path: quiesce vblank handling for the CRTC. */
		drm_crtc_vblank_off(&acrtc->base);
/*
 * Force the pageflip IRQ enable state for @acrtc to be re-applied to
 * hardware (needed because power gating can drop the enablement state).
 */
static void dm_update_pflip_irq_state(struct amdgpu_device *adev,
				      struct amdgpu_crtc *acrtc)
	amdgpu_display_crtc_idx_to_irq_type(adev, acrtc->crtc_id);

	/*
	 * This reads the current state for the IRQ and force reapplies
	 * the setting to hardware.
	 */
	amdgpu_irq_update(adev, &adev->pageflip_irq, irq_type);
/*
 * Report whether the scaling/underscan configuration changed between the
 * old and new DM connector state in a way that requires a stream update.
 * Underscan enable/disable only matters when borders are actually used.
 */
is_scaling_state_different(const struct dm_connector_state *dm_state,
			   const struct dm_connector_state *old_dm_state)
	if (dm_state->scaling != old_dm_state->scaling)
	/* Underscan toggled off / on, or border sizes changed: */
	if (!dm_state->underscan_enable && old_dm_state->underscan_enable) {
		if (old_dm_state->underscan_hborder != 0 && old_dm_state->underscan_vborder != 0)
	} else if (dm_state->underscan_enable && !old_dm_state->underscan_enable) {
		if (dm_state->underscan_hborder != 0 && dm_state->underscan_vborder != 0)
	} else if (dm_state->underscan_hborder != old_dm_state->underscan_hborder ||
		   dm_state->underscan_vborder != old_dm_state->underscan_vborder)
8482 #ifdef CONFIG_DRM_AMD_DC_HDCP
/*
 * Decide whether the desired HDCP (content protection) state differs from
 * the current one enough to require (re)enabling or disabling HDCP.
 * Also normalizes transient DRM CP states in @state (e.g. S3 resume,
 * ENABLED -> DESIRED re-enable) before comparing.
 */
static bool is_content_protection_different(struct drm_connector_state *state,
					    const struct drm_connector_state *old_state,
					    const struct drm_connector *connector, struct hdcp_workqueue *hdcp_w)
	struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
	struct dm_connector_state *dm_con_state = to_dm_connector_state(connector->state);

	/* Handle: Type0/1 change */
	if (old_state->hdcp_content_type != state->hdcp_content_type &&
	    state->content_protection != DRM_MODE_CONTENT_PROTECTION_UNDESIRED) {
		state->content_protection = DRM_MODE_CONTENT_PROTECTION_DESIRED;

	/* CP is being re enabled, ignore this.
	 *
	 * Handles: ENABLED -> DESIRED
	 */
	if (old_state->content_protection == DRM_MODE_CONTENT_PROTECTION_ENABLED &&
	    state->content_protection == DRM_MODE_CONTENT_PROTECTION_DESIRED) {
		state->content_protection = DRM_MODE_CONTENT_PROTECTION_ENABLED;

	/* S3 resume case, since old state will always be 0 (UNDESIRED) and the restored state will be ENABLED.
	 *
	 * Handles: UNDESIRED -> ENABLED
	 */
	if (old_state->content_protection == DRM_MODE_CONTENT_PROTECTION_UNDESIRED &&
	    state->content_protection == DRM_MODE_CONTENT_PROTECTION_ENABLED)
		state->content_protection = DRM_MODE_CONTENT_PROTECTION_DESIRED;

	/* Stream removed and re-enabled.
	 *
	 * Can sometimes overlap with the HPD case,
	 * thus set update_hdcp to false to avoid
	 * setting HDCP multiple times.
	 *
	 * Handles: DESIRED -> DESIRED (Special case)
	 */
	if (!(old_state->crtc && old_state->crtc->enabled) &&
	    state->crtc && state->crtc->enabled &&
	    connector->state->content_protection == DRM_MODE_CONTENT_PROTECTION_DESIRED) {
		dm_con_state->update_hdcp = false;

	/* Hot-plug, headless s3, dpms.
	 *
	 * Only start HDCP if the display is connected/enabled.
	 * update_hdcp flag will be set to false until the next HPD.
	 *
	 * Handles: DESIRED -> DESIRED (Special case)
	 */
	if (dm_con_state->update_hdcp && state->content_protection == DRM_MODE_CONTENT_PROTECTION_DESIRED &&
	    connector->dpms == DRM_MODE_DPMS_ON && aconnector->dc_sink != NULL) {
		dm_con_state->update_hdcp = false;

	/*
	 * Handles: UNDESIRED -> UNDESIRED
	 *          DESIRED -> DESIRED
	 *          ENABLED -> ENABLED
	 */
	if (old_state->content_protection == state->content_protection)

	/*
	 * Handles: UNDESIRED -> DESIRED
	 *          DESIRED -> UNDESIRED
	 *          ENABLED -> UNDESIRED
	 */
	if (state->content_protection != DRM_MODE_CONTENT_PROTECTION_ENABLED)

	/*
	 * Handles: DESIRED -> ENABLED
	 */
8567 static void remove_stream(struct amdgpu_device *adev,
8568 struct amdgpu_crtc *acrtc,
8569 struct dc_stream_state *stream)
8571 /* this is the update mode case */
8573 acrtc->otg_inst = -1;
8574 acrtc->enabled = false;
/*
 * Compute the DC cursor position/hotspot from the cursor plane state.
 * Fills @position; enable stays false when the cursor has no FB or is
 * entirely off-screen. Returns 0 on success, negative on bad dimensions.
 */
static int get_cursor_position(struct drm_plane *plane, struct drm_crtc *crtc,
			       struct dc_cursor_position *position)
	struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
	int xorigin = 0, yorigin = 0;

	if (!crtc || !plane->state->fb)

	/* Reject cursors larger than the hardware maximum. */
	if ((plane->state->crtc_w > amdgpu_crtc->max_cursor_width) ||
	    (plane->state->crtc_h > amdgpu_crtc->max_cursor_height)) {
		DRM_ERROR("%s: bad cursor width or height %d x %d\n",
			  plane->state->crtc_w,
			  plane->state->crtc_h);

	x = plane->state->crtc_x;
	y = plane->state->crtc_y;

	/* Entirely off-screen to the top/left: nothing to display. */
	if (x <= -amdgpu_crtc->max_cursor_width ||
	    y <= -amdgpu_crtc->max_cursor_height)

	/* Partially off-screen: fold the overhang into the hotspot. */
		xorigin = min(-x, amdgpu_crtc->max_cursor_width - 1);
		yorigin = min(-y, amdgpu_crtc->max_cursor_height - 1);
	position->enable = true;
	position->translate_by_source = true;
	position->x_hotspot = xorigin;
	position->y_hotspot = yorigin;
/*
 * Program (or disable) the hardware cursor for the CRTC bound to the
 * cursor plane, based on the plane's current FB, size and position.
 * Takes dm.dc_lock around the DC cursor calls.
 */
static void handle_cursor_update(struct drm_plane *plane,
				 struct drm_plane_state *old_plane_state)
	struct amdgpu_device *adev = drm_to_adev(plane->dev);
	struct amdgpu_framebuffer *afb = to_amdgpu_framebuffer(plane->state->fb);
	/* Fall back to the old CRTC when the plane lost its FB (disable). */
	struct drm_crtc *crtc = afb ? plane->state->crtc : old_plane_state->crtc;
	struct dm_crtc_state *crtc_state = crtc ? to_dm_crtc_state(crtc->state) : NULL;
	struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
	uint64_t address = afb ? afb->address : 0;
	struct dc_cursor_position position = {0};
	struct dc_cursor_attributes attributes;

	/* Neither old nor new state has a cursor FB: nothing to do. */
	if (!plane->state->fb && !old_plane_state->fb)

	DC_LOG_CURSOR("%s: crtc_id=%d with size %d to %d\n",
		      amdgpu_crtc->crtc_id,
		      plane->state->crtc_w,
		      plane->state->crtc_h);

	ret = get_cursor_position(plane, crtc, &position);

	if (!position.enable) {
		/* turn off cursor */
		if (crtc_state && crtc_state->stream) {
			mutex_lock(&adev->dm.dc_lock);
			dc_stream_set_cursor_position(crtc_state->stream,
			mutex_unlock(&adev->dm.dc_lock);

	amdgpu_crtc->cursor_width = plane->state->crtc_w;
	amdgpu_crtc->cursor_height = plane->state->crtc_h;

	memset(&attributes, 0, sizeof(attributes));
	attributes.address.high_part = upper_32_bits(address);
	attributes.address.low_part = lower_32_bits(address);
	attributes.width = plane->state->crtc_w;
	attributes.height = plane->state->crtc_h;
	attributes.color_format = CURSOR_MODE_COLOR_PRE_MULTIPLIED_ALPHA;
	attributes.rotation_angle = 0;
	attributes.attribute_flags.value = 0;

	/* Pitch in pixels, derived from the FB's byte pitch and cpp. */
	attributes.pitch = afb->base.pitches[0] / afb->base.format->cpp[0];

	if (crtc_state->stream) {
		mutex_lock(&adev->dm.dc_lock);
		if (!dc_stream_set_cursor_attributes(crtc_state->stream,
			DRM_ERROR("DC failed to set cursor attributes\n");

		if (!dc_stream_set_cursor_position(crtc_state->stream,
			DRM_ERROR("DC failed to set cursor position\n");
		mutex_unlock(&adev->dm.dc_lock);
/*
 * Latch the pending pageflip event on the CRTC so the pageflip IRQ
 * handler can complete it later. Caller must hold dev->event_lock.
 */
static void prepare_flip_isr(struct amdgpu_crtc *acrtc)
	assert_spin_locked(&acrtc->base.dev->event_lock);
	/* A previous flip's event must already have been consumed. */
	WARN_ON(acrtc->event);

	acrtc->event = acrtc->base.state->event;

	/* Set the flip status */
	acrtc->pflip_status = AMDGPU_FLIP_SUBMITTED;

	/* Mark this event as consumed */
	acrtc->base.state->event = NULL;

	DC_LOG_PFLIP("crtc:%d, pflip_stat:AMDGPU_FLIP_SUBMITTED\n",
/*
 * Recompute FreeSync/VRR state for @new_stream on a flip: run the
 * freesync module's pre-flip handling, rebuild the VRR infopacket, and
 * publish the results to both the new CRTC state and the per-CRTC IRQ
 * parameters. Holds event_lock to serialize against the vblank/pflip
 * interrupt handlers.
 */
static void update_freesync_state_on_stream(
	struct amdgpu_display_manager *dm,
	struct dm_crtc_state *new_crtc_state,
	struct dc_stream_state *new_stream,
	struct dc_plane_state *surface,
	u32 flip_timestamp_in_us)
	struct mod_vrr_params vrr_params;
	struct dc_info_packet vrr_infopacket = {0};
	struct amdgpu_device *adev = dm->adev;
	struct amdgpu_crtc *acrtc = to_amdgpu_crtc(new_crtc_state->base.crtc);
	unsigned long flags;
	bool pack_sdp_v1_3 = false;

	/*
	 * TODO: Determine why min/max totals and vrefresh can be 0 here.
	 * For now it's sufficient to just guard against these conditions.
	 */
	if (!new_stream->timing.h_total || !new_stream->timing.v_total)

	spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);
	vrr_params = acrtc->dm_irq_params.vrr_params;

		mod_freesync_handle_preflip(
			dm->freesync_module,
			flip_timestamp_in_us,

	/* Pre-AI families with VRR active handle v_update in software. */
	if (adev->family < AMDGPU_FAMILY_AI &&
	    amdgpu_dm_vrr_active(new_crtc_state)) {
		mod_freesync_handle_v_update(dm->freesync_module,
					     new_stream, &vrr_params);

		/* Need to call this before the frame ends. */
		dc_stream_adjust_vmin_vmax(dm->dc,
					   new_crtc_state->stream,
					   &vrr_params.adjust);

	mod_freesync_build_vrr_infopacket(
		dm->freesync_module,
		TRANSFER_FUNC_UNKNOWN,

	/* Track whether timing or infopacket actually changed. */
	new_crtc_state->freesync_timing_changed |=
		(memcmp(&acrtc->dm_irq_params.vrr_params.adjust,
			sizeof(vrr_params.adjust)) != 0);

	new_crtc_state->freesync_vrr_info_changed |=
		(memcmp(&new_crtc_state->vrr_infopacket,
			sizeof(vrr_infopacket)) != 0);

	/* Publish for the IRQ handlers and subsequent commits. */
	acrtc->dm_irq_params.vrr_params = vrr_params;
	new_crtc_state->vrr_infopacket = vrr_infopacket;

	new_stream->adjust = acrtc->dm_irq_params.vrr_params.adjust;
	new_stream->vrr_infopacket = vrr_infopacket;

	if (new_crtc_state->freesync_vrr_info_changed)
		DRM_DEBUG_KMS("VRR packet update: crtc=%u enabled=%d state=%d",
			      new_crtc_state->base.crtc->base.id,
			      (int)new_crtc_state->base.vrr_enabled,
			      (int)vrr_params.state);

	spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
/*
 * Copy the latest FreeSync/VRR configuration for a stream into the
 * CRTC's dm_irq_params so the vblank/vupdate IRQ handlers see a
 * consistent snapshot. Holds event_lock against those handlers.
 */
static void update_stream_irq_parameters(
	struct amdgpu_display_manager *dm,
	struct dm_crtc_state *new_crtc_state)
	struct dc_stream_state *new_stream = new_crtc_state->stream;
	struct mod_vrr_params vrr_params;
	struct mod_freesync_config config = new_crtc_state->freesync_config;
	struct amdgpu_device *adev = dm->adev;
	struct amdgpu_crtc *acrtc = to_amdgpu_crtc(new_crtc_state->base.crtc);
	unsigned long flags;

	/*
	 * TODO: Determine why min/max totals and vrefresh can be 0 here.
	 * For now it's sufficient to just guard against these conditions.
	 */
	if (!new_stream->timing.h_total || !new_stream->timing.v_total)

	spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);
	vrr_params = acrtc->dm_irq_params.vrr_params;

	if (new_crtc_state->vrr_supported &&
	    config.min_refresh_in_uhz &&
	    config.max_refresh_in_uhz) {
		/*
		 * if freesync compatible mode was set, config.state will be set
		 * to ACTIVE_FIXED; keep the fixed refresh in that case.
		 */
		if (config.state == VRR_STATE_ACTIVE_FIXED && config.fixed_refresh_in_uhz &&
		    (!drm_atomic_crtc_needs_modeset(&new_crtc_state->base) ||
		     new_crtc_state->freesync_config.state == VRR_STATE_ACTIVE_FIXED)) {
			vrr_params.max_refresh_in_uhz = config.max_refresh_in_uhz;
			vrr_params.min_refresh_in_uhz = config.min_refresh_in_uhz;
			vrr_params.fixed_refresh_in_uhz = config.fixed_refresh_in_uhz;
			vrr_params.state = VRR_STATE_ACTIVE_FIXED;
			config.state = new_crtc_state->base.vrr_enabled ?
				VRR_STATE_ACTIVE_VARIABLE :
		config.state = VRR_STATE_UNSUPPORTED;

	mod_freesync_build_vrr_params(dm->freesync_module,
				      &config, &vrr_params);

	new_crtc_state->freesync_timing_changed |=
		(memcmp(&acrtc->dm_irq_params.vrr_params.adjust,
			&vrr_params.adjust, sizeof(vrr_params.adjust)) != 0);

	new_crtc_state->freesync_config = config;
	/* Copy state for access from DM IRQ handler */
	acrtc->dm_irq_params.freesync_config = config;
	acrtc->dm_irq_params.active_planes = new_crtc_state->active_planes;
	acrtc->dm_irq_params.vrr_params = vrr_params;
	spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
/*
 * Handle VRR (FreeSync) on/off transitions for a CRTC. While VRR is
 * active we hold a vblank reference and keep the vupdate IRQ enabled so
 * vblank/pflip timestamps stay valid inside the variable front porch.
 */
static void amdgpu_dm_handle_vrr_transition(struct dm_crtc_state *old_state,
					    struct dm_crtc_state *new_state)
	bool old_vrr_active = amdgpu_dm_vrr_active(old_state);
	bool new_vrr_active = amdgpu_dm_vrr_active(new_state);

	if (!old_vrr_active && new_vrr_active) {
		/* Transition VRR inactive -> active:
		 * While VRR is active, we must not disable vblank irq, as a
		 * reenable after disable would compute bogus vblank/pflip
		 * timestamps if it likely happened inside display front-porch.
		 *
		 * We also need vupdate irq for the actual core vblank handling.
		 */
		dm_set_vupdate_irq(new_state->base.crtc, true);
		drm_crtc_vblank_get(new_state->base.crtc);
		DRM_DEBUG_DRIVER("%s: crtc=%u VRR off->on: Get vblank ref\n",
				 __func__, new_state->base.crtc->base.id);
	} else if (old_vrr_active && !new_vrr_active) {
		/* Transition VRR active -> inactive:
		 * Allow vblank irq disable again for fixed refresh rate.
		 */
		dm_set_vupdate_irq(new_state->base.crtc, false);
		drm_crtc_vblank_put(new_state->base.crtc);
		DRM_DEBUG_DRIVER("%s: crtc=%u VRR on->off: Drop vblank ref\n",
				 __func__, new_state->base.crtc->base.id);
8878 static void amdgpu_dm_commit_cursors(struct drm_atomic_state *state)
8880 struct drm_plane *plane;
8881 struct drm_plane_state *old_plane_state;
8885 * TODO: Make this per-stream so we don't issue redundant updates for
8886 * commits with multiple streams.
8888 for_each_old_plane_in_state(state, plane, old_plane_state, i)
8889 if (plane->type == DRM_PLANE_TYPE_CURSOR)
8890 handle_cursor_update(plane, old_plane_state);
/*
 * Program all plane updates for one CRTC as part of the atomic commit:
 * builds a dc_surface_update bundle for the CRTC's planes, throttles
 * pageflips against vblank, latches the flip event, updates FreeSync
 * state and PSR, and hands everything to DC under dm->dc_lock.
 */
static void amdgpu_dm_commit_planes(struct drm_atomic_state *state,
				    struct dc_state *dc_state,
				    struct drm_device *dev,
				    struct amdgpu_display_manager *dm,
				    struct drm_crtc *pcrtc,
				    bool wait_for_vblank)
	uint64_t timestamp_ns;
	struct drm_plane *plane;
	struct drm_plane_state *old_plane_state, *new_plane_state;
	struct amdgpu_crtc *acrtc_attach = to_amdgpu_crtc(pcrtc);
	struct drm_crtc_state *new_pcrtc_state =
		drm_atomic_get_new_crtc_state(state, pcrtc);
	struct dm_crtc_state *acrtc_state = to_dm_crtc_state(new_pcrtc_state);
	struct dm_crtc_state *dm_old_crtc_state =
		to_dm_crtc_state(drm_atomic_get_old_crtc_state(state, pcrtc));
	int planes_count = 0, vpos, hpos;
	unsigned long flags;
	struct amdgpu_bo *abo;
	uint32_t target_vblank, last_flip_vblank;
	bool vrr_active = amdgpu_dm_vrr_active(acrtc_state);
	bool pflip_present = false;
	/* Per-plane scratch arrays collected into one heap allocation. */
	struct dc_surface_update surface_updates[MAX_SURFACES];
	struct dc_plane_info plane_infos[MAX_SURFACES];
	struct dc_scaling_info scaling_infos[MAX_SURFACES];
	struct dc_flip_addrs flip_addrs[MAX_SURFACES];
	struct dc_stream_update stream_update;

	bundle = kzalloc(sizeof(*bundle), GFP_KERNEL);
		dm_error("Failed to allocate update bundle\n");

	/*
	 * Disable the cursor first if we're disabling all the planes.
	 * It'll remain on the screen after the planes are re-enabled.
	 */
	if (acrtc_state->active_planes == 0)
		amdgpu_dm_commit_cursors(state);

	/* update planes when needed */
	for_each_oldnew_plane_in_state(state, plane, old_plane_state, new_plane_state, i) {
		struct drm_crtc *crtc = new_plane_state->crtc;
		struct drm_crtc_state *new_crtc_state;
		struct drm_framebuffer *fb = new_plane_state->fb;
		struct amdgpu_framebuffer *afb = (struct amdgpu_framebuffer *)fb;
		bool plane_needs_flip;
		struct dc_plane_state *dc_plane;
		struct dm_plane_state *dm_new_plane_state = to_dm_plane_state(new_plane_state);

		/* Cursor plane is handled after stream updates */
		if (plane->type == DRM_PLANE_TYPE_CURSOR)

		/* Only planes on this CRTC with an FB are of interest. */
		if (!fb || !crtc || pcrtc != crtc)

		new_crtc_state = drm_atomic_get_new_crtc_state(state, crtc);
		if (!new_crtc_state->active)

		dc_plane = dm_new_plane_state->dc_state;

		bundle->surface_updates[planes_count].surface = dc_plane;
		if (new_pcrtc_state->color_mgmt_changed) {
			bundle->surface_updates[planes_count].gamma = dc_plane->gamma_correction;
			bundle->surface_updates[planes_count].in_transfer_func = dc_plane->in_transfer_func;
			bundle->surface_updates[planes_count].gamut_remap_matrix = &dc_plane->gamut_remap_matrix;

		fill_dc_scaling_info(new_plane_state,
				     &bundle->scaling_infos[planes_count]);

		bundle->surface_updates[planes_count].scaling_info =
			&bundle->scaling_infos[planes_count];

		/* A flip requires an FB in both old and new state. */
		plane_needs_flip = old_plane_state->fb && new_plane_state->fb;

		pflip_present = pflip_present || plane_needs_flip;

		if (!plane_needs_flip) {

		abo = gem_to_amdgpu_bo(fb->obj[0]);

		/*
		 * Wait for all fences on this FB. Do limited wait to avoid
		 * deadlock during GPU reset when this fence will not signal
		 * but we hold reservation lock for the BO.
		 */
		r = dma_resv_wait_timeout(abo->tbo.base.resv, true, false,
					  msecs_to_jiffies(5000));
		if (unlikely(r <= 0))
			DRM_ERROR("Waiting for fences timed out!");

		fill_dc_plane_info_and_addr(
			dm->adev, new_plane_state,
			&bundle->plane_infos[planes_count],
			&bundle->flip_addrs[planes_count].address,
			afb->tmz_surface, false);

		DRM_DEBUG_ATOMIC("plane: id=%d dcc_en=%d\n",
				 new_plane_state->plane->index,
				 bundle->plane_infos[planes_count].dcc.enable);

		bundle->surface_updates[planes_count].plane_info =
			&bundle->plane_infos[planes_count];

		/*
		 * Only allow immediate flips for fast updates that don't
		 * change FB pitch, DCC state, rotation or mirroing.
		 */
		bundle->flip_addrs[planes_count].flip_immediate =
			crtc->state->async_flip &&
			acrtc_state->update_type == UPDATE_TYPE_FAST;

		timestamp_ns = ktime_get_ns();
		bundle->flip_addrs[planes_count].flip_timestamp_in_us = div_u64(timestamp_ns, 1000);
		bundle->surface_updates[planes_count].flip_addr = &bundle->flip_addrs[planes_count];
		bundle->surface_updates[planes_count].surface = dc_plane;

		if (!bundle->surface_updates[planes_count].surface) {
			DRM_ERROR("No surface for CRTC: id=%d\n",
				  acrtc_attach->crtc_id);

		/* FreeSync bookkeeping is driven by the primary plane. */
		if (plane == pcrtc->primary)
			update_freesync_state_on_stream(
				acrtc_state->stream,
				bundle->flip_addrs[planes_count].flip_timestamp_in_us);

		DRM_DEBUG_ATOMIC("%s Flipping to hi: 0x%x, low: 0x%x\n",
				 bundle->flip_addrs[planes_count].address.grph.addr.high_part,
				 bundle->flip_addrs[planes_count].address.grph.addr.low_part);

	if (pflip_present) {
		/* Use old throttling in non-vrr fixed refresh rate mode
		 * to keep flip scheduling based on target vblank counts
		 * working in a backwards compatible way, e.g., for
		 * clients using the GLX_OML_sync_control extension or
		 * DRI3/Present extension with defined target_msc.
		 */
		last_flip_vblank = amdgpu_get_vblank_counter_kms(pcrtc);

		/* For variable refresh rate mode only:
		 * Get vblank of last completed flip to avoid > 1 vrr
		 * flips per video frame by use of throttling, but allow
		 * flip programming anywhere in the possibly large
		 * variable vrr vblank interval for fine-grained flip
		 * timing control and more opportunity to avoid stutter
		 * on late submission of flips.
		 */
		spin_lock_irqsave(&pcrtc->dev->event_lock, flags);
		last_flip_vblank = acrtc_attach->dm_irq_params.last_flip_vblank;
		spin_unlock_irqrestore(&pcrtc->dev->event_lock, flags);

	target_vblank = last_flip_vblank + wait_for_vblank;

	/*
	 * Wait until we're out of the vertical blank period before the one
	 * targeted by the flip.
	 */
	while ((acrtc_attach->enabled &&
		(amdgpu_display_get_crtc_scanoutpos(dm->ddev, acrtc_attach->crtc_id,
						    0, &vpos, &hpos, NULL,
						    NULL, &pcrtc->hwmode)
		 & (DRM_SCANOUTPOS_VALID | DRM_SCANOUTPOS_IN_VBLANK)) ==
		(DRM_SCANOUTPOS_VALID | DRM_SCANOUTPOS_IN_VBLANK) &&
		(int)(target_vblank -
		      amdgpu_get_vblank_counter_kms(pcrtc)) > 0)) {
		usleep_range(1000, 1100);

	/*
	 * Prepare the flip event for the pageflip interrupt to handle.
	 *
	 * This only works in the case where we've already turned on the
	 * appropriate hardware blocks (eg. HUBP) so in the transition case
	 * from 0 -> n planes we have to skip a hardware generated event
	 * and rely on sending it from software.
	 */
	if (acrtc_attach->base.state->event &&
	    acrtc_state->active_planes > 0) {
		drm_crtc_vblank_get(pcrtc);

		spin_lock_irqsave(&pcrtc->dev->event_lock, flags);

		WARN_ON(acrtc_attach->pflip_status != AMDGPU_FLIP_NONE);
		prepare_flip_isr(acrtc_attach);

		spin_unlock_irqrestore(&pcrtc->dev->event_lock, flags);

	if (acrtc_state->stream) {
		if (acrtc_state->freesync_vrr_info_changed)
			bundle->stream_update.vrr_infopacket =
				&acrtc_state->stream->vrr_infopacket;

	/* Update the planes if changed or disable if we don't have any. */
	if ((planes_count || acrtc_state->active_planes == 0) &&
	    acrtc_state->stream) {
#if defined(CONFIG_DRM_AMD_DC_DCN)
		/*
		 * If PSR or idle optimizations are enabled then flush out
		 * any pending work before hardware programming.
		 */
		if (dm->vblank_control_workqueue)
			flush_workqueue(dm->vblank_control_workqueue);

		bundle->stream_update.stream = acrtc_state->stream;
		if (new_pcrtc_state->mode_changed) {
			bundle->stream_update.src = acrtc_state->stream->src;
			bundle->stream_update.dst = acrtc_state->stream->dst;

		if (new_pcrtc_state->color_mgmt_changed) {
			/*
			 * TODO: This isn't fully correct since we've actually
			 * already modified the stream in place.
			 */
			bundle->stream_update.gamut_remap =
				&acrtc_state->stream->gamut_remap_matrix;
			bundle->stream_update.output_csc_transform =
				&acrtc_state->stream->csc_color_matrix;
			bundle->stream_update.out_transfer_func =
				acrtc_state->stream->out_transfer_func;

		acrtc_state->stream->abm_level = acrtc_state->abm_level;
		if (acrtc_state->abm_level != dm_old_crtc_state->abm_level)
			bundle->stream_update.abm_level = &acrtc_state->abm_level;

		/*
		 * If FreeSync state on the stream has changed then we need to
		 * re-adjust the min/max bounds now that DC doesn't handle this
		 * as part of commit.
		 */
		if (is_dc_timing_adjust_needed(dm_old_crtc_state, acrtc_state)) {
			spin_lock_irqsave(&pcrtc->dev->event_lock, flags);
			dc_stream_adjust_vmin_vmax(
				dm->dc, acrtc_state->stream,
				&acrtc_attach->dm_irq_params.vrr_params.adjust);
			spin_unlock_irqrestore(&pcrtc->dev->event_lock, flags);
		/* PSR must be inactive before a non-fast stream update. */
		mutex_lock(&dm->dc_lock);
		if ((acrtc_state->update_type > UPDATE_TYPE_FAST) &&
		    acrtc_state->stream->link->psr_settings.psr_allow_active)
			amdgpu_dm_psr_disable(acrtc_state->stream);

		dc_commit_updates_for_stream(dm->dc,
					     bundle->surface_updates,
					     acrtc_state->stream,
					     &bundle->stream_update,

		/*
		 * Enable or disable the interrupts on the backend.
		 *
		 * Most pipes are put into power gating when unused.
		 *
		 * When power gating is enabled on a pipe we lose the
		 * interrupt enablement state when power gating is disabled.
		 *
		 * So we need to update the IRQ control state in hardware
		 * whenever the pipe turns on (since it could be previously
		 * power gated) or off (since some pipes can't be power gated
		 * on some ASICs).
		 */
		if (dm_old_crtc_state->active_planes != acrtc_state->active_planes)
			dm_update_pflip_irq_state(drm_to_adev(dev),

		/* Set up PSR on links that support it but haven't enabled it. */
		if ((acrtc_state->update_type > UPDATE_TYPE_FAST) &&
		    acrtc_state->stream->link->psr_settings.psr_version != DC_PSR_VERSION_UNSUPPORTED &&
		    !acrtc_state->stream->link->psr_settings.psr_feature_enabled)
			amdgpu_dm_link_setup_psr(acrtc_state->stream);

		/* Decrement skip count when PSR is enabled and we're doing fast updates. */
		if (acrtc_state->update_type == UPDATE_TYPE_FAST &&
		    acrtc_state->stream->link->psr_settings.psr_feature_enabled) {
			struct amdgpu_dm_connector *aconn =
				(struct amdgpu_dm_connector *)acrtc_state->stream->dm_stream_context;

			if (aconn->psr_skip_count > 0)
				aconn->psr_skip_count--;

			/* Allow PSR when skip count is 0. */
			acrtc_attach->dm_irq_params.allow_psr_entry = !aconn->psr_skip_count;
			acrtc_attach->dm_irq_params.allow_psr_entry = false;

		mutex_unlock(&dm->dc_lock);

	/*
	 * Update cursor state *after* programming all the planes.
	 * This avoids redundant programming in the case where we're going
	 * to be disabling a single plane - those pipes are being disabled.
	 */
	if (acrtc_state->active_planes)
		amdgpu_dm_commit_cursors(state);
/*
 * Notify the audio side (ELD) of connector routing changes in this
 * commit: first removals/CRTC changes, then additions, with the audio
 * instance updated under dm.audio_lock.
 */
static void amdgpu_dm_commit_audio(struct drm_device *dev,
				   struct drm_atomic_state *state)
	struct amdgpu_device *adev = drm_to_adev(dev);
	struct amdgpu_dm_connector *aconnector;
	struct drm_connector *connector;
	struct drm_connector_state *old_con_state, *new_con_state;
	struct drm_crtc_state *new_crtc_state;
	struct dm_crtc_state *new_dm_crtc_state;
	const struct dc_stream_status *status;

	/* Notify device removals. */
	for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) {
		if (old_con_state->crtc != new_con_state->crtc) {
			/* CRTC changes require notification. */

		if (!new_con_state->crtc)

		new_crtc_state = drm_atomic_get_new_crtc_state(
			state, new_con_state->crtc);

		if (!new_crtc_state)

		if (!drm_atomic_crtc_needs_modeset(new_crtc_state))

		aconnector = to_amdgpu_dm_connector(connector);

		mutex_lock(&adev->dm.audio_lock);
		inst = aconnector->audio_inst;
		/* -1 marks the connector as having no audio instance. */
		aconnector->audio_inst = -1;
		mutex_unlock(&adev->dm.audio_lock);

		amdgpu_dm_audio_eld_notify(adev, inst);

	/* Notify audio device additions. */
	for_each_new_connector_in_state(state, connector, new_con_state, i) {
		if (!new_con_state->crtc)

		new_crtc_state = drm_atomic_get_new_crtc_state(
			state, new_con_state->crtc);

		if (!new_crtc_state)

		if (!drm_atomic_crtc_needs_modeset(new_crtc_state))

		new_dm_crtc_state = to_dm_crtc_state(new_crtc_state);
		if (!new_dm_crtc_state->stream)

		/* The audio instance comes from DC's stream status. */
		status = dc_stream_get_status(new_dm_crtc_state->stream);

		aconnector = to_amdgpu_dm_connector(connector);

		mutex_lock(&adev->dm.audio_lock);
		inst = status->audio_inst;
		aconnector->audio_inst = inst;
		mutex_unlock(&adev->dm.audio_lock);

		amdgpu_dm_audio_eld_notify(adev, inst);
9302 * amdgpu_dm_crtc_copy_transient_flags - copy mirrored flags from DRM to DC
9303 * @crtc_state: the DRM CRTC state
9304 * @stream_state: the DC stream state.
9306 * Copy the mirrored transient state flags from DRM, to DC. It is used to bring
9307 * a dc_stream_state's flags in sync with a drm_crtc_state's flags.
9309 static void amdgpu_dm_crtc_copy_transient_flags(struct drm_crtc_state *crtc_state,
9310 struct dc_stream_state *stream_state)
/* Only mode_changed is mirrored here; it tracks whether a modeset is needed. */
9312 stream_state->mode_changed = drm_atomic_crtc_needs_modeset(crtc_state);
9316 * amdgpu_dm_atomic_commit_tail() - AMDgpu DM's commit tail implementation.
9317 * @state: The atomic state to commit
9319 * This will tell DC to commit the constructed DC state from atomic_check,
9320 * programming the hardware. Any failures here imply a hardware failure, since
9321 * atomic check should have filtered anything non-kosher.
9323 static void amdgpu_dm_atomic_commit_tail(struct drm_atomic_state *state)
9325 struct drm_device *dev = state->dev;
9326 struct amdgpu_device *adev = drm_to_adev(dev);
9327 struct amdgpu_display_manager *dm = &adev->dm;
9328 struct dm_atomic_state *dm_state;
9329 struct dc_state *dc_state = NULL, *dc_state_temp = NULL;
9331 struct drm_crtc *crtc;
9332 struct drm_crtc_state *old_crtc_state, *new_crtc_state;
9333 unsigned long flags;
9334 bool wait_for_vblank = true;
9335 struct drm_connector *connector;
9336 struct drm_connector_state *old_con_state, *new_con_state;
9337 struct dm_crtc_state *dm_old_crtc_state, *dm_new_crtc_state;
9338 int crtc_disable_count = 0;
9339 bool mode_set_reset_required = false;
9341 trace_amdgpu_dm_atomic_commit_tail_begin(state);
9343 drm_atomic_helper_update_legacy_modeset_state(dev, state);
/*
 * Use the DC state built during atomic_check when one exists; otherwise
 * construct a temporary copy of the current DC state (released at the end).
 */
9345 dm_state = dm_atomic_get_new_state(state);
9346 if (dm_state && dm_state->context) {
9347 dc_state = dm_state->context;
9349 /* No state changes, retain current state. */
9350 dc_state_temp = dc_create_state(dm->dc);
9351 ASSERT(dc_state_temp);
9352 dc_state = dc_state_temp;
9353 dc_resource_state_copy_construct_current(dm->dc, dc_state);
/*
 * First pass: quiesce CRTCs that are turning off or going through a full
 * modeset — disable their interrupts and drop the old stream reference.
 */
9356 for_each_oldnew_crtc_in_state (state, crtc, old_crtc_state,
9357 new_crtc_state, i) {
9358 struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
9360 dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
9362 if (old_crtc_state->active &&
9363 (!new_crtc_state->active ||
9364 drm_atomic_crtc_needs_modeset(new_crtc_state))) {
9365 manage_dm_interrupts(adev, acrtc, false);
9366 dc_stream_release(dm_old_crtc_state->stream);
9370 drm_atomic_helper_calc_timestamping_constants(state);
9372 /* update changed items */
9373 for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
9374 struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
9376 dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
9377 dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
9380 "amdgpu_crtc id:%d crtc_state_flags: enable:%d, active:%d, "
9381 "planes_changed:%d, mode_changed:%d,active_changed:%d,"
9382 "connectors_changed:%d\n",
9384 new_crtc_state->enable,
9385 new_crtc_state->active,
9386 new_crtc_state->planes_changed,
9387 new_crtc_state->mode_changed,
9388 new_crtc_state->active_changed,
9389 new_crtc_state->connectors_changed);
9391 /* Disable cursor if disabling crtc */
9392 if (old_crtc_state->active && !new_crtc_state->active) {
9393 struct dc_cursor_position position;
9395 memset(&position, 0, sizeof(position));
9396 mutex_lock(&dm->dc_lock);
9397 dc_stream_set_cursor_position(dm_old_crtc_state->stream, &position);
9398 mutex_unlock(&dm->dc_lock);
9401 /* Copy all transient state flags into dc state */
9402 if (dm_new_crtc_state->stream) {
9403 amdgpu_dm_crtc_copy_transient_flags(&dm_new_crtc_state->base,
9404 dm_new_crtc_state->stream);
9407 /* handles headless hotplug case, updating new_state and
9408 * aconnector as needed
9411 if (modeset_required(new_crtc_state, dm_new_crtc_state->stream, dm_old_crtc_state->stream)) {
9413 DRM_DEBUG_ATOMIC("Atomic commit: SET crtc id %d: [%p]\n", acrtc->crtc_id, acrtc);
9415 if (!dm_new_crtc_state->stream) {
9417 * this could happen because of issues with
9418 * userspace notifications delivery.
9419 * In this case userspace tries to set mode on
9420 * display which is disconnected in fact.
9421 * dc_sink is NULL in this case on aconnector.
9422 * We expect reset mode will come soon.
9424 * This can also happen when unplug is done
9425 * during resume sequence ended
9427 * In this case, we want to pretend we still
9428 * have a sink to keep the pipe running so that
9429 * hw state is consistent with the sw state
9431 DRM_DEBUG_DRIVER("%s: Failed to create new stream for crtc %d\n",
9432 __func__, acrtc->base.base.id);
9436 if (dm_old_crtc_state->stream)
9437 remove_stream(adev, acrtc, dm_old_crtc_state->stream);
/* Hold a runtime-PM reference while the CRTC is enabled; dropped when it is disabled. */
9439 pm_runtime_get_noresume(dev->dev);
9441 acrtc->enabled = true;
9442 acrtc->hw_mode = new_crtc_state->mode;
9443 crtc->hwmode = new_crtc_state->mode;
9444 mode_set_reset_required = true;
9445 } else if (modereset_required(new_crtc_state)) {
9446 DRM_DEBUG_ATOMIC("Atomic commit: RESET. crtc id %d:[%p]\n", acrtc->crtc_id, acrtc);
9447 /* i.e. reset mode */
9448 if (dm_old_crtc_state->stream)
9449 remove_stream(adev, acrtc, dm_old_crtc_state->stream);
9451 mode_set_reset_required = true;
9453 } /* for_each_crtc_in_state() */
9456 /* if there was a mode set or reset, disable eDP PSR */
9457 if (mode_set_reset_required) {
9458 #if defined(CONFIG_DRM_AMD_DC_DCN)
/* Drain pending vblank work before touching PSR state. */
9459 if (dm->vblank_control_workqueue)
9460 flush_workqueue(dm->vblank_control_workqueue);
9462 amdgpu_dm_psr_disable_all(dm);
9465 dm_enable_per_frame_crtc_master_sync(dc_state);
9466 mutex_lock(&dm->dc_lock);
9467 WARN_ON(!dc_commit_state(dm->dc, dc_state));
9468 #if defined(CONFIG_DRM_AMD_DC_DCN)
9469 /* Allow idle optimization when vblank count is 0 for display off */
9470 if (dm->active_vblank_irq_count == 0)
9471 dc_allow_idle_optimizations(dm->dc,true);
9473 mutex_unlock(&dm->dc_lock);
/* Record which OTG each CRTC ended up on after the DC commit. */
9476 for_each_new_crtc_in_state(state, crtc, new_crtc_state, i) {
9477 struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
9479 dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
9481 if (dm_new_crtc_state->stream != NULL) {
9482 const struct dc_stream_status *status =
9483 dc_stream_get_status(dm_new_crtc_state->stream);
9486 status = dc_stream_get_status_from_state(dc_state,
9487 dm_new_crtc_state->stream);
9489 DC_ERR("got no status for stream %p on acrtc%p\n", dm_new_crtc_state->stream, acrtc);
9491 acrtc->otg_inst = status->primary_otg_inst;
9494 #ifdef CONFIG_DRM_AMD_DC_HDCP
/* Re-evaluate HDCP state for every connector affected by this commit. */
9495 for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) {
9496 struct dm_connector_state *dm_new_con_state = to_dm_connector_state(new_con_state);
9497 struct amdgpu_crtc *acrtc = to_amdgpu_crtc(dm_new_con_state->base.crtc);
9498 struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
9500 new_crtc_state = NULL;
9503 new_crtc_state = drm_atomic_get_new_crtc_state(state, &acrtc->base);
9505 dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
/*
 * Stream removed while content protection was enabled: reset HDCP for
 * the link and fall back to DESIRED so it is re-negotiated later.
 */
9507 if (dm_new_crtc_state && dm_new_crtc_state->stream == NULL &&
9508 connector->state->content_protection == DRM_MODE_CONTENT_PROTECTION_ENABLED) {
9509 hdcp_reset_display(adev->dm.hdcp_workqueue, aconnector->dc_link->link_index);
9510 new_con_state->content_protection = DRM_MODE_CONTENT_PROTECTION_DESIRED;
9511 dm_new_con_state->update_hdcp = true;
9515 if (is_content_protection_different(new_con_state, old_con_state, connector, adev->dm.hdcp_workqueue))
9516 hdcp_update_display(
9517 adev->dm.hdcp_workqueue, aconnector->dc_link->link_index, aconnector,
9518 new_con_state->hdcp_content_type,
9519 new_con_state->content_protection == DRM_MODE_CONTENT_PROTECTION_DESIRED);
9523 /* Handle connector state changes */
9524 for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) {
9525 struct dm_connector_state *dm_new_con_state = to_dm_connector_state(new_con_state);
9526 struct dm_connector_state *dm_old_con_state = to_dm_connector_state(old_con_state);
9527 struct amdgpu_crtc *acrtc = to_amdgpu_crtc(dm_new_con_state->base.crtc);
9528 struct dc_surface_update dummy_updates[MAX_SURFACES];
9529 struct dc_stream_update stream_update;
9530 struct dc_info_packet hdr_packet;
9531 struct dc_stream_status *status = NULL;
9532 bool abm_changed, hdr_changed, scaling_changed;
9534 memset(&dummy_updates, 0, sizeof(dummy_updates));
9535 memset(&stream_update, 0, sizeof(stream_update));
9538 new_crtc_state = drm_atomic_get_new_crtc_state(state, &acrtc->base);
9539 old_crtc_state = drm_atomic_get_old_crtc_state(state, &acrtc->base);
9542 /* Skip any modesets/resets */
9543 if (!acrtc || drm_atomic_crtc_needs_modeset(new_crtc_state))
9546 dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
9547 dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
9549 scaling_changed = is_scaling_state_different(dm_new_con_state,
9552 abm_changed = dm_new_crtc_state->abm_level !=
9553 dm_old_crtc_state->abm_level;
9556 !drm_connector_atomic_hdr_metadata_equal(old_con_state, new_con_state);
/* Nothing stream-level to update for this connector. */
9558 if (!scaling_changed && !abm_changed && !hdr_changed)
9561 stream_update.stream = dm_new_crtc_state->stream;
9562 if (scaling_changed) {
9563 update_stream_scaling_settings(&dm_new_con_state->base.crtc->mode,
9564 dm_new_con_state, dm_new_crtc_state->stream);
9566 stream_update.src = dm_new_crtc_state->stream->src;
9567 stream_update.dst = dm_new_crtc_state->stream->dst;
9571 dm_new_crtc_state->stream->abm_level = dm_new_crtc_state->abm_level;
9573 stream_update.abm_level = &dm_new_crtc_state->abm_level;
9577 fill_hdr_info_packet(new_con_state, &hdr_packet);
9578 stream_update.hdr_static_metadata = &hdr_packet;
9581 status = dc_stream_get_status(dm_new_crtc_state->stream);
9583 if (WARN_ON(!status))
9586 WARN_ON(!status->plane_count);
9589 * TODO: DC refuses to perform stream updates without a dc_surface_update.
9590 * Here we create an empty update on each plane.
9591 * To fix this, DC should permit updating only stream properties.
9593 for (j = 0; j < status->plane_count; j++)
9594 dummy_updates[j].surface = status->plane_states[0];
9597 mutex_lock(&dm->dc_lock);
9598 dc_commit_updates_for_stream(dm->dc,
9600 status->plane_count,
9601 dm_new_crtc_state->stream,
9604 mutex_unlock(&dm->dc_lock);
9607 /* Count number of newly disabled CRTCs for dropping PM refs later. */
9608 for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state,
9609 new_crtc_state, i) {
9610 if (old_crtc_state->active && !new_crtc_state->active)
9611 crtc_disable_count++;
9613 dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
9614 dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
9616 /* For freesync config update on crtc state and params for irq */
9617 update_stream_irq_parameters(dm, dm_new_crtc_state);
9619 /* Handle vrr on->off / off->on transitions */
9620 amdgpu_dm_handle_vrr_transition(dm_old_crtc_state,
9625 * Enable interrupts for CRTCs that are newly enabled or went through
9626 * a modeset. It was intentionally deferred until after the front end
9627 * state was modified to wait until the OTG was on and so the IRQ
9628 * handlers didn't access stale or invalid state.
9630 for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
9631 struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
9632 #ifdef CONFIG_DEBUG_FS
9633 bool configure_crc = false;
9634 enum amdgpu_dm_pipe_crc_source cur_crc_src;
9635 #if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
9636 struct crc_rd_work *crc_rd_wrk = dm->crc_rd_wrk;
/* crc_src is read under the event lock since IRQ handlers also touch it. */
9638 spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);
9639 cur_crc_src = acrtc->dm_irq_params.crc_src;
9640 spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
9642 dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
9644 if (new_crtc_state->active &&
9645 (!old_crtc_state->active ||
9646 drm_atomic_crtc_needs_modeset(new_crtc_state))) {
9647 dc_stream_retain(dm_new_crtc_state->stream);
9648 acrtc->dm_irq_params.stream = dm_new_crtc_state->stream;
9649 manage_dm_interrupts(adev, acrtc, true);
9651 #ifdef CONFIG_DEBUG_FS
9653 * Frontend may have changed so reapply the CRC capture
9654 * settings for the stream.
9656 dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
9658 if (amdgpu_dm_is_valid_crc_source(cur_crc_src)) {
9659 configure_crc = true;
9660 #if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
9661 if (amdgpu_dm_crc_window_is_activated(crtc)) {
9662 spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);
9663 acrtc->dm_irq_params.crc_window.update_win = true;
9664 acrtc->dm_irq_params.crc_window.skip_frame_cnt = 2;
9665 spin_lock_irq(&crc_rd_wrk->crc_rd_work_lock);
9666 crc_rd_wrk->crtc = crtc;
9667 spin_unlock_irq(&crc_rd_wrk->crc_rd_work_lock);
9668 spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
9674 if (amdgpu_dm_crtc_configure_crc_source(
9675 crtc, dm_new_crtc_state, cur_crc_src))
9676 DRM_DEBUG_DRIVER("Failed to configure crc source");
/* Async flips must not block on vblank. */
9681 for_each_new_crtc_in_state(state, crtc, new_crtc_state, j)
9682 if (new_crtc_state->async_flip)
9683 wait_for_vblank = false;
9685 /* update planes when needed per crtc*/
9686 for_each_new_crtc_in_state(state, crtc, new_crtc_state, j) {
9687 dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
9689 if (dm_new_crtc_state->stream)
9690 amdgpu_dm_commit_planes(state, dc_state, dev,
9691 dm, crtc, wait_for_vblank);
9694 /* Update audio instances for each connector. */
9695 amdgpu_dm_commit_audio(dev, state);
9697 #if defined(CONFIG_BACKLIGHT_CLASS_DEVICE) || \
9698 defined(CONFIG_BACKLIGHT_CLASS_DEVICE_MODULE)
9699 /* restore the backlight level */
9700 for (i = 0; i < dm->num_of_edps; i++) {
9701 if (dm->backlight_dev[i] &&
9702 (dm->actual_brightness[i] != dm->brightness[i]))
9703 amdgpu_dm_backlight_set_level(dm, i, dm->brightness[i]);
9707 * send vblank event on all events not handled in flip and
9708 * mark consumed event for drm_atomic_helper_commit_hw_done
9710 spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);
9711 for_each_new_crtc_in_state(state, crtc, new_crtc_state, i) {
9713 if (new_crtc_state->event)
9714 drm_send_event_locked(dev, &new_crtc_state->event->base);
9716 new_crtc_state->event = NULL;
9718 spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
9720 /* Signal HW programming completion */
9721 drm_atomic_helper_commit_hw_done(state);
9723 if (wait_for_vblank)
9724 drm_atomic_helper_wait_for_flip_done(dev, state);
9726 drm_atomic_helper_cleanup_planes(dev, state);
9728 /* return the stolen vga memory back to VRAM */
9729 if (!adev->mman.keep_stolen_vga_memory)
9730 amdgpu_bo_free_kernel(&adev->mman.stolen_vga_memory, NULL, NULL);
9731 amdgpu_bo_free_kernel(&adev->mman.stolen_extended_memory, NULL, NULL);
9734 * Finally, drop a runtime PM reference for each newly disabled CRTC,
9735 * so we can put the GPU into runtime suspend if we're not driving any
9738 for (i = 0; i < crtc_disable_count; i++)
9739 pm_runtime_put_autosuspend(dev->dev);
9740 pm_runtime_mark_last_busy(dev->dev);
/* Release the temporary DC state copy created when no new state existed. */
9743 dc_release_state(dc_state_temp);
/*
 * dm_force_atomic_commit - build and commit a minimal atomic state that
 * re-programs the CRTC and primary plane currently driving @connector.
 *
 * Forces a modeset by setting crtc_state->mode_changed = true, then calls
 * drm_atomic_commit() on the constructed state.  Returns 0 on success or a
 * negative errno.  NOTE(review): the error-path goto labels between the
 * PTR_ERR_OR_ZERO() checks are outside this listing.
 */
9747 static int dm_force_atomic_commit(struct drm_connector *connector)
9750 struct drm_device *ddev = connector->dev;
9751 struct drm_atomic_state *state = drm_atomic_state_alloc(ddev);
9752 struct amdgpu_crtc *disconnected_acrtc = to_amdgpu_crtc(connector->encoder->crtc);
9753 struct drm_plane *plane = disconnected_acrtc->base.primary;
9754 struct drm_connector_state *conn_state;
9755 struct drm_crtc_state *crtc_state;
9756 struct drm_plane_state *plane_state;
9761 state->acquire_ctx = ddev->mode_config.acquire_ctx;
9763 /* Construct an atomic state to restore previous display setting */
9766 * Attach connectors to drm_atomic_state
9768 conn_state = drm_atomic_get_connector_state(state, connector);
9770 ret = PTR_ERR_OR_ZERO(conn_state);
9774 /* Attach crtc to drm_atomic_state*/
9775 crtc_state = drm_atomic_get_crtc_state(state, &disconnected_acrtc->base);
9777 ret = PTR_ERR_OR_ZERO(crtc_state);
9781 /* force a restore */
9782 crtc_state->mode_changed = true;
9784 /* Attach plane to drm_atomic_state */
9785 plane_state = drm_atomic_get_plane_state(state, plane);
9787 ret = PTR_ERR_OR_ZERO(plane_state);
9791 /* Call commit internally with the state we just constructed */
9792 ret = drm_atomic_commit(state);
9795 drm_atomic_state_put(state);
9797 DRM_ERROR("Restoring old state failed with %i\n", ret);
9803 * This function handles all cases when set mode does not come upon hotplug.
9804 * This includes when a display is unplugged then plugged back into the
9805 * same port and when running without usermode desktop manager support
9807 void dm_restore_drm_connector_state(struct drm_device *dev,
9808 struct drm_connector *connector)
9810 struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
9811 struct amdgpu_crtc *disconnected_acrtc;
9812 struct dm_crtc_state *acrtc_state;
/* Nothing to restore without a sink, a connector state and an encoder. */
9814 if (!aconnector->dc_sink || !connector->state || !connector->encoder)
9817 disconnected_acrtc = to_amdgpu_crtc(connector->encoder->crtc);
9818 if (!disconnected_acrtc)
9821 acrtc_state = to_dm_crtc_state(disconnected_acrtc->base.state);
9822 if (!acrtc_state->stream)
9826 * If the previous sink is not released and different from the current,
9827 * we deduce we are in a state where we can not rely on usermode call
9828 * to turn on the display, so we do it here
9830 if (acrtc_state->stream->sink != aconnector->dc_sink)
9831 dm_force_atomic_commit(&aconnector->base);
9835 * Grabs all modesetting locks to serialize against any blocking commits,
9836 * Waits for completion of all non blocking commits.
9838 static int do_aquire_global_lock(struct drm_device *dev,
9839 struct drm_atomic_state *state)
9841 struct drm_crtc *crtc;
9842 struct drm_crtc_commit *commit;
9846 * Adding all modeset locks to acquire_ctx will
9847 * ensure that when the framework release it the
9848 * extra locks we are locking here will get released to
9850 ret = drm_modeset_lock_all_ctx(dev, state->acquire_ctx);
/*
 * For each CRTC, grab a reference on the most recent pending commit
 * (under commit_lock) and wait for its HW programming and flip to finish.
 */
9854 list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
9855 spin_lock(&crtc->commit_lock);
9856 commit = list_first_entry_or_null(&crtc->commit_list,
9857 struct drm_crtc_commit, commit_entry);
9859 drm_crtc_commit_get(commit);
9860 spin_unlock(&crtc->commit_lock);
9866 * Make sure all pending HW programming completed and
/* 10 second timeout on each completion; interruptible, so ret < 0 on signal. */
9869 ret = wait_for_completion_interruptible_timeout(&commit->hw_done, 10*HZ);
9872 ret = wait_for_completion_interruptible_timeout(
9873 &commit->flip_done, 10*HZ);
9876 DRM_ERROR("[CRTC:%d:%s] hw_done or flip_done "
9877 "timed out\n", crtc->base.id, crtc->name);
9879 drm_crtc_commit_put(commit);
/* Timeouts are logged but not fatal; only interruption propagates an error. */
9882 return ret < 0 ? ret : 0;
/*
 * get_freesync_config_for_crtc - derive the mod_freesync_config for a CRTC
 * from the connector's freesync capability and the current mode.
 *
 * VRR is supported when the connector reports freesync capability and the
 * mode's vrefresh lies within [min_vfreq, max_vfreq].  Refresh limits are
 * stored in micro-Hz (Hz * 1000000).  The resulting config is written to
 * new_crtc_state->freesync_config.
 */
9885 static void get_freesync_config_for_crtc(
9886 struct dm_crtc_state *new_crtc_state,
9887 struct dm_connector_state *new_con_state)
9889 struct mod_freesync_config config = {0};
9890 struct amdgpu_dm_connector *aconnector =
9891 to_amdgpu_dm_connector(new_con_state->base.connector);
9892 struct drm_display_mode *mode = &new_crtc_state->base.mode;
9893 int vrefresh = drm_mode_vrefresh(mode);
9894 bool fs_vid_mode = false;
9896 new_crtc_state->vrr_supported = new_con_state->freesync_capable &&
9897 vrefresh >= aconnector->min_vfreq &&
9898 vrefresh <= aconnector->max_vfreq;
9900 if (new_crtc_state->vrr_supported) {
9901 new_crtc_state->stream->ignore_msa_timing_param = true;
/* A previously pinned fixed-rate config keeps the CRTC in fixed mode. */
9902 fs_vid_mode = new_crtc_state->freesync_config.state == VRR_STATE_ACTIVE_FIXED;
9904 config.min_refresh_in_uhz = aconnector->min_vfreq * 1000000;
9905 config.max_refresh_in_uhz = aconnector->max_vfreq * 1000000;
9906 config.vsif_supported = true;
9910 config.state = VRR_STATE_ACTIVE_FIXED;
9911 config.fixed_refresh_in_uhz = new_crtc_state->freesync_config.fixed_refresh_in_uhz;
9913 } else if (new_crtc_state->base.vrr_enabled) {
9914 config.state = VRR_STATE_ACTIVE_VARIABLE;
9916 config.state = VRR_STATE_INACTIVE;
9920 new_crtc_state->freesync_config = config;
/* Clear VRR support and the cached VRR infopacket on @new_crtc_state. */
9923 static void reset_freesync_config_for_crtc(
9924 struct dm_crtc_state *new_crtc_state)
9926 new_crtc_state->vrr_supported = false;
9928 memset(&new_crtc_state->vrr_infopacket, 0,
9929 sizeof(new_crtc_state->vrr_infopacket));
/*
 * is_timing_unchanged_for_freesync - check whether two modes differ only in
 * the vertical blanking region (a freesync video mode refresh change).
 *
 * The mix of == and != below is intentional: clock and all horizontal
 * fields must be identical, while vtotal, vsync_start and vsync_end must
 * DIFFER — with the vsync pulse width (vsync_end - vsync_start) preserved,
 * which presumably leaves only the vertical front porch changed.  Such a
 * change can be absorbed without a full modeset.
 */
9933 is_timing_unchanged_for_freesync(struct drm_crtc_state *old_crtc_state,
9934 struct drm_crtc_state *new_crtc_state)
9936 struct drm_display_mode old_mode, new_mode;
9938 if (!old_crtc_state || !new_crtc_state)
9941 old_mode = old_crtc_state->mode;
9942 new_mode = new_crtc_state->mode;
9944 if (old_mode.clock == new_mode.clock &&
9945 old_mode.hdisplay == new_mode.hdisplay &&
9946 old_mode.vdisplay == new_mode.vdisplay &&
9947 old_mode.htotal == new_mode.htotal &&
9948 old_mode.vtotal != new_mode.vtotal &&
9949 old_mode.hsync_start == new_mode.hsync_start &&
9950 old_mode.vsync_start != new_mode.vsync_start &&
9951 old_mode.hsync_end == new_mode.hsync_end &&
9952 old_mode.vsync_end != new_mode.vsync_end &&
9953 old_mode.hskew == new_mode.hskew &&
9954 old_mode.vscan == new_mode.vscan &&
9955 (old_mode.vsync_end - old_mode.vsync_start) ==
9956 (new_mode.vsync_end - new_mode.vsync_start))
/*
 * set_freesync_fixed_config - pin the CRTC's freesync config to the fixed
 * refresh rate of its current mode.
 *
 * refresh (uHz) = clock (kHz) * 1000 * 1000000 / (htotal * vtotal),
 * computed in 64-bit via div_u64 to avoid overflow.
 */
9962 static void set_freesync_fixed_config(struct dm_crtc_state *dm_new_crtc_state) {
9963 uint64_t num, den, res;
9964 struct drm_crtc_state *new_crtc_state = &dm_new_crtc_state->base;
9966 dm_new_crtc_state->freesync_config.state = VRR_STATE_ACTIVE_FIXED;
9968 num = (unsigned long long)new_crtc_state->mode.clock * 1000 * 1000000;
9969 den = (unsigned long long)new_crtc_state->mode.htotal *
9970 (unsigned long long)new_crtc_state->mode.vtotal;
9972 res = div_u64(num, den);
9973 dm_new_crtc_state->freesync_config.fixed_refresh_in_uhz = res;
/*
 * dm_update_crtc_state - atomic-check helper that adds/removes the DC
 * stream for one CRTC and applies non-modeset stream updates.
 * @dm: display manager
 * @state: the atomic state being checked
 * @crtc: CRTC under consideration
 * @old_crtc_state/@new_crtc_state: DRM CRTC states
 * @enable: false = removal pass (drop streams for changed/disabled CRTCs),
 *          true = addition pass (create/validate and add new streams)
 * @lock_and_validation_needed: set to true when the global DC context
 *          changed and full validation must run later
 *
 * Returns 0 on success or a negative errno.  NOTE(review): this listing is
 * a partial dump — short lines (braces, goto/return/continue statements,
 * some DRM_DEBUG prefixes) between the visible statements are not shown.
 */
9976 static int dm_update_crtc_state(struct amdgpu_display_manager *dm,
9977 struct drm_atomic_state *state,
9978 struct drm_crtc *crtc,
9979 struct drm_crtc_state *old_crtc_state,
9980 struct drm_crtc_state *new_crtc_state,
9982 bool *lock_and_validation_needed)
9984 struct dm_atomic_state *dm_state = NULL;
9985 struct dm_crtc_state *dm_old_crtc_state, *dm_new_crtc_state;
9986 struct dc_stream_state *new_stream;
9990 * TODO Move this code into dm_crtc_atomic_check once we get rid of dc_validation_set
9991 * update changed items
9993 struct amdgpu_crtc *acrtc = NULL;
9994 struct amdgpu_dm_connector *aconnector = NULL;
9995 struct drm_connector_state *drm_new_conn_state = NULL, *drm_old_conn_state = NULL;
9996 struct dm_connector_state *dm_new_conn_state = NULL, *dm_old_conn_state = NULL;
10000 dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
10001 dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
10002 acrtc = to_amdgpu_crtc(crtc);
10003 aconnector = amdgpu_dm_find_first_crtc_matching_connector(state, crtc);
10005 /* TODO This hack should go away */
10006 if (aconnector && enable) {
10007 /* Make sure fake sink is created in plug-in scenario */
10008 drm_new_conn_state = drm_atomic_get_new_connector_state(state,
10009 &aconnector->base);
10010 drm_old_conn_state = drm_atomic_get_old_connector_state(state,
10011 &aconnector->base);
10013 if (IS_ERR(drm_new_conn_state)) {
10014 ret = PTR_ERR_OR_ZERO(drm_new_conn_state);
10018 dm_new_conn_state = to_dm_connector_state(drm_new_conn_state);
10019 dm_old_conn_state = to_dm_connector_state(drm_old_conn_state);
10021 if (!drm_atomic_crtc_needs_modeset(new_crtc_state))
/* Build and validate a candidate stream for the sink before touching state. */
10024 new_stream = create_validate_stream_for_sink(aconnector,
10025 &new_crtc_state->mode,
10027 dm_old_crtc_state->stream);
10030 * we can have no stream on ACTION_SET if a display
10031 * was disconnected during S3, in this case it is not an
10032 * error, the OS will be updated after detection, and
10033 * will do the right thing on next atomic commit
10037 DRM_DEBUG_DRIVER("%s: Failed to create new stream for crtc %d\n",
10038 __func__, acrtc->base.base.id);
10044 * TODO: Check VSDB bits to decide whether this should
10045 * be enabled or not.
10047 new_stream->triggered_crtc_reset.enabled =
10048 dm->force_timing_sync;
10050 dm_new_crtc_state->abm_level = dm_new_conn_state->abm_level;
10052 ret = fill_hdr_info_packet(drm_new_conn_state,
10053 &new_stream->hdr_static_metadata);
10058 * If we already removed the old stream from the context
10059 * (and set the new stream to NULL) then we can't reuse
10060 * the old stream even if the stream and scaling are unchanged.
10061 * We'll hit the BUG_ON and black screen.
10063 * TODO: Refactor this function to allow this check to work
10064 * in all conditions.
10066 if (amdgpu_freesync_vid_mode &&
10067 dm_new_crtc_state->stream &&
10068 is_timing_unchanged_for_freesync(new_crtc_state, old_crtc_state))
10071 if (dm_new_crtc_state->stream &&
10072 dc_is_stream_unchanged(new_stream, dm_old_crtc_state->stream) &&
10073 dc_is_stream_scaling_unchanged(new_stream, dm_old_crtc_state->stream)) {
/* Identical stream and scaling: downgrade the modeset to a no-op. */
10074 new_crtc_state->mode_changed = false;
10075 DRM_DEBUG_DRIVER("Mode change not required, setting mode_changed to %d",
10076 new_crtc_state->mode_changed);
10080 /* mode_changed flag may get updated above, need to check again */
10081 if (!drm_atomic_crtc_needs_modeset(new_crtc_state))
10085 "amdgpu_crtc id:%d crtc_state_flags: enable:%d, active:%d, "
10086 "planes_changed:%d, mode_changed:%d,active_changed:%d,"
10087 "connectors_changed:%d\n",
10089 new_crtc_state->enable,
10090 new_crtc_state->active,
10091 new_crtc_state->planes_changed,
10092 new_crtc_state->mode_changed,
10093 new_crtc_state->active_changed,
10094 new_crtc_state->connectors_changed);
10096 /* Remove stream for any changed/disabled CRTC */
10099 if (!dm_old_crtc_state->stream)
/* Freesync video mode: only the front porch changed — skip the modeset. */
10102 if (amdgpu_freesync_vid_mode && dm_new_crtc_state->stream &&
10103 is_timing_unchanged_for_freesync(new_crtc_state,
10105 new_crtc_state->mode_changed = false;
10107 "Mode change not required for front porch change, "
10108 "setting mode_changed to %d",
10109 new_crtc_state->mode_changed);
10111 set_freesync_fixed_config(dm_new_crtc_state);
10114 } else if (amdgpu_freesync_vid_mode && aconnector &&
10115 is_freesync_video_mode(&new_crtc_state->mode,
10117 struct drm_display_mode *high_mode;
10119 high_mode = get_highest_refresh_rate_mode(aconnector, false);
10120 if (!drm_mode_equal(&new_crtc_state->mode, high_mode)) {
10121 set_freesync_fixed_config(dm_new_crtc_state);
10125 ret = dm_atomic_get_state(state, &dm_state);
10129 DRM_DEBUG_DRIVER("Disabling DRM crtc: %d\n",
10132 /* i.e. reset mode */
10133 if (dc_remove_stream_from_ctx(
10136 dm_old_crtc_state->stream) != DC_OK) {
10141 dc_stream_release(dm_old_crtc_state->stream);
10142 dm_new_crtc_state->stream = NULL;
10144 reset_freesync_config_for_crtc(dm_new_crtc_state);
10146 *lock_and_validation_needed = true;
10148 } else {/* Add stream for any updated/enabled CRTC */
10150 * Quick fix to prevent NULL pointer on new_stream when
10151 * added MST connectors not found in existing crtc_state in the chained mode
10152 * TODO: need to dig out the root cause of that
10154 if (!aconnector || (!aconnector->dc_sink && aconnector->mst_port))
10157 if (modereset_required(new_crtc_state))
10160 if (modeset_required(new_crtc_state, new_stream,
10161 dm_old_crtc_state->stream)) {
10163 WARN_ON(dm_new_crtc_state->stream);
10165 ret = dm_atomic_get_state(state, &dm_state);
10169 dm_new_crtc_state->stream = new_stream;
/* Extra reference for the CRTC state; the creation ref is dropped below. */
10171 dc_stream_retain(new_stream);
10173 DRM_DEBUG_ATOMIC("Enabling DRM crtc: %d\n",
10176 if (dc_add_stream_to_ctx(
10179 dm_new_crtc_state->stream) != DC_OK) {
10184 *lock_and_validation_needed = true;
10189 /* Release extra reference */
10191 dc_stream_release(new_stream);
10194 * We want to do dc stream updates that do not require a
10195 * full modeset below.
10197 if (!(enable && aconnector && new_crtc_state->active))
10200 * Given above conditions, the dc state cannot be NULL because:
10201 * 1. We're in the process of enabling CRTCs (just been added
10202 * to the dc context, or already is on the context)
10203 * 2. Has a valid connector attached, and
10204 * 3. Is currently active and enabled.
10205 * => The dc stream state currently exists.
10207 BUG_ON(dm_new_crtc_state->stream == NULL);
10209 /* Scaling or underscan settings */
10210 if (is_scaling_state_different(dm_old_conn_state, dm_new_conn_state) ||
10211 drm_atomic_crtc_needs_modeset(new_crtc_state))
10212 update_stream_scaling_settings(
10213 &new_crtc_state->mode, dm_new_conn_state, dm_new_crtc_state->stream);
10216 dm_new_crtc_state->abm_level = dm_new_conn_state->abm_level;
10219 * Color management settings. We also update color properties
10220 * when a modeset is needed, to ensure it gets reprogrammed.
10222 if (dm_new_crtc_state->base.color_mgmt_changed ||
10223 drm_atomic_crtc_needs_modeset(new_crtc_state)) {
10224 ret = amdgpu_dm_update_crtc_color_mgmt(dm_new_crtc_state);
10229 /* Update Freesync settings. */
10230 get_freesync_config_for_crtc(dm_new_crtc_state,
10231 dm_new_conn_state);
10237 dc_stream_release(new_stream);
/*
 * should_reset_plane - decide whether a plane must be removed and re-added
 * (a "reset") as part of this atomic update.
 *
 * Returns true when the plane is being added/removed, its CRTC is doing a
 * modeset or color-management change, or any sibling non-cursor plane on
 * the same CRTC changed in a way (z-order, scaling, rotation, blending,
 * format, tiling/DCC) that forces the current DC architecture to recreate
 * every plane on that CRTC.
 */
10241 static bool should_reset_plane(struct drm_atomic_state *state,
10242 struct drm_plane *plane,
10243 struct drm_plane_state *old_plane_state,
10244 struct drm_plane_state *new_plane_state)
10246 struct drm_plane *other;
10247 struct drm_plane_state *old_other_state, *new_other_state;
10248 struct drm_crtc_state *new_crtc_state;
10252 * TODO: Remove this hack once the checks below are sufficient
10253 * enough to determine when we need to reset all the planes on
10256 if (state->allow_modeset)
10259 /* Exit early if we know that we're adding or removing the plane. */
10260 if (old_plane_state->crtc != new_plane_state->crtc)
10263 /* old crtc == new_crtc == NULL, plane not in context. */
10264 if (!new_plane_state->crtc)
10268 drm_atomic_get_new_crtc_state(state, new_plane_state->crtc)
10270 if (!new_crtc_state)
10273 /* CRTC Degamma changes currently require us to recreate planes. */
10274 if (new_crtc_state->color_mgmt_changed)
10277 if (drm_atomic_crtc_needs_modeset(new_crtc_state))
10281 * If there are any new primary or overlay planes being added or
10282 * removed then the z-order can potentially change. To ensure
10283 * correct z-order and pipe acquisition the current DC architecture
10284 * requires us to remove and recreate all existing planes.
10286 * TODO: Come up with a more elegant solution for this.
10288 for_each_oldnew_plane_in_state(state, other, old_other_state, new_other_state, i) {
10289 struct amdgpu_framebuffer *old_afb, *new_afb;
/* Cursor planes never force a reset of their siblings. */
10290 if (other->type == DRM_PLANE_TYPE_CURSOR)
/* Only consider planes that share a CRTC with the plane under test. */
10293 if (old_other_state->crtc != new_plane_state->crtc &&
10294 new_other_state->crtc != new_plane_state->crtc)
10297 if (old_other_state->crtc != new_other_state->crtc)
10300 /* Src/dst size and scaling updates. */
10301 if (old_other_state->src_w != new_other_state->src_w ||
10302 old_other_state->src_h != new_other_state->src_h ||
10303 old_other_state->crtc_w != new_other_state->crtc_w ||
10304 old_other_state->crtc_h != new_other_state->crtc_h)
10307 /* Rotation / mirroring updates. */
10308 if (old_other_state->rotation != new_other_state->rotation)
10311 /* Blending updates. */
10312 if (old_other_state->pixel_blend_mode !=
10313 new_other_state->pixel_blend_mode)
10316 /* Alpha updates. */
10317 if (old_other_state->alpha != new_other_state->alpha)
10320 /* Colorspace changes. */
10321 if (old_other_state->color_range != new_other_state->color_range ||
10322 old_other_state->color_encoding != new_other_state->color_encoding)
10325 /* Framebuffer checks fall at the end. */
10326 if (!old_other_state->fb || !new_other_state->fb)
10329 /* Pixel format changes can require bandwidth updates. */
10330 if (old_other_state->fb->format != new_other_state->fb->format)
10333 old_afb = (struct amdgpu_framebuffer *)old_other_state->fb;
10334 new_afb = (struct amdgpu_framebuffer *)new_other_state->fb;
10336 /* Tiling and DCC changes also require bandwidth updates. */
10337 if (old_afb->tiling_flags != new_afb->tiling_flags ||
10338 old_afb->base.modifier != new_afb->base.modifier)
/*
 * dm_check_cursor_fb() - validate a framebuffer attached to the cursor
 * plane: size within the CRTC's max cursor dimensions, no cropping
 * (src == fb size), pitch equal to width, and linear layout (checked
 * via tiling flags when the FB carries no modifier).
 *
 * Returns 0 on success; error paths (elided here) reject the FB.
 */
10345 static int dm_check_cursor_fb(struct amdgpu_crtc *new_acrtc,
10346 struct drm_plane_state *new_plane_state,
10347 struct drm_framebuffer *fb)
10349 struct amdgpu_device *adev = drm_to_adev(new_acrtc->base.dev);
10350 struct amdgpu_framebuffer *afb = to_amdgpu_framebuffer(fb);
10351 unsigned int pitch;
/* Reject FBs larger than the hardware cursor limits. */
10354 if (fb->width > new_acrtc->max_cursor_width ||
10355 fb->height > new_acrtc->max_cursor_height) {
10356 DRM_DEBUG_ATOMIC("Bad cursor FB size %dx%d\n",
10357 new_plane_state->fb->width,
10358 new_plane_state->fb->height);
/* src_w/src_h are 16.16 fixed point; they must match the FB exactly. */
10361 if (new_plane_state->src_w != fb->width << 16 ||
10362 new_plane_state->src_h != fb->height << 16) {
10363 DRM_DEBUG_ATOMIC("Cropping not supported for cursor plane\n");
10367 /* Pitch in pixels */
10368 pitch = fb->pitches[0] / fb->format->cpp[0];
10370 if (fb->width != pitch) {
10371 DRM_DEBUG_ATOMIC("Cursor FB width %d doesn't match pitch %d",
10380 /* FB pitch is supported by cursor plane */
/* NOTE(review): the pitch-value check itself is elided in this excerpt. */
10383 DRM_DEBUG_ATOMIC("Bad cursor FB pitch %d px\n", pitch);
10387 /* Core DRM takes care of checking FB modifiers, so we only need to
10388 * check tiling flags when the FB doesn't have a modifier. */
10389 if (!(fb->flags & DRM_MODE_FB_MODIFIERS)) {
/* Pre-AI (GFX9) families encode tiling differently from newer ASICs. */
10390 if (adev->family < AMDGPU_FAMILY_AI) {
10391 linear = AMDGPU_TILING_GET(afb->tiling_flags, ARRAY_MODE) != DC_ARRAY_2D_TILED_THIN1 &&
10392 AMDGPU_TILING_GET(afb->tiling_flags, ARRAY_MODE) != DC_ARRAY_1D_TILED_THIN1 &&
10393 AMDGPU_TILING_GET(afb->tiling_flags, MICRO_TILE_MODE) == 0;
10395 linear = AMDGPU_TILING_GET(afb->tiling_flags, SWIZZLE_MODE) == 0;
10398 DRM_DEBUG_ATOMIC("Cursor FB not linear");
/*
 * dm_update_plane_state() - sync a DRM plane change into the DC context.
 *
 * Called twice from atomic check: once with enable==false to remove
 * changed/removed planes from the old stream, then with enable==true to
 * add new/modified planes to the new stream.  Cursor planes are only
 * position-validated (no DC plane is created for them).  Sets
 * *lock_and_validation_needed whenever the DC context was touched, which
 * forces global DC validation later.
 *
 * NOTE(review): the 'enable' parameter and several returns/braces are
 * elided in this excerpt; comments follow the visible flow.
 */
10406 static int dm_update_plane_state(struct dc *dc,
10407 struct drm_atomic_state *state,
10408 struct drm_plane *plane,
10409 struct drm_plane_state *old_plane_state,
10410 struct drm_plane_state *new_plane_state,
10412 bool *lock_and_validation_needed)
10415 struct dm_atomic_state *dm_state = NULL;
10416 struct drm_crtc *new_plane_crtc, *old_plane_crtc;
10417 struct drm_crtc_state *old_crtc_state, *new_crtc_state;
10418 struct dm_crtc_state *dm_new_crtc_state, *dm_old_crtc_state;
10419 struct dm_plane_state *dm_new_plane_state, *dm_old_plane_state;
10420 struct amdgpu_crtc *new_acrtc;
10425 new_plane_crtc = new_plane_state->crtc;
10426 old_plane_crtc = old_plane_state->crtc;
10427 dm_new_plane_state = to_dm_plane_state(new_plane_state);
10428 dm_old_plane_state = to_dm_plane_state(old_plane_state);
/* Cursor planes: validate position/FB only, then bail out early. */
10430 if (plane->type == DRM_PLANE_TYPE_CURSOR) {
10431 if (!enable || !new_plane_crtc ||
10432 drm_atomic_plane_disabling(plane->state, new_plane_state))
10435 new_acrtc = to_amdgpu_crtc(new_plane_crtc);
/* Hardware cursor cannot crop its source. */
10437 if (new_plane_state->src_x != 0 || new_plane_state->src_y != 0) {
10438 DRM_DEBUG_ATOMIC("Cropping not supported for cursor plane\n");
10442 if (new_plane_state->fb) {
10443 ret = dm_check_cursor_fb(new_acrtc, new_plane_state,
10444 new_plane_state->fb);
10452 needs_reset = should_reset_plane(state, plane, old_plane_state,
10455 /* Remove any changed/removed planes */
/* Nothing to remove if the plane had no CRTC before. */
10460 if (!old_plane_crtc)
10463 old_crtc_state = drm_atomic_get_old_crtc_state(
10464 state, old_plane_crtc);
10465 dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
10467 if (!dm_old_crtc_state->stream)
10470 DRM_DEBUG_ATOMIC("Disabling DRM plane: %d on DRM crtc %d\n",
10471 plane->base.id, old_plane_crtc->base.id);
/* Acquire the DM private state (and its DC context) for mutation. */
10473 ret = dm_atomic_get_state(state, &dm_state);
10477 if (!dc_remove_plane_from_context(
10479 dm_old_crtc_state->stream,
10480 dm_old_plane_state->dc_state,
10481 dm_state->context)) {
/* Drop our reference; the new state no longer owns a DC plane. */
10487 dc_plane_state_release(dm_old_plane_state->dc_state);
10488 dm_new_plane_state->dc_state = NULL;
10490 *lock_and_validation_needed = true;
10492 } else { /* Add new planes */
10493 struct dc_plane_state *dc_new_plane_state;
10495 if (drm_atomic_plane_disabling(plane->state, new_plane_state))
10498 if (!new_plane_crtc)
10501 new_crtc_state = drm_atomic_get_new_crtc_state(state, new_plane_crtc);
10502 dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
10504 if (!dm_new_crtc_state->stream)
10510 ret = dm_plane_helper_check_state(new_plane_state, new_crtc_state);
/* A DC plane should not already be attached here. */
10514 WARN_ON(dm_new_plane_state->dc_state);
10516 dc_new_plane_state = dc_create_plane_state(dc);
10517 if (!dc_new_plane_state)
10520 DRM_DEBUG_ATOMIC("Enabling DRM plane: %d on DRM crtc %d\n",
10521 plane->base.id, new_plane_crtc->base.id);
10523 ret = fill_dc_plane_attributes(
10524 drm_to_adev(new_plane_crtc->dev),
10525 dc_new_plane_state,
/* On failure, release the freshly created DC plane before returning. */
10529 dc_plane_state_release(dc_new_plane_state);
10533 ret = dm_atomic_get_state(state, &dm_state);
10535 dc_plane_state_release(dc_new_plane_state);
10540 * Any atomic check errors that occur after this will
10541 * not need a release. The plane state will be attached
10542 * to the stream, and therefore part of the atomic
10543 * state. It'll be released when the atomic state is
10546 if (!dc_add_plane_to_context(
10548 dm_new_crtc_state->stream,
10549 dc_new_plane_state,
10550 dm_state->context)) {
10552 dc_plane_state_release(dc_new_plane_state);
/* Ownership of dc_new_plane_state transfers to the DM plane state. */
10556 dm_new_plane_state->dc_state = dc_new_plane_state;
10558 /* Tell DC to do a full surface update every time there
10559 * is a plane change. Inefficient, but works for now.
10561 dm_new_plane_state->dc_state->update_flags.bits.full_update = 1;
10563 *lock_and_validation_needed = true;
/*
 * dm_check_crtc_cursor() - reject cursor updates whose scaling differs
 * from the primary plane's.  DCE/DCN has no dedicated cursor plane; the
 * per-pipe cursor inherits the underlying pipe's scaling, so a mismatch
 * would render the cursor at the wrong size.
 */
10570 static int dm_check_crtc_cursor(struct drm_atomic_state *state,
10571 struct drm_crtc *crtc,
10572 struct drm_crtc_state *new_crtc_state)
10574 struct drm_plane_state *new_cursor_state, *new_primary_state;
10575 int cursor_scale_w, cursor_scale_h, primary_scale_w, primary_scale_h;
10577 /* On DCE and DCN there is no dedicated hardware cursor plane. We get a
10578 * cursor per pipe but it's going to inherit the scaling and
10579 * positioning from the underlying pipe. Check the cursor plane's
10580 * blending properties match the primary plane's. */
10582 new_cursor_state = drm_atomic_get_new_plane_state(state, crtc->cursor);
10583 new_primary_state = drm_atomic_get_new_plane_state(state, crtc->primary);
/* Nothing to compare if either plane is absent or has no FB. */
10584 if (!new_cursor_state || !new_primary_state ||
10585 !new_cursor_state->fb || !new_primary_state->fb) {
/*
 * Scale factors in per-mille: crtc size / src size (src is 16.16
 * fixed point, hence the >> 16).
 * NOTE(review): src_w/src_h >> 16 == 0 would divide by zero here;
 * presumably earlier plane checks prevent that — worth confirming.
 */
10589 cursor_scale_w = new_cursor_state->crtc_w * 1000 /
10590 (new_cursor_state->src_w >> 16);
10591 cursor_scale_h = new_cursor_state->crtc_h * 1000 /
10592 (new_cursor_state->src_h >> 16);
10594 primary_scale_w = new_primary_state->crtc_w * 1000 /
10595 (new_primary_state->src_w >> 16);
10596 primary_scale_h = new_primary_state->crtc_h * 1000 /
10597 (new_primary_state->src_h >> 16);
10599 if (cursor_scale_w != primary_scale_w ||
10600 cursor_scale_h != primary_scale_h) {
10601 drm_dbg_atomic(crtc->dev, "Cursor plane scaling doesn't match primary plane\n");
#if defined(CONFIG_DRM_AMD_DC_DCN)
/*
 * add_affected_mst_dsc_crtcs() - when a CRTC driving an MST connector
 * needs a modeset, pull every other CRTC sharing the same MST topology
 * into the atomic state, since DSC bandwidth is negotiated across the
 * whole topology.
 */
10609 static int add_affected_mst_dsc_crtcs(struct drm_atomic_state *state, struct drm_crtc *crtc)
10611 struct drm_connector *connector;
10612 struct drm_connector_state *conn_state, *old_conn_state;
10613 struct amdgpu_dm_connector *aconnector = NULL;
10615 for_each_oldnew_connector_in_state(state, connector, old_conn_state, conn_state, i) {
/* Fall back to the old state when the new one has no CRTC. */
10616 if (!conn_state->crtc)
10617 conn_state = old_conn_state;
10619 if (conn_state->crtc != crtc)
10622 aconnector = to_amdgpu_dm_connector(connector);
/* Only MST connectors (with a port and an mst_port) are relevant. */
10623 if (!aconnector->port || !aconnector->mst_port)
10632 return drm_dp_mst_add_affected_dsc_crtcs(state, &aconnector->mst_port->mst_mgr);
/*
 * validate_overlay() - when an overlay plane is being changed, require
 * that the CRTC's primary plane is enabled and fully contained within
 * the overlay's bounds (the hardware cursor draws on the topmost pipe,
 * so an overlay that doesn't cover the primary would clip the cursor).
 */
10636 static int validate_overlay(struct drm_atomic_state *state)
10639 struct drm_plane *plane;
10640 struct drm_plane_state *new_plane_state;
10641 struct drm_plane_state *primary_state, *overlay_state = NULL;
10643 /* Check if primary plane is contained inside overlay */
10644 for_each_new_plane_in_state_reverse(state, plane, new_plane_state, i) {
10645 if (plane->type == DRM_PLANE_TYPE_OVERLAY) {
/* Ignore overlays that are being disabled in this commit. */
10646 if (drm_atomic_plane_disabling(plane->state, new_plane_state))
10649 overlay_state = new_plane_state;
10654 /* check if we're making changes to the overlay plane */
10655 if (!overlay_state)
10658 /* check if overlay plane is enabled */
10659 if (!overlay_state->crtc)
10662 /* find the primary plane for the CRTC that the overlay is enabled on */
10663 primary_state = drm_atomic_get_plane_state(state, overlay_state->crtc->primary);
10664 if (IS_ERR(primary_state))
10665 return PTR_ERR(primary_state);
10667 /* check if primary plane is enabled */
10668 if (!primary_state->crtc)
10671 /* Perform the bounds check to ensure the overlay plane covers the primary */
10672 if (primary_state->crtc_x < overlay_state->crtc_x ||
10673 primary_state->crtc_y < overlay_state->crtc_y ||
10674 primary_state->crtc_x + primary_state->crtc_w > overlay_state->crtc_x + overlay_state->crtc_w ||
10675 primary_state->crtc_y + primary_state->crtc_h > overlay_state->crtc_y + overlay_state->crtc_h) {
10676 DRM_DEBUG_ATOMIC("Overlay plane is enabled with hardware cursor but does not fully cover primary plane\n");
 * amdgpu_dm_atomic_check() - Atomic check implementation for AMDgpu DM.
 * @dev: The DRM device
 * @state: The atomic state to commit
 *
 * Validate that the given atomic state is programmable by DC into hardware.
 * This involves constructing a &struct dc_state reflecting the new hardware
 * state we wish to commit, then querying DC to see if it is programmable. It's
 * important not to modify the existing DC state. Otherwise, atomic_check
 * may unexpectedly commit hardware changes.
 *
 * When validating the DC state, it's important that the right locks are
 * acquired. For full updates case which removes/adds/updates streams on one
 * CRTC while flipping on another CRTC, acquiring global lock will guarantee
 * that any such full update commit will wait for completion of any outstanding
 * flip using DRMs synchronization events.
 *
 * Note that DM adds the affected connectors for all CRTCs in state, when that
 * might not seem necessary. This is because DC stream creation requires the
 * DC sink, which is tied to the DRM connector state. Cleaning this up should
 * be possible but non-trivial - a possible TODO item.
 *
 * Return: -Error code if validation failed.
10707 static int amdgpu_dm_atomic_check(struct drm_device *dev,
10708 struct drm_atomic_state *state)
10710 struct amdgpu_device *adev = drm_to_adev(dev);
10711 struct dm_atomic_state *dm_state = NULL;
10712 struct dc *dc = adev->dm.dc;
10713 struct drm_connector *connector;
10714 struct drm_connector_state *old_con_state, *new_con_state;
10715 struct drm_crtc *crtc;
10716 struct drm_crtc_state *old_crtc_state, *new_crtc_state;
10717 struct drm_plane *plane;
10718 struct drm_plane_state *old_plane_state, *new_plane_state;
10719 enum dc_status status;
10721 bool lock_and_validation_needed = false;
10722 struct dm_crtc_state *dm_old_crtc_state;
10723 #if defined(CONFIG_DRM_AMD_DC_DCN)
10724 struct dsc_mst_fairness_vars vars[MAX_PIPES];
10727 trace_amdgpu_dm_atomic_check_begin(state);
/* Standard DRM modeset check first; everything below refines it. */
10729 ret = drm_atomic_helper_check_modeset(dev, state);
10733 /* Check connector changes */
10734 for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) {
10735 struct dm_connector_state *dm_old_con_state = to_dm_connector_state(old_con_state);
10736 struct dm_connector_state *dm_new_con_state = to_dm_connector_state(new_con_state);
10738 /* Skip connectors that are disabled or part of modeset already. */
10739 if (!old_con_state->crtc && !new_con_state->crtc)
10742 if (!new_con_state->crtc)
10745 new_crtc_state = drm_atomic_get_crtc_state(state, new_con_state->crtc);
10746 if (IS_ERR(new_crtc_state)) {
10747 ret = PTR_ERR(new_crtc_state);
/* An ABM (backlight) level change forces the CRTC through a modeset. */
10751 if (dm_old_con_state->abm_level !=
10752 dm_new_con_state->abm_level)
10753 new_crtc_state->connectors_changed = true;
10756 #if defined(CONFIG_DRM_AMD_DC_DCN)
/* Pull all MST-DSC-affected CRTCs into the state before validation. */
10757 if (dc_resource_is_dsc_encoding_supported(dc)) {
10758 for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
10759 if (drm_atomic_crtc_needs_modeset(new_crtc_state)) {
10760 ret = add_affected_mst_dsc_crtcs(state, crtc);
10767 for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
10768 dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
/* Fast path: skip CRTCs with no modeset, color or VRR/DSC changes. */
10770 if (!drm_atomic_crtc_needs_modeset(new_crtc_state) &&
10771 !new_crtc_state->color_mgmt_changed &&
10772 old_crtc_state->vrr_enabled == new_crtc_state->vrr_enabled &&
10773 dm_old_crtc_state->dsc_force_changed == false)
10776 ret = amdgpu_dm_verify_lut_sizes(new_crtc_state);
10780 if (!new_crtc_state->enable)
10783 ret = drm_atomic_add_affected_connectors(state, crtc);
10787 ret = drm_atomic_add_affected_planes(state, crtc);
/* A debugfs-forced DSC change must be treated as a mode change. */
10791 if (dm_old_crtc_state->dsc_force_changed)
10792 new_crtc_state->mode_changed = true;
10796 * Add all primary and overlay planes on the CRTC to the state
10797 * whenever a plane is enabled to maintain correct z-ordering
10798 * and to enable fast surface updates.
10800 drm_for_each_crtc(crtc, dev) {
10801 bool modified = false;
10803 for_each_oldnew_plane_in_state(state, plane, old_plane_state, new_plane_state, i) {
10804 if (plane->type == DRM_PLANE_TYPE_CURSOR)
10807 if (new_plane_state->crtc == crtc ||
10808 old_plane_state->crtc == crtc) {
/* Pull every non-cursor plane on a modified CRTC into the state. */
10817 drm_for_each_plane_mask(plane, state->dev, crtc->state->plane_mask) {
10818 if (plane->type == DRM_PLANE_TYPE_CURSOR)
10822 drm_atomic_get_plane_state(state, plane);
10824 if (IS_ERR(new_plane_state)) {
10825 ret = PTR_ERR(new_plane_state);
10831 /* Remove exiting planes if they are modified */
10832 for_each_oldnew_plane_in_state_reverse(state, plane, old_plane_state, new_plane_state, i) {
10833 ret = dm_update_plane_state(dc, state, plane,
10837 &lock_and_validation_needed);
10842 /* Disable all crtcs which require disable */
10843 for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
10844 ret = dm_update_crtc_state(&adev->dm, state, crtc,
10848 &lock_and_validation_needed);
10853 /* Enable all crtcs which require enable */
10854 for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
10855 ret = dm_update_crtc_state(&adev->dm, state, crtc,
10859 &lock_and_validation_needed);
10864 ret = validate_overlay(state);
10868 /* Add new/modified planes */
10869 for_each_oldnew_plane_in_state_reverse(state, plane, old_plane_state, new_plane_state, i) {
10870 ret = dm_update_plane_state(dc, state, plane,
10874 &lock_and_validation_needed);
10879 /* Run this here since we want to validate the streams we created */
10880 ret = drm_atomic_helper_check_planes(dev, state);
10884 /* Check cursor planes scaling */
10885 for_each_new_crtc_in_state(state, crtc, new_crtc_state, i) {
10886 ret = dm_check_crtc_cursor(state, crtc, new_crtc_state);
10891 if (state->legacy_cursor_update) {
10893 * This is a fast cursor update coming from the plane update
10894 * helper, check if it can be done asynchronously for better
10897 state->async_update =
10898 !drm_atomic_helper_async_check(dev, state);
10901 * Skip the remaining global validation if this is an async
10902 * update. Cursor updates can be done without affecting
10903 * state or bandwidth calcs and this avoids the performance
10904 * penalty of locking the private state object and
10905 * allocating a new dc_state.
10907 if (state->async_update)
10911 /* Check scaling and underscan changes*/
10912 /* TODO Removed scaling changes validation due to inability to commit
10913 * new stream into context w\o causing full reset. Need to
10914 * decide how to handle.
10916 for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) {
10917 struct dm_connector_state *dm_old_con_state = to_dm_connector_state(old_con_state);
10918 struct dm_connector_state *dm_new_con_state = to_dm_connector_state(new_con_state);
10919 struct amdgpu_crtc *acrtc = to_amdgpu_crtc(dm_new_con_state->base.crtc);
10921 /* Skip any modesets/resets */
10922 if (!acrtc || drm_atomic_crtc_needs_modeset(
10923 drm_atomic_get_new_crtc_state(state, &acrtc->base)))
10926 /* Skip any thing not scale or underscan changes */
10927 if (!is_scaling_state_different(dm_new_con_state, dm_old_con_state))
10930 lock_and_validation_needed = true;
10934 * Streams and planes are reset when there are changes that affect
10935 * bandwidth. Anything that affects bandwidth needs to go through
10936 * DC global validation to ensure that the configuration can be applied
10939 * We have to currently stall out here in atomic_check for outstanding
10940 * commits to finish in this case because our IRQ handlers reference
10941 * DRM state directly - we can end up disabling interrupts too early
10944 * TODO: Remove this stall and drop DM state private objects.
10946 if (lock_and_validation_needed) {
10947 ret = dm_atomic_get_state(state, &dm_state);
10951 ret = do_aquire_global_lock(dev, state);
10955 #if defined(CONFIG_DRM_AMD_DC_DCN)
/* Recompute MST DSC bandwidth sharing for the new topology. */
10956 if (!compute_mst_dsc_configs_for_state(state, dm_state->context, vars))
10959 ret = dm_update_mst_vcpi_slots_for_dsc(state, dm_state->context, vars);
10965 * Perform validation of MST topology in the state:
10966 * We need to perform MST atomic check before calling
10967 * dc_validate_global_state(), or there is a chance
10968 * to get stuck in an infinite loop and hang eventually.
10970 ret = drm_dp_mst_atomic_check(state);
10973 status = dc_validate_global_state(dc, dm_state->context, false);
10974 if (status != DC_OK) {
10975 drm_dbg_atomic(dev,
10976 "DC global validation failure: %s (%d)",
10977 dc_status_to_str(status), status);
10983 * The commit is a fast update. Fast updates shouldn't change
10984 * the DC context, affect global validation, and can have their
10985 * commit work done in parallel with other commits not touching
10986 * the same resource. If we have a new DC context as part of
10987 * the DM atomic state from validation we need to free it and
10988 * retain the existing one instead.
10990 * Furthermore, since the DM atomic state only contains the DC
10991 * context and can safely be annulled, we can free the state
10992 * and clear the associated private object now to free
10993 * some memory and avoid a possible use-after-free later.
10996 for (i = 0; i < state->num_private_objs; i++) {
10997 struct drm_private_obj *obj = state->private_objs[i].ptr;
10999 if (obj->funcs == adev->dm.atomic_obj.funcs) {
11000 int j = state->num_private_objs-1;
11002 dm_atomic_destroy_state(obj,
11003 state->private_objs[i].state);
11005 /* If i is not at the end of the array then the
11006 * last element needs to be moved to where i was
11007 * before the array can safely be truncated.
11010 state->private_objs[i] =
11011 state->private_objs[j];
11013 state->private_objs[j].ptr = NULL;
11014 state->private_objs[j].state = NULL;
11015 state->private_objs[j].old_state = NULL;
11016 state->private_objs[j].new_state = NULL;
11018 state->num_private_objs = j;
11024 /* Store the overall update type for use later in atomic check. */
11025 for_each_new_crtc_in_state (state, crtc, new_crtc_state, i) {
11026 struct dm_crtc_state *dm_new_crtc_state =
11027 to_dm_crtc_state(new_crtc_state);
11029 dm_new_crtc_state->update_type = lock_and_validation_needed ?
11034 /* Must be success */
11037 trace_amdgpu_dm_atomic_check_finish(state, ret);
/* Error path: report why validation stopped, then trace the result. */
11042 if (ret == -EDEADLK)
11043 DRM_DEBUG_DRIVER("Atomic check stopped to avoid deadlock.\n")
11044 else if (ret == -EINTR || ret == -EAGAIN || ret == -ERESTARTSYS)
11045 DRM_DEBUG_DRIVER("Atomic check stopped due to signal.\n")
11047 DRM_DEBUG_DRIVER("Atomic check failed with err: %d \n", ret)
11049 trace_amdgpu_dm_atomic_check_finish(state, ret);
/*
 * is_dp_capable_without_timing_msa() - read DP_DOWN_STREAM_PORT_COUNT
 * over DPCD and report whether the sink sets MSA_TIMING_PAR_IGNORED,
 * i.e. it can ignore the MSA timing parameters (a FreeSync/VRR
 * prerequisite on DP).  Returns false when there is no dc_link or the
 * DPCD read fails.
 */
11054 static bool is_dp_capable_without_timing_msa(struct dc *dc,
11055 struct amdgpu_dm_connector *amdgpu_dm_connector)
11058 bool capable = false;
11060 if (amdgpu_dm_connector->dc_link &&
11061 dm_helpers_dp_read_dpcd(
11063 amdgpu_dm_connector->dc_link,
11064 DP_DOWN_STREAM_PORT_COUNT,
11066 sizeof(dpcd_data))) {
11067 capable = (dpcd_data & DP_MSA_TIMING_PAR_IGNORED) ? true:false;
/*
 * dm_edid_parser_send_cea() - hand one chunk (at most
 * DMUB_EDID_CEA_DATA_CHUNK_BYTES) of a CEA EDID extension block to the
 * DMUB firmware for parsing, and interpret the reply.
 *
 * On a DMUB_CMD__EDID_CEA_AMD_VSDB reply with a VSDB found, fills @vsdb
 * with the FreeSync capabilities.  Returns false on send failure, a
 * NACK, or an unknown reply type (exact returns elided in this excerpt).
 */
11073 static bool dm_edid_parser_send_cea(struct amdgpu_display_manager *dm,
11074 unsigned int offset,
11075 unsigned int total_length,
11077 unsigned int length,
11078 struct amdgpu_hdmi_vsdb_info *vsdb)
11081 union dmub_rb_cmd cmd;
11082 struct dmub_cmd_send_edid_cea *input;
11083 struct dmub_cmd_edid_cea_output *output;
/* Chunks larger than the DMUB mailbox payload cannot be sent. */
11085 if (length > DMUB_EDID_CEA_DATA_CHUNK_BYTES)
11088 memset(&cmd, 0, sizeof(cmd));
11090 input = &cmd.edid_cea.data.input;
11092 cmd.edid_cea.header.type = DMUB_CMD__EDID_CEA;
11093 cmd.edid_cea.header.sub_type = 0;
11094 cmd.edid_cea.header.payload_bytes =
11095 sizeof(cmd.edid_cea) - sizeof(cmd.edid_cea.header);
11096 input->offset = offset;
11097 input->length = length;
11098 input->total_length = total_length;
11099 memcpy(input->payload, data, length);
/* Synchronous round-trip to the DMUB service. */
11101 res = dc_dmub_srv_cmd_with_reply_data(dm->dc->ctx->dmub_srv, &cmd);
11103 DRM_ERROR("EDID CEA parser failed\n");
11107 output = &cmd.edid_cea.data.output;
11109 if (output->type == DMUB_CMD__EDID_CEA_ACK) {
11110 if (!output->ack.success) {
11111 DRM_ERROR("EDID CEA ack failed at offset %d\n",
11112 output->ack.offset);
11114 } else if (output->type == DMUB_CMD__EDID_CEA_AMD_VSDB) {
11115 if (!output->amd_vsdb.vsdb_found)
/* AMD VSDB found: copy FreeSync capability data to the caller. */
11118 vsdb->freesync_supported = output->amd_vsdb.freesync_supported;
11119 vsdb->amd_vsdb_version = output->amd_vsdb.amd_vsdb_version;
11120 vsdb->min_refresh_rate_hz = output->amd_vsdb.min_frame_rate;
11121 vsdb->max_refresh_rate_hz = output->amd_vsdb.max_frame_rate;
11123 DRM_WARN("Unknown EDID CEA parser results\n");
/*
 * parse_edid_cea_dmcu() - stream a CEA EDID extension block to the DMCU
 * firmware, 8 bytes at a time, then poll for an AMD VSDB result.  Fills
 * @vsdb_info with FreeSync min/max refresh rates when a VSDB is found.
 */
11130 static bool parse_edid_cea_dmcu(struct amdgpu_display_manager *dm,
11131 uint8_t *edid_ext, int len,
11132 struct amdgpu_hdmi_vsdb_info *vsdb_info)
11136 /* send extension block to DMCU for parsing */
11137 for (i = 0; i < len; i += 8) {
11141 /* send 8 bytes a time */
11142 if (!dc_edid_parser_send_cea(dm->dc, i, len, &edid_ext[i], 8))
11146 /* EDID block sent completed, expect result */
11147 int version, min_rate, max_rate;
11149 res = dc_edid_parser_recv_amd_vsdb(dm->dc, &version, &min_rate, &max_rate);
11151 /* amd vsdb found */
11152 vsdb_info->freesync_supported = 1;
11153 vsdb_info->amd_vsdb_version = version;
11154 vsdb_info->min_refresh_rate_hz = min_rate;
11155 vsdb_info->max_refresh_rate_hz = max_rate;
/* No VSDB reply: fall back to waiting for a plain chunk ACK. */
11163 res = dc_edid_parser_recv_cea_ack(dm->dc, &offset);
/*
 * parse_edid_cea_dmub() - DMUB variant of the CEA extension parser:
 * stream the block 8 bytes at a time via dm_edid_parser_send_cea(),
 * which fills @vsdb_info as replies come back.  Returns whether a
 * FreeSync-capable VSDB was found.
 */
11171 static bool parse_edid_cea_dmub(struct amdgpu_display_manager *dm,
11172 uint8_t *edid_ext, int len,
11173 struct amdgpu_hdmi_vsdb_info *vsdb_info)
11177 /* send extension block to DMCU for parsing */
11178 for (i = 0; i < len; i += 8) {
11179 /* send 8 bytes a time */
11180 if (!dm_edid_parser_send_cea(dm, i, len, &edid_ext[i], 8, vsdb_info))
11184 return vsdb_info->freesync_supported;
/*
 * parse_edid_cea() - dispatch CEA EDID extension parsing to the DMUB
 * path when a DMUB service exists, otherwise to the legacy DMCU path.
 */
11187 static bool parse_edid_cea(struct amdgpu_dm_connector *aconnector,
11188 uint8_t *edid_ext, int len,
11189 struct amdgpu_hdmi_vsdb_info *vsdb_info)
11191 struct amdgpu_device *adev = drm_to_adev(aconnector->base.dev);
11193 if (adev->dm.dmub_srv)
11194 return parse_edid_cea_dmub(&adev->dm, edid_ext, len, vsdb_info);
11196 return parse_edid_cea_dmcu(&adev->dm, edid_ext, len, vsdb_info);
/*
 * parse_hdmi_amd_vsdb() - locate the CEA extension in @edid and parse it
 * for an AMD vendor-specific data block (HDMI FreeSync).
 *
 * Return: the (1-based loop) index of the CEA extension block when a
 * valid VSDB was found, -ENODEV otherwise.
 */
11199 static int parse_hdmi_amd_vsdb(struct amdgpu_dm_connector *aconnector,
11200 struct edid *edid, struct amdgpu_hdmi_vsdb_info *vsdb_info)
11202 uint8_t *edid_ext = NULL;
11204 bool valid_vsdb_found = false;
11206 /*----- drm_find_cea_extension() -----*/
11207 /* No EDID or EDID extensions */
11208 if (edid == NULL || edid->extensions == 0)
11211 /* Find CEA extension */
11212 for (i = 0; i < edid->extensions; i++) {
/* Extension blocks follow the base block, EDID_LENGTH bytes each. */
11213 edid_ext = (uint8_t *)edid + EDID_LENGTH * (i + 1);
11214 if (edid_ext[0] == CEA_EXT)
11218 if (i == edid->extensions)
11221 /*----- cea_db_offsets() -----*/
11222 if (edid_ext[0] != CEA_EXT)
11225 valid_vsdb_found = parse_edid_cea(aconnector, edid_ext, EDID_LENGTH, vsdb_info);
11227 return valid_vsdb_found ? i : -ENODEV;
/*
 * amdgpu_dm_update_freesync_caps() - derive FreeSync/VRR capability for
 * @connector from @edid and publish it.
 *
 * For DP/eDP sinks that ignore MSA timing, scans the EDID detailed
 * timings for a continuous-frequency monitor range descriptor; for HDMI
 * sinks, parses the AMD VSDB via firmware.  A sink is considered
 * freesync-capable when max_vfreq - min_vfreq > 10 Hz.  Results are
 * stored on the amdgpu connector, the DRM display_info monitor range,
 * the DM connector state, and the vrr_capable property.
 */
11230 void amdgpu_dm_update_freesync_caps(struct drm_connector *connector,
11234 struct detailed_timing *timing;
11235 struct detailed_non_pixel *data;
11236 struct detailed_data_monitor_range *range;
11237 struct amdgpu_dm_connector *amdgpu_dm_connector =
11238 to_amdgpu_dm_connector(connector);
11239 struct dm_connector_state *dm_con_state = NULL;
11241 struct drm_device *dev = connector->dev;
11242 struct amdgpu_device *adev = drm_to_adev(dev);
11243 bool freesync_capable = false;
11244 struct amdgpu_hdmi_vsdb_info vsdb_info = {0};
11246 if (!connector->state) {
11247 DRM_ERROR("%s - Connector has no state", __func__);
11252 dm_con_state = to_dm_connector_state(connector->state);
/* Reset cached capabilities before re-deriving them from the EDID. */
11254 amdgpu_dm_connector->min_vfreq = 0;
11255 amdgpu_dm_connector->max_vfreq = 0;
11256 amdgpu_dm_connector->pixel_clock_mhz = 0;
11261 dm_con_state = to_dm_connector_state(connector->state);
11263 if (!amdgpu_dm_connector->dc_sink) {
11264 DRM_ERROR("dc_sink NULL, could not add free_sync module.\n");
11267 if (!adev->dm.freesync_module)
/* DP / eDP path: use the EDID monitor range descriptor. */
11271 if (amdgpu_dm_connector->dc_sink->sink_signal == SIGNAL_TYPE_DISPLAY_PORT
11272 || amdgpu_dm_connector->dc_sink->sink_signal == SIGNAL_TYPE_EDP) {
11273 bool edid_check_required = false;
11276 edid_check_required = is_dp_capable_without_timing_msa(
11278 amdgpu_dm_connector);
/* Range descriptors require EDID 1.1+. */
11281 if (edid_check_required == true && (edid->version > 1 ||
11282 (edid->version == 1 && edid->revision > 1))) {
11283 for (i = 0; i < 4; i++) {
11285 timing = &edid->detailed_timings[i];
11286 data = &timing->data.other_data;
11287 range = &data->data.range;
11289 * Check if monitor has continuous frequency mode
11291 if (data->type != EDID_DETAIL_MONITOR_RANGE)
11294 * Check for flag range limits only. If flag == 1 then
11295 * no additional timing information provided.
11296 * Default GTF, GTF Secondary curve and CVT are not
11299 if (range->flags != 1)
11302 amdgpu_dm_connector->min_vfreq = range->min_vfreq;
11303 amdgpu_dm_connector->max_vfreq = range->max_vfreq;
11304 amdgpu_dm_connector->pixel_clock_mhz =
11305 range->pixel_clock_mhz * 10;
11307 connector->display_info.monitor_range.min_vfreq = range->min_vfreq;
11308 connector->display_info.monitor_range.max_vfreq = range->max_vfreq;
11313 if (amdgpu_dm_connector->max_vfreq -
11314 amdgpu_dm_connector->min_vfreq > 10) {
11316 freesync_capable = true;
/* HDMI path: FreeSync info lives in the AMD VSDB, parsed by firmware. */
11319 } else if (edid && amdgpu_dm_connector->dc_sink->sink_signal == SIGNAL_TYPE_HDMI_TYPE_A) {
11320 i = parse_hdmi_amd_vsdb(amdgpu_dm_connector, edid, &vsdb_info);
11321 if (i >= 0 && vsdb_info.freesync_supported) {
11322 timing = &edid->detailed_timings[i];
11323 data = &timing->data.other_data;
11325 amdgpu_dm_connector->min_vfreq = vsdb_info.min_refresh_rate_hz;
11326 amdgpu_dm_connector->max_vfreq = vsdb_info.max_refresh_rate_hz;
11327 if (amdgpu_dm_connector->max_vfreq - amdgpu_dm_connector->min_vfreq > 10)
11328 freesync_capable = true;
11330 connector->display_info.monitor_range.min_vfreq = vsdb_info.min_refresh_rate_hz;
11331 connector->display_info.monitor_range.max_vfreq = vsdb_info.max_refresh_rate_hz;
11337 dm_con_state->freesync_capable = freesync_capable;
11339 if (connector->vrr_capable_property)
11340 drm_connector_set_vrr_capable_property(connector,
/*
 * amdgpu_dm_trigger_timing_sync() - propagate the force_timing_sync
 * debug setting to every active DC stream and re-trigger CRTC sync.
 * Serialized against other DC access via dm.dc_lock.
 */
11344 void amdgpu_dm_trigger_timing_sync(struct drm_device *dev)
11346 struct amdgpu_device *adev = drm_to_adev(dev);
11347 struct dc *dc = adev->dm.dc;
11350 mutex_lock(&adev->dm.dc_lock);
11351 if (dc->current_state) {
/* Apply the force flag to each stream in the current DC state. */
11352 for (i = 0; i < dc->current_state->stream_count; ++i)
11353 dc->current_state->streams[i]
11354 ->triggered_crtc_reset.enabled =
11355 adev->dm.force_timing_sync;
11357 dm_enable_per_frame_crtc_master_sync(dc->current_state);
11358 dc_trigger_sync(dc, dc->current_state);
11360 mutex_unlock(&adev->dm.dc_lock);
/*
 * dm_write_reg_func() - DC register-write hook: write @value to
 * @address through CGS and record the access in the write trace.
 * With DM_CHECK_ADDR_0 defined, writes to address 0 are flagged.
 */
11363 void dm_write_reg_func(const struct dc_context *ctx, uint32_t address,
11364 uint32_t value, const char *func_name)
11366 #ifdef DM_CHECK_ADDR_0
11367 if (address == 0) {
11368 DC_ERR("invalid register write. address = 0");
11372 cgs_write_register(ctx->cgs_device, address, value);
11373 trace_amdgpu_dc_wreg(&ctx->perf_trace->write_count, address, value);
/*
 * dm_read_reg_func() - DC register-read hook: read @address through CGS
 * and trace it.  While a DMUB register-offload gather is in progress
 * (and not burst-writing), the read is short-circuited — presumably
 * handled by the offload path; exact return elided in this excerpt.
 */
11376 uint32_t dm_read_reg_func(const struct dc_context *ctx, uint32_t address,
11377 const char *func_name)
11380 #ifdef DM_CHECK_ADDR_0
11381 if (address == 0) {
11382 DC_ERR("invalid register read; address = 0\n");
11387 if (ctx->dmub_srv &&
11388 ctx->dmub_srv->reg_helper_offload.gather_in_progress &&
11389 !ctx->dmub_srv->reg_helper_offload.should_burst_write) {
11394 value = cgs_read_register(ctx->cgs_device, address);
11396 trace_amdgpu_dc_rreg(&ctx->perf_trace->read_count, address, value);
11401 int amdgpu_dm_process_dmub_aux_transfer_sync(struct dc_context *ctx, unsigned int linkIndex,
11402 struct aux_payload *payload, enum aux_return_code_type *operation_result)
11404 struct amdgpu_device *adev = ctx->driver_context;
11407 dc_process_dmub_aux_transfer_async(ctx->dc, linkIndex, payload);
11408 ret = wait_for_completion_interruptible_timeout(&adev->dm.dmub_aux_transfer_done, 10*HZ);
11410 *operation_result = AUX_RET_ERROR_TIMEOUT;
11413 *operation_result = (enum aux_return_code_type)adev->dm.dmub_notify->result;
11415 if (adev->dm.dmub_notify->result == AUX_RET_SUCCESS) {
11416 (*payload->reply) = adev->dm.dmub_notify->aux_reply.command;
11418 // For read case, Copy data to payload
11419 if (!payload->write && adev->dm.dmub_notify->aux_reply.length &&
11420 (*payload->reply == AUX_TRANSACTION_REPLY_AUX_ACK))
11421 memcpy(payload->data, adev->dm.dmub_notify->aux_reply.data,
11422 adev->dm.dmub_notify->aux_reply.length);
11425 return adev->dm.dmub_notify->aux_reply.length;