1 /*
2  * Copyright 2015 Advanced Micro Devices, Inc.
3  *
4  * Permission is hereby granted, free of charge, to any person obtaining a
5  * copy of this software and associated documentation files (the "Software"),
6  * to deal in the Software without restriction, including without limitation
7  * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8  * and/or sell copies of the Software, and to permit persons to whom the
9  * Software is furnished to do so, subject to the following conditions:
10  *
11  * The above copyright notice and this permission notice shall be included in
12  * all copies or substantial portions of the Software.
13  *
14  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
17  * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18  * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19  * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20  * OTHER DEALINGS IN THE SOFTWARE.
21  *
22  * Authors: AMD
23  *
24  */
25
26 /* The caprices of the preprocessor require that this be declared right here */
27 #define CREATE_TRACE_POINTS
28
29 #include "dm_services_types.h"
30 #include "dc.h"
31 #include "dc_link_dp.h"
32 #include "link_enc_cfg.h"
33 #include "dc/inc/core_types.h"
34 #include "dal_asic_id.h"
35 #include "dmub/dmub_srv.h"
36 #include "dc/inc/hw/dmcu.h"
37 #include "dc/inc/hw/abm.h"
38 #include "dc/dc_dmub_srv.h"
39 #include "dc/dc_edid_parser.h"
40 #include "dc/dc_stat.h"
41 #include "amdgpu_dm_trace.h"
42
43 #include "vid.h"
44 #include "amdgpu.h"
45 #include "amdgpu_display.h"
46 #include "amdgpu_ucode.h"
47 #include "atom.h"
48 #include "amdgpu_dm.h"
49 #include "amdgpu_dm_plane.h"
50 #include "amdgpu_dm_crtc.h"
51 #ifdef CONFIG_DRM_AMD_DC_HDCP
52 #include "amdgpu_dm_hdcp.h"
53 #include <drm/display/drm_hdcp_helper.h>
54 #endif
55 #include "amdgpu_pm.h"
56 #include "amdgpu_atombios.h"
57
58 #include "amd_shared.h"
59 #include "amdgpu_dm_irq.h"
60 #include "dm_helpers.h"
61 #include "amdgpu_dm_mst_types.h"
62 #if defined(CONFIG_DEBUG_FS)
63 #include "amdgpu_dm_debugfs.h"
64 #endif
65 #include "amdgpu_dm_psr.h"
66
67 #include "ivsrcid/ivsrcid_vislands30.h"
68
69 #include "i2caux_interface.h"
70 #include <linux/module.h>
71 #include <linux/moduleparam.h>
72 #include <linux/types.h>
73 #include <linux/pm_runtime.h>
74 #include <linux/pci.h>
75 #include <linux/firmware.h>
76 #include <linux/component.h>
77 #include <linux/dmi.h>
78
79 #include <drm/display/drm_dp_mst_helper.h>
80 #include <drm/display/drm_hdmi_helper.h>
81 #include <drm/drm_atomic.h>
82 #include <drm/drm_atomic_uapi.h>
83 #include <drm/drm_atomic_helper.h>
84 #include <drm/drm_blend.h>
85 #include <drm/drm_fourcc.h>
86 #include <drm/drm_edid.h>
87 #include <drm/drm_vblank.h>
88 #include <drm/drm_audio_component.h>
89 #include <drm/drm_gem_atomic_helper.h>
90 #include <drm/drm_plane_helper.h>
91
92 #include <acpi/video.h>
93
94 #include "ivsrcid/dcn/irqsrcs_dcn_1_0.h"
95
96 #include "dcn/dcn_1_0_offset.h"
97 #include "dcn/dcn_1_0_sh_mask.h"
98 #include "soc15_hw_ip.h"
99 #include "soc15_common.h"
100 #include "vega10_ip_offset.h"
101
102 #include "gc/gc_11_0_0_offset.h"
103 #include "gc/gc_11_0_0_sh_mask.h"
104
105 #include "modules/inc/mod_freesync.h"
106 #include "modules/power/power_helpers.h"
107 #include "modules/inc/mod_info_packet.h"
108
109 #define FIRMWARE_RENOIR_DMUB "amdgpu/renoir_dmcub.bin"
110 MODULE_FIRMWARE(FIRMWARE_RENOIR_DMUB);
111 #define FIRMWARE_SIENNA_CICHLID_DMUB "amdgpu/sienna_cichlid_dmcub.bin"
112 MODULE_FIRMWARE(FIRMWARE_SIENNA_CICHLID_DMUB);
113 #define FIRMWARE_NAVY_FLOUNDER_DMUB "amdgpu/navy_flounder_dmcub.bin"
114 MODULE_FIRMWARE(FIRMWARE_NAVY_FLOUNDER_DMUB);
115 #define FIRMWARE_GREEN_SARDINE_DMUB "amdgpu/green_sardine_dmcub.bin"
116 MODULE_FIRMWARE(FIRMWARE_GREEN_SARDINE_DMUB);
117 #define FIRMWARE_VANGOGH_DMUB "amdgpu/vangogh_dmcub.bin"
118 MODULE_FIRMWARE(FIRMWARE_VANGOGH_DMUB);
119 #define FIRMWARE_DIMGREY_CAVEFISH_DMUB "amdgpu/dimgrey_cavefish_dmcub.bin"
120 MODULE_FIRMWARE(FIRMWARE_DIMGREY_CAVEFISH_DMUB);
121 #define FIRMWARE_BEIGE_GOBY_DMUB "amdgpu/beige_goby_dmcub.bin"
122 MODULE_FIRMWARE(FIRMWARE_BEIGE_GOBY_DMUB);
123 #define FIRMWARE_YELLOW_CARP_DMUB "amdgpu/yellow_carp_dmcub.bin"
124 MODULE_FIRMWARE(FIRMWARE_YELLOW_CARP_DMUB);
125 #define FIRMWARE_DCN_314_DMUB "amdgpu/dcn_3_1_4_dmcub.bin"
126 MODULE_FIRMWARE(FIRMWARE_DCN_314_DMUB);
127 #define FIRMWARE_DCN_315_DMUB "amdgpu/dcn_3_1_5_dmcub.bin"
128 MODULE_FIRMWARE(FIRMWARE_DCN_315_DMUB);
129 #define FIRMWARE_DCN316_DMUB "amdgpu/dcn_3_1_6_dmcub.bin"
130 MODULE_FIRMWARE(FIRMWARE_DCN316_DMUB);
131
132 #define FIRMWARE_DCN_V3_2_0_DMCUB "amdgpu/dcn_3_2_0_dmcub.bin"
133 MODULE_FIRMWARE(FIRMWARE_DCN_V3_2_0_DMCUB);
134 #define FIRMWARE_DCN_V3_2_1_DMCUB "amdgpu/dcn_3_2_1_dmcub.bin"
135 MODULE_FIRMWARE(FIRMWARE_DCN_V3_2_1_DMCUB);
136
137 #define FIRMWARE_RAVEN_DMCU             "amdgpu/raven_dmcu.bin"
138 MODULE_FIRMWARE(FIRMWARE_RAVEN_DMCU);
139
140 #define FIRMWARE_NAVI12_DMCU            "amdgpu/navi12_dmcu.bin"
141 MODULE_FIRMWARE(FIRMWARE_NAVI12_DMCU);
142
143 /* Number of bytes in PSP header for firmware. */
144 #define PSP_HEADER_BYTES 0x100
145
146 /* Number of bytes in PSP footer for firmware. */
147 #define PSP_FOOTER_BYTES 0x100
148
149 /**
150  * DOC: overview
151  *
152  * The AMDgpu display manager, **amdgpu_dm** (or even simpler,
153  * **dm**) sits between DRM and DC. It acts as a liaison, converting DRM
154  * requests into DC requests, and DC responses into DRM responses.
155  *
156  * The root control structure is &struct amdgpu_display_manager.
157  */
158
159 /* basic init/fini API */
160 static int amdgpu_dm_init(struct amdgpu_device *adev);
161 static void amdgpu_dm_fini(struct amdgpu_device *adev);
162 static bool is_freesync_video_mode(const struct drm_display_mode *mode, struct amdgpu_dm_connector *aconnector);
163
164 static enum drm_mode_subconnector get_subconnector_type(struct dc_link *link)
165 {
166         switch (link->dpcd_caps.dongle_type) {
167         case DISPLAY_DONGLE_NONE:
168                 return DRM_MODE_SUBCONNECTOR_Native;
169         case DISPLAY_DONGLE_DP_VGA_CONVERTER:
170                 return DRM_MODE_SUBCONNECTOR_VGA;
171         case DISPLAY_DONGLE_DP_DVI_CONVERTER:
172         case DISPLAY_DONGLE_DP_DVI_DONGLE:
173                 return DRM_MODE_SUBCONNECTOR_DVID;
174         case DISPLAY_DONGLE_DP_HDMI_CONVERTER:
175         case DISPLAY_DONGLE_DP_HDMI_DONGLE:
176                 return DRM_MODE_SUBCONNECTOR_HDMIA;
177         case DISPLAY_DONGLE_DP_HDMI_MISMATCHED_DONGLE:
178         default:
179                 return DRM_MODE_SUBCONNECTOR_Unknown;
180         }
181 }
182
183 static void update_subconnector_property(struct amdgpu_dm_connector *aconnector)
184 {
185         struct dc_link *link = aconnector->dc_link;
186         struct drm_connector *connector = &aconnector->base;
187         enum drm_mode_subconnector subconnector = DRM_MODE_SUBCONNECTOR_Unknown;
188
189         if (connector->connector_type != DRM_MODE_CONNECTOR_DisplayPort)
190                 return;
191
192         if (aconnector->dc_sink)
193                 subconnector = get_subconnector_type(link);
194
195         drm_object_property_set_value(&connector->base,
196                         connector->dev->mode_config.dp_subconnector_property,
197                         subconnector);
198 }
199
200 /*
201  * initializes drm_device display related structures, based on the information
202  * provided by DAL. The DRM structures are: drm_crtc, drm_connector,
203  * drm_encoder, drm_mode_config
204  *
205  * Returns 0 on success
206  */
207 static int amdgpu_dm_initialize_drm_device(struct amdgpu_device *adev);
208 /* removes and deallocates the drm structures, created by the above function */
209 static void amdgpu_dm_destroy_drm_device(struct amdgpu_display_manager *dm);
210
211 static int amdgpu_dm_connector_init(struct amdgpu_display_manager *dm,
212                                     struct amdgpu_dm_connector *amdgpu_dm_connector,
213                                     uint32_t link_index,
214                                     struct amdgpu_encoder *amdgpu_encoder);
215 static int amdgpu_dm_encoder_init(struct drm_device *dev,
216                                   struct amdgpu_encoder *aencoder,
217                                   uint32_t link_index);
218
219 static int amdgpu_dm_connector_get_modes(struct drm_connector *connector);
220
221 static void amdgpu_dm_atomic_commit_tail(struct drm_atomic_state *state);
222
223 static int amdgpu_dm_atomic_check(struct drm_device *dev,
224                                   struct drm_atomic_state *state);
225
226 static void handle_hpd_irq_helper(struct amdgpu_dm_connector *aconnector);
227 static void handle_hpd_rx_irq(void *param);
228
229 static bool
230 is_timing_unchanged_for_freesync(struct drm_crtc_state *old_crtc_state,
231                                  struct drm_crtc_state *new_crtc_state);
232 /*
233  * dm_vblank_get_counter
234  *
235  * @brief
236  * Get counter for number of vertical blanks
237  *
238  * @param
239  * struct amdgpu_device *adev - [in] desired amdgpu device
240  * int crtc - [in] which CRTC to get the counter from
241  *
242  * @return
243  * Counter for vertical blanks
244  */
245 static u32 dm_vblank_get_counter(struct amdgpu_device *adev, int crtc)
246 {
247         if (crtc >= adev->mode_info.num_crtc)
248                 return 0;
249         else {
250                 struct amdgpu_crtc *acrtc = adev->mode_info.crtcs[crtc];
251
252                 if (acrtc->dm_irq_params.stream == NULL) {
253                         DRM_ERROR("dc_stream_state is NULL for crtc '%d'!\n",
254                                   crtc);
255                         return 0;
256                 }
257
258                 return dc_stream_get_vblank_counter(acrtc->dm_irq_params.stream);
259         }
260 }
261
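/**
 * dm_crtc_get_scanoutpos() - Get scanout position and vblank range for a CRTC
 * @adev: amdgpu device
 * @crtc: CRTC index
 * @vbl: vblank start in the low 16 bits, vblank end in the high 16 bits
 * @position: vertical position in the low 16 bits, horizontal position in
 *            the high 16 bits
 *
 * Return: 0 on success (including when the stream is NULL, which is logged
 * as an error), or -EINVAL for an invalid CRTC index.
 */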
262 static int dm_crtc_get_scanoutpos(struct amdgpu_device *adev, int crtc,
263                                   u32 *vbl, u32 *position)
264 {
265         uint32_t v_blank_start, v_blank_end, h_position, v_position;
266
267         if ((crtc < 0) || (crtc >= adev->mode_info.num_crtc))
268                 return -EINVAL;
269         else {
270                 struct amdgpu_crtc *acrtc = adev->mode_info.crtcs[crtc];
271
272                 if (acrtc->dm_irq_params.stream ==  NULL) {
273                         DRM_ERROR("dc_stream_state is NULL for crtc '%d'!\n",
274                                   crtc);
275                         return 0;
276                 }
277
278                 /*
279                  * TODO rework base driver to use values directly.
280                  * for now parse it back into reg-format
281                  */
282                 dc_stream_get_scanoutpos(acrtc->dm_irq_params.stream,
283                                          &v_blank_start,
284                                          &v_blank_end,
285                                          &h_position,
286                                          &v_position);
287
288                 *position = v_position | (h_position << 16);
289                 *vbl = v_blank_start | (v_blank_end << 16);
290         }
291
292         return 0;
293 }
294
295 static bool dm_is_idle(void *handle)
296 {
297         /* XXX todo */
298         return true;
299 }
300
301 static int dm_wait_for_idle(void *handle)
302 {
303         /* XXX todo */
304         return 0;
305 }
306
307 static bool dm_check_soft_reset(void *handle)
308 {
309         return false;
310 }
311
312 static int dm_soft_reset(void *handle)
313 {
314         /* XXX todo */
315         return 0;
316 }
317
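/**
 * get_crtc_by_otg_inst() - Find the amdgpu_crtc driven by a given OTG instance
 * @adev: amdgpu device
 * @otg_inst: OTG (output timing generator) instance from the interrupt source
 *
 * Walks the DRM CRTC list and returns the CRTC whose otg_inst matches.
 * Warns and falls back to the first CRTC if @otg_inst is -1; returns NULL
 * if no match is found.
 */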
318 static struct amdgpu_crtc *
319 get_crtc_by_otg_inst(struct amdgpu_device *adev,
320                      int otg_inst)
321 {
322         struct drm_device *dev = adev_to_drm(adev);
323         struct drm_crtc *crtc;
324         struct amdgpu_crtc *amdgpu_crtc;
325
326         if (WARN_ON(otg_inst == -1))
327                 return adev->mode_info.crtcs[0];
328
329         list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
330                 amdgpu_crtc = to_amdgpu_crtc(crtc);
331
332                 if (amdgpu_crtc->otg_inst == otg_inst)
333                         return amdgpu_crtc;
334         }
335
336         return NULL;
337 }
338
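/**
 * is_dc_timing_adjust_needed() - Check whether DC vmin/vmax needs adjusting
 * @old_state: previous DM CRTC state
 * @new_state: new DM CRTC state
 *
 * A timing adjustment is needed when the new state requests a fixed VRR
 * refresh rate, or when VRR has been switched on or off between the two
 * states.
 */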
339 static inline bool is_dc_timing_adjust_needed(struct dm_crtc_state *old_state,
340                                               struct dm_crtc_state *new_state)
341 {
342         if (new_state->freesync_config.state ==  VRR_STATE_ACTIVE_FIXED)
343                 return true;
344         else if (amdgpu_dm_vrr_active(old_state) != amdgpu_dm_vrr_active(new_state))
345                 return true;
346         else
347                 return false;
348 }
349
350 /**
351  * dm_pflip_high_irq() - Handle pageflip interrupt
352  * @interrupt_params: used for determining the CRTC instance
353  *
354  * Handles the pageflip interrupt by notifying all interested parties
355  * that the pageflip has been completed.
356  */
357 static void dm_pflip_high_irq(void *interrupt_params)
358 {
359         struct amdgpu_crtc *amdgpu_crtc;
360         struct common_irq_params *irq_params = interrupt_params;
361         struct amdgpu_device *adev = irq_params->adev;
362         unsigned long flags;
363         struct drm_pending_vblank_event *e;
364         uint32_t vpos, hpos, v_blank_start, v_blank_end;
365         bool vrr_active;
366
367         amdgpu_crtc = get_crtc_by_otg_inst(adev, irq_params->irq_src - IRQ_TYPE_PFLIP);
368
369         /* IRQ could occur when in initial stage */
370         /* TODO work and BO cleanup */
371         if (amdgpu_crtc == NULL) {
372                 DC_LOG_PFLIP("CRTC is null, returning.\n");
373                 return;
374         }
375
376         spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);
377
378         if (amdgpu_crtc->pflip_status != AMDGPU_FLIP_SUBMITTED) {
379                 DC_LOG_PFLIP("amdgpu_crtc->pflip_status = %d != AMDGPU_FLIP_SUBMITTED(%d) on crtc:%d[%p]\n",
380                                                  amdgpu_crtc->pflip_status,
381                                                  AMDGPU_FLIP_SUBMITTED,
382                                                  amdgpu_crtc->crtc_id,
383                                                  amdgpu_crtc);
384                 spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
385                 return;
386         }
387
388         /* page flip completed. */
389         e = amdgpu_crtc->event;
390         amdgpu_crtc->event = NULL;
391
392         WARN_ON(!e);
393
394         vrr_active = amdgpu_dm_vrr_active_irq(amdgpu_crtc);
395
396         /* Fixed refresh rate, or VRR scanout position outside front-porch? */
397         if (!vrr_active ||
398             !dc_stream_get_scanoutpos(amdgpu_crtc->dm_irq_params.stream, &v_blank_start,
399                                       &v_blank_end, &hpos, &vpos) ||
400             (vpos < v_blank_start)) {
401                 /* Update to correct count and vblank timestamp if racing with
402                  * vblank irq. This also updates to the correct vblank timestamp
403                  * even in VRR mode, as scanout is past the front-porch atm.
404                  */
405                 drm_crtc_accurate_vblank_count(&amdgpu_crtc->base);
406
407                 /* Wake up userspace by sending the pageflip event with proper
408                  * count and timestamp of vblank of flip completion.
409                  */
410                 if (e) {
411                         drm_crtc_send_vblank_event(&amdgpu_crtc->base, e);
412
413                         /* Event sent, so done with vblank for this flip */
414                         drm_crtc_vblank_put(&amdgpu_crtc->base);
415                 }
416         } else if (e) {
417                 /* VRR active and inside front-porch: vblank count and
418                  * timestamp for pageflip event will only be up to date after
419                  * drm_crtc_handle_vblank() has been executed from late vblank
420                  * irq handler after start of back-porch (vline 0). We queue the
421                  * pageflip event for send-out by drm_crtc_handle_vblank() with
422                  * updated timestamp and count, once it runs after us.
423                  *
424                  * We need to open-code this instead of using the helper
425                  * drm_crtc_arm_vblank_event(), as that helper would
426                  * call drm_crtc_accurate_vblank_count(), which we must
427                  * not call in VRR mode while we are in front-porch!
428                  */
429
430                 /* sequence will be replaced by real count during send-out. */
431                 e->sequence = drm_crtc_vblank_count(&amdgpu_crtc->base);
432                 e->pipe = amdgpu_crtc->crtc_id;
433
434                 list_add_tail(&e->base.link, &adev_to_drm(adev)->vblank_event_list);
435                 e = NULL;
436         }
437
438         /* Keep track of vblank of this flip for flip throttling. We use the
439          * cooked hw counter, as that one is incremented at the start of this vblank
440          * of pageflip completion, so last_flip_vblank is the forbidden count
441          * for queueing new pageflips if vsync + VRR is enabled.
442          */
443         amdgpu_crtc->dm_irq_params.last_flip_vblank =
444                 amdgpu_get_vblank_counter_kms(&amdgpu_crtc->base);
445
446         amdgpu_crtc->pflip_status = AMDGPU_FLIP_NONE;
447         spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
448
449         DC_LOG_PFLIP("crtc:%d[%p], pflip_stat:AMDGPU_FLIP_NONE, vrr[%d]-fp %d\n",
450                      amdgpu_crtc->crtc_id, amdgpu_crtc,
451                      vrr_active, (int) !e);
452 }
453
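/**
 * dm_vupdate_high_irq() - Handles VUPDATE interrupt
 * @interrupt_params: used for determining the CRTC instance
 *
 * Tracks the measured frame duration for the refresh-rate tracepoint and,
 * in VRR mode, performs core vblank handling after the end of the
 * front-porch, including BTR processing on ASICs older than the AI family.
 */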
454 static void dm_vupdate_high_irq(void *interrupt_params)
455 {
456         struct common_irq_params *irq_params = interrupt_params;
457         struct amdgpu_device *adev = irq_params->adev;
458         struct amdgpu_crtc *acrtc;
459         struct drm_device *drm_dev;
460         struct drm_vblank_crtc *vblank;
461         ktime_t frame_duration_ns, previous_timestamp;
462         unsigned long flags;
463         int vrr_active;
464
465         acrtc = get_crtc_by_otg_inst(adev, irq_params->irq_src - IRQ_TYPE_VUPDATE);
466
467         if (acrtc) {
468                 vrr_active = amdgpu_dm_vrr_active_irq(acrtc);
469                 drm_dev = acrtc->base.dev;
470                 vblank = &drm_dev->vblank[acrtc->base.index];
471                 previous_timestamp = atomic64_read(&irq_params->previous_timestamp);
472                 frame_duration_ns = vblank->time - previous_timestamp;
473
474                 if (frame_duration_ns > 0) {
475                         trace_amdgpu_refresh_rate_track(acrtc->base.index,
476                                                 frame_duration_ns,
477                                                 ktime_divns(NSEC_PER_SEC, frame_duration_ns));
478                         atomic64_set(&irq_params->previous_timestamp, vblank->time);
479                 }
480
481                 DC_LOG_VBLANK("crtc:%d, vupdate-vrr:%d\n",
482                               acrtc->crtc_id,
483                               vrr_active);
484
485                 /* Core vblank handling is done here after end of front-porch in
486                  * vrr mode, as vblank timestamping only gives valid results
487                  * now that scanout is past the front-porch. This will also deliver
488                  * page-flip completion events that have been queued to us
489                  * if a pageflip happened inside front-porch.
490                  */
491                 if (vrr_active) {
492                         dm_crtc_handle_vblank(acrtc);
493
494                         /* BTR processing for pre-DCE12 ASICs */
495                         if (acrtc->dm_irq_params.stream &&
496                             adev->family < AMDGPU_FAMILY_AI) {
497                                 spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);
498                                 mod_freesync_handle_v_update(
499                                     adev->dm.freesync_module,
500                                     acrtc->dm_irq_params.stream,
501                                     &acrtc->dm_irq_params.vrr_params);
502
503                                 dc_stream_adjust_vmin_vmax(
504                                     adev->dm.dc,
505                                     acrtc->dm_irq_params.stream,
506                                     &acrtc->dm_irq_params.vrr_params.adjust);
507                                 spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
508                         }
509                 }
510         }
511 }
512
513 /**
514  * dm_crtc_high_irq() - Handles CRTC interrupt
515  * @interrupt_params: used for determining the CRTC instance
516  *
517  * Handles the CRTC/VSYNC interrupt by notifying DRM's VBLANK
518  * event handler.
519  */
520 static void dm_crtc_high_irq(void *interrupt_params)
521 {
522         struct common_irq_params *irq_params = interrupt_params;
523         struct amdgpu_device *adev = irq_params->adev;
524         struct amdgpu_crtc *acrtc;
525         unsigned long flags;
526         int vrr_active;
527
528         acrtc = get_crtc_by_otg_inst(adev, irq_params->irq_src - IRQ_TYPE_VBLANK);
529         if (!acrtc)
530                 return;
531
532         vrr_active = amdgpu_dm_vrr_active_irq(acrtc);
533
534         DC_LOG_VBLANK("crtc:%d, vupdate-vrr:%d, planes:%d\n", acrtc->crtc_id,
535                       vrr_active, acrtc->dm_irq_params.active_planes);
536
537         /**
538          * Core vblank handling at start of front-porch is only possible
539          * in non-vrr mode, as only then does vblank timestamping give
540          * valid results while still inside the front-porch. Otherwise defer it
541          * to dm_vupdate_high_irq after end of front-porch.
542          */
543         if (!vrr_active)
544                 dm_crtc_handle_vblank(acrtc);
545
546         /**
547          * Following stuff must happen at start of vblank, for crc
548          * computation and below-the-range btr support in vrr mode.
549          */
550         amdgpu_dm_crtc_handle_crc_irq(&acrtc->base);
551
552         /* BTR updates need to happen before VUPDATE on Vega and above. */
553         if (adev->family < AMDGPU_FAMILY_AI)
554                 return;
555
556         spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);
557
558         if (acrtc->dm_irq_params.stream &&
559             acrtc->dm_irq_params.vrr_params.supported &&
560             acrtc->dm_irq_params.freesync_config.state ==
561                     VRR_STATE_ACTIVE_VARIABLE) {
562                 mod_freesync_handle_v_update(adev->dm.freesync_module,
563                                              acrtc->dm_irq_params.stream,
564                                              &acrtc->dm_irq_params.vrr_params);
565
566                 dc_stream_adjust_vmin_vmax(adev->dm.dc, acrtc->dm_irq_params.stream,
567                                            &acrtc->dm_irq_params.vrr_params.adjust);
568         }
569
570         /*
571          * If there aren't any active_planes then DCH HUBP may be clock-gated.
572          * In that case, pageflip completion interrupts won't fire and pageflip
573          * completion events won't get delivered. Prevent this by sending
574          * pending pageflip events from here if a flip is still pending.
575          *
576          * If any planes are enabled, use dm_pflip_high_irq() instead, to
577          * avoid race conditions between flip programming and completion,
578          * which could cause too early flip completion events.
579          */
580         if (adev->family >= AMDGPU_FAMILY_RV &&
581             acrtc->pflip_status == AMDGPU_FLIP_SUBMITTED &&
582             acrtc->dm_irq_params.active_planes == 0) {
583                 if (acrtc->event) {
584                         drm_crtc_send_vblank_event(&acrtc->base, acrtc->event);
585                         acrtc->event = NULL;
586                         drm_crtc_vblank_put(&acrtc->base);
587                 }
588                 acrtc->pflip_status = AMDGPU_FLIP_NONE;
589         }
590
591         spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
592 }
593
594 #if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
595 /**
596  * dm_dcn_vertical_interrupt0_high_irq() - Handles OTG Vertical interrupt0 for
597  * DCN generation ASICs
598  * @interrupt_params: interrupt parameters
599  *
600  * Used to set crc window/read out crc value at vertical line 0 position
601  */
602 static void dm_dcn_vertical_interrupt0_high_irq(void *interrupt_params)
603 {
604         struct common_irq_params *irq_params = interrupt_params;
605         struct amdgpu_device *adev = irq_params->adev;
606         struct amdgpu_crtc *acrtc;
607
608         acrtc = get_crtc_by_otg_inst(adev, irq_params->irq_src - IRQ_TYPE_VLINE0);
609
610         if (!acrtc)
611                 return;
612
613         amdgpu_dm_crtc_handle_crc_window_irq(&acrtc->base);
614 }
615 #endif /* CONFIG_DRM_AMD_SECURE_DISPLAY */
616
617 /**
618  * dmub_aux_setconfig_callback - Callback for AUX or SET_CONFIG command.
619  * @adev: amdgpu_device pointer
620  * @notify: dmub notification structure
621  *
622  * Dmub AUX or SET_CONFIG command completion processing callback.
623  * Copies the dmub notification to DM, to be read by the AUX command
624  * issuing thread, and also signals the event to wake up that thread.
625  */
626 static void dmub_aux_setconfig_callback(struct amdgpu_device *adev,
627                                         struct dmub_notification *notify)
628 {
629         if (adev->dm.dmub_notify)
630                 memcpy(adev->dm.dmub_notify, notify, sizeof(struct dmub_notification));
631         if (notify->type == DMUB_NOTIFICATION_AUX_REPLY)
632                 complete(&adev->dm.dmub_aux_transfer_done);
633 }
634
635 /**
636  * dmub_hpd_callback - DMUB HPD interrupt processing callback.
637  * @adev: amdgpu_device pointer
638  * @notify: dmub notification structure
639  *
640  * Dmub Hpd interrupt processing callback. Gets the display index through the
641  * link index and calls the helper to do the processing.
642  */
643 static void dmub_hpd_callback(struct amdgpu_device *adev,
644                               struct dmub_notification *notify)
645 {
646         struct amdgpu_dm_connector *aconnector;
647         struct amdgpu_dm_connector *hpd_aconnector = NULL;
648         struct drm_connector *connector;
649         struct drm_connector_list_iter iter;
650         struct dc_link *link;
651         uint8_t link_index = 0;
652         struct drm_device *dev;
653
654         if (adev == NULL)
655                 return;
656
657         if (notify == NULL) {
658                 DRM_ERROR("DMUB HPD callback notification was NULL");
659                 return;
660         }
661
662         if (notify->link_index >= adev->dm.dc->link_count) {
663                 DRM_ERROR("DMUB HPD index (%u) is abnormal", notify->link_index);
664                 return;
665         }
666
667         link_index = notify->link_index;
668         link = adev->dm.dc->links[link_index];
669         dev = adev->dm.ddev;
670
671         drm_connector_list_iter_begin(dev, &iter);
672         drm_for_each_connector_iter(connector, &iter) {
673                 aconnector = to_amdgpu_dm_connector(connector);
674                 if (link && aconnector->dc_link == link) {
675                         DRM_INFO("DMUB HPD callback: link_index=%u\n", link_index);
676                         hpd_aconnector = aconnector;
677                         break;
678                 }
679         }
680         drm_connector_list_iter_end(&iter);
681
682         if (hpd_aconnector) {
683                 if (notify->type == DMUB_NOTIFICATION_HPD)
684                         handle_hpd_irq_helper(hpd_aconnector);
685                 else if (notify->type == DMUB_NOTIFICATION_HPD_IRQ)
686                         handle_hpd_rx_irq(hpd_aconnector);
687         }
688 }
689
690 /**
691  * register_dmub_notify_callback - Sets callback for DMUB notify
692  * @adev: amdgpu_device pointer
693  * @type: Type of dmub notification
694  * @callback: Dmub interrupt callback function
695  * @dmub_int_thread_offload: offload indicator
696  *
697  * API to register a dmub callback handler for a dmub notification.
698  * Also sets an indicator whether callback processing is to be offloaded
699  * to the dmub interrupt handling thread.
700  * Return: true if successfully registered, false if there is existing registration
701  */
702 static bool register_dmub_notify_callback(struct amdgpu_device *adev,
703                                           enum dmub_notification_type type,
704                                           dmub_notify_interrupt_callback_t callback,
705                                           bool dmub_int_thread_offload)
706 {
707         if (callback != NULL && type < ARRAY_SIZE(adev->dm.dmub_thread_offload)) {
708                 adev->dm.dmub_callback[type] = callback;
709                 adev->dm.dmub_thread_offload[type] = dmub_int_thread_offload;
710         } else
711                 return false;
712
713         return true;
714 }
715
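/**
 * dm_handle_hpd_work() - Deferred handler for offloaded DMUB HPD notifications
 * @work: work item embedded in struct dmub_hpd_work
 *
 * Invokes the registered DMUB notification callback for the saved
 * notification outside of interrupt context, then frees the work item.
 */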
716 static void dm_handle_hpd_work(struct work_struct *work)
717 {
718         struct dmub_hpd_work *dmub_hpd_wrk;
719
720         dmub_hpd_wrk = container_of(work, struct dmub_hpd_work, handle_hpd_work);
721
722         if (!dmub_hpd_wrk->dmub_notify) {
723                 DRM_ERROR("dmub_hpd_wrk dmub_notify is NULL");
724                 return;
725         }
726
727         if (dmub_hpd_wrk->dmub_notify->type < ARRAY_SIZE(dmub_hpd_wrk->adev->dm.dmub_callback)) {
728                 dmub_hpd_wrk->adev->dm.dmub_callback[dmub_hpd_wrk->dmub_notify->type](dmub_hpd_wrk->adev,
729                 dmub_hpd_wrk->dmub_notify);
730         }
731
732         kfree(dmub_hpd_wrk->dmub_notify);
733         kfree(dmub_hpd_wrk);
734
735 }
736
737 #define DMUB_TRACE_MAX_READ 64
738 /**
739  * dm_dmub_outbox1_low_irq() - Handles Outbox interrupt
740  * @interrupt_params: used for determining the Outbox instance
741  *
742  * Handles the Outbox interrupt by processing pending DMUB notifications
743  * and draining the DMCUB trace buffer.
744  */
745 static void dm_dmub_outbox1_low_irq(void *interrupt_params)
746 {
747         struct dmub_notification notify;
748         struct common_irq_params *irq_params = interrupt_params;
749         struct amdgpu_device *adev = irq_params->adev;
750         struct amdgpu_display_manager *dm = &adev->dm;
751         struct dmcub_trace_buf_entry entry = { 0 };
752         uint32_t count = 0;
753         struct dmub_hpd_work *dmub_hpd_wrk;
754         struct dc_link *plink = NULL;
755
756         if (dc_enable_dmub_notifications(adev->dm.dc) &&
757                 irq_params->irq_src == DC_IRQ_SOURCE_DMCUB_OUTBOX) {
758
759                 do {
760                         dc_stat_get_dmub_notification(adev->dm.dc, &notify);
761                         if (notify.type >= ARRAY_SIZE(dm->dmub_thread_offload)) {
762                                 DRM_ERROR("DM: notify type %d invalid!", notify.type);
763                                 continue;
764                         }
765                         if (!dm->dmub_callback[notify.type]) {
766                                 DRM_DEBUG_DRIVER("DMUB notification skipped, no handler: type=%d\n", notify.type);
767                                 continue;
768                         }
769                         if (dm->dmub_thread_offload[notify.type] == true) {
770                                 dmub_hpd_wrk = kzalloc(sizeof(*dmub_hpd_wrk), GFP_ATOMIC);
771                                 if (!dmub_hpd_wrk) {
772                                         DRM_ERROR("Failed to allocate dmub_hpd_wrk");
773                                         return;
774                                 }
775                                 dmub_hpd_wrk->dmub_notify = kzalloc(sizeof(struct dmub_notification), GFP_ATOMIC);
776                                 if (!dmub_hpd_wrk->dmub_notify) {
777                                         kfree(dmub_hpd_wrk);
778                                         DRM_ERROR("Failed to allocate dmub_hpd_wrk->dmub_notify");
779                                         return;
780                                 }
781                                 INIT_WORK(&dmub_hpd_wrk->handle_hpd_work, dm_handle_hpd_work);
782                                 if (dmub_hpd_wrk->dmub_notify)
783                                         memcpy(dmub_hpd_wrk->dmub_notify, &notify, sizeof(struct dmub_notification));
784                                 dmub_hpd_wrk->adev = adev;
785                                 if (notify.type == DMUB_NOTIFICATION_HPD) {
786                                         plink = adev->dm.dc->links[notify.link_index];
787                                         if (plink) {
788                                                 plink->hpd_status =
789                                                         notify.hpd_status == DP_HPD_PLUG;
790                                         }
791                                 }
792                                 queue_work(adev->dm.delayed_hpd_wq, &dmub_hpd_wrk->handle_hpd_work);
793                         } else {
794                                 dm->dmub_callback[notify.type](adev, &notify);
795                         }
796                 } while (notify.pending_notification);
797         }
798
799
800         do {
801                 if (dc_dmub_srv_get_dmub_outbox0_msg(dm->dc, &entry)) {
802                         trace_amdgpu_dmub_trace_high_irq(entry.trace_code, entry.tick_count,
803                                                         entry.param0, entry.param1);
804
805                         DRM_DEBUG_DRIVER("trace_code:%u, tick_count:%u, param0:%u, param1:%u\n",
806                                  entry.trace_code, entry.tick_count, entry.param0, entry.param1);
807                 } else
808                         break;
809
810                 count++;
811
812         } while (count <= DMUB_TRACE_MAX_READ);
813
814         if (count > DMUB_TRACE_MAX_READ)
815                 DRM_DEBUG_DRIVER("Warning: count > DMUB_TRACE_MAX_READ");
816 }
817
818 static int dm_set_clockgating_state(void *handle,
819                   enum amd_clockgating_state state)
820 {
821         return 0;
822 }
823
824 static int dm_set_powergating_state(void *handle,
825                   enum amd_powergating_state state)
826 {
827         return 0;
828 }
829
830 /* Prototypes of private functions */
831 static int dm_early_init(void *handle);
832
833 /* Allocate memory for FBC compressed data  */
834 static void amdgpu_dm_fbc_init(struct drm_connector *connector)
835 {
836         struct drm_device *dev = connector->dev;
837         struct amdgpu_device *adev = drm_to_adev(dev);
838         struct dm_compressor_info *compressor = &adev->dm.compressor;
839         struct amdgpu_dm_connector *aconn = to_amdgpu_dm_connector(connector);
840         struct drm_display_mode *mode;
841         unsigned long max_size = 0;
842
843         if (adev->dm.dc->fbc_compressor == NULL)
844                 return;
845
846         if (aconn->dc_link->connector_signal != SIGNAL_TYPE_EDP)
847                 return;
848
849         if (compressor->bo_ptr)
850                 return;
851
852
853         list_for_each_entry(mode, &connector->modes, head) {
854                 if (max_size < mode->htotal * mode->vtotal)
855                         max_size = mode->htotal * mode->vtotal;
856         }
857
858         if (max_size) {
859                 int r = amdgpu_bo_create_kernel(adev, max_size * 4, PAGE_SIZE,
860                             AMDGPU_GEM_DOMAIN_GTT, &compressor->bo_ptr,
861                             &compressor->gpu_addr, &compressor->cpu_addr);
862
863                 if (r)
864                         DRM_ERROR("DM: Failed to initialize FBC\n");
865                 else {
866                         adev->dm.dc->ctx->fbc_gpu_addr = compressor->gpu_addr;
867                         DRM_INFO("DM: FBC alloc %lu\n", max_size*4);
868                 }
869
870         }
871
872 }
873
874 static int amdgpu_dm_audio_component_get_eld(struct device *kdev, int port,
875                                           int pipe, bool *enabled,
876                                           unsigned char *buf, int max_bytes)
877 {
878         struct drm_device *dev = dev_get_drvdata(kdev);
879         struct amdgpu_device *adev = drm_to_adev(dev);
880         struct drm_connector *connector;
881         struct drm_connector_list_iter conn_iter;
882         struct amdgpu_dm_connector *aconnector;
883         int ret = 0;
884
885         *enabled = false;
886
887         mutex_lock(&adev->dm.audio_lock);
888
889         drm_connector_list_iter_begin(dev, &conn_iter);
890         drm_for_each_connector_iter(connector, &conn_iter) {
891                 aconnector = to_amdgpu_dm_connector(connector);
892                 if (aconnector->audio_inst != port)
893                         continue;
894
895                 *enabled = true;
896                 ret = drm_eld_size(connector->eld);
897                 memcpy(buf, connector->eld, min(max_bytes, ret));
898
899                 break;
900         }
901         drm_connector_list_iter_end(&conn_iter);
902
903         mutex_unlock(&adev->dm.audio_lock);
904
905         DRM_DEBUG_KMS("Get ELD : idx=%d ret=%d en=%d\n", port, ret, *enabled);
906
907         return ret;
908 }
909
910 static const struct drm_audio_component_ops amdgpu_dm_audio_component_ops = {
911         .get_eld = amdgpu_dm_audio_component_get_eld,
912 };
913
914 static int amdgpu_dm_audio_component_bind(struct device *kdev,
915                                        struct device *hda_kdev, void *data)
916 {
917         struct drm_device *dev = dev_get_drvdata(kdev);
918         struct amdgpu_device *adev = drm_to_adev(dev);
919         struct drm_audio_component *acomp = data;
920
921         acomp->ops = &amdgpu_dm_audio_component_ops;
922         acomp->dev = kdev;
923         adev->dm.audio_component = acomp;
924
925         return 0;
926 }
927
928 static void amdgpu_dm_audio_component_unbind(struct device *kdev,
929                                           struct device *hda_kdev, void *data)
930 {
931         struct drm_device *dev = dev_get_drvdata(kdev);
932         struct amdgpu_device *adev = drm_to_adev(dev);
933         struct drm_audio_component *acomp = data;
934
935         acomp->ops = NULL;
936         acomp->dev = NULL;
937         adev->dm.audio_component = NULL;
938 }
939
940 static const struct component_ops amdgpu_dm_audio_component_bind_ops = {
941         .bind   = amdgpu_dm_audio_component_bind,
942         .unbind = amdgpu_dm_audio_component_unbind,
943 };
944
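/**
 * amdgpu_dm_audio_init() - Register the DM audio component
 * @adev: amdgpu_device pointer
 *
 * Initializes the audio pin state from the DC resource pool and registers
 * the DRM audio component so the HDA driver can query ELDs via
 * amdgpu_dm_audio_component_get_eld().
 *
 * Return: 0 on success or a negative error code from component_add().
 */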
945 static int amdgpu_dm_audio_init(struct amdgpu_device *adev)
946 {
947         int i, ret;
948
949         if (!amdgpu_audio)
950                 return 0;
951
952         adev->mode_info.audio.enabled = true;
953
954         adev->mode_info.audio.num_pins = adev->dm.dc->res_pool->audio_count;
955
956         for (i = 0; i < adev->mode_info.audio.num_pins; i++) {
957                 adev->mode_info.audio.pin[i].channels = -1;
958                 adev->mode_info.audio.pin[i].rate = -1;
959                 adev->mode_info.audio.pin[i].bits_per_sample = -1;
960                 adev->mode_info.audio.pin[i].status_bits = 0;
961                 adev->mode_info.audio.pin[i].category_code = 0;
962                 adev->mode_info.audio.pin[i].connected = false;
963                 adev->mode_info.audio.pin[i].id =
964                         adev->dm.dc->res_pool->audios[i]->inst;
965                 adev->mode_info.audio.pin[i].offset = 0;
966         }
967
968         ret = component_add(adev->dev, &amdgpu_dm_audio_component_bind_ops);
969         if (ret < 0)
970                 return ret;
971
972         adev->dm.audio_registered = true;
973
974         return 0;
975 }
976
977 static void amdgpu_dm_audio_fini(struct amdgpu_device *adev)
978 {
979         if (!amdgpu_audio)
980                 return;
981
982         if (!adev->mode_info.audio.enabled)
983                 return;
984
985         if (adev->dm.audio_registered) {
986                 component_del(adev->dev, &amdgpu_dm_audio_component_bind_ops);
987                 adev->dm.audio_registered = false;
988         }
989
990         /* TODO: Disable audio? */
991
992         adev->mode_info.audio.enabled = false;
993 }
994
995 static  void amdgpu_dm_audio_eld_notify(struct amdgpu_device *adev, int pin)
996 {
997         struct drm_audio_component *acomp = adev->dm.audio_component;
998
999         if (acomp && acomp->audio_ops && acomp->audio_ops->pin_eld_notify) {
1000                 DRM_DEBUG_KMS("Notify ELD: %d\n", pin);
1001
1002                 acomp->audio_ops->pin_eld_notify(acomp->audio_ops->audio_ptr,
1003                                                  pin, -1);
1004         }
1005 }
1006
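/**
 * dm_dmub_hw_init() - Initialize DMUB hardware
 * @adev: amdgpu_device pointer
 *
 * Copies the DMUB firmware and VBIOS into the framebuffer windows, resets
 * and initializes the DMCUB hardware through the DMUB service, waits for
 * the firmware auto-load to finish and creates the DC DMUB server.
 *
 * Return: 0 on success (or when DMUB is not supported on the ASIC),
 * negative errno otherwise.
 */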
1007 static int dm_dmub_hw_init(struct amdgpu_device *adev)
1008 {
1009         const struct dmcub_firmware_header_v1_0 *hdr;
1010         struct dmub_srv *dmub_srv = adev->dm.dmub_srv;
1011         struct dmub_srv_fb_info *fb_info = adev->dm.dmub_fb_info;
1012         const struct firmware *dmub_fw = adev->dm.dmub_fw;
1013         struct dmcu *dmcu = adev->dm.dc->res_pool->dmcu;
1014         struct abm *abm = adev->dm.dc->res_pool->abm;
1015         struct dmub_srv_hw_params hw_params;
1016         enum dmub_status status;
1017         const unsigned char *fw_inst_const, *fw_bss_data;
1018         uint32_t i, fw_inst_const_size, fw_bss_data_size;
1019         bool has_hw_support;
1020
1021         if (!dmub_srv)
1022                 /* DMUB isn't supported on the ASIC. */
1023                 return 0;
1024
1025         if (!fb_info) {
1026                 DRM_ERROR("No framebuffer info for DMUB service.\n");
1027                 return -EINVAL;
1028         }
1029
1030         if (!dmub_fw) {
1031                 /* Firmware required for DMUB support. */
1032                 DRM_ERROR("No firmware provided for DMUB.\n");
1033                 return -EINVAL;
1034         }
1035
1036         status = dmub_srv_has_hw_support(dmub_srv, &has_hw_support);
1037         if (status != DMUB_STATUS_OK) {
1038                 DRM_ERROR("Error checking HW support for DMUB: %d\n", status);
1039                 return -EINVAL;
1040         }
1041
1042         if (!has_hw_support) {
1043                 DRM_INFO("DMUB unsupported on ASIC\n");
1044                 return 0;
1045         }
1046
1047         /* Reset DMCUB if it was previously running - before we overwrite its memory. */
1048         status = dmub_srv_hw_reset(dmub_srv);
1049         if (status != DMUB_STATUS_OK)
1050                 DRM_WARN("Error resetting DMUB HW: %d\n", status);
1051
1052         hdr = (const struct dmcub_firmware_header_v1_0 *)dmub_fw->data;
1053
1054         fw_inst_const = dmub_fw->data +
1055                         le32_to_cpu(hdr->header.ucode_array_offset_bytes) +
1056                         PSP_HEADER_BYTES;
1057
1058         fw_bss_data = dmub_fw->data +
1059                       le32_to_cpu(hdr->header.ucode_array_offset_bytes) +
1060                       le32_to_cpu(hdr->inst_const_bytes);
1061
1062         /* Copy firmware and bios info into FB memory. */
1063         fw_inst_const_size = le32_to_cpu(hdr->inst_const_bytes) -
1064                              PSP_HEADER_BYTES - PSP_FOOTER_BYTES;
1065
1066         fw_bss_data_size = le32_to_cpu(hdr->bss_data_bytes);
1067
1068         /* if adev->firmware.load_type == AMDGPU_FW_LOAD_PSP,
1069          * amdgpu_ucode_init_single_fw will load dmub firmware
1070          * fw_inst_const part to cw0; otherwise, the firmware back door load
1071          * will be done by dm_dmub_hw_init
1072          */
1073         if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP) {
1074                 memcpy(fb_info->fb[DMUB_WINDOW_0_INST_CONST].cpu_addr, fw_inst_const,
1075                                 fw_inst_const_size);
1076         }
1077
1078         if (fw_bss_data_size)
1079                 memcpy(fb_info->fb[DMUB_WINDOW_2_BSS_DATA].cpu_addr,
1080                        fw_bss_data, fw_bss_data_size);
1081
1082         /* Copy firmware bios info into FB memory. */
1083         memcpy(fb_info->fb[DMUB_WINDOW_3_VBIOS].cpu_addr, adev->bios,
1084                adev->bios_size);
1085
1086         /* Reset regions that need to be reset. */
1087         memset(fb_info->fb[DMUB_WINDOW_4_MAILBOX].cpu_addr, 0,
1088         fb_info->fb[DMUB_WINDOW_4_MAILBOX].size);
1089
1090         memset(fb_info->fb[DMUB_WINDOW_5_TRACEBUFF].cpu_addr, 0,
1091                fb_info->fb[DMUB_WINDOW_5_TRACEBUFF].size);
1092
1093         memset(fb_info->fb[DMUB_WINDOW_6_FW_STATE].cpu_addr, 0,
1094                fb_info->fb[DMUB_WINDOW_6_FW_STATE].size);
1095
1096         /* Initialize hardware. */
1097         memset(&hw_params, 0, sizeof(hw_params));
1098         hw_params.fb_base = adev->gmc.fb_start;
1099         hw_params.fb_offset = adev->vm_manager.vram_base_offset;
1100
1101         /* backdoor load firmware and trigger dmub running */
1102         if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP)
1103                 hw_params.load_inst_const = true;
1104
1105         if (dmcu)
1106                 hw_params.psp_version = dmcu->psp_version;
1107
1108         for (i = 0; i < fb_info->num_fb; ++i)
1109                 hw_params.fb[i] = &fb_info->fb[i];
1110
1111         switch (adev->ip_versions[DCE_HWIP][0]) {
1112         case IP_VERSION(3, 1, 3):
1113         case IP_VERSION(3, 1, 4):
1114                 hw_params.dpia_supported = true;
1115                 hw_params.disable_dpia = adev->dm.dc->debug.dpia_debug.bits.disable_dpia;
1116                 break;
1117         default:
1118                 break;
1119         }
1120
1121         status = dmub_srv_hw_init(dmub_srv, &hw_params);
1122         if (status != DMUB_STATUS_OK) {
1123                 DRM_ERROR("Error initializing DMUB HW: %d\n", status);
1124                 return -EINVAL;
1125         }
1126
1127         /* Wait for firmware load to finish. */
1128         status = dmub_srv_wait_for_auto_load(dmub_srv, 100000);
1129         if (status != DMUB_STATUS_OK)
1130                 DRM_WARN("Wait for DMUB auto-load failed: %d\n", status);
1131
1132         /* Init DMCU and ABM if available. */
1133         if (dmcu && abm) {
1134                 dmcu->funcs->dmcu_init(dmcu);
1135                 abm->dmcu_is_running = dmcu->funcs->is_dmcu_initialized(dmcu);
1136         }
1137
1138         if (!adev->dm.dc->ctx->dmub_srv)
1139                 adev->dm.dc->ctx->dmub_srv = dc_dmub_srv_create(adev->dm.dc, dmub_srv);
1140         if (!adev->dm.dc->ctx->dmub_srv) {
1141                 DRM_ERROR("Couldn't allocate DC DMUB server!\n");
1142                 return -ENOMEM;
1143         }
1144
1145         DRM_INFO("DMUB hardware initialized: version=0x%08X\n",
1146                  adev->dm.dmcub_fw_version);
1147
1148         return 0;
1149 }
1150
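/**
 * dm_dmub_hw_resume() - Bring DMUB hardware back up after suspend/reset
 * @adev: amdgpu_device pointer
 *
 * If DMUB hardware is already initialized, only waits for the firmware
 * auto-load to complete; otherwise performs the full dm_dmub_hw_init().
 */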
1151 static void dm_dmub_hw_resume(struct amdgpu_device *adev)
1152 {
1153         struct dmub_srv *dmub_srv = adev->dm.dmub_srv;
1154         enum dmub_status status;
1155         bool init;
1156
1157         if (!dmub_srv) {
1158                 /* DMUB isn't supported on the ASIC. */
1159                 return;
1160         }
1161
1162         status = dmub_srv_is_hw_init(dmub_srv, &init);
1163         if (status != DMUB_STATUS_OK)
1164                 DRM_WARN("DMUB hardware init check failed: %d\n", status);
1165
1166         if (status == DMUB_STATUS_OK && init) {
1167                 /* Wait for firmware load to finish. */
1168                 status = dmub_srv_wait_for_auto_load(dmub_srv, 100000);
1169                 if (status != DMUB_STATUS_OK)
1170                         DRM_WARN("Wait for DMUB auto-load failed: %d\n", status);
1171         } else {
1172                 /* Perform the full hardware initialization. */
1173                 dm_dmub_hw_init(adev);
1174         }
1175 }
1176
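/**
 * mmhub_read_system_context() - Fill the DC physical address space config
 * @adev: amdgpu_device pointer
 * @pa_config: configuration to fill in
 *
 * Derives the system aperture, framebuffer and GART page table addresses
 * from GMC/VM manager state so DC can program the system context,
 * including the Raven2 workaround that raises the system aperture high
 * address by one.
 */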
1177 static void mmhub_read_system_context(struct amdgpu_device *adev, struct dc_phy_addr_space_config *pa_config)
1178 {
1179         uint64_t pt_base;
1180         uint32_t logical_addr_low;
1181         uint32_t logical_addr_high;
1182         uint32_t agp_base, agp_bot, agp_top;
1183         PHYSICAL_ADDRESS_LOC page_table_start, page_table_end, page_table_base;
1184
1185         memset(pa_config, 0, sizeof(*pa_config));
1186
1187         logical_addr_low  = min(adev->gmc.fb_start, adev->gmc.agp_start) >> 18;
1188         pt_base = amdgpu_gmc_pd_addr(adev->gart.bo);
1189
1190         if (adev->apu_flags & AMD_APU_IS_RAVEN2)
1191                 /*
1192                  * Raven2 has a HW issue that it is unable to use the vram which
1193                  * is out of MC_VM_SYSTEM_APERTURE_HIGH_ADDR. So here is the
1194                  * workaround that increases the system aperture high address (add 1)
1195                  * to get rid of the VM fault and hardware hang.
1196                  */
1197                 logical_addr_high = max((adev->gmc.fb_end >> 18) + 0x1, adev->gmc.agp_end >> 18);
1198         else
1199                 logical_addr_high = max(adev->gmc.fb_end, adev->gmc.agp_end) >> 18;
1200
1201         agp_base = 0;
1202         agp_bot = adev->gmc.agp_start >> 24;
1203         agp_top = adev->gmc.agp_end >> 24;
1204
1205
1206         page_table_start.high_part = (u32)(adev->gmc.gart_start >> 44) & 0xF;
1207         page_table_start.low_part = (u32)(adev->gmc.gart_start >> 12);
1208         page_table_end.high_part = (u32)(adev->gmc.gart_end >> 44) & 0xF;
1209         page_table_end.low_part = (u32)(adev->gmc.gart_end >> 12);
1210         page_table_base.high_part = upper_32_bits(pt_base) & 0xF;
1211         page_table_base.low_part = lower_32_bits(pt_base);
1212
1213         pa_config->system_aperture.start_addr = (uint64_t)logical_addr_low << 18;
1214         pa_config->system_aperture.end_addr = (uint64_t)logical_addr_high << 18;
1215
1216         pa_config->system_aperture.agp_base = (uint64_t)agp_base << 24;
1217         pa_config->system_aperture.agp_bot = (uint64_t)agp_bot << 24;
1218         pa_config->system_aperture.agp_top = (uint64_t)agp_top << 24;
1219
1220         pa_config->system_aperture.fb_base = adev->gmc.fb_start;
1221         pa_config->system_aperture.fb_offset = adev->vm_manager.vram_base_offset;
1222         pa_config->system_aperture.fb_top = adev->gmc.fb_end;
1223
1224         pa_config->gart_config.page_table_start_addr = page_table_start.quad_part << 12;
1225         pa_config->gart_config.page_table_end_addr = page_table_end.quad_part << 12;
1226         pa_config->gart_config.page_table_base_addr = page_table_base.quad_part;
1227
1228         pa_config->is_hvm_enabled = 0;
1229
1230 }
1231
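/**
 * dm_handle_hpd_rx_offload_work() - Deferred HPD RX IRQ handler
 * @work: work item embedded in struct hpd_rx_irq_offload_work
 *
 * Re-detects the sink and, when a sink is still connected and the device is
 * not in reset, handles automated test requests or link-loss recovery under
 * the DC lock before freeing the work item.
 */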
1232 static void dm_handle_hpd_rx_offload_work(struct work_struct *work)
1233 {
1234         struct hpd_rx_irq_offload_work *offload_work;
1235         struct amdgpu_dm_connector *aconnector;
1236         struct dc_link *dc_link;
1237         struct amdgpu_device *adev;
1238         enum dc_connection_type new_connection_type = dc_connection_none;
1239         unsigned long flags;
1240
1241         offload_work = container_of(work, struct hpd_rx_irq_offload_work, work);
1242         aconnector = offload_work->offload_wq->aconnector;
1243
1244         if (!aconnector) {
1245                 DRM_ERROR("Can't retrieve aconnector in hpd_rx_irq_offload_work");
1246                 goto skip;
1247         }
1248
1249         adev = drm_to_adev(aconnector->base.dev);
1250         dc_link = aconnector->dc_link;
1251
1252         mutex_lock(&aconnector->hpd_lock);
1253         if (!dc_link_detect_sink(dc_link, &new_connection_type))
1254                 DRM_ERROR("KMS: Failed to detect connector\n");
1255         mutex_unlock(&aconnector->hpd_lock);
1256
1257         if (new_connection_type == dc_connection_none)
1258                 goto skip;
1259
1260         if (amdgpu_in_reset(adev))
1261                 goto skip;
1262
1263         mutex_lock(&adev->dm.dc_lock);
1264         if (offload_work->data.bytes.device_service_irq.bits.AUTOMATED_TEST)
1265                 dc_link_dp_handle_automated_test(dc_link);
1266         else if ((dc_link->connector_signal != SIGNAL_TYPE_EDP) &&
1267                         hpd_rx_irq_check_link_loss_status(dc_link, &offload_work->data) &&
1268                         dc_link_dp_allow_hpd_rx_irq(dc_link)) {
1269                 dc_link_dp_handle_link_loss(dc_link);
1270                 spin_lock_irqsave(&offload_work->offload_wq->offload_lock, flags);
1271                 offload_work->offload_wq->is_handling_link_loss = false;
1272                 spin_unlock_irqrestore(&offload_work->offload_wq->offload_lock, flags);
1273         }
1274         mutex_unlock(&adev->dm.dc_lock);
1275
1276 skip:
1277         kfree(offload_work);
1278
1279 }
1280
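/**
 * hpd_rx_irq_create_workqueue() - Allocate per-link HPD RX offload queues
 * @dc: display core instance
 *
 * Creates one single-threaded workqueue per possible link for deferred
 * HPD RX IRQ handling.
 *
 * Return: the allocated array of work queues, or NULL on failure (any
 * partially created workqueues are destroyed).
 */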
1281 static struct hpd_rx_irq_offload_work_queue *hpd_rx_irq_create_workqueue(struct dc *dc)
1282 {
1283         int max_caps = dc->caps.max_links;
1284         int i = 0;
1285         struct hpd_rx_irq_offload_work_queue *hpd_rx_offload_wq = NULL;
1286
1287         hpd_rx_offload_wq = kcalloc(max_caps, sizeof(*hpd_rx_offload_wq), GFP_KERNEL);
1288
1289         if (!hpd_rx_offload_wq)
1290                 return NULL;
1291
1292
1293         for (i = 0; i < max_caps; i++) {
1294                 hpd_rx_offload_wq[i].wq =
1295                                     create_singlethread_workqueue("amdgpu_dm_hpd_rx_offload_wq");
1296
1297                 if (hpd_rx_offload_wq[i].wq == NULL) {
1298                         DRM_ERROR("create amdgpu_dm_hpd_rx_offload_wq fail!");
1299                         goto out_err;
1300                 }
1301
1302                 spin_lock_init(&hpd_rx_offload_wq[i].offload_lock);
1303         }
1304
1305         return hpd_rx_offload_wq;
1306
1307 out_err:
1308         for (i = 0; i < max_caps; i++) {
1309                 if (hpd_rx_offload_wq[i].wq)
1310                         destroy_workqueue(hpd_rx_offload_wq[i].wq);
1311         }
1312         kfree(hpd_rx_offload_wq);
1313         return NULL;
1314 }
1315
1316 struct amdgpu_stutter_quirk {
1317         u16 chip_vendor;
1318         u16 chip_device;
1319         u16 subsys_vendor;
1320         u16 subsys_device;
1321         u8 revision;
1322 };
1323
1324 static const struct amdgpu_stutter_quirk amdgpu_stutter_quirk_list[] = {
1325         /* https://bugzilla.kernel.org/show_bug.cgi?id=214417 */
1326         { 0x1002, 0x15dd, 0x1002, 0x15dd, 0xc8 },
1327         { 0, 0, 0, 0, 0 },
1328 };
1329
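/**
 * dm_should_disable_stutter() - Check the stutter quirk list for this device
 * @pdev: PCI device of the GPU
 *
 * Return: true if the vendor/device/subsystem/revision combination is listed
 * in amdgpu_stutter_quirk_list, false otherwise.
 */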
1330 static bool dm_should_disable_stutter(struct pci_dev *pdev)
1331 {
1332         const struct amdgpu_stutter_quirk *p = amdgpu_stutter_quirk_list;
1333
1334         while (p && p->chip_device != 0) {
1335                 if (pdev->vendor == p->chip_vendor &&
1336                     pdev->device == p->chip_device &&
1337                     pdev->subsystem_vendor == p->subsys_vendor &&
1338                     pdev->subsystem_device == p->subsys_device &&
1339                     pdev->revision == p->revision) {
1340                         return true;
1341                 }
1342                 ++p;
1343         }
1344         return false;
1345 }
1346
1347 static const struct dmi_system_id hpd_disconnect_quirk_table[] = {
1348         {
1349                 .matches = {
1350                         DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
1351                         DMI_MATCH(DMI_PRODUCT_NAME, "Precision 3660"),
1352                 },
1353         },
1354         {
1355                 .matches = {
1356                         DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
1357                         DMI_MATCH(DMI_PRODUCT_NAME, "Precision 3260"),
1358                 },
1359         },
1360         {
1361                 .matches = {
1362                         DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
1363                         DMI_MATCH(DMI_PRODUCT_NAME, "Precision 3460"),
1364                 },
1365         },
1366         {
1367                 .matches = {
1368                         DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
1369                         DMI_MATCH(DMI_PRODUCT_NAME, "OptiPlex Tower Plus 7010"),
1370                 },
1371         },
1372         {
1373                 .matches = {
1374                         DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
1375                         DMI_MATCH(DMI_PRODUCT_NAME, "OptiPlex Tower 7010"),
1376                 },
1377         },
1378         {
1379                 .matches = {
1380                         DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
1381                         DMI_MATCH(DMI_PRODUCT_NAME, "OptiPlex SFF Plus 7010"),
1382                 },
1383         },
1384         {
1385                 .matches = {
1386                         DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
1387                         DMI_MATCH(DMI_PRODUCT_NAME, "OptiPlex SFF 7010"),
1388                 },
1389         },
1390         {
1391                 .matches = {
1392                         DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
1393                         DMI_MATCH(DMI_PRODUCT_NAME, "OptiPlex Micro Plus 7010"),
1394                 },
1395         },
1396         {
1397                 .matches = {
1398                         DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
1399                         DMI_MATCH(DMI_PRODUCT_NAME, "OptiPlex Micro 7010"),
1400                 },
1401         },
1402         /* TODO: refactor this from a fixed table to a dynamic option */
1403         {}
1404 };
1405
1406 static void retrieve_dmi_info(struct amdgpu_display_manager *dm)
1407 {
1408         const struct dmi_system_id *dmi_id;
1409
1410         dm->aux_hpd_discon_quirk = false;
1411
1412         dmi_id = dmi_first_match(hpd_disconnect_quirk_table);
1413         if (dmi_id) {
1414                 dm->aux_hpd_discon_quirk = true;
1415                 DRM_INFO("aux_hpd_discon_quirk attached\n");
1416         }
1417 }
1418
1419 static int amdgpu_dm_init(struct amdgpu_device *adev)
1420 {
1421         struct dc_init_data init_data;
1422 #ifdef CONFIG_DRM_AMD_DC_HDCP
1423         struct dc_callback_init init_params;
1424 #endif
1425         int r;
1426
1427         adev->dm.ddev = adev_to_drm(adev);
1428         adev->dm.adev = adev;
1429
1430         /* Zero all the fields */
1431         memset(&init_data, 0, sizeof(init_data));
1432 #ifdef CONFIG_DRM_AMD_DC_HDCP
1433         memset(&init_params, 0, sizeof(init_params));
1434 #endif
1435
1436         mutex_init(&adev->dm.dpia_aux_lock);
1437         mutex_init(&adev->dm.dc_lock);
1438         mutex_init(&adev->dm.audio_lock);
1439
1440         if (amdgpu_dm_irq_init(adev)) {
1441                 DRM_ERROR("amdgpu: failed to initialize DM IRQ support.\n");
1442                 goto error;
1443         }
1444
1445         init_data.asic_id.chip_family = adev->family;
1446
1447         init_data.asic_id.pci_revision_id = adev->pdev->revision;
1448         init_data.asic_id.hw_internal_rev = adev->external_rev_id;
1449         init_data.asic_id.chip_id = adev->pdev->device;
1450
1451         init_data.asic_id.vram_width = adev->gmc.vram_width;
1452         /* TODO: initialize init_data.asic_id.vram_type here!!!! */
1453         init_data.asic_id.atombios_base_address =
1454                 adev->mode_info.atom_context->bios;
1455
1456         init_data.driver = adev;
1457
1458         adev->dm.cgs_device = amdgpu_cgs_create_device(adev);
1459
1460         if (!adev->dm.cgs_device) {
1461                 DRM_ERROR("amdgpu: failed to create cgs device.\n");
1462                 goto error;
1463         }
1464
1465         init_data.cgs_device = adev->dm.cgs_device;
1466
1467         init_data.dce_environment = DCE_ENV_PRODUCTION_DRV;
1468
1469         switch (adev->ip_versions[DCE_HWIP][0]) {
1470         case IP_VERSION(2, 1, 0):
1471                 switch (adev->dm.dmcub_fw_version) {
1472                 case 0: /* development */
1473                 case 0x1: /* linux-firmware.git hash 6d9f399 */
1474                 case 0x01000000: /* linux-firmware.git hash 9a0b0f4 */
1475                         init_data.flags.disable_dmcu = false;
1476                         break;
1477                 default:
1478                         init_data.flags.disable_dmcu = true;
1479                 }
1480                 break;
1481         case IP_VERSION(2, 0, 3):
1482                 init_data.flags.disable_dmcu = true;
1483                 break;
1484         default:
1485                 break;
1486         }
1487
1488         switch (adev->asic_type) {
1489         case CHIP_CARRIZO:
1490         case CHIP_STONEY:
1491                 init_data.flags.gpu_vm_support = true;
1492                 break;
1493         default:
1494                 switch (adev->ip_versions[DCE_HWIP][0]) {
1495                 case IP_VERSION(1, 0, 0):
1496                 case IP_VERSION(1, 0, 1):
1497                         /* enable S/G on PCO and RV2 */
1498                         if ((adev->apu_flags & AMD_APU_IS_RAVEN2) ||
1499                             (adev->apu_flags & AMD_APU_IS_PICASSO))
1500                                 init_data.flags.gpu_vm_support = true;
1501                         break;
1502                 case IP_VERSION(2, 1, 0):
1503                 case IP_VERSION(3, 0, 1):
1504                 case IP_VERSION(3, 1, 2):
1505                 case IP_VERSION(3, 1, 3):
1506                 case IP_VERSION(3, 1, 4):
1507                 case IP_VERSION(3, 1, 6):
1508                         init_data.flags.gpu_vm_support = true;
1509                         break;
1510                 default:
1511                         break;
1512                 }
1513                 break;
1514         }
1515
1516         if (init_data.flags.gpu_vm_support)
1517                 adev->mode_info.gpu_vm_support = true;
1518
1519         if (amdgpu_dc_feature_mask & DC_FBC_MASK)
1520                 init_data.flags.fbc_support = true;
1521
1522         if (amdgpu_dc_feature_mask & DC_MULTI_MON_PP_MCLK_SWITCH_MASK)
1523                 init_data.flags.multi_mon_pp_mclk_switch = true;
1524
1525         if (amdgpu_dc_feature_mask & DC_DISABLE_FRACTIONAL_PWM_MASK)
1526                 init_data.flags.disable_fractional_pwm = true;
1527
1528         if (amdgpu_dc_feature_mask & DC_EDP_NO_POWER_SEQUENCING)
1529                 init_data.flags.edp_no_power_sequencing = true;
1530
1531         if (amdgpu_dc_feature_mask & DC_DISABLE_LTTPR_DP1_4A)
1532                 init_data.flags.allow_lttpr_non_transparent_mode.bits.DP1_4A = true;
1533         if (amdgpu_dc_feature_mask & DC_DISABLE_LTTPR_DP2_0)
1534                 init_data.flags.allow_lttpr_non_transparent_mode.bits.DP2_0 = true;
1535
1536         init_data.flags.seamless_boot_edp_requested = false;
1537
1538         if (check_seamless_boot_capability(adev)) {
1539                 init_data.flags.seamless_boot_edp_requested = true;
1540                 init_data.flags.allow_seamless_boot_optimization = true;
1541                 DRM_INFO("Seamless boot condition check passed\n");
1542         }
1543
1544         init_data.flags.enable_mipi_converter_optimization = true;
1545
1546         init_data.dcn_reg_offsets = adev->reg_offset[DCE_HWIP][0];
1547         init_data.nbio_reg_offsets = adev->reg_offset[NBIO_HWIP][0];
1548
1549         INIT_LIST_HEAD(&adev->dm.da_list);
1550
1551         retrieve_dmi_info(&adev->dm);
1552
1553         /* Display Core create. */
1554         adev->dm.dc = dc_create(&init_data);
1555
1556         if (adev->dm.dc) {
1557                 DRM_INFO("Display Core initialized with v%s!\n", DC_VER);
1558         } else {
1559                 DRM_ERROR("Display Core failed to initialize with v%s!\n", DC_VER);
1560                 goto error;
1561         }
1562
1563         if (amdgpu_dc_debug_mask & DC_DISABLE_PIPE_SPLIT) {
1564                 adev->dm.dc->debug.force_single_disp_pipe_split = false;
1565                 adev->dm.dc->debug.pipe_split_policy = MPC_SPLIT_AVOID;
1566         }
1567
1568         if (adev->asic_type != CHIP_CARRIZO && adev->asic_type != CHIP_STONEY)
1569                 adev->dm.dc->debug.disable_stutter = !(amdgpu_pp_feature_mask & PP_STUTTER_MODE);
1570         if (dm_should_disable_stutter(adev->pdev))
1571                 adev->dm.dc->debug.disable_stutter = true;
1572
1573         if (amdgpu_dc_debug_mask & DC_DISABLE_STUTTER)
1574                 adev->dm.dc->debug.disable_stutter = true;
1575
1576         if (amdgpu_dc_debug_mask & DC_DISABLE_DSC) {
1577                 adev->dm.dc->debug.disable_dsc = true;
1578         }
1579
1580         if (amdgpu_dc_debug_mask & DC_DISABLE_CLOCK_GATING)
1581                 adev->dm.dc->debug.disable_clock_gate = true;
1582
1583         if (amdgpu_dc_debug_mask & DC_FORCE_SUBVP_MCLK_SWITCH)
1584                 adev->dm.dc->debug.force_subvp_mclk_switch = true;
1585
1586         adev->dm.dc->debug.visual_confirm = amdgpu_dc_visual_confirm;
1587
1588         /* TODO: Remove after DP2 receiver gets proper support of Cable ID feature */
1589         adev->dm.dc->debug.ignore_cable_id = true;
1590
1591         r = dm_dmub_hw_init(adev);
1592         if (r) {
1593                 DRM_ERROR("DMUB interface failed to initialize: status=%d\n", r);
1594                 goto error;
1595         }
1596
1597         dc_hardware_init(adev->dm.dc);
1598
1599         adev->dm.hpd_rx_offload_wq = hpd_rx_irq_create_workqueue(adev->dm.dc);
1600         if (!adev->dm.hpd_rx_offload_wq) {
1601                 DRM_ERROR("amdgpu: failed to create hpd rx offload workqueue.\n");
1602                 goto error;
1603         }
1604
1605         if ((adev->flags & AMD_IS_APU) && (adev->asic_type >= CHIP_CARRIZO)) {
1606                 struct dc_phy_addr_space_config pa_config;
1607
1608                 mmhub_read_system_context(adev, &pa_config);
1609
1610                 // Call the DC init_memory func
1611                 dc_setup_system_context(adev->dm.dc, &pa_config);
1612         }
1613
1614         adev->dm.freesync_module = mod_freesync_create(adev->dm.dc);
1615         if (!adev->dm.freesync_module) {
1616                 DRM_ERROR("amdgpu: failed to initialize freesync_module.\n");
1617         } else {
1618                 DRM_DEBUG_DRIVER("amdgpu: freesync_module init done %p.\n",
1619                                  adev->dm.freesync_module);
1620         }
1621
1622         amdgpu_dm_init_color_mod();
1623
1624         if (adev->dm.dc->caps.max_links > 0) {
1625                 adev->dm.vblank_control_workqueue =
1626                         create_singlethread_workqueue("dm_vblank_control_workqueue");
1627                 if (!adev->dm.vblank_control_workqueue)
1628                         DRM_ERROR("amdgpu: failed to create vblank_control_workqueue.\n");
1629         }
1630
1631 #ifdef CONFIG_DRM_AMD_DC_HDCP
1632         if (adev->dm.dc->caps.max_links > 0 && adev->family >= AMDGPU_FAMILY_RV) {
1633                 adev->dm.hdcp_workqueue = hdcp_create_workqueue(adev, &init_params.cp_psp, adev->dm.dc);
1634
1635                 if (!adev->dm.hdcp_workqueue)
1636                         DRM_ERROR("amdgpu: failed to initialize hdcp_workqueue.\n");
1637                 else
1638                         DRM_DEBUG_DRIVER("amdgpu: hdcp_workqueue init done %p.\n", adev->dm.hdcp_workqueue);
1639
1640                 dc_init_callbacks(adev->dm.dc, &init_params);
1641         }
1642 #endif
1643 #if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
1644         adev->dm.crc_rd_wrk = amdgpu_dm_crtc_secure_display_create_work();
1645 #endif
1646         if (dc_is_dmub_outbox_supported(adev->dm.dc)) {
1647                 init_completion(&adev->dm.dmub_aux_transfer_done);
1648                 adev->dm.dmub_notify = kzalloc(sizeof(struct dmub_notification), GFP_KERNEL);
1649                 if (!adev->dm.dmub_notify) {
1650                         DRM_ERROR("amdgpu: failed to allocate adev->dm.dmub_notify\n");
1651                         goto error;
1652                 }
1653
1654                 adev->dm.delayed_hpd_wq = create_singlethread_workqueue("amdgpu_dm_hpd_wq");
1655                 if (!adev->dm.delayed_hpd_wq) {
1656                         DRM_ERROR("amdgpu: failed to create hpd offload workqueue.\n");
1657                         goto error;
1658                 }
1659
1660                 amdgpu_dm_outbox_init(adev);
1661                 if (!register_dmub_notify_callback(adev, DMUB_NOTIFICATION_AUX_REPLY,
1662                         dmub_aux_setconfig_callback, false)) {
1663                         DRM_ERROR("amdgpu: failed to register dmub aux callback\n");
1664                         goto error;
1665                 }
1666                 if (!register_dmub_notify_callback(adev, DMUB_NOTIFICATION_HPD, dmub_hpd_callback, true)) {
1667                         DRM_ERROR("amdgpu: failed to register dmub hpd callback\n");
1668                         goto error;
1669                 }
1670                 if (!register_dmub_notify_callback(adev, DMUB_NOTIFICATION_HPD_IRQ, dmub_hpd_callback, true)) {
1671                         DRM_ERROR("amdgpu: failed to register dmub hpd irq callback\n");
1672                         goto error;
1673                 }
1674         }
1675
1676         /* Enable outbox notification only after IRQ handlers are registered and DMUB is alive.
1677          * It is expected that DMUB will resend any pending notifications at this point, for
1678          * example HPD from DPIA.
1679          */
1680         if (dc_is_dmub_outbox_supported(adev->dm.dc))
1681                 dc_enable_dmub_outbox(adev->dm.dc);
1682
1683         if (amdgpu_dm_initialize_drm_device(adev)) {
1684                 DRM_ERROR(
1685                 "amdgpu: failed to initialize sw for display support.\n");
1686                 goto error;
1687         }
1688
1689         /* create fake encoders for MST */
1690         dm_dp_create_fake_mst_encoders(adev);
1691
1692         /* TODO: Add_display_info? */
1693
1694         /* TODO use dynamic cursor width */
1695         adev_to_drm(adev)->mode_config.cursor_width = adev->dm.dc->caps.max_cursor_size;
1696         adev_to_drm(adev)->mode_config.cursor_height = adev->dm.dc->caps.max_cursor_size;
1697
1698         if (drm_vblank_init(adev_to_drm(adev), adev->dm.display_indexes_num)) {
1699                 DRM_ERROR(
1700                 "amdgpu: failed to initialize vblank support.\n");
1701                 goto error;
1702         }
1703
1704
1705         DRM_DEBUG_DRIVER("KMS initialized.\n");
1706
1707         return 0;
1708 error:
1709         amdgpu_dm_fini(adev);
1710
1711         return -EINVAL;
1712 }
1713
1714 static int amdgpu_dm_early_fini(void *handle)
1715 {
1716         struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1717
1718         amdgpu_dm_audio_fini(adev);
1719
1720         return 0;
1721 }
1722
1723 static void amdgpu_dm_fini(struct amdgpu_device *adev)
1724 {
1725         int i;
1726
1727         if (adev->dm.vblank_control_workqueue) {
1728                 destroy_workqueue(adev->dm.vblank_control_workqueue);
1729                 adev->dm.vblank_control_workqueue = NULL;
1730         }
1731
1732         amdgpu_dm_destroy_drm_device(&adev->dm);
1733
1734 #if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
1735         if (adev->dm.crc_rd_wrk) {
1736                 flush_work(&adev->dm.crc_rd_wrk->notify_ta_work);
1737                 kfree(adev->dm.crc_rd_wrk);
1738                 adev->dm.crc_rd_wrk = NULL;
1739         }
1740 #endif
1741 #ifdef CONFIG_DRM_AMD_DC_HDCP
1742         if (adev->dm.hdcp_workqueue) {
1743                 hdcp_destroy(&adev->dev->kobj, adev->dm.hdcp_workqueue);
1744                 adev->dm.hdcp_workqueue = NULL;
1745         }
1746
1747         if (adev->dm.dc)
1748                 dc_deinit_callbacks(adev->dm.dc);
1749 #endif
1750
1751         dc_dmub_srv_destroy(&adev->dm.dc->ctx->dmub_srv);
1752
1753         if (dc_enable_dmub_notifications(adev->dm.dc)) {
1754                 kfree(adev->dm.dmub_notify);
1755                 adev->dm.dmub_notify = NULL;
1756                 destroy_workqueue(adev->dm.delayed_hpd_wq);
1757                 adev->dm.delayed_hpd_wq = NULL;
1758         }
1759
1760         if (adev->dm.dmub_bo)
1761                 amdgpu_bo_free_kernel(&adev->dm.dmub_bo,
1762                                       &adev->dm.dmub_bo_gpu_addr,
1763                                       &adev->dm.dmub_bo_cpu_addr);
1764
1765         if (adev->dm.hpd_rx_offload_wq) {
1766                 for (i = 0; i < adev->dm.dc->caps.max_links; i++) {
1767                         if (adev->dm.hpd_rx_offload_wq[i].wq) {
1768                                 destroy_workqueue(adev->dm.hpd_rx_offload_wq[i].wq);
1769                                 adev->dm.hpd_rx_offload_wq[i].wq = NULL;
1770                         }
1771                 }
1772
1773                 kfree(adev->dm.hpd_rx_offload_wq);
1774                 adev->dm.hpd_rx_offload_wq = NULL;
1775         }
1776
1777         /* DC Destroy TODO: Replace destroy DAL */
1778         if (adev->dm.dc)
1779                 dc_destroy(&adev->dm.dc);
1780         /*
1781          * TODO: pageflip, vblank interrupt
1782          *
1783          * amdgpu_dm_irq_fini(adev);
1784          */
1785
1786         if (adev->dm.cgs_device) {
1787                 amdgpu_cgs_destroy_device(adev->dm.cgs_device);
1788                 adev->dm.cgs_device = NULL;
1789         }
1790         if (adev->dm.freesync_module) {
1791                 mod_freesync_destroy(adev->dm.freesync_module);
1792                 adev->dm.freesync_module = NULL;
1793         }
1794
1795         mutex_destroy(&adev->dm.audio_lock);
1796         mutex_destroy(&adev->dm.dc_lock);
1797         mutex_destroy(&adev->dm.dpia_aux_lock);
1800 }
1801
1802 static int load_dmcu_fw(struct amdgpu_device *adev)
1803 {
1804         const char *fw_name_dmcu = NULL;
1805         int r;
1806         const struct dmcu_firmware_header_v1_0 *hdr;
1807
1808         switch (adev->asic_type) {
1809 #if defined(CONFIG_DRM_AMD_DC_SI)
1810         case CHIP_TAHITI:
1811         case CHIP_PITCAIRN:
1812         case CHIP_VERDE:
1813         case CHIP_OLAND:
1814 #endif
1815         case CHIP_BONAIRE:
1816         case CHIP_HAWAII:
1817         case CHIP_KAVERI:
1818         case CHIP_KABINI:
1819         case CHIP_MULLINS:
1820         case CHIP_TONGA:
1821         case CHIP_FIJI:
1822         case CHIP_CARRIZO:
1823         case CHIP_STONEY:
1824         case CHIP_POLARIS11:
1825         case CHIP_POLARIS10:
1826         case CHIP_POLARIS12:
1827         case CHIP_VEGAM:
1828         case CHIP_VEGA10:
1829         case CHIP_VEGA12:
1830         case CHIP_VEGA20:
1831                 return 0;
1832         case CHIP_NAVI12:
1833                 fw_name_dmcu = FIRMWARE_NAVI12_DMCU;
1834                 break;
1835         case CHIP_RAVEN:
1836                 /* Picasso and Raven2 use the same DMCU firmware image */
1837                 if (ASICREV_IS_PICASSO(adev->external_rev_id) ||
1838                     ASICREV_IS_RAVEN2(adev->external_rev_id))
1839                         fw_name_dmcu = FIRMWARE_RAVEN_DMCU;
1840                 else
1841                         return 0;
1842                 break;
1843         default:
1844                 switch (adev->ip_versions[DCE_HWIP][0]) {
1845                 case IP_VERSION(2, 0, 2):
1846                 case IP_VERSION(2, 0, 3):
1847                 case IP_VERSION(2, 0, 0):
1848                 case IP_VERSION(2, 1, 0):
1849                 case IP_VERSION(3, 0, 0):
1850                 case IP_VERSION(3, 0, 2):
1851                 case IP_VERSION(3, 0, 3):
1852                 case IP_VERSION(3, 0, 1):
1853                 case IP_VERSION(3, 1, 2):
1854                 case IP_VERSION(3, 1, 3):
1855                 case IP_VERSION(3, 1, 4):
1856                 case IP_VERSION(3, 1, 5):
1857                 case IP_VERSION(3, 1, 6):
1858                 case IP_VERSION(3, 2, 0):
1859                 case IP_VERSION(3, 2, 1):
1860                         return 0;
1861                 default:
1862                         break;
1863                 }
1864                 DRM_ERROR("Unsupported ASIC type: 0x%X\n", adev->asic_type);
1865                 return -EINVAL;
1866         }
1867
1868         if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP) {
1869                 DRM_DEBUG_KMS("dm: DMCU firmware not supported on direct or SMU loading\n");
1870                 return 0;
1871         }
1872
1873         r = request_firmware_direct(&adev->dm.fw_dmcu, fw_name_dmcu, adev->dev);
1874         if (r == -ENOENT) {
1875                 /* DMCU firmware is not necessary, so don't raise a fuss if it's missing */
1876                 DRM_DEBUG_KMS("dm: DMCU firmware not found\n");
1877                 adev->dm.fw_dmcu = NULL;
1878                 return 0;
1879         }
1880         if (r) {
1881                 dev_err(adev->dev, "amdgpu_dm: Can't load firmware \"%s\"\n",
1882                         fw_name_dmcu);
1883                 return r;
1884         }
1885
1886         r = amdgpu_ucode_validate(adev->dm.fw_dmcu);
1887         if (r) {
1888                 dev_err(adev->dev, "amdgpu_dm: Can't validate firmware \"%s\"\n",
1889                         fw_name_dmcu);
1890                 release_firmware(adev->dm.fw_dmcu);
1891                 adev->dm.fw_dmcu = NULL;
1892                 return r;
1893         }
1894
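        /* Hand the DMCU image to the PSP loader as two pieces: the ERAM
         * payload and the interrupt vector (INTV) section, with the sizes
         * split via hdr->intv_size_bytes.
         */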
1895         hdr = (const struct dmcu_firmware_header_v1_0 *)adev->dm.fw_dmcu->data;
1896         adev->firmware.ucode[AMDGPU_UCODE_ID_DMCU_ERAM].ucode_id = AMDGPU_UCODE_ID_DMCU_ERAM;
1897         adev->firmware.ucode[AMDGPU_UCODE_ID_DMCU_ERAM].fw = adev->dm.fw_dmcu;
1898         adev->firmware.fw_size +=
1899                 ALIGN(le32_to_cpu(hdr->header.ucode_size_bytes) - le32_to_cpu(hdr->intv_size_bytes), PAGE_SIZE);
1900
1901         adev->firmware.ucode[AMDGPU_UCODE_ID_DMCU_INTV].ucode_id = AMDGPU_UCODE_ID_DMCU_INTV;
1902         adev->firmware.ucode[AMDGPU_UCODE_ID_DMCU_INTV].fw = adev->dm.fw_dmcu;
1903         adev->firmware.fw_size +=
1904                 ALIGN(le32_to_cpu(hdr->intv_size_bytes), PAGE_SIZE);
1905
1906         adev->dm.dmcu_fw_version = le32_to_cpu(hdr->header.ucode_version);
1907
1908         DRM_DEBUG_KMS("PSP loading DMCU firmware\n");
1909
1910         return 0;
1911 }
1912
1913 static uint32_t amdgpu_dm_dmub_reg_read(void *ctx, uint32_t address)
1914 {
1915         struct amdgpu_device *adev = ctx;
1916
1917         return dm_read_reg(adev->dm.dc->ctx, address);
1918 }
1919
1920 static void amdgpu_dm_dmub_reg_write(void *ctx, uint32_t address,
1921                                      uint32_t value)
1922 {
1923         struct amdgpu_device *adev = ctx;
1924
1925         return dm_write_reg(adev->dm.dc->ctx, address, value);
1926 }
1927
1928 static int dm_dmub_sw_init(struct amdgpu_device *adev)
1929 {
1930         struct dmub_srv_create_params create_params;
1931         struct dmub_srv_region_params region_params;
1932         struct dmub_srv_region_info region_info;
1933         struct dmub_srv_fb_params fb_params;
1934         struct dmub_srv_fb_info *fb_info;
1935         struct dmub_srv *dmub_srv;
1936         const struct dmcub_firmware_header_v1_0 *hdr;
1937         const char *fw_name_dmub;
1938         enum dmub_asic dmub_asic;
1939         enum dmub_status status;
1940         int r;
1941
1942         switch (adev->ip_versions[DCE_HWIP][0]) {
1943         case IP_VERSION(2, 1, 0):
1944                 dmub_asic = DMUB_ASIC_DCN21;
1945                 fw_name_dmub = FIRMWARE_RENOIR_DMUB;
1946                 if (ASICREV_IS_GREEN_SARDINE(adev->external_rev_id))
1947                         fw_name_dmub = FIRMWARE_GREEN_SARDINE_DMUB;
1948                 break;
1949         case IP_VERSION(3, 0, 0):
1950                 if (adev->ip_versions[GC_HWIP][0] == IP_VERSION(10, 3, 0)) {
1951                         dmub_asic = DMUB_ASIC_DCN30;
1952                         fw_name_dmub = FIRMWARE_SIENNA_CICHLID_DMUB;
1953                 } else {
1954                         dmub_asic = DMUB_ASIC_DCN30;
1955                         fw_name_dmub = FIRMWARE_NAVY_FLOUNDER_DMUB;
1956                 }
1957                 break;
1958         case IP_VERSION(3, 0, 1):
1959                 dmub_asic = DMUB_ASIC_DCN301;
1960                 fw_name_dmub = FIRMWARE_VANGOGH_DMUB;
1961                 break;
1962         case IP_VERSION(3, 0, 2):
1963                 dmub_asic = DMUB_ASIC_DCN302;
1964                 fw_name_dmub = FIRMWARE_DIMGREY_CAVEFISH_DMUB;
1965                 break;
1966         case IP_VERSION(3, 0, 3):
1967                 dmub_asic = DMUB_ASIC_DCN303;
1968                 fw_name_dmub = FIRMWARE_BEIGE_GOBY_DMUB;
1969                 break;
1970         case IP_VERSION(3, 1, 2):
1971         case IP_VERSION(3, 1, 3):
1972                 dmub_asic = (adev->external_rev_id == YELLOW_CARP_B0) ? DMUB_ASIC_DCN31B : DMUB_ASIC_DCN31;
1973                 fw_name_dmub = FIRMWARE_YELLOW_CARP_DMUB;
1974                 break;
1975         case IP_VERSION(3, 1, 4):
1976                 dmub_asic = DMUB_ASIC_DCN314;
1977                 fw_name_dmub = FIRMWARE_DCN_314_DMUB;
1978                 break;
1979         case IP_VERSION(3, 1, 5):
1980                 dmub_asic = DMUB_ASIC_DCN315;
1981                 fw_name_dmub = FIRMWARE_DCN_315_DMUB;
1982                 break;
1983         case IP_VERSION(3, 1, 6):
1984                 dmub_asic = DMUB_ASIC_DCN316;
1985                 fw_name_dmub = FIRMWARE_DCN316_DMUB;
1986                 break;
1987         case IP_VERSION(3, 2, 0):
1988                 dmub_asic = DMUB_ASIC_DCN32;
1989                 fw_name_dmub = FIRMWARE_DCN_V3_2_0_DMCUB;
1990                 break;
1991         case IP_VERSION(3, 2, 1):
1992                 dmub_asic = DMUB_ASIC_DCN321;
1993                 fw_name_dmub = FIRMWARE_DCN_V3_2_1_DMCUB;
1994                 break;
1995         default:
1996                 /* ASIC doesn't support DMUB. */
1997                 return 0;
1998         }
1999
2000         r = request_firmware_direct(&adev->dm.dmub_fw, fw_name_dmub, adev->dev);
2001         if (r) {
2002                 DRM_ERROR("DMUB firmware loading failed: %d\n", r);
2003                 return 0;
2004         }
2005
2006         r = amdgpu_ucode_validate(adev->dm.dmub_fw);
2007         if (r) {
2008                 DRM_ERROR("Couldn't validate DMUB firmware: %d\n", r);
2009                 return 0;
2010         }
2011
2012         hdr = (const struct dmcub_firmware_header_v1_0 *)adev->dm.dmub_fw->data;
2013         adev->dm.dmcub_fw_version = le32_to_cpu(hdr->header.ucode_version);
2014
2015         if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) {
2016                 adev->firmware.ucode[AMDGPU_UCODE_ID_DMCUB].ucode_id =
2017                         AMDGPU_UCODE_ID_DMCUB;
2018                 adev->firmware.ucode[AMDGPU_UCODE_ID_DMCUB].fw =
2019                         adev->dm.dmub_fw;
2020                 adev->firmware.fw_size +=
2021                         ALIGN(le32_to_cpu(hdr->inst_const_bytes), PAGE_SIZE);
2022
2023                 DRM_INFO("Loading DMUB firmware via PSP: version=0x%08X\n",
2024                          adev->dm.dmcub_fw_version);
2025         }
2026
2027
2028         adev->dm.dmub_srv = kzalloc(sizeof(*adev->dm.dmub_srv), GFP_KERNEL);
2029         dmub_srv = adev->dm.dmub_srv;
2030
2031         if (!dmub_srv) {
2032                 DRM_ERROR("Failed to allocate DMUB service!\n");
2033                 return -ENOMEM;
2034         }
2035
2036         memset(&create_params, 0, sizeof(create_params));
2037         create_params.user_ctx = adev;
2038         create_params.funcs.reg_read = amdgpu_dm_dmub_reg_read;
2039         create_params.funcs.reg_write = amdgpu_dm_dmub_reg_write;
2040         create_params.asic = dmub_asic;
2041
2042         /* Create the DMUB service. */
2043         status = dmub_srv_create(dmub_srv, &create_params);
2044         if (status != DMUB_STATUS_OK) {
2045                 DRM_ERROR("Error creating DMUB service: %d\n", status);
2046                 return -EINVAL;
2047         }
2048
2049         /* Calculate the size of all the regions for the DMUB service. */
2050         memset(&region_params, 0, sizeof(region_params));
2051
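        /* The firmware image wraps its instruction constants with a PSP header
         * and footer; subtract them when sizing the region and skip the header
         * when locating the data within the image.
         */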
2052         region_params.inst_const_size = le32_to_cpu(hdr->inst_const_bytes) -
2053                                         PSP_HEADER_BYTES - PSP_FOOTER_BYTES;
2054         region_params.bss_data_size = le32_to_cpu(hdr->bss_data_bytes);
2055         region_params.vbios_size = adev->bios_size;
2056         region_params.fw_bss_data = region_params.bss_data_size ?
2057                 adev->dm.dmub_fw->data +
2058                 le32_to_cpu(hdr->header.ucode_array_offset_bytes) +
2059                 le32_to_cpu(hdr->inst_const_bytes) : NULL;
2060         region_params.fw_inst_const =
2061                 adev->dm.dmub_fw->data +
2062                 le32_to_cpu(hdr->header.ucode_array_offset_bytes) +
2063                 PSP_HEADER_BYTES;
2064
2065         status = dmub_srv_calc_region_info(dmub_srv, &region_params,
2066                                            &region_info);
2067
2068         if (status != DMUB_STATUS_OK) {
2069                 DRM_ERROR("Error calculating DMUB region info: %d\n", status);
2070                 return -EINVAL;
2071         }
2072
2073         /*
2074          * Allocate a framebuffer based on the total size of all the regions.
2075          * TODO: Move this into GART.
2076          */
2077         r = amdgpu_bo_create_kernel(adev, region_info.fb_size, PAGE_SIZE,
2078                                     AMDGPU_GEM_DOMAIN_VRAM, &adev->dm.dmub_bo,
2079                                     &adev->dm.dmub_bo_gpu_addr,
2080                                     &adev->dm.dmub_bo_cpu_addr);
2081         if (r)
2082                 return r;
2083
2084         /* Rebase the regions on the framebuffer address. */
2085         memset(&fb_params, 0, sizeof(fb_params));
2086         fb_params.cpu_addr = adev->dm.dmub_bo_cpu_addr;
2087         fb_params.gpu_addr = adev->dm.dmub_bo_gpu_addr;
2088         fb_params.region_info = &region_info;
2089
2090         adev->dm.dmub_fb_info =
2091                 kzalloc(sizeof(*adev->dm.dmub_fb_info), GFP_KERNEL);
2092         fb_info = adev->dm.dmub_fb_info;
2093
2094         if (!fb_info) {
2095                 DRM_ERROR(
2096                         "Failed to allocate framebuffer info for DMUB service!\n");
2097                 return -ENOMEM;
2098         }
2099
2100         status = dmub_srv_calc_fb_info(dmub_srv, &fb_params, fb_info);
2101         if (status != DMUB_STATUS_OK) {
2102                 DRM_ERROR("Error calculating DMUB FB info: %d\n", status);
2103                 return -EINVAL;
2104         }
2105
2106         return 0;
2107 }
2108
2109 static int dm_sw_init(void *handle)
2110 {
2111         struct amdgpu_device *adev = (struct amdgpu_device *)handle;
2112         int r;
2113
2114         r = dm_dmub_sw_init(adev);
2115         if (r)
2116                 return r;
2117
2118         return load_dmcu_fw(adev);
2119 }
2120
2121 static int dm_sw_fini(void *handle)
2122 {
2123         struct amdgpu_device *adev = (struct amdgpu_device *)handle;
2124
2125         kfree(adev->dm.dmub_fb_info);
2126         adev->dm.dmub_fb_info = NULL;
2127
2128         if (adev->dm.dmub_srv) {
2129                 dmub_srv_destroy(adev->dm.dmub_srv);
2130                 adev->dm.dmub_srv = NULL;
2131         }
2132
2133         release_firmware(adev->dm.dmub_fw);
2134         adev->dm.dmub_fw = NULL;
2135
2136         release_firmware(adev->dm.fw_dmcu);
2137         adev->dm.fw_dmcu = NULL;
2138
2139         return 0;
2140 }
2141
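/*
 * Walk all connectors and start topology management on every link that was
 * detected as an MST branch device.
 */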
2142 static int detect_mst_link_for_all_connectors(struct drm_device *dev)
2143 {
2144         struct amdgpu_dm_connector *aconnector;
2145         struct drm_connector *connector;
2146         struct drm_connector_list_iter iter;
2147         int ret = 0;
2148
2149         drm_connector_list_iter_begin(dev, &iter);
2150         drm_for_each_connector_iter(connector, &iter) {
2151                 aconnector = to_amdgpu_dm_connector(connector);
2152                 if (aconnector->dc_link->type == dc_connection_mst_branch &&
2153                     aconnector->mst_mgr.aux) {
2154                         DRM_DEBUG_DRIVER("DM_MST: starting TM on aconnector: %p [id: %d]\n",
2155                                          aconnector,
2156                                          aconnector->base.base.id);
2157
2158                         ret = drm_dp_mst_topology_mgr_set_mst(&aconnector->mst_mgr, true);
2159                         if (ret < 0) {
2160                                 DRM_ERROR("DM_MST: Failed to start MST\n");
2161                                 aconnector->dc_link->type =
2162                                         dc_connection_single;
2163                                 break;
2164                         }
2165                 }
2166         }
2167         drm_connector_list_iter_end(&iter);
2168
2169         return ret;
2170 }
2171
2172 static int dm_late_init(void *handle)
2173 {
2174         struct amdgpu_device *adev = (struct amdgpu_device *)handle;
2175
2176         struct dmcu_iram_parameters params;
2177         unsigned int linear_lut[16];
2178         int i;
2179         struct dmcu *dmcu = NULL;
2180
2181         dmcu = adev->dm.dc->res_pool->dmcu;
2182
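        /* Build a 16-entry linear backlight LUT spanning 0..0xFFFF. */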
2183         for (i = 0; i < 16; i++)
2184                 linear_lut[i] = 0xFFFF * i / 15;
2185
2186         params.set = 0;
2187         params.backlight_ramping_override = false;
2188         params.backlight_ramping_start = 0xCCCC;
2189         params.backlight_ramping_reduction = 0xCCCCCCCC;
2190         params.backlight_lut_array_size = 16;
2191         params.backlight_lut_array = linear_lut;
2192
2193         /* Min backlight level after ABM reduction; don't allow below 1%:
2194          * 0xFFFF * 0.01 = 0x28F
2195          */
2196         params.min_abm_backlight = 0x28F;
2197         /* In the case where ABM is implemented on dmcub,
2198          * the dmcu object will be null.
2199          * ABM 2.4 and up are implemented on dmcub.
2200          */
2201         if (dmcu) {
2202                 if (!dmcu_load_iram(dmcu, params))
2203                         return -EINVAL;
2204         } else if (adev->dm.dc->ctx->dmub_srv) {
2205                 struct dc_link *edp_links[MAX_NUM_EDP];
2206                 int edp_num;
2207
2208                 get_edp_links(adev->dm.dc, edp_links, &edp_num);
2209                 for (i = 0; i < edp_num; i++) {
2210                         if (!dmub_init_abm_config(adev->dm.dc->res_pool, params, i))
2211                                 return -EINVAL;
2212                 }
2213         }
2214
2215         return detect_mst_link_for_all_connectors(adev_to_drm(adev));
2216 }
2217
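/*
 * Suspend or resume the MST topology managers of all root MST connectors.
 * If a manager fails to resume, its topology is stopped and a hotplug event
 * is generated so userspace can re-probe.
 */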
2218 static void s3_handle_mst(struct drm_device *dev, bool suspend)
2219 {
2220         struct amdgpu_dm_connector *aconnector;
2221         struct drm_connector *connector;
2222         struct drm_connector_list_iter iter;
2223         struct drm_dp_mst_topology_mgr *mgr;
2224         int ret;
2225         bool need_hotplug = false;
2226
2227         drm_connector_list_iter_begin(dev, &iter);
2228         drm_for_each_connector_iter(connector, &iter) {
2229                 aconnector = to_amdgpu_dm_connector(connector);
2230                 if (aconnector->dc_link->type != dc_connection_mst_branch ||
2231                     aconnector->mst_port)
2232                         continue;
2233
2234                 mgr = &aconnector->mst_mgr;
2235
2236                 if (suspend) {
2237                         drm_dp_mst_topology_mgr_suspend(mgr);
2238                 } else {
2239                         ret = drm_dp_mst_topology_mgr_resume(mgr, true);
2240                         if (ret < 0) {
2241                                 dm_helpers_dp_mst_stop_top_mgr(aconnector->dc_link->ctx,
2242                                         aconnector->dc_link);
2243                                 need_hotplug = true;
2244                         }
2245                 }
2246         }
2247         drm_connector_list_iter_end(&iter);
2248
2249         if (need_hotplug)
2250                 drm_kms_helper_hotplug_event(dev);
2251 }
2252
2253 static int amdgpu_dm_smu_write_watermarks_table(struct amdgpu_device *adev)
2254 {
2255         int ret = 0;
2256
2257         /* This interface is for dGPU Navi1x. The Linux dc-pplib interface depends
2258          * on the Windows driver dc implementation.
2259          * For Navi1x, clock settings of dcn watermarks are fixed. The settings
2260          * should be passed to smu during boot up and resume from s3.
2261          * Boot up: dc calculates dcn watermark clock settings within dc_create,
2262          * dcn20_resource_construct,
2263          * then calls the pplib functions below to pass the settings to smu:
2264          * smu_set_watermarks_for_clock_ranges
2265          * smu_set_watermarks_table
2266          * navi10_set_watermarks_table
2267          * smu_write_watermarks_table
2268          *
2269          * For Renoir, clock settings of dcn watermarks are also fixed values.
2270          * dc has implemented a different flow for the Windows driver:
2271          * dc_hardware_init / dc_set_power_state
2272          * dcn10_init_hw
2273          * notify_wm_ranges
2274          * set_wm_ranges
2275          * -- Linux
2276          * smu_set_watermarks_for_clock_ranges
2277          * renoir_set_watermarks_table
2278          * smu_write_watermarks_table
2279          *
2280          * For Linux,
2281          * dc_hardware_init -> amdgpu_dm_init
2282          * dc_set_power_state --> dm_resume
2283          *
2284          * Therefore, this function applies to navi10/12/14 but not Renoir.
2285          *
2286          */
2287         switch (adev->ip_versions[DCE_HWIP][0]) {
2288         case IP_VERSION(2, 0, 2):
2289         case IP_VERSION(2, 0, 0):
2290                 break;
2291         default:
2292                 return 0;
2293         }
2294
2295         ret = amdgpu_dpm_write_watermarks_table(adev);
2296         if (ret) {
2297                 DRM_ERROR("Failed to update WMTABLE!\n");
2298                 return ret;
2299         }
2300
2301         return 0;
2302 }
2303
2304 /**
2305  * dm_hw_init() - Initialize DC device
2306  * @handle: The base driver device containing the amdgpu_dm device.
2307  *
2308  * Initialize the &struct amdgpu_display_manager device. This involves calling
2309  * the initializers of each DM component, then populating the struct with them.
2310  *
2311  * Although the function implies hardware initialization, both hardware and
2312  * software are initialized here. Splitting them out to their relevant init
2313  * hooks is a future TODO item.
2314  *
2315  * Some notable things that are initialized here:
2316  *
2317  * - Display Core, both software and hardware
2318  * - DC modules that we need (freesync and color management)
2319  * - DRM software states
2320  * - Interrupt sources and handlers
2321  * - Vblank support
2322  * - Debug FS entries, if enabled
2323  */
2324 static int dm_hw_init(void *handle)
2325 {
2326         struct amdgpu_device *adev = (struct amdgpu_device *)handle;
        int r;

2327         /* Create DAL display manager */
2328         r = amdgpu_dm_init(adev);
        if (r)
                return r;
2329         amdgpu_dm_hpd_init(adev);
2330
2331         return 0;
2332 }
2333
2334 /**
2335  * dm_hw_fini() - Teardown DC device
2336  * @handle: The base driver device containing the amdgpu_dm device.
2337  *
2338  * Teardown components within &struct amdgpu_display_manager that require
2339  * cleanup. This involves cleaning up the DRM device, DC, and any modules that
2340  * were loaded. Also flush IRQ workqueues and disable them.
2341  */
2342 static int dm_hw_fini(void *handle)
2343 {
2344         struct amdgpu_device *adev = (struct amdgpu_device *)handle;
2345
2346         amdgpu_dm_hpd_fini(adev);
2347
2348         amdgpu_dm_irq_fini(adev);
2349         amdgpu_dm_fini(adev);
2350         return 0;
2351 }
2352
2353
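/*
 * Enable or disable the pageflip and vblank interrupts of every CRTC that
 * drives a stream with active planes in @state. Used to quiesce display
 * interrupts across a GPU reset (see dm_suspend()/dm_resume()).
 */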
2354 static void dm_gpureset_toggle_interrupts(struct amdgpu_device *adev,
2355                                  struct dc_state *state, bool enable)
2356 {
2357         enum dc_irq_source irq_source;
2358         struct amdgpu_crtc *acrtc;
2359         int rc = -EBUSY;
2360         int i = 0;
2361
2362         for (i = 0; i < state->stream_count; i++) {
2363                 acrtc = get_crtc_by_otg_inst(
2364                                 adev, state->stream_status[i].primary_otg_inst);
2365
2366                 if (acrtc && state->stream_status[i].plane_count != 0) {
2367                         irq_source = IRQ_TYPE_PFLIP + acrtc->otg_inst;
2368                         rc = dc_interrupt_set(adev->dm.dc, irq_source, enable) ? 0 : -EBUSY;
2369                         DRM_DEBUG_VBL("crtc %d - pflip irq %sabling: r=%d\n",
2370                                       acrtc->crtc_id, enable ? "en" : "dis", rc);
2371                         if (rc)
2372                                 DRM_WARN("Failed to %s pflip interrupts\n",
2373                                          enable ? "enable" : "disable");
2374
2375                         if (enable) {
2376                                 rc = dm_enable_vblank(&acrtc->base);
2377                                 if (rc)
2378                                         DRM_WARN("Failed to enable vblank interrupts\n");
2379                         } else {
2380                                 dm_disable_vblank(&acrtc->base);
2381                         }
2382
2383                 }
2384         }
2385
2386 }
2387
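/*
 * Commit a copy of the current DC state from which every stream (and its
 * planes) has been removed, effectively blanking all displays.
 */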
2388 static enum dc_status amdgpu_dm_commit_zero_streams(struct dc *dc)
2389 {
2390         struct dc_state *context = NULL;
2391         enum dc_status res = DC_ERROR_UNEXPECTED;
2392         int i;
2393         struct dc_stream_state *del_streams[MAX_PIPES];
2394         int del_streams_count = 0;
2395
2396         memset(del_streams, 0, sizeof(del_streams));
2397
2398         context = dc_create_state(dc);
2399         if (context == NULL)
2400                 goto context_alloc_fail;
2401
2402         dc_resource_state_copy_construct_current(dc, context);
2403
2404         /* First remove from context all streams */
2405         for (i = 0; i < context->stream_count; i++) {
2406                 struct dc_stream_state *stream = context->streams[i];
2407
2408                 del_streams[del_streams_count++] = stream;
2409         }
2410
2411         /* Remove all planes for removed streams and then remove the streams */
2412         for (i = 0; i < del_streams_count; i++) {
2413                 if (!dc_rem_all_planes_for_stream(dc, del_streams[i], context)) {
2414                         res = DC_FAIL_DETACH_SURFACES;
2415                         goto fail;
2416                 }
2417
2418                 res = dc_remove_stream_from_ctx(dc, context, del_streams[i]);
2419                 if (res != DC_OK)
2420                         goto fail;
2421         }
2422
2423         res = dc_commit_state(dc, context);
2424
2425 fail:
2426         dc_release_state(context);
2427
2428 context_alloc_fail:
2429         return res;
2430 }
2431
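/* Wait for all pending HPD RX IRQ offload work to finish; used on suspend
 * and GPU reset.
 */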
2432 static void hpd_rx_irq_work_suspend(struct amdgpu_display_manager *dm)
2433 {
2434         int i;
2435
2436         if (dm->hpd_rx_offload_wq) {
2437                 for (i = 0; i < dm->dc->caps.max_links; i++)
2438                         flush_workqueue(dm->hpd_rx_offload_wq[i].wq);
2439         }
2440 }
2441
2442 static int dm_suspend(void *handle)
2443 {
2444         struct amdgpu_device *adev = handle;
2445         struct amdgpu_display_manager *dm = &adev->dm;
2446         int ret = 0;
2447
2448         if (amdgpu_in_reset(adev)) {
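                /* dc_lock is taken here and intentionally held across the GPU
                 * reset; the matching unlock is in dm_resume()'s
                 * amdgpu_in_reset() path.
                 */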
2449                 mutex_lock(&dm->dc_lock);
2450
2451                 dc_allow_idle_optimizations(adev->dm.dc, false);
2452
2453                 dm->cached_dc_state = dc_copy_state(dm->dc->current_state);
2454
2455                 dm_gpureset_toggle_interrupts(adev, dm->cached_dc_state, false);
2456
2457                 amdgpu_dm_commit_zero_streams(dm->dc);
2458
2459                 amdgpu_dm_irq_suspend(adev);
2460
2461                 hpd_rx_irq_work_suspend(dm);
2462
2463                 return ret;
2464         }
2465
2466         WARN_ON(adev->dm.cached_state);
2467         adev->dm.cached_state = drm_atomic_helper_suspend(adev_to_drm(adev));
2468
2469         s3_handle_mst(adev_to_drm(adev), true);
2470
2471         amdgpu_dm_irq_suspend(adev);
2472
2473         hpd_rx_irq_work_suspend(dm);
2474
2475         dc_set_power_state(dm->dc, DC_ACPI_CM_POWER_STATE_D3);
2476
2477         return 0;
2478 }
2479
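/*
 * Return the first connector whose new state in @state is bound to @crtc,
 * or NULL if no connector in the atomic state targets that CRTC.
 */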
2480 struct amdgpu_dm_connector *
2481 amdgpu_dm_find_first_crtc_matching_connector(struct drm_atomic_state *state,
2482                                              struct drm_crtc *crtc)
2483 {
2484         uint32_t i;
2485         struct drm_connector_state *new_con_state;
2486         struct drm_connector *connector;
2487         struct drm_crtc *crtc_from_state;
2488
2489         for_each_new_connector_in_state(state, connector, new_con_state, i) {
2490                 crtc_from_state = new_con_state->crtc;
2491
2492                 if (crtc_from_state == crtc)
2493                         return to_amdgpu_dm_connector(connector);
2494         }
2495
2496         return NULL;
2497 }
2498
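/*
 * Emulate a successful link detection for connectors that are forced on but
 * report no physical connection: create a sink that matches the connector
 * signal and try to read the locally stored EDID.
 */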
2499 static void emulated_link_detect(struct dc_link *link)
2500 {
2501         struct dc_sink_init_data sink_init_data = { 0 };
2502         struct display_sink_capability sink_caps = { 0 };
2503         enum dc_edid_status edid_status;
2504         struct dc_context *dc_ctx = link->ctx;
2505         struct dc_sink *sink = NULL;
2506         struct dc_sink *prev_sink = NULL;
2507
2508         link->type = dc_connection_none;
2509         prev_sink = link->local_sink;
2510
2511         if (prev_sink)
2512                 dc_sink_release(prev_sink);
2513
2514         switch (link->connector_signal) {
2515         case SIGNAL_TYPE_HDMI_TYPE_A: {
2516                 sink_caps.transaction_type = DDC_TRANSACTION_TYPE_I2C;
2517                 sink_caps.signal = SIGNAL_TYPE_HDMI_TYPE_A;
2518                 break;
2519         }
2520
2521         case SIGNAL_TYPE_DVI_SINGLE_LINK: {
2522                 sink_caps.transaction_type = DDC_TRANSACTION_TYPE_I2C;
2523                 sink_caps.signal = SIGNAL_TYPE_DVI_SINGLE_LINK;
2524                 break;
2525         }
2526
2527         case SIGNAL_TYPE_DVI_DUAL_LINK: {
2528                 sink_caps.transaction_type = DDC_TRANSACTION_TYPE_I2C;
2529                 sink_caps.signal = SIGNAL_TYPE_DVI_DUAL_LINK;
2530                 break;
2531         }
2532
2533         case SIGNAL_TYPE_LVDS: {
2534                 sink_caps.transaction_type = DDC_TRANSACTION_TYPE_I2C;
2535                 sink_caps.signal = SIGNAL_TYPE_LVDS;
2536                 break;
2537         }
2538
2539         case SIGNAL_TYPE_EDP: {
2540                 sink_caps.transaction_type =
2541                         DDC_TRANSACTION_TYPE_I2C_OVER_AUX;
2542                 sink_caps.signal = SIGNAL_TYPE_EDP;
2543                 break;
2544         }
2545
2546         case SIGNAL_TYPE_DISPLAY_PORT: {
2547                 sink_caps.transaction_type =
2548                         DDC_TRANSACTION_TYPE_I2C_OVER_AUX;
2549                 sink_caps.signal = SIGNAL_TYPE_VIRTUAL;
2550                 break;
2551         }
2552
2553         default:
2554                 DC_ERROR("Invalid connector type! signal:%d\n",
2555                         link->connector_signal);
2556                 return;
2557         }
2558
2559         sink_init_data.link = link;
2560         sink_init_data.sink_signal = sink_caps.signal;
2561
2562         sink = dc_sink_create(&sink_init_data);
2563         if (!sink) {
2564                 DC_ERROR("Failed to create sink!\n");
2565                 return;
2566         }
2567
2568         /* dc_sink_create returns a new reference */
2569         link->local_sink = sink;
2570
2571         edid_status = dm_helpers_read_local_edid(
2572                         link->ctx,
2573                         link,
2574                         sink);
2575
2576         if (edid_status != EDID_OK)
2577                 DC_ERROR("Failed to read EDID\n");
2578
2579 }
2580
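/*
 * Re-commit the streams of the cached (pre-reset) DC state with
 * force_full_update set on every plane so that DC reprograms all surfaces
 * after the GPU reset.
 */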
2581 static void dm_gpureset_commit_state(struct dc_state *dc_state,
2582                                      struct amdgpu_display_manager *dm)
2583 {
2584         struct {
2585                 struct dc_surface_update surface_updates[MAX_SURFACES];
2586                 struct dc_plane_info plane_infos[MAX_SURFACES];
2587                 struct dc_scaling_info scaling_infos[MAX_SURFACES];
2588                 struct dc_flip_addrs flip_addrs[MAX_SURFACES];
2589                 struct dc_stream_update stream_update;
2590         } *bundle;
2591         int k, m;
2592
2593         bundle = kzalloc(sizeof(*bundle), GFP_KERNEL);
2594
2595         if (!bundle) {
2596                 dm_error("Failed to allocate update bundle\n");
2597                 goto cleanup;
2598         }
2599
2600         for (k = 0; k < dc_state->stream_count; k++) {
2601                 bundle->stream_update.stream = dc_state->streams[k];
2602
2603                 for (m = 0; m < dc_state->stream_status[k].plane_count; m++) {
2604                         bundle->surface_updates[m].surface =
2605                                 dc_state->stream_status[k].plane_states[m];
2606                         bundle->surface_updates[m].surface->force_full_update =
2607                                 true;
2608                 }
2609                 dc_commit_updates_for_stream(
2610                         dm->dc, bundle->surface_updates,
2611                         dc_state->stream_status[k].plane_count,
2612                         dc_state->streams[k], &bundle->stream_update, dc_state);
2613         }
2614
2615 cleanup:
2616         kfree(bundle);
2619 }
2620
2621 static int dm_resume(void *handle)
2622 {
2623         struct amdgpu_device *adev = handle;
2624         struct drm_device *ddev = adev_to_drm(adev);
2625         struct amdgpu_display_manager *dm = &adev->dm;
2626         struct amdgpu_dm_connector *aconnector;
2627         struct drm_connector *connector;
2628         struct drm_connector_list_iter iter;
2629         struct drm_crtc *crtc;
2630         struct drm_crtc_state *new_crtc_state;
2631         struct dm_crtc_state *dm_new_crtc_state;
2632         struct drm_plane *plane;
2633         struct drm_plane_state *new_plane_state;
2634         struct dm_plane_state *dm_new_plane_state;
2635         struct dm_atomic_state *dm_state = to_dm_atomic_state(dm->atomic_obj.state);
2636         enum dc_connection_type new_connection_type = dc_connection_none;
2637         struct dc_state *dc_state;
2638         int i, r, j;
2639
2640         if (amdgpu_in_reset(adev)) {
2641                 dc_state = dm->cached_dc_state;
2642
2643                 /*
2644                  * The dc->current_state is backed up into dm->cached_dc_state
2645                  * before we commit 0 streams.
2646                  *
2647                  * DC will clear link encoder assignments on the real state
2648                  * but the changes won't propagate over to the copy we made
2649                  * before the 0 streams commit.
2650                  *
2651                  * DC expects that link encoder assignments are *not* valid
2652                  * when committing a state, so as a workaround we can copy
2653                  * off of the current state.
2654                  *
2655                  * We lose the previous assignments, but we had already
2656                  * committed 0 streams anyway.
2657                  */
2658                 link_enc_cfg_copy(adev->dm.dc->current_state, dc_state);
2659
2660                 r = dm_dmub_hw_init(adev);
2661                 if (r)
2662                         DRM_ERROR("DMUB interface failed to initialize: status=%d\n", r);
2663
2664                 dc_set_power_state(dm->dc, DC_ACPI_CM_POWER_STATE_D0);
2665                 dc_resume(dm->dc);
2666
2667                 amdgpu_dm_irq_resume_early(adev);
2668
2669                 for (i = 0; i < dc_state->stream_count; i++) {
2670                         dc_state->streams[i]->mode_changed = true;
2671                         for (j = 0; j < dc_state->stream_status[i].plane_count; j++) {
2672                                 dc_state->stream_status[i].plane_states[j]->update_flags.raw
2673                                         = 0xffffffff;
2674                         }
2675                 }
2676
2677                 if (dc_is_dmub_outbox_supported(adev->dm.dc)) {
2678                         amdgpu_dm_outbox_init(adev);
2679                         dc_enable_dmub_outbox(adev->dm.dc);
2680                 }
2681
2682                 WARN_ON(!dc_commit_state(dm->dc, dc_state));
2683
2684                 dm_gpureset_commit_state(dm->cached_dc_state, dm);
2685
2686                 dm_gpureset_toggle_interrupts(adev, dm->cached_dc_state, true);
2687
2688                 dc_release_state(dm->cached_dc_state);
2689                 dm->cached_dc_state = NULL;
2690
2691                 amdgpu_dm_irq_resume_late(adev);
2692
2693                 mutex_unlock(&dm->dc_lock);
2694
2695                 return 0;
2696         }
2697         /* Recreate dc_state - DC invalidates it when setting power state to S3. */
2698         dc_release_state(dm_state->context);
2699         dm_state->context = dc_create_state(dm->dc);
2700         /* TODO: Remove dc_state->dccg, use dc->dccg directly. */
2701         dc_resource_state_construct(dm->dc, dm_state->context);
2702
2703         /* Before powering on DC we need to re-initialize DMUB. */
2704         dm_dmub_hw_resume(adev);
2705
2706         /* Re-enable outbox interrupts for DPIA. */
2707         if (dc_is_dmub_outbox_supported(adev->dm.dc)) {
2708                 amdgpu_dm_outbox_init(adev);
2709                 dc_enable_dmub_outbox(adev->dm.dc);
2710         }
2711
2712         /* power on hardware */
2713         dc_set_power_state(dm->dc, DC_ACPI_CM_POWER_STATE_D0);
2714
2715         /* program HPD filter */
2716         dc_resume(dm->dc);
2717
2718         /*
2719          * Enable HPD RX IRQ early: this must be done before the mode is set,
2720          * since short-pulse interrupts are used for MST.
2721          */
2722         amdgpu_dm_irq_resume_early(adev);
2723
2724         /* On resume we need to rewrite the MSTM control bits to enable MST */
2725         s3_handle_mst(ddev, false);
2726
2727         /* Do detection */
2728         drm_connector_list_iter_begin(ddev, &iter);
2729         drm_for_each_connector_iter(connector, &iter) {
2730                 aconnector = to_amdgpu_dm_connector(connector);
2731
2732                 /*
2733                  * This is the case of traversing through already-created
2734                  * MST connectors; they should be skipped.
2735                  */
2736                 if (aconnector->dc_link &&
2737                     aconnector->dc_link->type == dc_connection_mst_branch)
2738                         continue;
2739
2740                 mutex_lock(&aconnector->hpd_lock);
2741                 if (!dc_link_detect_sink(aconnector->dc_link, &new_connection_type))
2742                         DRM_ERROR("KMS: Failed to detect connector\n");
2743
2744                 if (aconnector->base.force && new_connection_type == dc_connection_none) {
2745                         emulated_link_detect(aconnector->dc_link);
2746                 } else {
2747                         mutex_lock(&dm->dc_lock);
2748                         dc_link_detect(aconnector->dc_link, DETECT_REASON_HPD);
2749                         mutex_unlock(&dm->dc_lock);
2750                 }
2751
2752                 if (aconnector->fake_enable && aconnector->dc_link->local_sink)
2753                         aconnector->fake_enable = false;
2754
2755                 if (aconnector->dc_sink)
2756                         dc_sink_release(aconnector->dc_sink);
2757                 aconnector->dc_sink = NULL;
2758                 amdgpu_dm_update_connector_after_detect(aconnector);
2759                 mutex_unlock(&aconnector->hpd_lock);
2760         }
2761         drm_connector_list_iter_end(&iter);
2762
2763         /* Force mode set in atomic commit */
2764         for_each_new_crtc_in_state(dm->cached_state, crtc, new_crtc_state, i)
2765                 new_crtc_state->active_changed = true;
2766
2767         /*
2768          * atomic_check is expected to create the dc states. We need to release
2769          * them here, since they were duplicated as part of the suspend
2770          * procedure.
2771          */
2772         for_each_new_crtc_in_state(dm->cached_state, crtc, new_crtc_state, i) {
2773                 dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
2774                 if (dm_new_crtc_state->stream) {
2775                         WARN_ON(kref_read(&dm_new_crtc_state->stream->refcount) > 1);
2776                         dc_stream_release(dm_new_crtc_state->stream);
2777                         dm_new_crtc_state->stream = NULL;
2778                 }
2779         }
2780
2781         for_each_new_plane_in_state(dm->cached_state, plane, new_plane_state, i) {
2782                 dm_new_plane_state = to_dm_plane_state(new_plane_state);
2783                 if (dm_new_plane_state->dc_state) {
2784                         WARN_ON(kref_read(&dm_new_plane_state->dc_state->refcount) > 1);
2785                         dc_plane_state_release(dm_new_plane_state->dc_state);
2786                         dm_new_plane_state->dc_state = NULL;
2787                 }
2788         }
2789
2790         drm_atomic_helper_resume(ddev, dm->cached_state);
2791
2792         dm->cached_state = NULL;
2793
2794         amdgpu_dm_irq_resume_late(adev);
2795
2796         amdgpu_dm_smu_write_watermarks_table(adev);
2797
2798         return 0;
2799 }
2800
2801 /**
2802  * DOC: DM Lifecycle
2803  *
2804  * DM (and consequently DC) is registered in the amdgpu base driver as an IP
2805  * block. When CONFIG_DRM_AMD_DC is enabled, the DM device IP block is added to
2806  * the base driver's device list to be initialized and torn down accordingly.
2807  *
2808  * The functions to do so are provided as hooks in &struct amd_ip_funcs.
2809  */
2810
2811 static const struct amd_ip_funcs amdgpu_dm_funcs = {
2812         .name = "dm",
2813         .early_init = dm_early_init,
2814         .late_init = dm_late_init,
2815         .sw_init = dm_sw_init,
2816         .sw_fini = dm_sw_fini,
2817         .early_fini = amdgpu_dm_early_fini,
2818         .hw_init = dm_hw_init,
2819         .hw_fini = dm_hw_fini,
2820         .suspend = dm_suspend,
2821         .resume = dm_resume,
2822         .is_idle = dm_is_idle,
2823         .wait_for_idle = dm_wait_for_idle,
2824         .check_soft_reset = dm_check_soft_reset,
2825         .soft_reset = dm_soft_reset,
2826         .set_clockgating_state = dm_set_clockgating_state,
2827         .set_powergating_state = dm_set_powergating_state,
2828 };
2829
2830 const struct amdgpu_ip_block_version dm_ip_block = {
2832         .type = AMD_IP_BLOCK_TYPE_DCE,
2833         .major = 1,
2834         .minor = 0,
2835         .rev = 0,
2836         .funcs = &amdgpu_dm_funcs,
2837 };
2838
2839
2840 /**
2841  * DOC: atomic
2842  *
2843  * *WIP*
2844  */
2845
2846 static const struct drm_mode_config_funcs amdgpu_dm_mode_funcs = {
2847         .fb_create = amdgpu_display_user_framebuffer_create,
2848         .get_format_info = amd_get_format_info,
2849         .atomic_check = amdgpu_dm_atomic_check,
2850         .atomic_commit = drm_atomic_helper_commit,
2851 };
2852
2853 static struct drm_mode_config_helper_funcs amdgpu_dm_mode_config_helperfuncs = {
2854         .atomic_commit_tail = amdgpu_dm_atomic_commit_tail,
2855         .atomic_commit_setup = drm_dp_mst_atomic_setup_commit,
2856 };
2857
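/*
 * Refresh the extended backlight caps (OLED/AUX control support and the
 * panel's luminance range) for an eDP connector from its DPCD sink ext
 * caps. Does nothing for non-eDP links or links that are not registered
 * as one of the DM backlight links.
 */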
2858 static void update_connector_ext_caps(struct amdgpu_dm_connector *aconnector)
2859 {
2860         struct amdgpu_dm_backlight_caps *caps;
2861         struct amdgpu_display_manager *dm;
2862         struct drm_connector *conn_base;
2863         struct amdgpu_device *adev;
2864         struct dc_link *link = NULL;
2865         struct drm_luminance_range_info *luminance_range;
2866         int i;
2867
2868         if (!aconnector || !aconnector->dc_link)
2869                 return;
2870
2871         link = aconnector->dc_link;
2872         if (link->connector_signal != SIGNAL_TYPE_EDP)
2873                 return;
2874
2875         conn_base = &aconnector->base;
2876         adev = drm_to_adev(conn_base->dev);
2877         dm = &adev->dm;
2878         for (i = 0; i < dm->num_of_edps; i++) {
2879                 if (link == dm->backlight_link[i])
2880                         break;
2881         }
2882         if (i >= dm->num_of_edps)
2883                 return;
2884         caps = &dm->backlight_caps[i];
2885         caps->ext_caps = &aconnector->dc_link->dpcd_sink_ext_caps;
2886         caps->aux_support = false;
2887
2888         if (caps->ext_caps->bits.oled == 1 /*||
2889             caps->ext_caps->bits.sdr_aux_backlight_control == 1 ||
2890             caps->ext_caps->bits.hdr_aux_backlight_control == 1*/)
2891                 caps->aux_support = true;
2892
2893         if (amdgpu_backlight == 0)
2894                 caps->aux_support = false;
2895         else if (amdgpu_backlight == 1)
2896                 caps->aux_support = true;
2897
2898         luminance_range = &conn_base->display_info.luminance_range;
2899         caps->aux_min_input_signal = luminance_range->min_luminance;
2900         caps->aux_max_input_signal = luminance_range->max_luminance;
2901 }
2902
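/*
 * Synchronize DRM connector state with the result of the most recent DC
 * link detection: adopt or drop the dc_sink, update the EDID property,
 * refresh FreeSync caps and notify DP CEC. Called from both the HPD and
 * the resume paths.
 */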
2903 void amdgpu_dm_update_connector_after_detect(
2904                 struct amdgpu_dm_connector *aconnector)
2905 {
2906         struct drm_connector *connector = &aconnector->base;
2907         struct drm_device *dev = connector->dev;
2908         struct dc_sink *sink;
2909
2910         /* MST handled by drm_mst framework */
2911         if (aconnector->mst_mgr.mst_state)
2912                 return;
2913
2914         sink = aconnector->dc_link->local_sink;
2915         if (sink)
2916                 dc_sink_retain(sink);
2917
2918         /*
2919          * An EDID-managed connector gets its first update only in the mode_valid
2920          * hook; then the connector sink is set to either a fake or the physical
2921          * sink, depending on the link status. Skip if already done during boot.
2922          */
2923         if (aconnector->base.force != DRM_FORCE_UNSPECIFIED
2924                         && aconnector->dc_em_sink) {
2925
2926                 /*
2927                  * For headless S3 resume, use the emulated sink (dc_em_sink) to
2928                  * fake a stream, because connector->sink is set to NULL on resume.
2929                  */
2930                 mutex_lock(&dev->mode_config.mutex);
2931
2932                 if (sink) {
2933                         if (aconnector->dc_sink) {
2934                                 amdgpu_dm_update_freesync_caps(connector, NULL);
2935                                 /*
2936                                  * The retain/release below bump up the sink's refcount:
2937                                  * the link no longer points to it after disconnect, so
2938                                  * the next CRTC-to-connector reshuffle by the UMD would
2939                                  * otherwise trigger an unwanted dc_sink release.
2940                                  */
2941                                 dc_sink_release(aconnector->dc_sink);
2942                         }
2943                         aconnector->dc_sink = sink;
2944                         dc_sink_retain(aconnector->dc_sink);
2945                         amdgpu_dm_update_freesync_caps(connector,
2946                                         aconnector->edid);
2947                 } else {
2948                         amdgpu_dm_update_freesync_caps(connector, NULL);
2949                         if (!aconnector->dc_sink) {
2950                                 aconnector->dc_sink = aconnector->dc_em_sink;
2951                                 dc_sink_retain(aconnector->dc_sink);
2952                         }
2953                 }
2954
2955                 mutex_unlock(&dev->mode_config.mutex);
2956
2957                 if (sink)
2958                         dc_sink_release(sink);
2959                 return;
2960         }
2961
2962         /*
2963          * TODO: temporary guard while looking for a proper fix.
2964          * If this sink is an MST sink, we should not do anything.
2965          */
2966         if (sink && sink->sink_signal == SIGNAL_TYPE_DISPLAY_PORT_MST) {
2967                 dc_sink_release(sink);
2968                 return;
2969         }
2970
2971         if (aconnector->dc_sink == sink) {
2972                 /*
2973                  * We got a DP short pulse (Link Loss, DP CTS, etc.).
2974                  * Do nothing.
2975                  */
2976                 DRM_DEBUG_DRIVER("DCHPD: connector_id=%d: dc_sink didn't change.\n",
2977                                 aconnector->connector_id);
2978                 if (sink)
2979                         dc_sink_release(sink);
2980                 return;
2981         }
2982
2983         DRM_DEBUG_DRIVER("DCHPD: connector_id=%d: Old sink=%p New sink=%p\n",
2984                 aconnector->connector_id, aconnector->dc_sink, sink);
2985
2986         mutex_lock(&dev->mode_config.mutex);
2987
2988         /*
2989          * 1. Update status of the drm connector
2990          * 2. Send an event and let userspace tell us what to do
2991          */
2992         if (sink) {
2993                 /*
2994                  * TODO: check if we still need the S3 mode update workaround.
2995                  * If yes, put it here.
2996                  */
2997                 if (aconnector->dc_sink) {
2998                         amdgpu_dm_update_freesync_caps(connector, NULL);
2999                         dc_sink_release(aconnector->dc_sink);
3000                 }
3001
3002                 aconnector->dc_sink = sink;
3003                 dc_sink_retain(aconnector->dc_sink);
3004                 if (sink->dc_edid.length == 0) {
3005                         aconnector->edid = NULL;
3006                         if (aconnector->dc_link->aux_mode) {
3007                                 drm_dp_cec_unset_edid(
3008                                         &aconnector->dm_dp_aux.aux);
3009                         }
3010                 } else {
3011                         aconnector->edid =
3012                                 (struct edid *)sink->dc_edid.raw_edid;
3013
3014                         if (aconnector->dc_link->aux_mode)
3015                                 drm_dp_cec_set_edid(&aconnector->dm_dp_aux.aux,
3016                                                     aconnector->edid);
3017                 }
3018
3019                 drm_connector_update_edid_property(connector, aconnector->edid);
3020                 amdgpu_dm_update_freesync_caps(connector, aconnector->edid);
3021                 update_connector_ext_caps(aconnector);
3022         } else {
3023                 drm_dp_cec_unset_edid(&aconnector->dm_dp_aux.aux);
3024                 amdgpu_dm_update_freesync_caps(connector, NULL);
3025                 drm_connector_update_edid_property(connector, NULL);
3026                 aconnector->num_modes = 0;
3027                 dc_sink_release(aconnector->dc_sink);
3028                 aconnector->dc_sink = NULL;
3029                 aconnector->edid = NULL;
3030 #ifdef CONFIG_DRM_AMD_DC_HDCP
3031                 /* Set CP to DESIRED if it was ENABLED, so we can re-enable it on hotplug */
3032                 if (connector->state->content_protection == DRM_MODE_CONTENT_PROTECTION_ENABLED)
3033                         connector->state->content_protection = DRM_MODE_CONTENT_PROTECTION_DESIRED;
3034 #endif
3035         }
3036
3037         mutex_unlock(&dev->mode_config.mutex);
3038
3039         update_subconnector_property(aconnector);
3040
3041         if (sink)
3042                 dc_sink_release(sink);
3043 }
3044
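/*
 * HPD (long pulse) handling: re-run link detection under the connector's
 * hpd_lock and, if the link state changed, restore the DRM connector state
 * and send a hotplug event to userspace.
 */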
3045 static void handle_hpd_irq_helper(struct amdgpu_dm_connector *aconnector)
3046 {
3047         struct drm_connector *connector = &aconnector->base;
3048         struct drm_device *dev = connector->dev;
3049         enum dc_connection_type new_connection_type = dc_connection_none;
3050         struct amdgpu_device *adev = drm_to_adev(dev);
3051 #ifdef CONFIG_DRM_AMD_DC_HDCP
3052         struct dm_connector_state *dm_con_state = to_dm_connector_state(connector->state);
3053 #endif
3054         bool ret = false;
3055
3056         if (adev->dm.disable_hpd_irq)
3057                 return;
3058
3059         /*
3060          * In case of failure or MST, there is no need to update the connector
3061          * status or notify the OS, since MST handles this in its own context.
3062          */
3063         mutex_lock(&aconnector->hpd_lock);
3064
3065 #ifdef CONFIG_DRM_AMD_DC_HDCP
3066         if (adev->dm.hdcp_workqueue) {
3067                 hdcp_reset_display(adev->dm.hdcp_workqueue, aconnector->dc_link->link_index);
3068                 dm_con_state->update_hdcp = true;
3069         }
3070 #endif
3071         if (aconnector->fake_enable)
3072                 aconnector->fake_enable = false;
3073
3074         if (!dc_link_detect_sink(aconnector->dc_link, &new_connection_type))
3075                 DRM_ERROR("KMS: Failed to detect connector\n");
3076
3077         if (aconnector->base.force && new_connection_type == dc_connection_none) {
3078                 emulated_link_detect(aconnector->dc_link);
3079
3080                 drm_modeset_lock_all(dev);
3081                 dm_restore_drm_connector_state(dev, connector);
3082                 drm_modeset_unlock_all(dev);
3083
3084                 if (aconnector->base.force == DRM_FORCE_UNSPECIFIED)
3085                         drm_kms_helper_connector_hotplug_event(connector);
3086         } else {
3087                 mutex_lock(&adev->dm.dc_lock);
3088                 ret = dc_link_detect(aconnector->dc_link, DETECT_REASON_HPD);
3089                 mutex_unlock(&adev->dm.dc_lock);
3090                 if (ret) {
3091                         amdgpu_dm_update_connector_after_detect(aconnector);
3092
3093                         drm_modeset_lock_all(dev);
3094                         dm_restore_drm_connector_state(dev, connector);
3095                         drm_modeset_unlock_all(dev);
3096
3097                         if (aconnector->base.force == DRM_FORCE_UNSPECIFIED)
3098                                 drm_kms_helper_connector_hotplug_event(connector);
3099                 }
3100         }
3101         mutex_unlock(&aconnector->hpd_lock);
3103 }
3104
3105 static void handle_hpd_irq(void *param)
3106 {
3107         struct amdgpu_dm_connector *aconnector = (struct amdgpu_dm_connector *)param;
3108
3109         handle_hpd_irq_helper(aconnector);
3111 }
3112
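/*
 * Drain pending MST sideband messages: read the ESI (or sink count) DPCD
 * registers, let drm_dp_mst_hpd_irq() process them and ACK every handled
 * interrupt back to the sink. The loop is bounded by max_process_count so
 * a misbehaving sink cannot stall us indefinitely.
 */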
3113 static void dm_handle_mst_sideband_msg(struct amdgpu_dm_connector *aconnector)
3114 {
3115         uint8_t esi[DP_PSR_ERROR_STATUS - DP_SINK_COUNT_ESI] = { 0 };
3116         uint8_t dret;
3117         bool new_irq_handled = false;
3118         int dpcd_addr;
3119         int dpcd_bytes_to_read;
3120
3121         const int max_process_count = 30;
3122         int process_count = 0;
3123
3124         const struct dc_link_status *link_status = dc_link_get_status(aconnector->dc_link);
3125
3126         if (link_status->dpcd_caps->dpcd_rev.raw < 0x12) {
3127                 dpcd_bytes_to_read = DP_LANE0_1_STATUS - DP_SINK_COUNT;
3128                 /* DPCD 0x200 - 0x201 for downstream IRQ */
3129                 dpcd_addr = DP_SINK_COUNT;
3130         } else {
3131                 dpcd_bytes_to_read = DP_PSR_ERROR_STATUS - DP_SINK_COUNT_ESI;
3132                 /* DPCD 0x2002 - 0x2005 for downstream IRQ */
3133                 dpcd_addr = DP_SINK_COUNT_ESI;
3134         }
3135
3136         dret = drm_dp_dpcd_read(
3137                 &aconnector->dm_dp_aux.aux,
3138                 dpcd_addr,
3139                 esi,
3140                 dpcd_bytes_to_read);
3141
3142         while (dret == dpcd_bytes_to_read &&
3143                 process_count < max_process_count) {
3144                 uint8_t retry;
3145                 dret = 0;
3146
3147                 process_count++;
3148
3149                 DRM_DEBUG_DRIVER("ESI %02x %02x %02x\n", esi[0], esi[1], esi[2]);
3150                 /* handle HPD short pulse irq */
3151                 if (aconnector->mst_mgr.mst_state)
3152                         drm_dp_mst_hpd_irq(
3153                                 &aconnector->mst_mgr,
3154                                 esi,
3155                                 &new_irq_handled);
3156
3157                 if (new_irq_handled) {
3158                         /* ACK at DPCD to notify downstream */
3159                         const int ack_dpcd_bytes_to_write =
3160                                 dpcd_bytes_to_read - 1;
3161
3162                         for (retry = 0; retry < 3; retry++) {
3163                                 uint8_t wret;
3164
3165                                 wret = drm_dp_dpcd_write(
3166                                         &aconnector->dm_dp_aux.aux,
3167                                         dpcd_addr + 1,
3168                                         &esi[1],
3169                                         ack_dpcd_bytes_to_write);
3170                                 if (wret == ack_dpcd_bytes_to_write)
3171                                         break;
3172                         }
3173
3174                         /* check if there is a new irq to be handled */
3175                         dret = drm_dp_dpcd_read(
3176                                 &aconnector->dm_dp_aux.aux,
3177                                 dpcd_addr,
3178                                 esi,
3179                                 dpcd_bytes_to_read);
3180
3181                         new_irq_handled = false;
3182                 } else {
3183                         break;
3184                 }
3185         }
3186
3187         if (process_count == max_process_count)
3188                 DRM_DEBUG_DRIVER("Loop exceeded max iterations\n");
3189 }
3190
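/*
 * Allocate a work item carrying the HPD RX IRQ data and queue it on the
 * per-connector offload workqueue; the offload work handler is expected
 * to free the item once it is done with it.
 */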
3191 static void schedule_hpd_rx_offload_work(struct hpd_rx_irq_offload_work_queue *offload_wq,
3192                                                         union hpd_irq_data hpd_irq_data)
3193 {
3194         struct hpd_rx_irq_offload_work *offload_work =
3195                                 kzalloc(sizeof(*offload_work), GFP_KERNEL);
3196
3197         if (!offload_work) {
3198                 DRM_ERROR("Failed to allocate hpd_rx_irq_offload_work.\n");
3199                 return;
3200         }
3201
3202         INIT_WORK(&offload_work->work, dm_handle_hpd_rx_offload_work);
3203         offload_work->data = hpd_irq_data;
3204         offload_work->offload_wq = offload_wq;
3205
3206         queue_work(offload_wq->wq, &offload_work->work);
3207         DRM_DEBUG_KMS("queue work to handle hpd_rx offload work\n");
3208 }
3209
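/*
 * HPD RX (short pulse) handling: let DC decode the IRQ data, offload
 * automated-test and link-loss handling to the per-connector workqueue,
 * handle MST sideband messages inline, and re-run sink detection for
 * non-MST-root connectors whose downstream port status changed.
 */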
3210 static void handle_hpd_rx_irq(void *param)
3211 {
3212         struct amdgpu_dm_connector *aconnector = (struct amdgpu_dm_connector *)param;
3213         struct drm_connector *connector = &aconnector->base;
3214         struct drm_device *dev = connector->dev;
3215         struct dc_link *dc_link = aconnector->dc_link;
3216         bool is_mst_root_connector = aconnector->mst_mgr.mst_state;
3217         bool result = false;
3218         enum dc_connection_type new_connection_type = dc_connection_none;
3219         struct amdgpu_device *adev = drm_to_adev(dev);
3220         union hpd_irq_data hpd_irq_data;
3221         bool link_loss = false;
3222         bool has_left_work = false;
3223         int idx = aconnector->base.index;
3224         struct hpd_rx_irq_offload_work_queue *offload_wq = &adev->dm.hpd_rx_offload_wq[idx];
3225
3226         memset(&hpd_irq_data, 0, sizeof(hpd_irq_data));
3227
3228         if (adev->dm.disable_hpd_irq)
3229                 return;
3230
3231         /*
3232          * TODO: Temporarily take a mutex so the HPD interrupt does not run into
3233          * a GPIO conflict; once the i2c helper is implemented, this mutex should
3234          * be retired.
3235          */
3236         mutex_lock(&aconnector->hpd_lock);
3237
3238         result = dc_link_handle_hpd_rx_irq(dc_link, &hpd_irq_data,
3239                                                 &link_loss, true, &has_left_work);
3240
3241         if (!has_left_work)
3242                 goto out;
3243
3244         if (hpd_irq_data.bytes.device_service_irq.bits.AUTOMATED_TEST) {
3245                 schedule_hpd_rx_offload_work(offload_wq, hpd_irq_data);
3246                 goto out;
3247         }
3248
3249         if (dc_link_dp_allow_hpd_rx_irq(dc_link)) {
3250                 if (hpd_irq_data.bytes.device_service_irq.bits.UP_REQ_MSG_RDY ||
3251                         hpd_irq_data.bytes.device_service_irq.bits.DOWN_REP_MSG_RDY) {
3252                         dm_handle_mst_sideband_msg(aconnector);
3253                         goto out;
3254                 }
3255
3256                 if (link_loss) {
3257                         bool skip = false;
3258
3259                         spin_lock(&offload_wq->offload_lock);
3260                         skip = offload_wq->is_handling_link_loss;
3261
3262                         if (!skip)
3263                                 offload_wq->is_handling_link_loss = true;
3264
3265                         spin_unlock(&offload_wq->offload_lock);
3266
3267                         if (!skip)
3268                                 schedule_hpd_rx_offload_work(offload_wq, hpd_irq_data);
3269
3270                         goto out;
3271                 }
3272         }
3273
3274 out:
3275         if (result && !is_mst_root_connector) {
3276                 /* Downstream Port status changed. */
3277                 if (!dc_link_detect_sink(dc_link, &new_connection_type))
3278                         DRM_ERROR("KMS: Failed to detect connector\n");
3279
3280                 if (aconnector->base.force && new_connection_type == dc_connection_none) {
3281                         emulated_link_detect(dc_link);
3282
3283                         if (aconnector->fake_enable)
3284                                 aconnector->fake_enable = false;
3285
3286                         amdgpu_dm_update_connector_after_detect(aconnector);
3287
3288
3290                         dm_restore_drm_connector_state(dev, connector);
3291                         drm_modeset_unlock_all(dev);
3292
3293                         drm_kms_helper_connector_hotplug_event(connector);
3294                 } else {
3295                         bool ret = false;
3296
3297                         mutex_lock(&adev->dm.dc_lock);
3298                         ret = dc_link_detect(dc_link, DETECT_REASON_HPDRX);
3299                         mutex_unlock(&adev->dm.dc_lock);
3300
3301                         if (ret) {
3302                                 if (aconnector->fake_enable)
3303                                         aconnector->fake_enable = false;
3304
3305                                 amdgpu_dm_update_connector_after_detect(aconnector);
3306
3307                                 drm_modeset_lock_all(dev);
3308                                 dm_restore_drm_connector_state(dev, connector);
3309                                 drm_modeset_unlock_all(dev);
3310
3311                                 drm_kms_helper_connector_hotplug_event(connector);
3312                         }
3313                 }
3314         }
3315 #ifdef CONFIG_DRM_AMD_DC_HDCP
3316         if (hpd_irq_data.bytes.device_service_irq.bits.CP_IRQ) {
3317                 if (adev->dm.hdcp_workqueue)
3318                         hdcp_handle_cpirq(adev->dm.hdcp_workqueue, aconnector->base.index);
3319         }
3320 #endif
3321
3322         if (dc_link->type != dc_connection_mst_branch)
3323                 drm_dp_cec_irq(&aconnector->dm_dp_aux.aux);
3324
3325         mutex_unlock(&aconnector->hpd_lock);
3326 }
3327
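/*
 * Walk the connector list and register low-IRQ-context handlers for each
 * link's HPD (long pulse) and HPD RX (short pulse) interrupt sources.
 */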
3328 static void register_hpd_handlers(struct amdgpu_device *adev)
3329 {
3330         struct drm_device *dev = adev_to_drm(adev);
3331         struct drm_connector *connector;
3332         struct amdgpu_dm_connector *aconnector;
3333         const struct dc_link *dc_link;
3334         struct dc_interrupt_params int_params = {0};
3335
3336         int_params.requested_polarity = INTERRUPT_POLARITY_DEFAULT;
3337         int_params.current_polarity = INTERRUPT_POLARITY_DEFAULT;
3338
3339         list_for_each_entry(connector,
3340                         &dev->mode_config.connector_list, head) {
3341
3342                 aconnector = to_amdgpu_dm_connector(connector);
3343                 dc_link = aconnector->dc_link;
3344
3345                 if (DC_IRQ_SOURCE_INVALID != dc_link->irq_source_hpd) {
3346                         int_params.int_context = INTERRUPT_LOW_IRQ_CONTEXT;
3347                         int_params.irq_source = dc_link->irq_source_hpd;
3348
3349                         amdgpu_dm_irq_register_interrupt(adev, &int_params,
3350                                         handle_hpd_irq,
3351                                         (void *) aconnector);
3352                 }
3353
3354                 if (DC_IRQ_SOURCE_INVALID != dc_link->irq_source_hpd_rx) {
3355
3356                         /* Also register for DP short pulse (hpd_rx). */
3357                         int_params.int_context = INTERRUPT_LOW_IRQ_CONTEXT;
3358                         int_params.irq_source = dc_link->irq_source_hpd_rx;
3359
3360                         amdgpu_dm_irq_register_interrupt(adev, &int_params,
3361                                         handle_hpd_rx_irq,
3362                                         (void *) aconnector);
3363
3364                         if (adev->dm.hpd_rx_offload_wq)
3365                                 adev->dm.hpd_rx_offload_wq[connector->index].aconnector =
3366                                         aconnector;
3367                 }
3368         }
3369 }
3370
3371 #if defined(CONFIG_DRM_AMD_DC_SI)
3372 /* Register IRQ sources and initialize IRQ callbacks */
3373 static int dce60_register_irq_handlers(struct amdgpu_device *adev)
3374 {
3375         struct dc *dc = adev->dm.dc;
3376         struct common_irq_params *c_irq_params;
3377         struct dc_interrupt_params int_params = {0};
3378         int r;
3379         int i;
3380         unsigned int client_id = AMDGPU_IRQ_CLIENTID_LEGACY;
3381
3382         int_params.requested_polarity = INTERRUPT_POLARITY_DEFAULT;
3383         int_params.current_polarity = INTERRUPT_POLARITY_DEFAULT;
3384
3385         /*
3386          * Actions of amdgpu_irq_add_id():
3387          * 1. Register a set() function with base driver.
3388          *    Base driver will call set() function to enable/disable an
3389          *    interrupt in DC hardware.
3390          * 2. Register amdgpu_dm_irq_handler().
3391          *    Base driver will call amdgpu_dm_irq_handler() for ALL interrupts
3392          *    coming from DC hardware.
3393          *    amdgpu_dm_irq_handler() will re-direct the interrupt to DC
3394          *    for acknowledging and handling. */
3395
3396         /* Use VBLANK interrupt */
3397         for (i = 0; i < adev->mode_info.num_crtc; i++) {
3398                 r = amdgpu_irq_add_id(adev, client_id, i + 1, &adev->crtc_irq);
3399                 if (r) {
3400                         DRM_ERROR("Failed to add crtc irq id!\n");
3401                         return r;
3402                 }
3403
3404                 int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
3405                 int_params.irq_source =
3406                         dc_interrupt_to_irq_source(dc, i + 1, 0);
3407
3408                 c_irq_params = &adev->dm.vblank_params[int_params.irq_source - DC_IRQ_SOURCE_VBLANK1];
3409
3410                 c_irq_params->adev = adev;
3411                 c_irq_params->irq_src = int_params.irq_source;
3412
3413                 amdgpu_dm_irq_register_interrupt(adev, &int_params,
3414                                 dm_crtc_high_irq, c_irq_params);
3415         }
3416
3417         /* Use GRPH_PFLIP interrupt */
3418         for (i = VISLANDS30_IV_SRCID_D1_GRPH_PFLIP;
3419                         i <= VISLANDS30_IV_SRCID_D6_GRPH_PFLIP; i += 2) {
3420                 r = amdgpu_irq_add_id(adev, client_id, i, &adev->pageflip_irq);
3421                 if (r) {
3422                         DRM_ERROR("Failed to add page flip irq id!\n");
3423                         return r;
3424                 }
3425
3426                 int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
3427                 int_params.irq_source =
3428                         dc_interrupt_to_irq_source(dc, i, 0);
3429
3430                 c_irq_params = &adev->dm.pflip_params[int_params.irq_source - DC_IRQ_SOURCE_PFLIP_FIRST];
3431
3432                 c_irq_params->adev = adev;
3433                 c_irq_params->irq_src = int_params.irq_source;
3434
3435                 amdgpu_dm_irq_register_interrupt(adev, &int_params,
3436                                 dm_pflip_high_irq, c_irq_params);
3438         }
3439
3440         /* HPD */
3441         r = amdgpu_irq_add_id(adev, client_id,
3442                         VISLANDS30_IV_SRCID_HOTPLUG_DETECT_A, &adev->hpd_irq);
3443         if (r) {
3444                 DRM_ERROR("Failed to add hpd irq id!\n");
3445                 return r;
3446         }
3447
3448         register_hpd_handlers(adev);
3449
3450         return 0;
3451 }
3452 #endif
3453
3454 /* Register IRQ sources and initialize IRQ callbacks */
3455 static int dce110_register_irq_handlers(struct amdgpu_device *adev)
3456 {
3457         struct dc *dc = adev->dm.dc;
3458         struct common_irq_params *c_irq_params;
3459         struct dc_interrupt_params int_params = {0};
3460         int r;
3461         int i;
3462         unsigned int client_id = AMDGPU_IRQ_CLIENTID_LEGACY;
3463
3464         if (adev->family >= AMDGPU_FAMILY_AI)
3465                 client_id = SOC15_IH_CLIENTID_DCE;
3466
3467         int_params.requested_polarity = INTERRUPT_POLARITY_DEFAULT;
3468         int_params.current_polarity = INTERRUPT_POLARITY_DEFAULT;
3469
3470         /*
3471          * Actions of amdgpu_irq_add_id():
3472          * 1. Register a set() function with base driver.
3473          *    Base driver will call set() function to enable/disable an
3474          *    interrupt in DC hardware.
3475          * 2. Register amdgpu_dm_irq_handler().
3476          *    Base driver will call amdgpu_dm_irq_handler() for ALL interrupts
3477          *    coming from DC hardware.
3478          *    amdgpu_dm_irq_handler() will re-direct the interrupt to DC
3479          *    for acknowledging and handling. */
3480
3481         /* Use VBLANK interrupt */
3482         for (i = VISLANDS30_IV_SRCID_D1_VERTICAL_INTERRUPT0; i <= VISLANDS30_IV_SRCID_D6_VERTICAL_INTERRUPT0; i++) {
3483                 r = amdgpu_irq_add_id(adev, client_id, i, &adev->crtc_irq);
3484                 if (r) {
3485                         DRM_ERROR("Failed to add crtc irq id!\n");
3486                         return r;
3487                 }
3488
3489                 int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
3490                 int_params.irq_source =
3491                         dc_interrupt_to_irq_source(dc, i, 0);
3492
3493                 c_irq_params = &adev->dm.vblank_params[int_params.irq_source - DC_IRQ_SOURCE_VBLANK1];
3494
3495                 c_irq_params->adev = adev;
3496                 c_irq_params->irq_src = int_params.irq_source;
3497
3498                 amdgpu_dm_irq_register_interrupt(adev, &int_params,
3499                                 dm_crtc_high_irq, c_irq_params);
3500         }
3501
3502         /* Use VUPDATE interrupt */
3503         for (i = VISLANDS30_IV_SRCID_D1_V_UPDATE_INT; i <= VISLANDS30_IV_SRCID_D6_V_UPDATE_INT; i += 2) {
3504                 r = amdgpu_irq_add_id(adev, client_id, i, &adev->vupdate_irq);
3505                 if (r) {
3506                         DRM_ERROR("Failed to add vupdate irq id!\n");
3507                         return r;
3508                 }
3509
3510                 int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
3511                 int_params.irq_source =
3512                         dc_interrupt_to_irq_source(dc, i, 0);
3513
3514                 c_irq_params = &adev->dm.vupdate_params[int_params.irq_source - DC_IRQ_SOURCE_VUPDATE1];
3515
3516                 c_irq_params->adev = adev;
3517                 c_irq_params->irq_src = int_params.irq_source;
3518
3519                 amdgpu_dm_irq_register_interrupt(adev, &int_params,
3520                                 dm_vupdate_high_irq, c_irq_params);
3521         }
3522
3523         /* Use GRPH_PFLIP interrupt */
3524         for (i = VISLANDS30_IV_SRCID_D1_GRPH_PFLIP;
3525                         i <= VISLANDS30_IV_SRCID_D6_GRPH_PFLIP; i += 2) {
3526                 r = amdgpu_irq_add_id(adev, client_id, i, &adev->pageflip_irq);
3527                 if (r) {
3528                         DRM_ERROR("Failed to add page flip irq id!\n");
3529                         return r;
3530                 }
3531
3532                 int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
3533                 int_params.irq_source =
3534                         dc_interrupt_to_irq_source(dc, i, 0);
3535
3536                 c_irq_params = &adev->dm.pflip_params[int_params.irq_source - DC_IRQ_SOURCE_PFLIP_FIRST];
3537
3538                 c_irq_params->adev = adev;
3539                 c_irq_params->irq_src = int_params.irq_source;
3540
3541                 amdgpu_dm_irq_register_interrupt(adev, &int_params,
3542                                 dm_pflip_high_irq, c_irq_params);
3544         }
3545
3546         /* HPD */
3547         r = amdgpu_irq_add_id(adev, client_id,
3548                         VISLANDS30_IV_SRCID_HOTPLUG_DETECT_A, &adev->hpd_irq);
3549         if (r) {
3550                 DRM_ERROR("Failed to add hpd irq id!\n");
3551                 return r;
3552         }
3553
3554         register_hpd_handlers(adev);
3555
3556         return 0;
3557 }
3558
3559 /* Register IRQ sources and initialize IRQ callbacks */
3560 static int dcn10_register_irq_handlers(struct amdgpu_device *adev)
3561 {
3562         struct dc *dc = adev->dm.dc;
3563         struct common_irq_params *c_irq_params;
3564         struct dc_interrupt_params int_params = {0};
3565         int r;
3566         int i;
3567 #if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
3568         static const unsigned int vrtl_int_srcid[] = {
3569                 DCN_1_0__SRCID__OTG1_VERTICAL_INTERRUPT0_CONTROL,
3570                 DCN_1_0__SRCID__OTG2_VERTICAL_INTERRUPT0_CONTROL,
3571                 DCN_1_0__SRCID__OTG3_VERTICAL_INTERRUPT0_CONTROL,
3572                 DCN_1_0__SRCID__OTG4_VERTICAL_INTERRUPT0_CONTROL,
3573                 DCN_1_0__SRCID__OTG5_VERTICAL_INTERRUPT0_CONTROL,
3574                 DCN_1_0__SRCID__OTG6_VERTICAL_INTERRUPT0_CONTROL
3575         };
3576 #endif
3577
3578         int_params.requested_polarity = INTERRUPT_POLARITY_DEFAULT;
3579         int_params.current_polarity = INTERRUPT_POLARITY_DEFAULT;
3580
3581         /*
3582          * Actions of amdgpu_irq_add_id():
3583          * 1. Register a set() function with base driver.
3584          *    Base driver will call set() function to enable/disable an
3585          *    interrupt in DC hardware.
3586          * 2. Register amdgpu_dm_irq_handler().
3587          *    Base driver will call amdgpu_dm_irq_handler() for ALL interrupts
3588          *    coming from DC hardware.
3589          *    amdgpu_dm_irq_handler() will re-direct the interrupt to DC
3590          *    for acknowledging and handling.
3591          */
3592
3593         /* Use VSTARTUP interrupt */
3594         for (i = DCN_1_0__SRCID__DC_D1_OTG_VSTARTUP;
3595                         i <= DCN_1_0__SRCID__DC_D1_OTG_VSTARTUP + adev->mode_info.num_crtc - 1;
3596                         i++) {
3597                 r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_DCE, i, &adev->crtc_irq);
3598
3599                 if (r) {
3600                         DRM_ERROR("Failed to add crtc irq id!\n");
3601                         return r;
3602                 }
3603
3604                 int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
3605                 int_params.irq_source =
3606                         dc_interrupt_to_irq_source(dc, i, 0);
3607
3608                 c_irq_params = &adev->dm.vblank_params[int_params.irq_source - DC_IRQ_SOURCE_VBLANK1];
3609
3610                 c_irq_params->adev = adev;
3611                 c_irq_params->irq_src = int_params.irq_source;
3612
3613                 amdgpu_dm_irq_register_interrupt(
3614                         adev, &int_params, dm_crtc_high_irq, c_irq_params);
3615         }
3616
3617         /* Use otg vertical line interrupt */
3618 #if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
3619         for (i = 0; i <= adev->mode_info.num_crtc - 1; i++) {
3620                 r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_DCE,
3621                                 vrtl_int_srcid[i], &adev->vline0_irq);
3622
3623                 if (r) {
3624                         DRM_ERROR("Failed to add vline0 irq id!\n");
3625                         return r;
3626                 }
3627
3628                 int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
3629                 int_params.irq_source =
3630                         dc_interrupt_to_irq_source(dc, vrtl_int_srcid[i], 0);
3631
3632                 if (int_params.irq_source == DC_IRQ_SOURCE_INVALID) {
3633                         DRM_ERROR("Failed to register vline0 irq %d!\n", vrtl_int_srcid[i]);
3634                         break;
3635                 }
3636
3637                 c_irq_params = &adev->dm.vline0_params[int_params.irq_source
3638                                         - DC_IRQ_SOURCE_DC1_VLINE0];
3639
3640                 c_irq_params->adev = adev;
3641                 c_irq_params->irq_src = int_params.irq_source;
3642
3643                 amdgpu_dm_irq_register_interrupt(adev, &int_params,
3644                                 dm_dcn_vertical_interrupt0_high_irq, c_irq_params);
3645         }
3646 #endif
3647
3648         /* Use VUPDATE_NO_LOCK interrupt on DCN, which seems to correspond to
3649          * the regular VUPDATE interrupt on DCE. We want DC_IRQ_SOURCE_VUPDATEx
3650          * to trigger at end of each vblank, regardless of state of the lock,
3651          * matching DCE behaviour.
3652          */
3653         for (i = DCN_1_0__SRCID__OTG0_IHC_V_UPDATE_NO_LOCK_INTERRUPT;
3654              i <= DCN_1_0__SRCID__OTG0_IHC_V_UPDATE_NO_LOCK_INTERRUPT + adev->mode_info.num_crtc - 1;
3655              i++) {
3656                 r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_DCE, i, &adev->vupdate_irq);
3657
3658                 if (r) {
3659                         DRM_ERROR("Failed to add vupdate irq id!\n");
3660                         return r;
3661                 }
3662
3663                 int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
3664                 int_params.irq_source =
3665                         dc_interrupt_to_irq_source(dc, i, 0);
3666
3667                 c_irq_params = &adev->dm.vupdate_params[int_params.irq_source - DC_IRQ_SOURCE_VUPDATE1];
3668
3669                 c_irq_params->adev = adev;
3670                 c_irq_params->irq_src = int_params.irq_source;
3671
3672                 amdgpu_dm_irq_register_interrupt(adev, &int_params,
3673                                 dm_vupdate_high_irq, c_irq_params);
3674         }
3675
3676         /* Use GRPH_PFLIP interrupt */
3677         for (i = DCN_1_0__SRCID__HUBP0_FLIP_INTERRUPT;
3678                         i <= DCN_1_0__SRCID__HUBP0_FLIP_INTERRUPT + dc->caps.max_otg_num - 1;
3679                         i++) {
3680                 r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_DCE, i, &adev->pageflip_irq);
3681                 if (r) {
3682                         DRM_ERROR("Failed to add page flip irq id!\n");
3683                         return r;
3684                 }
3685
3686                 int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
3687                 int_params.irq_source =
3688                         dc_interrupt_to_irq_source(dc, i, 0);
3689
3690                 c_irq_params = &adev->dm.pflip_params[int_params.irq_source - DC_IRQ_SOURCE_PFLIP_FIRST];
3691
3692                 c_irq_params->adev = adev;
3693                 c_irq_params->irq_src = int_params.irq_source;
3694
3695                 amdgpu_dm_irq_register_interrupt(adev, &int_params,
3696                                 dm_pflip_high_irq, c_irq_params);
3698         }
3699
3700         /* HPD */
3701         r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_DCE, DCN_1_0__SRCID__DC_HPD1_INT,
3702                         &adev->hpd_irq);
3703         if (r) {
3704                 DRM_ERROR("Failed to add hpd irq id!\n");
3705                 return r;
3706         }
3707
3708         register_hpd_handlers(adev);
3709
3710         return 0;
3711 }

3712 /* Register Outbox IRQ sources and initialize IRQ callbacks */
3713 static int register_outbox_irq_handlers(struct amdgpu_device *adev)
3714 {
3715         struct dc *dc = adev->dm.dc;
3716         struct common_irq_params *c_irq_params;
3717         struct dc_interrupt_params int_params = {0};
3718         int r, i;
3719
3720         int_params.requested_polarity = INTERRUPT_POLARITY_DEFAULT;
3721         int_params.current_polarity = INTERRUPT_POLARITY_DEFAULT;
3722
3723         r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_DCE, DCN_1_0__SRCID__DMCUB_OUTBOX_LOW_PRIORITY_READY_INT,
3724                         &adev->dmub_outbox_irq);
3725         if (r) {
3726                 DRM_ERROR("Failed to add outbox irq id!\n");
3727                 return r;
3728         }
3729
3730         if (dc->ctx->dmub_srv) {
3731                 i = DCN_1_0__SRCID__DMCUB_OUTBOX_LOW_PRIORITY_READY_INT;
3732                 int_params.int_context = INTERRUPT_LOW_IRQ_CONTEXT;
3733                 int_params.irq_source =
3734                         dc_interrupt_to_irq_source(dc, i, 0);
3735
3736                 c_irq_params = &adev->dm.dmub_outbox_params[0];
3737
3738                 c_irq_params->adev = adev;
3739                 c_irq_params->irq_src = int_params.irq_source;
3740
3741                 amdgpu_dm_irq_register_interrupt(adev, &int_params,
3742                                 dm_dmub_outbox1_low_irq, c_irq_params);
3743         }
3744
3745         return 0;
3746 }
3747
3748 /*
3749  * Acquires the lock for the atomic state object and returns
3750  * the new atomic state.
3751  *
3752  * This should only be called during atomic check.
3753  */
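/*
 * Typical usage from within atomic check (illustrative sketch only):
 *
 *	struct dm_atomic_state *dm_state = NULL;
 *	int ret = dm_atomic_get_state(state, &dm_state);
 *
 *	if (ret)
 *		return ret;
 *
 * On success, dm_state->context can then be inspected and modified.
 */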
3754 int dm_atomic_get_state(struct drm_atomic_state *state,
3755                         struct dm_atomic_state **dm_state)
3756 {
3757         struct drm_device *dev = state->dev;
3758         struct amdgpu_device *adev = drm_to_adev(dev);
3759         struct amdgpu_display_manager *dm = &adev->dm;
3760         struct drm_private_state *priv_state;
3761
3762         if (*dm_state)
3763                 return 0;
3764
3765         priv_state = drm_atomic_get_private_obj_state(state, &dm->atomic_obj);
3766         if (IS_ERR(priv_state))
3767                 return PTR_ERR(priv_state);
3768
3769         *dm_state = to_dm_atomic_state(priv_state);
3770
3771         return 0;
3772 }
3773
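/*
 * Return the new DM private state from an atomic state, or NULL if the DM
 * private object is not part of this commit.
 */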
3774 static struct dm_atomic_state *
3775 dm_atomic_get_new_state(struct drm_atomic_state *state)
3776 {
3777         struct drm_device *dev = state->dev;
3778         struct amdgpu_device *adev = drm_to_adev(dev);
3779         struct amdgpu_display_manager *dm = &adev->dm;
3780         struct drm_private_obj *obj;
3781         struct drm_private_state *new_obj_state;
3782         int i;
3783
3784         for_each_new_private_obj_in_state(state, obj, new_obj_state, i) {
3785                 if (obj->funcs == dm->atomic_obj.funcs)
3786                         return to_dm_atomic_state(new_obj_state);
3787         }
3788
3789         return NULL;
3790 }
3791
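/*
 * Duplicate the DM private atomic state; the DC context is copied with
 * dc_copy_state() so atomic check can modify it without touching the
 * currently committed state.
 */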
3792 static struct drm_private_state *
3793 dm_atomic_duplicate_state(struct drm_private_obj *obj)
3794 {
3795         struct dm_atomic_state *old_state, *new_state;
3796
3797         new_state = kzalloc(sizeof(*new_state), GFP_KERNEL);
3798         if (!new_state)
3799                 return NULL;
3800
3801         __drm_atomic_helper_private_obj_duplicate_state(obj, &new_state->base);
3802
3803         old_state = to_dm_atomic_state(obj->state);
3804
3805         if (old_state && old_state->context)
3806                 new_state->context = dc_copy_state(old_state->context);
3807
3808         if (!new_state->context) {
3809                 kfree(new_state);
3810                 return NULL;
3811         }
3812
3813         return &new_state->base;
3814 }
3815
3816 static void dm_atomic_destroy_state(struct drm_private_obj *obj,
3817                                     struct drm_private_state *state)
3818 {
3819         struct dm_atomic_state *dm_state = to_dm_atomic_state(state);
3820
3821         if (dm_state && dm_state->context)
3822                 dc_release_state(dm_state->context);
3823
3824         kfree(dm_state);
3825 }
3826
3827 static struct drm_private_state_funcs dm_atomic_state_funcs = {
3828         .atomic_duplicate_state = dm_atomic_duplicate_state,
3829         .atomic_destroy_state = dm_atomic_destroy_state,
3830 };
3831
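/*
 * Initialize the DRM mode config limits and hooks and create the DM
 * private atomic object that wraps the global DC state, so that it is
 * tracked and duplicated through the atomic commit machinery.
 */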
3832 static int amdgpu_dm_mode_config_init(struct amdgpu_device *adev)
3833 {
3834         struct dm_atomic_state *state;
3835         int r;
3836
3837         adev->mode_info.mode_config_initialized = true;
3838
3839         adev_to_drm(adev)->mode_config.funcs = (void *)&amdgpu_dm_mode_funcs;
3840         adev_to_drm(adev)->mode_config.helper_private = &amdgpu_dm_mode_config_helperfuncs;
3841
3842         adev_to_drm(adev)->mode_config.max_width = 16384;
3843         adev_to_drm(adev)->mode_config.max_height = 16384;
3844
3845         adev_to_drm(adev)->mode_config.preferred_depth = 24;
3846         if (adev->asic_type == CHIP_HAWAII)
3847                 /* disable prefer shadow for now due to hibernation issues */
3848                 adev_to_drm(adev)->mode_config.prefer_shadow = 0;
3849         else
3850                 adev_to_drm(adev)->mode_config.prefer_shadow = 1;
3851         /* indicates support for immediate flip */
3852         adev_to_drm(adev)->mode_config.async_page_flip = true;
3853
3854         state = kzalloc(sizeof(*state), GFP_KERNEL);
3855         if (!state)
3856                 return -ENOMEM;
3857
3858         state->context = dc_create_state(adev->dm.dc);
3859         if (!state->context) {
3860                 kfree(state);
3861                 return -ENOMEM;
3862         }
3863
3864         dc_resource_state_copy_construct_current(adev->dm.dc, state->context);
3865
3866         drm_atomic_private_obj_init(adev_to_drm(adev),
3867                                     &adev->dm.atomic_obj,
3868                                     &state->base,
3869                                     &dm_atomic_state_funcs);
3870
3871         r = amdgpu_display_modeset_create_props(adev);
3872         if (r) {
3873                 dc_release_state(state->context);
3874                 kfree(state);
3875                 return r;
3876         }
3877
3878         r = amdgpu_dm_audio_init(adev);
3879         if (r) {
3880                 dc_release_state(state->context);
3881                 kfree(state);
3882                 return r;
3883         }
3884
3885         return 0;
3886 }
3887
3888 #define AMDGPU_DM_DEFAULT_MIN_BACKLIGHT 12
3889 #define AMDGPU_DM_DEFAULT_MAX_BACKLIGHT 255
3890 #define AUX_BL_DEFAULT_TRANSITION_TIME_MS 50
3891
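/*
 * Populate the backlight caps for one eDP link from ACPI when available,
 * falling back to the default 12..255 input signal range otherwise. For
 * AUX-controlled backlights the limits instead come from the panel's
 * luminance range (see update_connector_ext_caps()).
 */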
3892 static void amdgpu_dm_update_backlight_caps(struct amdgpu_display_manager *dm,
3893                                             int bl_idx)
3894 {
3895 #if defined(CONFIG_ACPI)
3896         struct amdgpu_dm_backlight_caps caps;
3897
3898         memset(&caps, 0, sizeof(caps));
3899
3900         if (dm->backlight_caps[bl_idx].caps_valid)
3901                 return;
3902
3903         amdgpu_acpi_get_backlight_caps(&caps);
3904         if (caps.caps_valid) {
3905                 dm->backlight_caps[bl_idx].caps_valid = true;
3906                 if (caps.aux_support)
3907                         return;
3908                 dm->backlight_caps[bl_idx].min_input_signal = caps.min_input_signal;
3909                 dm->backlight_caps[bl_idx].max_input_signal = caps.max_input_signal;
3910         } else {
3911                 dm->backlight_caps[bl_idx].min_input_signal =
3912                                 AMDGPU_DM_DEFAULT_MIN_BACKLIGHT;
3913                 dm->backlight_caps[bl_idx].max_input_signal =
3914                                 AMDGPU_DM_DEFAULT_MAX_BACKLIGHT;
3915         }
3916 #else
3917         if (dm->backlight_caps[bl_idx].aux_support)
3918                 return;
3919
3920         dm->backlight_caps[bl_idx].min_input_signal = AMDGPU_DM_DEFAULT_MIN_BACKLIGHT;
3921         dm->backlight_caps[bl_idx].max_input_signal = AMDGPU_DM_DEFAULT_MAX_BACKLIGHT;
3922 #endif
3923 }
3924
3925 static int get_brightness_range(const struct amdgpu_dm_backlight_caps *caps,
3926                                 unsigned int *min, unsigned int *max)
3927 {
3928         if (!caps)
3929                 return 0;
3930
3931         if (caps->aux_support) {
3932                 // Firmware limits are in nits, DC API wants millinits.
3933                 *max = 1000 * caps->aux_max_input_signal;
3934                 *min = 1000 * caps->aux_min_input_signal;
3935         } else {
3936                 // Firmware limits are 8-bit, PWM control is 16-bit.
3937                 *max = 0x101 * caps->max_input_signal;
3938                 *min = 0x101 * caps->min_input_signal;
3939         }
3940         return 1;
3941 }
3942
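/*
 * Worked example for the PWM path above: with the default firmware limits
 * min_input_signal = 12 and max_input_signal = 255 this yields
 * min = 0x101 * 12 = 3084 and max = 0x101 * 255 = 65535, i.e. the 8-bit
 * firmware limits are spread across the full 16-bit PWM range.
 */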
3943 static u32 convert_brightness_from_user(const struct amdgpu_dm_backlight_caps *caps,
3944                                         uint32_t brightness)
3945 {
3946         unsigned int min, max;
3947
3948         if (!get_brightness_range(caps, &min, &max))
3949                 return brightness;
3950
3951         // Rescale 0..255 to min..max
3952         return min + DIV_ROUND_CLOSEST((max - min) * brightness,
3953                                        AMDGPU_MAX_BL_LEVEL);
3954 }
3955
3956 static u32 convert_brightness_to_user(const struct amdgpu_dm_backlight_caps *caps,
3957                                       uint32_t brightness)
3958 {
3959         unsigned min, max;
3960
3961         if (!get_brightness_range(caps, &min, &max))
3962                 return brightness;
3963
3964         if (brightness < min)
3965                 return 0;
3966         // Rescale min..max to 0..255
3967         return DIV_ROUND_CLOSEST(AMDGPU_MAX_BL_LEVEL * (brightness - min),
3968                                  max - min);
3969 }
3970
3971 static void amdgpu_dm_backlight_set_level(struct amdgpu_display_manager *dm,
3972                                          int bl_idx,
3973                                          u32 user_brightness)
3974 {
3975         struct amdgpu_dm_backlight_caps caps;
3976         struct dc_link *link;
3977         u32 brightness;
3978         bool rc;
3979
3980         amdgpu_dm_update_backlight_caps(dm, bl_idx);
3981         caps = dm->backlight_caps[bl_idx];
3982
3983         dm->brightness[bl_idx] = user_brightness;
3984         /* update scratch register */
3985         if (bl_idx == 0)
3986                 amdgpu_atombios_scratch_regs_set_backlight_level(dm->adev, dm->brightness[bl_idx]);
3987         brightness = convert_brightness_from_user(&caps, dm->brightness[bl_idx]);
3988         link = (struct dc_link *)dm->backlight_link[bl_idx];
3989
3990         /* Change brightness based on AUX property */
3991         if (caps.aux_support) {
3992                 rc = dc_link_set_backlight_level_nits(link, true, brightness,
3993                                                       AUX_BL_DEFAULT_TRANSITION_TIME_MS);
3994                 if (!rc)
3995                         DRM_DEBUG("DM: Failed to update backlight via AUX on eDP[%d]\n", bl_idx);
3996         } else {
3997                 rc = dc_link_set_backlight_level(link, brightness, 0);
3998                 if (!rc)
3999                         DRM_DEBUG("DM: Failed to update backlight on eDP[%d]\n", bl_idx);
4000         }
4001
4002         if (rc)
4003                 dm->actual_brightness[bl_idx] = user_brightness;
4004 }
4005
4006 static int amdgpu_dm_backlight_update_status(struct backlight_device *bd)
4007 {
4008         struct amdgpu_display_manager *dm = bl_get_data(bd);
4009         int i;
4010
4011         for (i = 0; i < dm->num_of_edps; i++) {
4012                 if (bd == dm->backlight_dev[i])
4013                         break;
4014         }
4015         if (i >= AMDGPU_DM_MAX_NUM_EDP)
4016                 i = 0;
4017         amdgpu_dm_backlight_set_level(dm, i, bd->props.brightness);
4018
4019         return 0;
4020 }
4021
4022 static u32 amdgpu_dm_backlight_get_level(struct amdgpu_display_manager *dm,
4023                                          int bl_idx)
4024 {
4025         struct amdgpu_dm_backlight_caps caps;
4026         struct dc_link *link = (struct dc_link *)dm->backlight_link[bl_idx];
4027
4028         amdgpu_dm_update_backlight_caps(dm, bl_idx);
4029         caps = dm->backlight_caps[bl_idx];
4030
4031         if (caps.aux_support) {
4032                 u32 avg, peak;
4033                 bool rc;
4034
4035                 rc = dc_link_get_backlight_level_nits(link, &avg, &peak);
4036                 if (!rc)
4037                         return dm->brightness[bl_idx];
4038                 return convert_brightness_to_user(&caps, avg);
4039         } else {
4040                 int ret = dc_link_get_backlight_level(link);
4041
4042                 if (ret == DC_ERROR_UNEXPECTED)
4043                         return dm->brightness[bl_idx];
4044                 return convert_brightness_to_user(&caps, ret);
4045         }
4046 }
4047
4048 static int amdgpu_dm_backlight_get_brightness(struct backlight_device *bd)
4049 {
4050         struct amdgpu_display_manager *dm = bl_get_data(bd);
4051         int i;
4052
4053         for (i = 0; i < dm->num_of_edps; i++) {
4054                 if (bd == dm->backlight_dev[i])
4055                         break;
4056         }
4057         if (i >= AMDGPU_DM_MAX_NUM_EDP)
4058                 i = 0;
4059         return amdgpu_dm_backlight_get_level(dm, i);
4060 }
4061
4062 static const struct backlight_ops amdgpu_dm_backlight_ops = {
4063         .options = BL_CORE_SUSPENDRESUME,
4064         .get_brightness = amdgpu_dm_backlight_get_brightness,
4065         .update_status  = amdgpu_dm_backlight_update_status,
4066 };
4067
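/*
 * Register a native backlight device for the next eDP link, unless ACPI
 * video is expected to provide backlight control, in which case an ACPI
 * video backlight device is registered instead.
 */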
4068 static void
4069 amdgpu_dm_register_backlight_device(struct amdgpu_display_manager *dm)
4070 {
4071         char bl_name[16];
4072         struct backlight_properties props = { 0 };
4073
4074         amdgpu_dm_update_backlight_caps(dm, dm->num_of_edps);
4075         dm->brightness[dm->num_of_edps] = AMDGPU_MAX_BL_LEVEL;
4076
4077         if (!acpi_video_backlight_use_native()) {
4078                 drm_info(adev_to_drm(dm->adev), "Skipping amdgpu DM backlight registration\n");
4079                 /* Try registering an ACPI video backlight device instead. */
4080                 acpi_video_register_backlight();
4081                 return;
4082         }
4083
4084         props.max_brightness = AMDGPU_MAX_BL_LEVEL;
4085         props.brightness = AMDGPU_MAX_BL_LEVEL;
4086         props.type = BACKLIGHT_RAW;
4087
4088         snprintf(bl_name, sizeof(bl_name), "amdgpu_bl%d",
4089                  adev_to_drm(dm->adev)->primary->index + dm->num_of_edps);
4090
4091         dm->backlight_dev[dm->num_of_edps] = backlight_device_register(bl_name,
4092                                                                        adev_to_drm(dm->adev)->dev,
4093                                                                        dm,
4094                                                                        &amdgpu_dm_backlight_ops,
4095                                                                        &props);
4096
4097         if (IS_ERR(dm->backlight_dev[dm->num_of_edps]))
4098                 DRM_ERROR("DM: Backlight registration failed!\n");
4099         else
4100                 DRM_DEBUG_DRIVER("DM: Registered Backlight device: %s\n", bl_name);
4101 }
4102
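/*
 * Allocate and initialize one DRM plane of the given type. Primary planes
 * are tied 1:1 to their CRTC, while overlay/underlay planes may be exposed
 * on any CRTC (see the possible_crtcs HACK below).
 */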
4103 static int initialize_plane(struct amdgpu_display_manager *dm,
4104                             struct amdgpu_mode_info *mode_info, int plane_id,
4105                             enum drm_plane_type plane_type,
4106                             const struct dc_plane_cap *plane_cap)
4107 {
4108         struct drm_plane *plane;
4109         unsigned long possible_crtcs;
4110         int ret = 0;
4111
4112         plane = kzalloc(sizeof(struct drm_plane), GFP_KERNEL);
4113         if (!plane) {
4114                 DRM_ERROR("KMS: Failed to allocate plane\n");
4115                 return -ENOMEM;
4116         }
4117         plane->type = plane_type;
4118
4119         /*
4120          * HACK: IGT tests expect that the primary plane for a CRTC
4121          * can only have one possible CRTC. Only expose support for
4122          * any CRTC if the plane is not going to be used as a primary
4123          * plane for a CRTC - like overlay or underlay planes.
4124          */
4125         possible_crtcs = 1 << plane_id;
4126         if (plane_id >= dm->dc->caps.max_streams)
4127                 possible_crtcs = 0xff;
4128
4129         ret = amdgpu_dm_plane_init(dm, plane, possible_crtcs, plane_cap);
4130
4131         if (ret) {
4132                 DRM_ERROR("KMS: Failed to initialize plane\n");
4133                 kfree(plane);
4134                 return ret;
4135         }
4136
4137         if (mode_info)
4138                 mode_info->planes[plane_id] = plane;
4139
4140         return ret;
4141 }
4142
4143
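/*
 * Register a backlight device for an eDP/LVDS link with a connected
 * sink, and record the link so brightness requests can be routed back
 * to it.
 */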
4144 static void register_backlight_device(struct amdgpu_display_manager *dm,
4145                                       struct dc_link *link)
4146 {
4147         if ((link->connector_signal & (SIGNAL_TYPE_EDP | SIGNAL_TYPE_LVDS)) &&
4148             link->type != dc_connection_none) {
4149                 /*
4150                  * Even if registration failed, we should continue with
4151                  * DM initialization because not having a backlight control
4152                  * is better than a black screen.
4153                  */
4154                 if (!dm->backlight_dev[dm->num_of_edps])
4155                         amdgpu_dm_register_backlight_device(dm);
4156
4157                 if (dm->backlight_dev[dm->num_of_edps]) {
4158                         dm->backlight_link[dm->num_of_edps] = link;
4159                         dm->num_of_edps++;
4160                 }
4161         }
4162 }
4163
4164 static void amdgpu_set_panel_orientation(struct drm_connector *connector);
4165
4166 /*
4167  * In this architecture, the association
4168  * connector -> encoder -> crtc
4169  * is not really required. The crtc and connector will hold the
4170  * display_index as an abstraction to use with the DAL component.
4171  *
4172  * Returns 0 on success
4173  */
4174 static int amdgpu_dm_initialize_drm_device(struct amdgpu_device *adev)
4175 {
4176         struct amdgpu_display_manager *dm = &adev->dm;
4177         int32_t i;
4178         struct amdgpu_dm_connector *aconnector = NULL;
4179         struct amdgpu_encoder *aencoder = NULL;
4180         struct amdgpu_mode_info *mode_info = &adev->mode_info;
4181         uint32_t link_cnt;
4182         int32_t primary_planes;
4183         enum dc_connection_type new_connection_type = dc_connection_none;
4184         const struct dc_plane_cap *plane;
4185         bool psr_feature_enabled = false;
4186
4187         dm->display_indexes_num = dm->dc->caps.max_streams;
4188         /* Update the actual number of CRTCs in use */
4189         adev->mode_info.num_crtc = adev->dm.display_indexes_num;
4190
4191         link_cnt = dm->dc->caps.max_links;
4192         if (amdgpu_dm_mode_config_init(dm->adev)) {
4193                 DRM_ERROR("DM: Failed to initialize mode config\n");
4194                 return -EINVAL;
4195         }
4196
4197         /* There is one primary plane per CRTC */
4198         primary_planes = dm->dc->caps.max_streams;
4199         ASSERT(primary_planes <= AMDGPU_MAX_PLANES);
4200
4201         /*
4202          * Initialize primary planes, implicit planes for legacy IOCTLs.
4203          * Order is reversed to match iteration order in atomic check.
4204          */
4205         for (i = (primary_planes - 1); i >= 0; i--) {
4206                 plane = &dm->dc->caps.planes[i];
4207
4208                 if (initialize_plane(dm, mode_info, i,
4209                                      DRM_PLANE_TYPE_PRIMARY, plane)) {
4210                         DRM_ERROR("KMS: Failed to initialize primary plane\n");
4211                         goto fail;
4212                 }
4213         }
4214
4215         /*
4216          * Initialize overlay planes, index starting after primary planes.
4217          * These planes have a higher DRM index than the primary planes since
4218          * they should be considered as having a higher z-order.
4219          * Order is reversed to match iteration order in atomic check.
4220          *
4221          * Only support DCN for now, and only expose one so we don't encourage
4222          * userspace to use up all the pipes.
4223          */
4224         for (i = 0; i < dm->dc->caps.max_planes; ++i) {
4225                 struct dc_plane_cap *plane = &dm->dc->caps.planes[i];
4226
4227                 /* Do not create overlay if MPO disabled */
4228                 if (amdgpu_dc_debug_mask & DC_DISABLE_MPO)
4229                         break;
4230
4231                 if (plane->type != DC_PLANE_TYPE_DCN_UNIVERSAL)
4232                         continue;
4233
4234                 if (!plane->blends_with_above || !plane->blends_with_below)
4235                         continue;
4236
4237                 if (!plane->pixel_format_support.argb8888)
4238                         continue;
4239
4240                 if (initialize_plane(dm, NULL, primary_planes + i,
4241                                      DRM_PLANE_TYPE_OVERLAY, plane)) {
4242                         DRM_ERROR("KMS: Failed to initialize overlay plane\n");
4243                         goto fail;
4244                 }
4245
4246                 /* Only create one overlay plane. */
4247                 break;
4248         }
4249
4250         for (i = 0; i < dm->dc->caps.max_streams; i++)
4251                 if (amdgpu_dm_crtc_init(dm, mode_info->planes[i], i)) {
4252                         DRM_ERROR("KMS: Failed to initialize crtc\n");
4253                         goto fail;
4254                 }
4255
4256         /* Use Outbox interrupt */
4257         switch (adev->ip_versions[DCE_HWIP][0]) {
4258         case IP_VERSION(3, 0, 0):
4259         case IP_VERSION(3, 1, 2):
4260         case IP_VERSION(3, 1, 3):
4261         case IP_VERSION(3, 1, 4):
4262         case IP_VERSION(3, 1, 5):
4263         case IP_VERSION(3, 1, 6):
4264         case IP_VERSION(3, 2, 0):
4265         case IP_VERSION(3, 2, 1):
4266         case IP_VERSION(2, 1, 0):
4267                 if (register_outbox_irq_handlers(dm->adev)) {
4268                         DRM_ERROR("DM: Failed to initialize IRQ\n");
4269                         goto fail;
4270                 }
4271                 break;
4272         default:
4273                 DRM_DEBUG_KMS("Unsupported DCN IP version for outbox: 0x%X\n",
4274                               adev->ip_versions[DCE_HWIP][0]);
4275         }
4276
4277         /* Determine whether to enable PSR support by default. */
4278         if (!(amdgpu_dc_debug_mask & DC_DISABLE_PSR)) {
4279                 switch (adev->ip_versions[DCE_HWIP][0]) {
4280                 case IP_VERSION(3, 1, 2):
4281                 case IP_VERSION(3, 1, 3):
4282                 case IP_VERSION(3, 1, 4):
4283                 case IP_VERSION(3, 1, 5):
4284                 case IP_VERSION(3, 1, 6):
4285                 case IP_VERSION(3, 2, 0):
4286                 case IP_VERSION(3, 2, 1):
4287                         psr_feature_enabled = true;
4288                         break;
4289                 default:
4290                         psr_feature_enabled = amdgpu_dc_feature_mask & DC_PSR_MASK;
4291                         break;
4292                 }
4293         }
4294
4295         /* loops over all connectors on the board */
4296         for (i = 0; i < link_cnt; i++) {
4297                 struct dc_link *link = NULL;
4298
4299                 if (i > AMDGPU_DM_MAX_DISPLAY_INDEX) {
4300                         DRM_ERROR(
4301                                 "KMS: Cannot support more than %d display indexes\n",
4302                                         AMDGPU_DM_MAX_DISPLAY_INDEX);
4303                         continue;
4304                 }
4305
4306                 aconnector = kzalloc(sizeof(*aconnector), GFP_KERNEL);
4307                 if (!aconnector)
4308                         goto fail;
4309
4310                 aencoder = kzalloc(sizeof(*aencoder), GFP_KERNEL);
4311                 if (!aencoder)
4312                         goto fail;
4313
4314                 if (amdgpu_dm_encoder_init(dm->ddev, aencoder, i)) {
4315                         DRM_ERROR("KMS: Failed to initialize encoder\n");
4316                         goto fail;
4317                 }
4318
4319                 if (amdgpu_dm_connector_init(dm, aconnector, i, aencoder)) {
4320                         DRM_ERROR("KMS: Failed to initialize connector\n");
4321                         goto fail;
4322                 }
4323
4324                 link = dc_get_link_at_index(dm->dc, i);
4325
4326                 if (!dc_link_detect_sink(link, &new_connection_type))
4327                         DRM_ERROR("KMS: Failed to detect connector\n");
4328
4329                 if (aconnector->base.force && new_connection_type == dc_connection_none) {
4330                         emulated_link_detect(link);
4331                         amdgpu_dm_update_connector_after_detect(aconnector);
4332                 } else {
4333                         bool ret = false;
4334
4335                         mutex_lock(&dm->dc_lock);
4336                         ret = dc_link_detect(link, DETECT_REASON_BOOT);
4337                         mutex_unlock(&dm->dc_lock);
4338
4339                         if (ret) {
4340                                 amdgpu_dm_update_connector_after_detect(aconnector);
4341                                 register_backlight_device(dm, link);
4342
4343                                 if (dm->num_of_edps)
4344                                         update_connector_ext_caps(aconnector);
4345
4346                                 if (psr_feature_enabled)
4347                                         amdgpu_dm_set_psr_caps(link);
4348
4349                                 /* TODO: Fix vblank control helpers to delay PSR entry to allow this when
4350                                  * PSR is also supported.
4351                                  */
4352                                 if (link->psr_settings.psr_feature_enabled)
4353                                         adev_to_drm(adev)->vblank_disable_immediate = false;
4354                         }
4355                 }
4356                 amdgpu_set_panel_orientation(&aconnector->base);
4357         }
4358
4359         /* If we didn't find a panel, notify the acpi video detection */
4360         if (dm->adev->flags & AMD_IS_APU && dm->num_of_edps == 0)
4361                 acpi_video_report_nolcd();
4362
4363         /* Software is initialized. Now we can register interrupt handlers. */
4364         switch (adev->asic_type) {
4365 #if defined(CONFIG_DRM_AMD_DC_SI)
4366         case CHIP_TAHITI:
4367         case CHIP_PITCAIRN:
4368         case CHIP_VERDE:
4369         case CHIP_OLAND:
4370                 if (dce60_register_irq_handlers(dm->adev)) {
4371                         DRM_ERROR("DM: Failed to initialize IRQ\n");
4372                         goto fail;
4373                 }
4374                 break;
4375 #endif
4376         case CHIP_BONAIRE:
4377         case CHIP_HAWAII:
4378         case CHIP_KAVERI:
4379         case CHIP_KABINI:
4380         case CHIP_MULLINS:
4381         case CHIP_TONGA:
4382         case CHIP_FIJI:
4383         case CHIP_CARRIZO:
4384         case CHIP_STONEY:
4385         case CHIP_POLARIS11:
4386         case CHIP_POLARIS10:
4387         case CHIP_POLARIS12:
4388         case CHIP_VEGAM:
4389         case CHIP_VEGA10:
4390         case CHIP_VEGA12:
4391         case CHIP_VEGA20:
4392                 if (dce110_register_irq_handlers(dm->adev)) {
4393                         DRM_ERROR("DM: Failed to initialize IRQ\n");
4394                         goto fail;
4395                 }
4396                 break;
4397         default:
4398                 switch (adev->ip_versions[DCE_HWIP][0]) {
4399                 case IP_VERSION(1, 0, 0):
4400                 case IP_VERSION(1, 0, 1):
4401                 case IP_VERSION(2, 0, 2):
4402                 case IP_VERSION(2, 0, 3):
4403                 case IP_VERSION(2, 0, 0):
4404                 case IP_VERSION(2, 1, 0):
4405                 case IP_VERSION(3, 0, 0):
4406                 case IP_VERSION(3, 0, 2):
4407                 case IP_VERSION(3, 0, 3):
4408                 case IP_VERSION(3, 0, 1):
4409                 case IP_VERSION(3, 1, 2):
4410                 case IP_VERSION(3, 1, 3):
4411                 case IP_VERSION(3, 1, 4):
4412                 case IP_VERSION(3, 1, 5):
4413                 case IP_VERSION(3, 1, 6):
4414                 case IP_VERSION(3, 2, 0):
4415                 case IP_VERSION(3, 2, 1):
4416                         if (dcn10_register_irq_handlers(dm->adev)) {
4417                                 DRM_ERROR("DM: Failed to initialize IRQ\n");
4418                                 goto fail;
4419                         }
4420                         break;
4421                 default:
4422                         DRM_ERROR("Unsupported DCE IP version: 0x%X\n",
4423                                         adev->ip_versions[DCE_HWIP][0]);
4424                         goto fail;
4425                 }
4426                 break;
4427         }
4428
4429         return 0;
4430 fail:
4431         kfree(aencoder);
4432         kfree(aconnector);
4433
4434         return -EINVAL;
4435 }
4436
4437 static void amdgpu_dm_destroy_drm_device(struct amdgpu_display_manager *dm)
4438 {
4439         drm_atomic_private_obj_fini(&dm->atomic_obj);
4441 }
4442
4443 /******************************************************************************
4444  * amdgpu_display_funcs functions
4445  *****************************************************************************/
4446
4447 /*
4448  * dm_bandwidth_update - program display watermarks
4449  *
4450  * @adev: amdgpu_device pointer
4451  *
4452  * Calculate and program the display watermarks and line buffer allocation.
4453  */
4454 static void dm_bandwidth_update(struct amdgpu_device *adev)
4455 {
4456         /* TODO: implement later */
4457 }
4458
4459 static const struct amdgpu_display_funcs dm_display_funcs = {
4460         .bandwidth_update = dm_bandwidth_update, /* called unconditionally */
4461         .vblank_get_counter = dm_vblank_get_counter,/* called unconditionally */
4462         .backlight_set_level = NULL, /* never called for DC */
4463         .backlight_get_level = NULL, /* never called for DC */
4464         .hpd_sense = NULL,/* called unconditionally */
4465         .hpd_set_polarity = NULL, /* called unconditionally */
4466         .hpd_get_gpio_reg = NULL, /* VBIOS parsing. DAL does it. */
4467         .page_flip_get_scanoutpos =
4468                 dm_crtc_get_scanoutpos,/* called unconditionally */
4469         .add_encoder = NULL, /* VBIOS parsing. DAL does it. */
4470         .add_connector = NULL, /* VBIOS parsing. DAL does it. */
4471 };
4472
4473 #if defined(CONFIG_DEBUG_KERNEL_DC)
4474
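/*
 * Sysfs hook for S3 debugging: writing a non-zero value resumes the DM
 * and generates a hotplug event, writing zero suspends it.
 */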
4475 static ssize_t s3_debug_store(struct device *device,
4476                               struct device_attribute *attr,
4477                               const char *buf,
4478                               size_t count)
4479 {
4480         int ret;
4481         int s3_state;
4482         struct drm_device *drm_dev = dev_get_drvdata(device);
4483         struct amdgpu_device *adev = drm_to_adev(drm_dev);
4484
4485         ret = kstrtoint(buf, 0, &s3_state);
4486
4487         if (ret == 0) {
4488                 if (s3_state) {
4489                         dm_resume(adev);
4490                         drm_kms_helper_hotplug_event(adev_to_drm(adev));
4491                 } else
4492                         dm_suspend(adev);
4493         }
4494
4495         return ret == 0 ? count : 0;
4496 }
4497
4498 DEVICE_ATTR_WO(s3_debug);
4499
4500 #endif
4501
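/*
 * Set the per-ASIC CRTC/HPD/DIG counts and hook up the display and IRQ
 * function tables before DM initialization proper.
 */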
4502 static int dm_early_init(void *handle)
4503 {
4504         struct amdgpu_device *adev = (struct amdgpu_device *)handle;
4505
4506         switch (adev->asic_type) {
4507 #if defined(CONFIG_DRM_AMD_DC_SI)
4508         case CHIP_TAHITI:
4509         case CHIP_PITCAIRN:
4510         case CHIP_VERDE:
4511                 adev->mode_info.num_crtc = 6;
4512                 adev->mode_info.num_hpd = 6;
4513                 adev->mode_info.num_dig = 6;
4514                 break;
4515         case CHIP_OLAND:
4516                 adev->mode_info.num_crtc = 2;
4517                 adev->mode_info.num_hpd = 2;
4518                 adev->mode_info.num_dig = 2;
4519                 break;
4520 #endif
4521         case CHIP_BONAIRE:
4522         case CHIP_HAWAII:
4523                 adev->mode_info.num_crtc = 6;
4524                 adev->mode_info.num_hpd = 6;
4525                 adev->mode_info.num_dig = 6;
4526                 break;
4527         case CHIP_KAVERI:
4528                 adev->mode_info.num_crtc = 4;
4529                 adev->mode_info.num_hpd = 6;
4530                 adev->mode_info.num_dig = 7;
4531                 break;
4532         case CHIP_KABINI:
4533         case CHIP_MULLINS:
4534                 adev->mode_info.num_crtc = 2;
4535                 adev->mode_info.num_hpd = 6;
4536                 adev->mode_info.num_dig = 6;
4537                 break;
4538         case CHIP_FIJI:
4539         case CHIP_TONGA:
4540                 adev->mode_info.num_crtc = 6;
4541                 adev->mode_info.num_hpd = 6;
4542                 adev->mode_info.num_dig = 7;
4543                 break;
4544         case CHIP_CARRIZO:
4545                 adev->mode_info.num_crtc = 3;
4546                 adev->mode_info.num_hpd = 6;
4547                 adev->mode_info.num_dig = 9;
4548                 break;
4549         case CHIP_STONEY:
4550                 adev->mode_info.num_crtc = 2;
4551                 adev->mode_info.num_hpd = 6;
4552                 adev->mode_info.num_dig = 9;
4553                 break;
4554         case CHIP_POLARIS11:
4555         case CHIP_POLARIS12:
4556                 adev->mode_info.num_crtc = 5;
4557                 adev->mode_info.num_hpd = 5;
4558                 adev->mode_info.num_dig = 5;
4559                 break;
4560         case CHIP_POLARIS10:
4561         case CHIP_VEGAM:
4562                 adev->mode_info.num_crtc = 6;
4563                 adev->mode_info.num_hpd = 6;
4564                 adev->mode_info.num_dig = 6;
4565                 break;
4566         case CHIP_VEGA10:
4567         case CHIP_VEGA12:
4568         case CHIP_VEGA20:
4569                 adev->mode_info.num_crtc = 6;
4570                 adev->mode_info.num_hpd = 6;
4571                 adev->mode_info.num_dig = 6;
4572                 break;
4573         default:
4574
4575                 switch (adev->ip_versions[DCE_HWIP][0]) {
4576                 case IP_VERSION(2, 0, 2):
4577                 case IP_VERSION(3, 0, 0):
4578                         adev->mode_info.num_crtc = 6;
4579                         adev->mode_info.num_hpd = 6;
4580                         adev->mode_info.num_dig = 6;
4581                         break;
4582                 case IP_VERSION(2, 0, 0):
4583                 case IP_VERSION(3, 0, 2):
4584                         adev->mode_info.num_crtc = 5;
4585                         adev->mode_info.num_hpd = 5;
4586                         adev->mode_info.num_dig = 5;
4587                         break;
4588                 case IP_VERSION(2, 0, 3):
4589                 case IP_VERSION(3, 0, 3):
4590                         adev->mode_info.num_crtc = 2;
4591                         adev->mode_info.num_hpd = 2;
4592                         adev->mode_info.num_dig = 2;
4593                         break;
4594                 case IP_VERSION(1, 0, 0):
4595                 case IP_VERSION(1, 0, 1):
4596                 case IP_VERSION(3, 0, 1):
4597                 case IP_VERSION(2, 1, 0):
4598                 case IP_VERSION(3, 1, 2):
4599                 case IP_VERSION(3, 1, 3):
4600                 case IP_VERSION(3, 1, 4):
4601                 case IP_VERSION(3, 1, 5):
4602                 case IP_VERSION(3, 1, 6):
4603                 case IP_VERSION(3, 2, 0):
4604                 case IP_VERSION(3, 2, 1):
4605                         adev->mode_info.num_crtc = 4;
4606                         adev->mode_info.num_hpd = 4;
4607                         adev->mode_info.num_dig = 4;
4608                         break;
4609                 default:
4610                         DRM_ERROR("Unsupported DCE IP version: 0x%x\n",
4611                                         adev->ip_versions[DCE_HWIP][0]);
4612                         return -EINVAL;
4613                 }
4614                 break;
4615         }
4616
4617         amdgpu_dm_set_irq_funcs(adev);
4618
4619         if (adev->mode_info.funcs == NULL)
4620                 adev->mode_info.funcs = &dm_display_funcs;
4621
4622         /*
4623          * Note: Do NOT change adev->audio_endpt_rreg and
4624          * adev->audio_endpt_wreg because they are initialised in
4625          * amdgpu_device_init()
4626          */
4627 #if defined(CONFIG_DEBUG_KERNEL_DC)
4628         device_create_file(
4629                 adev_to_drm(adev)->dev,
4630                 &dev_attr_s3_debug);
4631 #endif
4632         adev->dc_enabled = true;
4633
4634         return 0;
4635 }
4636
4637 static bool modereset_required(struct drm_crtc_state *crtc_state)
4638 {
4639         return !crtc_state->active && drm_atomic_crtc_needs_modeset(crtc_state);
4640 }
4641
4642 static void amdgpu_dm_encoder_destroy(struct drm_encoder *encoder)
4643 {
4644         drm_encoder_cleanup(encoder);
4645         kfree(encoder);
4646 }
4647
4648 static const struct drm_encoder_funcs amdgpu_dm_encoder_funcs = {
4649         .destroy = amdgpu_dm_encoder_destroy,
4650 };
4651
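/*
 * Translate the DRM color encoding/range plane properties into a DC
 * color space. RGB formats always map to sRGB; YCbCr formats pick the
 * matching BT.601/BT.709 full- or limited-range variant, while BT.2020
 * is only supported full range.
 */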
4652 static int
4653 fill_plane_color_attributes(const struct drm_plane_state *plane_state,
4654                             const enum surface_pixel_format format,
4655                             enum dc_color_space *color_space)
4656 {
4657         bool full_range;
4658
4659         *color_space = COLOR_SPACE_SRGB;
4660
4661         /* DRM color properties only affect non-RGB formats. */
4662         if (format < SURFACE_PIXEL_FORMAT_VIDEO_BEGIN)
4663                 return 0;
4664
4665         full_range = (plane_state->color_range == DRM_COLOR_YCBCR_FULL_RANGE);
4666
4667         switch (plane_state->color_encoding) {
4668         case DRM_COLOR_YCBCR_BT601:
4669                 if (full_range)
4670                         *color_space = COLOR_SPACE_YCBCR601;
4671                 else
4672                         *color_space = COLOR_SPACE_YCBCR601_LIMITED;
4673                 break;
4674
4675         case DRM_COLOR_YCBCR_BT709:
4676                 if (full_range)
4677                         *color_space = COLOR_SPACE_YCBCR709;
4678                 else
4679                         *color_space = COLOR_SPACE_YCBCR709_LIMITED;
4680                 break;
4681
4682         case DRM_COLOR_YCBCR_BT2020:
4683                 if (full_range)
4684                         *color_space = COLOR_SPACE_2020_YCBCR;
4685                 else
4686                         return -EINVAL;
4687                 break;
4688
4689         default:
4690                 return -EINVAL;
4691         }
4692
4693         return 0;
4694 }
4695
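/*
 * Build a dc_plane_info (format, rotation, color space, tiling, DCC)
 * and the surface address from a DRM plane state and its framebuffer.
 */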
4696 static int
4697 fill_dc_plane_info_and_addr(struct amdgpu_device *adev,
4698                             const struct drm_plane_state *plane_state,
4699                             const uint64_t tiling_flags,
4700                             struct dc_plane_info *plane_info,
4701                             struct dc_plane_address *address,
4702                             bool tmz_surface,
4703                             bool force_disable_dcc)
4704 {
4705         const struct drm_framebuffer *fb = plane_state->fb;
4706         const struct amdgpu_framebuffer *afb =
4707                 to_amdgpu_framebuffer(plane_state->fb);
4708         int ret;
4709
4710         memset(plane_info, 0, sizeof(*plane_info));
4711
4712         switch (fb->format->format) {
4713         case DRM_FORMAT_C8:
4714                 plane_info->format =
4715                         SURFACE_PIXEL_FORMAT_GRPH_PALETA_256_COLORS;
4716                 break;
4717         case DRM_FORMAT_RGB565:
4718                 plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_RGB565;
4719                 break;
4720         case DRM_FORMAT_XRGB8888:
4721         case DRM_FORMAT_ARGB8888:
4722                 plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ARGB8888;
4723                 break;
4724         case DRM_FORMAT_XRGB2101010:
4725         case DRM_FORMAT_ARGB2101010:
4726                 plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ARGB2101010;
4727                 break;
4728         case DRM_FORMAT_XBGR2101010:
4729         case DRM_FORMAT_ABGR2101010:
4730                 plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ABGR2101010;
4731                 break;
4732         case DRM_FORMAT_XBGR8888:
4733         case DRM_FORMAT_ABGR8888:
4734                 plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ABGR8888;
4735                 break;
4736         case DRM_FORMAT_NV21:
4737                 plane_info->format = SURFACE_PIXEL_FORMAT_VIDEO_420_YCbCr;
4738                 break;
4739         case DRM_FORMAT_NV12:
4740                 plane_info->format = SURFACE_PIXEL_FORMAT_VIDEO_420_YCrCb;
4741                 break;
4742         case DRM_FORMAT_P010:
4743                 plane_info->format = SURFACE_PIXEL_FORMAT_VIDEO_420_10bpc_YCrCb;
4744                 break;
4745         case DRM_FORMAT_XRGB16161616F:
4746         case DRM_FORMAT_ARGB16161616F:
4747                 plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ARGB16161616F;
4748                 break;
4749         case DRM_FORMAT_XBGR16161616F:
4750         case DRM_FORMAT_ABGR16161616F:
4751                 plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ABGR16161616F;
4752                 break;
4753         case DRM_FORMAT_XRGB16161616:
4754         case DRM_FORMAT_ARGB16161616:
4755                 plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ARGB16161616;
4756                 break;
4757         case DRM_FORMAT_XBGR16161616:
4758         case DRM_FORMAT_ABGR16161616:
4759                 plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ABGR16161616;
4760                 break;
4761         default:
4762                 DRM_ERROR(
4763                         "Unsupported screen format %p4cc\n",
4764                         &fb->format->format);
4765                 return -EINVAL;
4766         }
4767
4768         switch (plane_state->rotation & DRM_MODE_ROTATE_MASK) {
4769         case DRM_MODE_ROTATE_0:
4770                 plane_info->rotation = ROTATION_ANGLE_0;
4771                 break;
4772         case DRM_MODE_ROTATE_90:
4773                 plane_info->rotation = ROTATION_ANGLE_90;
4774                 break;
4775         case DRM_MODE_ROTATE_180:
4776                 plane_info->rotation = ROTATION_ANGLE_180;
4777                 break;
4778         case DRM_MODE_ROTATE_270:
4779                 plane_info->rotation = ROTATION_ANGLE_270;
4780                 break;
4781         default:
4782                 plane_info->rotation = ROTATION_ANGLE_0;
4783                 break;
4784         }
4785
4786
4787         plane_info->visible = true;
4788         plane_info->stereo_format = PLANE_STEREO_FORMAT_NONE;
4789
4790         plane_info->layer_index = plane_state->normalized_zpos;
4791
4792         ret = fill_plane_color_attributes(plane_state, plane_info->format,
4793                                           &plane_info->color_space);
4794         if (ret)
4795                 return ret;
4796
4797         ret = fill_plane_buffer_attributes(adev, afb, plane_info->format,
4798                                            plane_info->rotation, tiling_flags,
4799                                            &plane_info->tiling_info,
4800                                            &plane_info->plane_size,
4801                                            &plane_info->dcc, address,
4802                                            tmz_surface, force_disable_dcc);
4803         if (ret)
4804                 return ret;
4805
4806         fill_blending_from_plane_state(
4807                 plane_state, &plane_info->per_pixel_alpha, &plane_info->pre_multiplied_alpha,
4808                 &plane_info->global_alpha, &plane_info->global_alpha_value);
4809
4810         return 0;
4811 }
4812
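/*
 * Populate a dc_plane_state from the DRM plane and CRTC state: scaling
 * rects, plane info and address, and the input transfer function for
 * color management.
 */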
4813 static int fill_dc_plane_attributes(struct amdgpu_device *adev,
4814                                     struct dc_plane_state *dc_plane_state,
4815                                     struct drm_plane_state *plane_state,
4816                                     struct drm_crtc_state *crtc_state)
4817 {
4818         struct dm_crtc_state *dm_crtc_state = to_dm_crtc_state(crtc_state);
4819         struct amdgpu_framebuffer *afb = (struct amdgpu_framebuffer *)plane_state->fb;
4820         struct dc_scaling_info scaling_info;
4821         struct dc_plane_info plane_info;
4822         int ret;
4823         bool force_disable_dcc = false;
4824
4825         ret = fill_dc_scaling_info(adev, plane_state, &scaling_info);
4826         if (ret)
4827                 return ret;
4828
4829         dc_plane_state->src_rect = scaling_info.src_rect;
4830         dc_plane_state->dst_rect = scaling_info.dst_rect;
4831         dc_plane_state->clip_rect = scaling_info.clip_rect;
4832         dc_plane_state->scaling_quality = scaling_info.scaling_quality;
4833
4834         force_disable_dcc = adev->asic_type == CHIP_RAVEN && adev->in_suspend;
4835         ret = fill_dc_plane_info_and_addr(adev, plane_state,
4836                                           afb->tiling_flags,
4837                                           &plane_info,
4838                                           &dc_plane_state->address,
4839                                           afb->tmz_surface,
4840                                           force_disable_dcc);
4841         if (ret)
4842                 return ret;
4843
4844         dc_plane_state->format = plane_info.format;
4845         dc_plane_state->color_space = plane_info.color_space;
4847         dc_plane_state->plane_size = plane_info.plane_size;
4848         dc_plane_state->rotation = plane_info.rotation;
4849         dc_plane_state->horizontal_mirror = plane_info.horizontal_mirror;
4850         dc_plane_state->stereo_format = plane_info.stereo_format;
4851         dc_plane_state->tiling_info = plane_info.tiling_info;
4852         dc_plane_state->visible = plane_info.visible;
4853         dc_plane_state->per_pixel_alpha = plane_info.per_pixel_alpha;
4854         dc_plane_state->pre_multiplied_alpha = plane_info.pre_multiplied_alpha;
4855         dc_plane_state->global_alpha = plane_info.global_alpha;
4856         dc_plane_state->global_alpha_value = plane_info.global_alpha_value;
4857         dc_plane_state->dcc = plane_info.dcc;
4858         dc_plane_state->layer_index = plane_info.layer_index;
4859         dc_plane_state->flip_int_enabled = true;
4860
4861         /*
4862          * Always set input transfer function, since plane state is refreshed
4863          * every time.
4864          */
4865         ret = amdgpu_dm_update_plane_color_mgmt(dm_crtc_state, dc_plane_state);
4866         if (ret)
4867                 return ret;
4868
4869         return 0;
4870 }
4871
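/*
 * Record one dirty rectangle. The count is bumped even when the table
 * is full so the caller can detect overflow and fall back to a
 * full-frame update.
 */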
4872 static inline void fill_dc_dirty_rect(struct drm_plane *plane,
4873                                       struct rect *dirty_rect, int32_t x,
4874                                       int32_t y, int32_t width, int32_t height,
4875                                       int *i, bool ffu)
4876 {
4877         if (*i > DC_MAX_DIRTY_RECTS)
4878                 return;
4879
4880         if (*i == DC_MAX_DIRTY_RECTS)
4881                 goto out;
4882
4883         dirty_rect->x = x;
4884         dirty_rect->y = y;
4885         dirty_rect->width = width;
4886         dirty_rect->height = height;
4887
4888         if (ffu)
4889                 drm_dbg(plane->dev,
4890                         "[PLANE:%d] PSR FFU dirty rect size (%d, %d)\n",
4891                         plane->base.id, width, height);
4892         else
4893                 drm_dbg(plane->dev,
4894                         "[PLANE:%d] PSR SU dirty rect at (%d, %d) size (%d, %d)",
4895                         plane->base.id, x, y, width, height);
4896
4897 out:
4898         (*i)++;
4899 }
4900
4901 /**
4902  * fill_dc_dirty_rects() - Fill DC dirty regions for PSR selective updates
4903  *
4904  * @plane: DRM plane containing dirty regions that need to be flushed to the eDP
4905  *         remote fb
4906  * @old_plane_state: Old state of @plane
4907  * @new_plane_state: New state of @plane
4908  * @crtc_state: New state of CRTC connected to the @plane
4909  * @flip_addrs: DC flip tracking struct, which also tracks dirty rects
4910  *
4911  * For PSR SU, DC informs the DMUB uController of dirty rectangle regions
4912  * (referred to as "damage clips" in DRM nomenclature) that require updating on
4913  * the eDP remote buffer. The responsibility of specifying the dirty regions is
4914  * amdgpu_dm's.
4915  *
4916  * A damage-aware DRM client should fill the FB_DAMAGE_CLIPS property on the
4917  * plane with regions that require flushing to the eDP remote buffer. In
4918  * addition, certain use cases - such as cursor and multi-plane overlay (MPO) -
4919  * implicitly provide damage clips without any client support via the plane
4920  * bounds.
4921  */
4922 static void fill_dc_dirty_rects(struct drm_plane *plane,
4923                                 struct drm_plane_state *old_plane_state,
4924                                 struct drm_plane_state *new_plane_state,
4925                                 struct drm_crtc_state *crtc_state,
4926                                 struct dc_flip_addrs *flip_addrs)
4927 {
4928         struct dm_crtc_state *dm_crtc_state = to_dm_crtc_state(crtc_state);
4929         struct rect *dirty_rects = flip_addrs->dirty_rects;
4930         uint32_t num_clips;
4931         struct drm_mode_rect *clips;
4932         bool bb_changed;
4933         bool fb_changed;
4934         uint32_t i = 0;
4935
4936         /*
4937          * Cursor plane has its own dirty rect update interface. See
4938          * dcn10_dmub_update_cursor_data and dmub_cmd_update_cursor_info_data
4939          */
4940         if (plane->type == DRM_PLANE_TYPE_CURSOR)
4941                 return;
4942
4943         num_clips = drm_plane_get_damage_clips_count(new_plane_state);
4944         clips = drm_plane_get_damage_clips(new_plane_state);
4945
4946         if (!dm_crtc_state->mpo_requested) {
4947                 if (!num_clips || num_clips > DC_MAX_DIRTY_RECTS)
4948                         goto ffu;
4949
4950                 for (; flip_addrs->dirty_rect_count < num_clips; clips++)
4951                         fill_dc_dirty_rect(new_plane_state->plane,
4952                                            &dirty_rects[flip_addrs->dirty_rect_count],
4953                                            clips->x1, clips->y1,
4954                                            clips->x2 - clips->x1, clips->y2 - clips->y1,
4955                                            &flip_addrs->dirty_rect_count,
4956                                            false);
4957                 return;
4958         }
4959
4960         /*
4961          * MPO is requested. Add entire plane bounding box to dirty rects if
4962          * flipped to or damaged.
4963          *
4964          * If plane is moved or resized, also add old bounding box to dirty
4965          * rects.
4966          */
4967         fb_changed = old_plane_state->fb->base.id !=
4968                      new_plane_state->fb->base.id;
4969         bb_changed = (old_plane_state->crtc_x != new_plane_state->crtc_x ||
4970                       old_plane_state->crtc_y != new_plane_state->crtc_y ||
4971                       old_plane_state->crtc_w != new_plane_state->crtc_w ||
4972                       old_plane_state->crtc_h != new_plane_state->crtc_h);
4973
4974         drm_dbg(plane->dev,
4975                 "[PLANE:%d] PSR bb_changed:%d fb_changed:%d num_clips:%d\n",
4976                 new_plane_state->plane->base.id,
4977                 bb_changed, fb_changed, num_clips);
4978
4979         if (bb_changed) {
4980                 fill_dc_dirty_rect(new_plane_state->plane, &dirty_rects[i],
4981                                    new_plane_state->crtc_x,
4982                                    new_plane_state->crtc_y,
4983                                    new_plane_state->crtc_w,
4984                                    new_plane_state->crtc_h, &i, false);
4985
4986                 /* Add old plane bounding-box if plane is moved or resized */
4987                 fill_dc_dirty_rect(new_plane_state->plane, &dirty_rects[i],
4988                                    old_plane_state->crtc_x,
4989                                    old_plane_state->crtc_y,
4990                                    old_plane_state->crtc_w,
4991                                    old_plane_state->crtc_h, &i, false);
4992         }
4993
4994         if (num_clips) {
4995                 for (; i < num_clips; clips++)
4996                         fill_dc_dirty_rect(new_plane_state->plane,
4997                                            &dirty_rects[i], clips->x1,
4998                                            clips->y1, clips->x2 - clips->x1,
4999                                            clips->y2 - clips->y1, &i, false);
5000         } else if (fb_changed && !bb_changed) {
5001                 fill_dc_dirty_rect(new_plane_state->plane, &dirty_rects[i],
5002                                    new_plane_state->crtc_x,
5003                                    new_plane_state->crtc_y,
5004                                    new_plane_state->crtc_w,
5005                                    new_plane_state->crtc_h, &i, false);
5006         }
5007
5008         if (i > DC_MAX_DIRTY_RECTS)
5009                 goto ffu;
5010
5011         flip_addrs->dirty_rect_count = i;
5012         return;
5013
5014 ffu:
5015         fill_dc_dirty_rect(new_plane_state->plane, &dirty_rects[0], 0, 0,
5016                            dm_crtc_state->base.mode.crtc_hdisplay,
5017                            dm_crtc_state->base.mode.crtc_vdisplay,
5018                            &flip_addrs->dirty_rect_count, true);
5019 }
5020
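/*
 * Compute the stream src (viewport) and dst (addressable area)
 * rectangles for the requested scaling mode and apply any underscan
 * borders.
 */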
5021 static void update_stream_scaling_settings(const struct drm_display_mode *mode,
5022                                            const struct dm_connector_state *dm_state,
5023                                            struct dc_stream_state *stream)
5024 {
5025         enum amdgpu_rmx_type rmx_type;
5026
5027         struct rect src = { 0 }; /* viewport in composition space */
5028         struct rect dst = { 0 }; /* stream addressable area */
5029
5030         /* no mode. nothing to be done */
5031         if (!mode)
5032                 return;
5033
5034         /* Full screen scaling by default */
5035         src.width = mode->hdisplay;
5036         src.height = mode->vdisplay;
5037         dst.width = stream->timing.h_addressable;
5038         dst.height = stream->timing.v_addressable;
5039
5040         if (dm_state) {
5041                 rmx_type = dm_state->scaling;
5042                 if (rmx_type == RMX_ASPECT || rmx_type == RMX_OFF) {
5043                         if (src.width * dst.height <
5044                                         src.height * dst.width) {
5045                                 /* height needs less upscaling/more downscaling */
5046                                 dst.width = src.width *
5047                                                 dst.height / src.height;
5048                         } else {
5049                                 /* width needs less upscaling/more downscaling */
5050                                 dst.height = src.height *
5051                                                 dst.width / src.width;
5052                         }
5053                 } else if (rmx_type == RMX_CENTER) {
5054                         dst = src;
5055                 }
5056
5057                 dst.x = (stream->timing.h_addressable - dst.width) / 2;
5058                 dst.y = (stream->timing.v_addressable - dst.height) / 2;
5059
5060                 if (dm_state->underscan_enable) {
5061                         dst.x += dm_state->underscan_hborder / 2;
5062                         dst.y += dm_state->underscan_vborder / 2;
5063                         dst.width -= dm_state->underscan_hborder;
5064                         dst.height -= dm_state->underscan_vborder;
5065                 }
5066         }
5067
5068         stream->src = src;
5069         stream->dst = dst;
5070
5071         DRM_DEBUG_KMS("Destination Rectangle x:%d  y:%d  width:%d  height:%d\n",
5072                       dst.x, dst.y, dst.width, dst.height);
5073
5074 }
5075
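/*
 * Derive the DC color depth from the sink's EDID-reported bpc, capped
 * by the HDMI 4:2:0 deep-color capabilities and the user-requested
 * maximum bpc.
 */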
5076 static enum dc_color_depth
5077 convert_color_depth_from_display_info(const struct drm_connector *connector,
5078                                       bool is_y420, int requested_bpc)
5079 {
5080         uint8_t bpc;
5081
5082         if (is_y420) {
5083                 bpc = 8;
5084
5085                 /* Cap display bpc based on HDMI 2.0 HF-VSDB */
5086                 if (connector->display_info.hdmi.y420_dc_modes & DRM_EDID_YCBCR420_DC_48)
5087                         bpc = 16;
5088                 else if (connector->display_info.hdmi.y420_dc_modes & DRM_EDID_YCBCR420_DC_36)
5089                         bpc = 12;
5090                 else if (connector->display_info.hdmi.y420_dc_modes & DRM_EDID_YCBCR420_DC_30)
5091                         bpc = 10;
5092         } else {
5093                 bpc = (uint8_t)connector->display_info.bpc;
5094                 /* Assume 8 bpc by default if no bpc is specified. */
5095                 bpc = bpc ? bpc : 8;
5096         }
5097
5098         if (requested_bpc > 0) {
5099                 /*
5100                  * Cap display bpc based on the user requested value.
5101                  *
5102                  * The value for state->max_bpc may not be correctly updated
5103                  * depending on when the connector gets added to the state
5104                  * or if this was called outside of atomic check, so it
5105                  * can't be used directly.
5106                  */
5107                 bpc = min_t(u8, bpc, requested_bpc);
5108
5109                 /* Round down to the nearest even number. */
5110                 bpc = bpc - (bpc & 1);
5111         }
5112
5113         switch (bpc) {
5114         case 0:
5115                 /*
5116                  * Temporary Work around, DRM doesn't parse color depth for
5117                  * EDID revision before 1.4
5118                  * TODO: Fix edid parsing
5119                  */
5120                 return COLOR_DEPTH_888;
5121         case 6:
5122                 return COLOR_DEPTH_666;
5123         case 8:
5124                 return COLOR_DEPTH_888;
5125         case 10:
5126                 return COLOR_DEPTH_101010;
5127         case 12:
5128                 return COLOR_DEPTH_121212;
5129         case 14:
5130                 return COLOR_DEPTH_141414;
5131         case 16:
5132                 return COLOR_DEPTH_161616;
5133         default:
5134                 return COLOR_DEPTH_UNDEFINED;
5135         }
5136 }
5137
5138 static enum dc_aspect_ratio
5139 get_aspect_ratio(const struct drm_display_mode *mode_in)
5140 {
5141         /* 1-1 mapping, since both enums follow the HDMI spec. */
5142         return (enum dc_aspect_ratio) mode_in->picture_aspect_ratio;
5143 }
5144
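/*
 * Pick the output color space from the pixel encoding: sRGB for RGB,
 * and BT.709 vs BT.601 for YCbCr depending on the pixel clock.
 */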
5145 static enum dc_color_space
5146 get_output_color_space(const struct dc_crtc_timing *dc_crtc_timing)
5147 {
5148         enum dc_color_space color_space = COLOR_SPACE_SRGB;
5149
5150         switch (dc_crtc_timing->pixel_encoding) {
5151         case PIXEL_ENCODING_YCBCR422:
5152         case PIXEL_ENCODING_YCBCR444:
5153         case PIXEL_ENCODING_YCBCR420:
5154         {
5155                 /*
5156                  * 27030 kHz (27.03 MHz) is the separation point between
5157                  * HDTV and SDTV according to the HDMI spec; we use
5158                  * YCbCr709 and YCbCr601 respectively.
5159                  */
5160                 if (dc_crtc_timing->pix_clk_100hz > 270300) {
5161                         if (dc_crtc_timing->flags.Y_ONLY)
5162                                 color_space =
5163                                         COLOR_SPACE_YCBCR709_LIMITED;
5164                         else
5165                                 color_space = COLOR_SPACE_YCBCR709;
5166                 } else {
5167                         if (dc_crtc_timing->flags.Y_ONLY)
5168                                 color_space =
5169                                         COLOR_SPACE_YCBCR601_LIMITED;
5170                         else
5171                                 color_space = COLOR_SPACE_YCBCR601;
5172                 }
5173
5174         }
5175         break;
5176         case PIXEL_ENCODING_RGB:
5177                 color_space = COLOR_SPACE_SRGB;
5178                 break;
5179
5180         default:
5181                 WARN_ON(1);
5182                 break;
5183         }
5184
5185         return color_space;
5186 }
5187
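/*
 * Walk down from the current color depth until the resulting TMDS
 * clock fits within the sink's max_tmds_clock. Returns true if a
 * suitable depth was found.
 */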
5188 static bool adjust_colour_depth_from_display_info(
5189         struct dc_crtc_timing *timing_out,
5190         const struct drm_display_info *info)
5191 {
5192         enum dc_color_depth depth = timing_out->display_color_depth;
5193         int normalized_clk;

5194         do {
5195                 normalized_clk = timing_out->pix_clk_100hz / 10;
5196                 /* YCbCr 4:2:0 requires additional adjustment of 1/2 */
5197                 if (timing_out->pixel_encoding == PIXEL_ENCODING_YCBCR420)
5198                         normalized_clk /= 2;
5199                 /* Adjusting pix clock following on HDMI spec based on colour depth */
5200                 switch (depth) {
5201                 case COLOR_DEPTH_888:
5202                         break;
5203                 case COLOR_DEPTH_101010:
5204                         normalized_clk = (normalized_clk * 30) / 24;
5205                         break;
5206                 case COLOR_DEPTH_121212:
5207                         normalized_clk = (normalized_clk * 36) / 24;
5208                         break;
5209                 case COLOR_DEPTH_161616:
5210                         normalized_clk = (normalized_clk * 48) / 24;
5211                         break;
5212                 default:
5213                         /* The above depths are the only ones valid for HDMI. */
5214                         return false;
5215                 }
5216                 if (normalized_clk <= info->max_tmds_clock) {
5217                         timing_out->display_color_depth = depth;
5218                         return true;
5219                 }
5220         } while (--depth > COLOR_DEPTH_666);
5221         return false;
5222 }
5223
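/*
 * Fill the DC stream timing from a DRM display mode: pixel encoding,
 * color depth, VIC/HDMI VIC, sync polarities and h/v timing, then
 * derive the output color space.
 */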
5224 static void fill_stream_properties_from_drm_display_mode(
5225         struct dc_stream_state *stream,
5226         const struct drm_display_mode *mode_in,
5227         const struct drm_connector *connector,
5228         const struct drm_connector_state *connector_state,
5229         const struct dc_stream_state *old_stream,
5230         int requested_bpc)
5231 {
5232         struct dc_crtc_timing *timing_out = &stream->timing;
5233         const struct drm_display_info *info = &connector->display_info;
5234         struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
5235         struct hdmi_vendor_infoframe hv_frame;
5236         struct hdmi_avi_infoframe avi_frame;
5237
5238         memset(&hv_frame, 0, sizeof(hv_frame));
5239         memset(&avi_frame, 0, sizeof(avi_frame));
5240
5241         timing_out->h_border_left = 0;
5242         timing_out->h_border_right = 0;
5243         timing_out->v_border_top = 0;
5244         timing_out->v_border_bottom = 0;
5245         /* TODO: un-hardcode */
5246         if (drm_mode_is_420_only(info, mode_in)
5247                         && stream->signal == SIGNAL_TYPE_HDMI_TYPE_A)
5248                 timing_out->pixel_encoding = PIXEL_ENCODING_YCBCR420;
5249         else if (drm_mode_is_420_also(info, mode_in)
5250                         && aconnector->force_yuv420_output)
5251                 timing_out->pixel_encoding = PIXEL_ENCODING_YCBCR420;
5252         else if ((connector->display_info.color_formats & DRM_COLOR_FORMAT_YCBCR444)
5253                         && stream->signal == SIGNAL_TYPE_HDMI_TYPE_A)
5254                 timing_out->pixel_encoding = PIXEL_ENCODING_YCBCR444;
5255         else
5256                 timing_out->pixel_encoding = PIXEL_ENCODING_RGB;
5257
5258         timing_out->timing_3d_format = TIMING_3D_FORMAT_NONE;
5259         timing_out->display_color_depth = convert_color_depth_from_display_info(
5260                 connector,
5261                 (timing_out->pixel_encoding == PIXEL_ENCODING_YCBCR420),
5262                 requested_bpc);
5263         timing_out->scan_type = SCANNING_TYPE_NODATA;
5264         timing_out->hdmi_vic = 0;
5265
5266         if (old_stream) {
5267                 timing_out->vic = old_stream->timing.vic;
5268                 timing_out->flags.HSYNC_POSITIVE_POLARITY = old_stream->timing.flags.HSYNC_POSITIVE_POLARITY;
5269                 timing_out->flags.VSYNC_POSITIVE_POLARITY = old_stream->timing.flags.VSYNC_POSITIVE_POLARITY;
5270         } else {
5271                 timing_out->vic = drm_match_cea_mode(mode_in);
5272                 if (mode_in->flags & DRM_MODE_FLAG_PHSYNC)
5273                         timing_out->flags.HSYNC_POSITIVE_POLARITY = 1;
5274                 if (mode_in->flags & DRM_MODE_FLAG_PVSYNC)
5275                         timing_out->flags.VSYNC_POSITIVE_POLARITY = 1;
5276         }
5277
5278         if (stream->signal == SIGNAL_TYPE_HDMI_TYPE_A) {
5279                 drm_hdmi_avi_infoframe_from_display_mode(&avi_frame, (struct drm_connector *)connector, mode_in);
5280                 timing_out->vic = avi_frame.video_code;
5281                 drm_hdmi_vendor_infoframe_from_display_mode(&hv_frame, (struct drm_connector *)connector, mode_in);
5282                 timing_out->hdmi_vic = hv_frame.vic;
5283         }
5284
5285         if (is_freesync_video_mode(mode_in, aconnector)) {
5286                 timing_out->h_addressable = mode_in->hdisplay;
5287                 timing_out->h_total = mode_in->htotal;
5288                 timing_out->h_sync_width = mode_in->hsync_end - mode_in->hsync_start;
5289                 timing_out->h_front_porch = mode_in->hsync_start - mode_in->hdisplay;
5290                 timing_out->v_total = mode_in->vtotal;
5291                 timing_out->v_addressable = mode_in->vdisplay;
5292                 timing_out->v_front_porch = mode_in->vsync_start - mode_in->vdisplay;
5293                 timing_out->v_sync_width = mode_in->vsync_end - mode_in->vsync_start;
5294                 timing_out->pix_clk_100hz = mode_in->clock * 10;
5295         } else {
5296                 timing_out->h_addressable = mode_in->crtc_hdisplay;
5297                 timing_out->h_total = mode_in->crtc_htotal;
5298                 timing_out->h_sync_width = mode_in->crtc_hsync_end - mode_in->crtc_hsync_start;
5299                 timing_out->h_front_porch = mode_in->crtc_hsync_start - mode_in->crtc_hdisplay;
5300                 timing_out->v_total = mode_in->crtc_vtotal;
5301                 timing_out->v_addressable = mode_in->crtc_vdisplay;
5302                 timing_out->v_front_porch = mode_in->crtc_vsync_start - mode_in->crtc_vdisplay;
5303                 timing_out->v_sync_width = mode_in->crtc_vsync_end - mode_in->crtc_vsync_start;
5304                 timing_out->pix_clk_100hz = mode_in->crtc_clock * 10;
5305         }
5306
5307         timing_out->aspect_ratio = get_aspect_ratio(mode_in);
5308
5309         stream->out_transfer_func->type = TF_TYPE_PREDEFINED;
5310         stream->out_transfer_func->tf = TRANSFER_FUNCTION_SRGB;
5311         if (stream->signal == SIGNAL_TYPE_HDMI_TYPE_A) {
5312                 if (!adjust_colour_depth_from_display_info(timing_out, info) &&
5313                     drm_mode_is_420_also(info, mode_in) &&
5314                     timing_out->pixel_encoding != PIXEL_ENCODING_YCBCR420) {
5315                         timing_out->pixel_encoding = PIXEL_ENCODING_YCBCR420;
5316                         adjust_colour_depth_from_display_info(timing_out, info);
5317                 }
5318         }
5319
5320         stream->output_color_space = get_output_color_space(timing_out);
5321 }
5322
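/*
 * Copy the sink's EDID audio capabilities (audio modes, speaker
 * allocation, latencies) into the stream's audio_info.
 */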
5323 static void fill_audio_info(struct audio_info *audio_info,
5324                             const struct drm_connector *drm_connector,
5325                             const struct dc_sink *dc_sink)
5326 {
5327         int i = 0;
5328         int cea_revision = 0;
5329         const struct dc_edid_caps *edid_caps = &dc_sink->edid_caps;
5330
5331         audio_info->manufacture_id = edid_caps->manufacturer_id;
5332         audio_info->product_id = edid_caps->product_id;
5333
5334         cea_revision = drm_connector->display_info.cea_rev;
5335
5336         strscpy(audio_info->display_name,
5337                 edid_caps->display_name,
5338                 AUDIO_INFO_DISPLAY_NAME_SIZE_IN_CHARS);
5339
5340         if (cea_revision >= 3) {
5341                 audio_info->mode_count = edid_caps->audio_mode_count;
5342
5343                 for (i = 0; i < audio_info->mode_count; ++i) {
5344                         audio_info->modes[i].format_code =
5345                                         (enum audio_format_code)
5346                                         (edid_caps->audio_modes[i].format_code);
5347                         audio_info->modes[i].channel_count =
5348                                         edid_caps->audio_modes[i].channel_count;
5349                         audio_info->modes[i].sample_rates.all =
5350                                         edid_caps->audio_modes[i].sample_rate;
5351                         audio_info->modes[i].sample_size =
5352                                         edid_caps->audio_modes[i].sample_size;
5353                 }
5354         }
5355
5356         audio_info->flags.all = edid_caps->speaker_flags;
5357
5358         /* TODO: We only check for progressive mode; check for interlaced mode too */
5359         if (drm_connector->latency_present[0]) {
5360                 audio_info->video_latency = drm_connector->video_latency[0];
5361                 audio_info->audio_latency = drm_connector->audio_latency[0];
5362         }
5363
5364         /* TODO: For DP, video and audio latency should be calculated from DPCD caps */
5365
5366 }
5367
5368 static void
5369 copy_crtc_timing_for_drm_display_mode(const struct drm_display_mode *src_mode,
5370                                       struct drm_display_mode *dst_mode)
5371 {
5372         dst_mode->crtc_hdisplay = src_mode->crtc_hdisplay;
5373         dst_mode->crtc_vdisplay = src_mode->crtc_vdisplay;
5374         dst_mode->crtc_clock = src_mode->crtc_clock;
5375         dst_mode->crtc_hblank_start = src_mode->crtc_hblank_start;
5376         dst_mode->crtc_hblank_end = src_mode->crtc_hblank_end;
5377         dst_mode->crtc_hsync_start =  src_mode->crtc_hsync_start;
5378         dst_mode->crtc_hsync_end = src_mode->crtc_hsync_end;
5379         dst_mode->crtc_htotal = src_mode->crtc_htotal;
5380         dst_mode->crtc_hskew = src_mode->crtc_hskew;
5381         dst_mode->crtc_vblank_start = src_mode->crtc_vblank_start;
5382         dst_mode->crtc_vblank_end = src_mode->crtc_vblank_end;
5383         dst_mode->crtc_vsync_start = src_mode->crtc_vsync_start;
5384         dst_mode->crtc_vsync_end = src_mode->crtc_vsync_end;
5385         dst_mode->crtc_vtotal = src_mode->crtc_vtotal;
5386 }
5387
5388 static void
5389 decide_crtc_timing_for_drm_display_mode(struct drm_display_mode *drm_mode,
5390                                         const struct drm_display_mode *native_mode,
5391                                         bool scale_enabled)
5392 {
5393         if (scale_enabled) {
5394                 copy_crtc_timing_for_drm_display_mode(native_mode, drm_mode);
5395         } else if (native_mode->clock == drm_mode->clock &&
5396                         native_mode->htotal == drm_mode->htotal &&
5397                         native_mode->vtotal == drm_mode->vtotal) {
5398                 copy_crtc_timing_for_drm_display_mode(native_mode, drm_mode);
5399         } else {
5400                 /* no scaling and no amdgpu-inserted mode, no need to patch */
5401         }
5402 }
5403
5404 static struct dc_sink *
5405 create_fake_sink(struct amdgpu_dm_connector *aconnector)
5406 {
5407         struct dc_sink_init_data sink_init_data = { 0 };
5408         struct dc_sink *sink = NULL;
5409         sink_init_data.link = aconnector->dc_link;
5410         sink_init_data.sink_signal = aconnector->dc_link->connector_signal;
5411
5412         sink = dc_sink_create(&sink_init_data);
5413         if (!sink) {
5414                 DRM_ERROR("Failed to create sink!\n");
5415                 return NULL;
5416         }
5417         sink->sink_signal = SIGNAL_TYPE_VIRTUAL;
5418
5419         return sink;
5420 }
5421
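/*
 * Arm the CRTC-reset trigger of a multisync stream on the vsync edge of its
 * master stream: rising when the master's vsync polarity is positive,
 * falling otherwise, with the trigger delayed to the next pixel.
 */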
5422 static void set_multisync_trigger_params(
5423                 struct dc_stream_state *stream)
5424 {
5425         struct dc_stream_state *master = NULL;
5426
5427         if (stream->triggered_crtc_reset.enabled) {
5428                 master = stream->triggered_crtc_reset.event_source;
5429                 stream->triggered_crtc_reset.event =
5430                         master->timing.flags.VSYNC_POSITIVE_POLARITY ?
5431                         CRTC_EVENT_VSYNC_RISING : CRTC_EVENT_VSYNC_FALLING;
5432                 stream->triggered_crtc_reset.delay = TRIGGER_DELAY_NEXT_PIXEL;
5433         }
5434 }
5435
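/*
 * Pick the stream with the highest refresh rate among those that have the
 * triggered CRTC reset enabled as the multisync master, then point every
 * stream's CRTC-reset event source at it. The refresh rate compared here
 * is pix_clk / (h_total * v_total), in Hz.
 */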
5436 static void set_master_stream(struct dc_stream_state *stream_set[],
5437                               int stream_count)
5438 {
5439         int j, highest_rfr = 0, master_stream = 0;
5440
5441         for (j = 0;  j < stream_count; j++) {
5442                 if (stream_set[j] && stream_set[j]->triggered_crtc_reset.enabled) {
5443                         int refresh_rate = 0;
5444
5445                         refresh_rate = (stream_set[j]->timing.pix_clk_100hz * 100) /
5446                                 (stream_set[j]->timing.h_total * stream_set[j]->timing.v_total);
5447                         if (refresh_rate > highest_rfr) {
5448                                 highest_rfr = refresh_rate;
5449                                 master_stream = j;
5450                         }
5451                 }
5452         }
5453         for (j = 0;  j < stream_count; j++) {
5454                 if (stream_set[j])
5455                         stream_set[j]->triggered_crtc_reset.event_source = stream_set[master_stream];
5456         }
5457 }
5458
5459 static void dm_enable_per_frame_crtc_master_sync(struct dc_state *context)
5460 {
5461         int i = 0;
5462         struct dc_stream_state *stream;
5463
5464         if (context->stream_count < 2)
5465                 return;
5466         for (i = 0; i < context->stream_count; i++) {
5467                 if (!context->streams[i])
5468                         continue;
5469                 /*
5470                  * TODO: add a function to read AMD VSDB bits and set
5471                  * crtc_sync_master.multi_sync_enabled flag
5472                  * For now it's set to false
5473                  */
5474         }
5475
5476         set_master_stream(context->streams, context->stream_count);
5477
5478         for (i = 0; i < context->stream_count; i++) {
5479                 stream = context->streams[i];
5480
5481                 if (!stream)
5482                         continue;
5483
5484                 set_multisync_trigger_params(stream);
5485         }
5486 }
5487
5488 /**
5489  * DOC: FreeSync Video
5490  *
5491  * When a userspace application wants to play a video, the content follows a
5492  * standard format definition that usually specifies the FPS for that format.
5493  * The list below illustrates some common video formats and their
5494  * expected FPS:
5495  *
5496  * - TV/NTSC (23.976 FPS)
5497  * - Cinema (24 FPS)
5498  * - TV/PAL (25 FPS)
5499  * - TV/NTSC (29.97 FPS)
5500  * - TV/NTSC (30 FPS)
5501  * - Cinema HFR (48 FPS)
5502  * - TV/PAL (50 FPS)
5503  * - Commonly used (60 FPS)
5504  * - Multiples of 24 (48,72,96 FPS)
5505  *
5506  * The list of standard video formats is not huge and can be added to the
5507  * connector modeset list beforehand. With that, userspace can leverage
5508  * FreeSync to extend the front porch in order to attain the target refresh
5509  * rate. Such a switch happens seamlessly, without screen blanking or
5510  * reprogramming of the output in any other way. If userspace requests a
5511  * modeset change compatible with FreeSync modes that only differ in the
5512  * refresh rate, DC will skip the full update and avoid a blink during the
5513  * transition. For example, a video player can change the modeset from
5514  * 60Hz to 30Hz for playing TV/NTSC content when it goes full screen
5515  * without causing any display blink. The same concept applies to any
5516  * other mode setting change between such FreeSync-compatible modes.
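 *
 * As an illustration (the numbers are chosen for this example rather than
 * taken from any particular panel): a 1920x1080 mode with htotal = 2200,
 * vtotal = 1125 and a 148.5 MHz pixel clock refreshes at
 * 148500000 / (2200 * 1125) = 60 Hz. Keeping the pixel clock and htotal
 * fixed while extending the vertical front porch so that vtotal becomes
 * 2250 yields 30 Hz, and vtotal = 1406 gives roughly 48 Hz.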
5517  */
5518 static struct drm_display_mode *
5519 get_highest_refresh_rate_mode(struct amdgpu_dm_connector *aconnector,
5520                 bool use_probed_modes)
5521 {
5522         struct drm_display_mode *m, *m_pref = NULL;
5523         u16 current_refresh, highest_refresh;
5524         struct list_head *list_head = use_probed_modes ?
5525                 &aconnector->base.probed_modes :
5526                 &aconnector->base.modes;
5527
5528         if (aconnector->freesync_vid_base.clock != 0)
5529                 return &aconnector->freesync_vid_base;
5530
5531         /* Find the preferred mode */
5532         list_for_each_entry(m, list_head, head) {
5533                 if (m->type & DRM_MODE_TYPE_PREFERRED) {
5534                         m_pref = m;
5535                         break;
5536                 }
5537         }
5538
5539         if (!m_pref) {
5540                 /* Probably an EDID with no preferred mode. Fall back to the first entry */
5541                 m_pref = list_first_entry_or_null(
5542                                 &aconnector->base.modes, struct drm_display_mode, head);
5543                 if (!m_pref) {
5544                         DRM_DEBUG_DRIVER("No preferred mode found in EDID\n");
5545                         return NULL;
5546                 }
5547         }
5548
5549         highest_refresh = drm_mode_vrefresh(m_pref);
5550
5551         /*
5552          * Find the mode with the highest refresh rate at the same resolution.
5553          * For some monitors, the preferred mode is not the mode with the
5554          * highest supported refresh rate.
5555          */
5556         list_for_each_entry(m, list_head, head) {
5557                 current_refresh = drm_mode_vrefresh(m);
5558
5559                 if (m->hdisplay == m_pref->hdisplay &&
5560                     m->vdisplay == m_pref->vdisplay &&
5561                     highest_refresh < current_refresh) {
5562                         highest_refresh = current_refresh;
5563                         m_pref = m;
5564                 }
5565         }
5566
5567         drm_mode_copy(&aconnector->freesync_vid_base, m_pref);
5568         return m_pref;
5569 }
5570
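/*
 * A mode qualifies as a FreeSync video mode when it matches the cached
 * highest-refresh base mode in every timing parameter except vtotal, and
 * the vsync position shifts by exactly the vtotal difference, i.e. only
 * the vertical front porch is extended.
 */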
5571 static bool is_freesync_video_mode(const struct drm_display_mode *mode,
5572                 struct amdgpu_dm_connector *aconnector)
5573 {
5574         struct drm_display_mode *high_mode;
5575         int timing_diff;
5576
5577         high_mode = get_highest_refresh_rate_mode(aconnector, false);
5578         if (!high_mode || !mode)
5579                 return false;
5580
5581         timing_diff = high_mode->vtotal - mode->vtotal;
5582
5583         if (high_mode->clock == 0 || high_mode->clock != mode->clock ||
5584             high_mode->hdisplay != mode->hdisplay ||
5585             high_mode->vdisplay != mode->vdisplay ||
5586             high_mode->hsync_start != mode->hsync_start ||
5587             high_mode->hsync_end != mode->hsync_end ||
5588             high_mode->htotal != mode->htotal ||
5589             high_mode->hskew != mode->hskew ||
5590             high_mode->vscan != mode->vscan ||
5591             high_mode->vsync_start - mode->vsync_start != timing_diff ||
5592             high_mode->vsync_end - mode->vsync_end != timing_diff)
5593                 return false;
5594         else
5595                 return true;
5596 }
5597
5598 #if defined(CONFIG_DRM_AMD_DC_DCN)
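/*
 * Parse the sink's DSC decoder capabilities from the cached DPCD caps.
 * Only DP and eDP sinks that are either native or behind a DP-to-HDMI
 * converter are considered; everything else reports DSC as unsupported.
 */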
5599 static void update_dsc_caps(struct amdgpu_dm_connector *aconnector,
5600                             struct dc_sink *sink, struct dc_stream_state *stream,
5601                             struct dsc_dec_dpcd_caps *dsc_caps)
5602 {
5603         stream->timing.flags.DSC = 0;
5604         dsc_caps->is_dsc_supported = false;
5605
5606         if (aconnector->dc_link && (sink->sink_signal == SIGNAL_TYPE_DISPLAY_PORT ||
5607             sink->sink_signal == SIGNAL_TYPE_EDP)) {
5608                 if (sink->link->dpcd_caps.dongle_type == DISPLAY_DONGLE_NONE ||
5609                         sink->link->dpcd_caps.dongle_type == DISPLAY_DONGLE_DP_HDMI_CONVERTER)
5610                         dc_dsc_parse_dsc_dpcd(aconnector->dc_link->ctx->dc,
5611                                 aconnector->dc_link->dpcd_caps.dsc_caps.dsc_basic_caps.raw,
5612                                 aconnector->dc_link->dpcd_caps.dsc_caps.dsc_branch_decoder_caps.raw,
5613                                 dsc_caps);
5614         }
5615 }
5616
5617
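/*
 * DSC target bpp values are carried in a x16 fixed-point format, so
 * 8 * 16 below represents 8.0 bits per pixel. If even the maximum-bpp
 * compressed stream fits within the link bandwidth, program that bpp
 * directly; otherwise let dc_dsc_compute_config() pick a configuration
 * that fits the available link bandwidth.
 */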
5618 static void apply_dsc_policy_for_edp(struct amdgpu_dm_connector *aconnector,
5619                                     struct dc_sink *sink, struct dc_stream_state *stream,
5620                                     struct dsc_dec_dpcd_caps *dsc_caps,
5621                                     uint32_t max_dsc_target_bpp_limit_override)
5622 {
5623         const struct dc_link_settings *verified_link_cap = NULL;
5624         uint32_t link_bw_in_kbps;
5625         uint32_t edp_min_bpp_x16, edp_max_bpp_x16;
5626         struct dc *dc = sink->ctx->dc;
5627         struct dc_dsc_bw_range bw_range = {0};
5628         struct dc_dsc_config dsc_cfg = {0};
5629
5630         verified_link_cap = dc_link_get_link_cap(stream->link);
5631         link_bw_in_kbps = dc_link_bandwidth_kbps(stream->link, verified_link_cap);
5632         edp_min_bpp_x16 = 8 * 16;
5633         edp_max_bpp_x16 = 8 * 16;
5634
5635         if (edp_max_bpp_x16 > dsc_caps->edp_max_bits_per_pixel)
5636                 edp_max_bpp_x16 = dsc_caps->edp_max_bits_per_pixel;
5637
5638         if (edp_max_bpp_x16 < edp_min_bpp_x16)
5639                 edp_min_bpp_x16 = edp_max_bpp_x16;
5640
5641         if (dc_dsc_compute_bandwidth_range(dc->res_pool->dscs[0],
5642                                 dc->debug.dsc_min_slice_height_override,
5643                                 edp_min_bpp_x16, edp_max_bpp_x16,
5644                                 dsc_caps,
5645                                 &stream->timing,
5646                                 &bw_range)) {
5647
5648                 if (bw_range.max_kbps < link_bw_in_kbps) {
5649                         if (dc_dsc_compute_config(dc->res_pool->dscs[0],
5650                                         dsc_caps,
5651                                         dc->debug.dsc_min_slice_height_override,
5652                                         max_dsc_target_bpp_limit_override,
5653                                         0,
5654                                         &stream->timing,
5655                                         &dsc_cfg)) {
5656                                 stream->timing.dsc_cfg = dsc_cfg;
5657                                 stream->timing.flags.DSC = 1;
5658                                 stream->timing.dsc_cfg.bits_per_pixel = edp_max_bpp_x16;
5659                         }
5660                         return;
5661                 }
5662         }
5663
5664         if (dc_dsc_compute_config(dc->res_pool->dscs[0],
5665                                 dsc_caps,
5666                                 dc->debug.dsc_min_slice_height_override,
5667                                 max_dsc_target_bpp_limit_override,
5668                                 link_bw_in_kbps,
5669                                 &stream->timing,
5670                                 &dsc_cfg)) {
5671                 stream->timing.dsc_cfg = dsc_cfg;
5672                 stream->timing.flags.DSC = 1;
5673         }
5674 }
5675
5676
5677 static void apply_dsc_policy_for_stream(struct amdgpu_dm_connector *aconnector,
5678                                         struct dc_sink *sink, struct dc_stream_state *stream,
5679                                         struct dsc_dec_dpcd_caps *dsc_caps)
5680 {
5681         struct drm_connector *drm_connector = &aconnector->base;
5682         uint32_t link_bandwidth_kbps;
5683         struct dc *dc = sink->ctx->dc;
5684         uint32_t max_supported_bw_in_kbps, timing_bw_in_kbps;
5685         uint32_t dsc_max_supported_bw_in_kbps;
5686         uint32_t max_dsc_target_bpp_limit_override =
5687                 drm_connector->display_info.max_dsc_bpp;
5688
5689         link_bandwidth_kbps = dc_link_bandwidth_kbps(aconnector->dc_link,
5690                                                         dc_link_get_link_cap(aconnector->dc_link));
5691
5692         /* Set DSC policy according to dsc_clock_en */
5693         dc_dsc_policy_set_enable_dsc_when_not_needed(
5694                 aconnector->dsc_settings.dsc_force_enable == DSC_CLK_FORCE_ENABLE);
5695
5696         if (aconnector->dc_link && sink->sink_signal == SIGNAL_TYPE_EDP &&
5697             !aconnector->dc_link->panel_config.dsc.disable_dsc_edp &&
5698             dc->caps.edp_dsc_support && aconnector->dsc_settings.dsc_force_enable != DSC_CLK_FORCE_DISABLE) {
5699
5700                 apply_dsc_policy_for_edp(aconnector, sink, stream, dsc_caps, max_dsc_target_bpp_limit_override);
5701
5702         } else if (aconnector->dc_link && sink->sink_signal == SIGNAL_TYPE_DISPLAY_PORT) {
5703                 if (sink->link->dpcd_caps.dongle_type == DISPLAY_DONGLE_NONE) {
5704                         if (dc_dsc_compute_config(aconnector->dc_link->ctx->dc->res_pool->dscs[0],
5705                                                 dsc_caps,
5706                                                 aconnector->dc_link->ctx->dc->debug.dsc_min_slice_height_override,
5707                                                 max_dsc_target_bpp_limit_override,
5708                                                 link_bandwidth_kbps,
5709                                                 &stream->timing,
5710                                                 &stream->timing.dsc_cfg)) {
5711                                 stream->timing.flags.DSC = 1;
5712                                 DRM_DEBUG_DRIVER("%s: [%s] DSC is selected from SST RX\n", __func__, drm_connector->name);
5713                         }
5714                 } else if (sink->link->dpcd_caps.dongle_type == DISPLAY_DONGLE_DP_HDMI_CONVERTER) {
5715                         timing_bw_in_kbps = dc_bandwidth_in_kbps_from_timing(&stream->timing);
5716                         max_supported_bw_in_kbps = link_bandwidth_kbps;
5717                         dsc_max_supported_bw_in_kbps = link_bandwidth_kbps;
5718
5719                         if (timing_bw_in_kbps > max_supported_bw_in_kbps &&
5720                                         max_supported_bw_in_kbps > 0 &&
5721                                         dsc_max_supported_bw_in_kbps > 0)
5722                                 if (dc_dsc_compute_config(aconnector->dc_link->ctx->dc->res_pool->dscs[0],
5723                                                 dsc_caps,
5724                                                 aconnector->dc_link->ctx->dc->debug.dsc_min_slice_height_override,
5725                                                 max_dsc_target_bpp_limit_override,
5726                                                 dsc_max_supported_bw_in_kbps,
5727                                                 &stream->timing,
5728                                                 &stream->timing.dsc_cfg)) {
5729                                         stream->timing.flags.DSC = 1;
5730                                         DRM_DEBUG_DRIVER("%s: [%s] DSC is selected from DP-HDMI PCON\n",
5731                                                                          __func__, drm_connector->name);
5732                                 }
5733                 }
5734         }
5735
5736         /* Overwrite the stream flag if DSC is enabled through debugfs */
5737         if (aconnector->dsc_settings.dsc_force_enable == DSC_CLK_FORCE_ENABLE)
5738                 stream->timing.flags.DSC = 1;
5739
5740         if (stream->timing.flags.DSC && aconnector->dsc_settings.dsc_num_slices_h)
5741                 stream->timing.dsc_cfg.num_slices_h = aconnector->dsc_settings.dsc_num_slices_h;
5742
5743         if (stream->timing.flags.DSC && aconnector->dsc_settings.dsc_num_slices_v)
5744                 stream->timing.dsc_cfg.num_slices_v = aconnector->dsc_settings.dsc_num_slices_v;
5745
5746         if (stream->timing.flags.DSC && aconnector->dsc_settings.dsc_bits_per_pixel)
5747                 stream->timing.dsc_cfg.bits_per_pixel = aconnector->dsc_settings.dsc_bits_per_pixel;
5748 }
5749 #endif /* CONFIG_DRM_AMD_DC_DCN */
5750
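/*
 * Build a dc_stream_state for the connector's sink, or for a fake virtual
 * sink when nothing is connected. This applies either FreeSync video
 * retiming or the native-mode CRTC timing, the SST DSC policy, scaling,
 * audio info, and the PSR/VSC SDP setup for the stream.
 */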
5751 static struct dc_stream_state *
5752 create_stream_for_sink(struct amdgpu_dm_connector *aconnector,
5753                        const struct drm_display_mode *drm_mode,
5754                        const struct dm_connector_state *dm_state,
5755                        const struct dc_stream_state *old_stream,
5756                        int requested_bpc)
5757 {
5758         struct drm_display_mode *preferred_mode = NULL;
5759         struct drm_connector *drm_connector;
5760         const struct drm_connector_state *con_state =
5761                 dm_state ? &dm_state->base : NULL;
5762         struct dc_stream_state *stream = NULL;
5763         struct drm_display_mode mode;
5764         struct drm_display_mode saved_mode;
5765         struct drm_display_mode *freesync_mode = NULL;
5766         bool native_mode_found = false;
5767         bool recalculate_timing = false;
5768         bool scale = dm_state ? (dm_state->scaling != RMX_OFF) : false;
5769         int mode_refresh;
5770         int preferred_refresh = 0;
5771         enum color_transfer_func tf = TRANSFER_FUNC_UNKNOWN;
5772 #if defined(CONFIG_DRM_AMD_DC_DCN)
5773         struct dsc_dec_dpcd_caps dsc_caps;
5774 #endif
5775
5776         struct dc_sink *sink = NULL;
5777
5778         drm_mode_init(&mode, drm_mode);
5779         memset(&saved_mode, 0, sizeof(saved_mode));
5780
5781         if (aconnector == NULL) {
5782                 DRM_ERROR("aconnector is NULL!\n");
5783                 return stream;
5784         }
5785
5786         drm_connector = &aconnector->base;
5787
5788         if (!aconnector->dc_sink) {
5789                 sink = create_fake_sink(aconnector);
5790                 if (!sink)
5791                         return stream;
5792         } else {
5793                 sink = aconnector->dc_sink;
5794                 dc_sink_retain(sink);
5795         }
5796
5797         stream = dc_create_stream_for_sink(sink);
5798
5799         if (stream == NULL) {
5800                 DRM_ERROR("Failed to create stream for sink!\n");
5801                 goto finish;
5802         }
5803
5804         stream->dm_stream_context = aconnector;
5805
5806         stream->timing.flags.LTE_340MCSC_SCRAMBLE =
5807                 drm_connector->display_info.hdmi.scdc.scrambling.low_rates;
5808
5809         list_for_each_entry(preferred_mode, &aconnector->base.modes, head) {
5810                 /* Search for preferred mode */
5811                 if (preferred_mode->type & DRM_MODE_TYPE_PREFERRED) {
5812                         native_mode_found = true;
5813                         break;
5814                 }
5815         }
5816         if (!native_mode_found)
5817                 preferred_mode = list_first_entry_or_null(
5818                                 &aconnector->base.modes,
5819                                 struct drm_display_mode,
5820                                 head);
5821
5822         mode_refresh = drm_mode_vrefresh(&mode);
5823
5824         if (preferred_mode == NULL) {
5825                 /*
5826                  * This may not be an error: the use case is when we have no
5827                  * usermode calls to reset and set the mode upon hotplug. In this
5828                  * case, we call set mode ourselves to restore the previous mode,
5829                  * and the mode list may not have been filled in yet.
5830                  */
5831                 DRM_DEBUG_DRIVER("No preferred mode found\n");
5832         } else {
5833                 recalculate_timing = amdgpu_freesync_vid_mode &&
5834                                  is_freesync_video_mode(&mode, aconnector);
5835                 if (recalculate_timing) {
5836                         freesync_mode = get_highest_refresh_rate_mode(aconnector, false);
5837                         drm_mode_copy(&saved_mode, &mode);
5838                         drm_mode_copy(&mode, freesync_mode);
5839                 } else {
5840                         decide_crtc_timing_for_drm_display_mode(
5841                                         &mode, preferred_mode, scale);
5842
5843                         preferred_refresh = drm_mode_vrefresh(preferred_mode);
5844                 }
5845         }
5846
5847         if (recalculate_timing)
5848                 drm_mode_set_crtcinfo(&saved_mode, 0);
5849         else if (!dm_state)
5850                 drm_mode_set_crtcinfo(&mode, 0);
5851
5852         /*
5853          * If scaling is enabled and the refresh rate didn't change,
5854          * we copy the vic and polarities of the old timings.
5855          */
5856         if (!scale || mode_refresh != preferred_refresh)
5857                 fill_stream_properties_from_drm_display_mode(
5858                         stream, &mode, &aconnector->base, con_state, NULL,
5859                         requested_bpc);
5860         else
5861                 fill_stream_properties_from_drm_display_mode(
5862                         stream, &mode, &aconnector->base, con_state, old_stream,
5863                         requested_bpc);
5864
5865 #if defined(CONFIG_DRM_AMD_DC_DCN)
5866         /* SST DSC determination policy */
5867         update_dsc_caps(aconnector, sink, stream, &dsc_caps);
5868         if (aconnector->dsc_settings.dsc_force_enable != DSC_CLK_FORCE_DISABLE && dsc_caps.is_dsc_supported)
5869                 apply_dsc_policy_for_stream(aconnector, sink, stream, &dsc_caps);
5870 #endif
5871
5872         update_stream_scaling_settings(&mode, dm_state, stream);
5873
5874         fill_audio_info(
5875                 &stream->audio_info,
5876                 drm_connector,
5877                 sink);
5878
5879         update_stream_signal(stream, sink);
5880
5881         if (stream->signal == SIGNAL_TYPE_HDMI_TYPE_A)
5882                 mod_build_hf_vsif_infopacket(stream, &stream->vsp_infopacket);
5883
5884         if (stream->link->psr_settings.psr_feature_enabled) {
5885                 /*
5886                  * Decide whether the stream supports VSC SDP colorimetry
5887                  * before building the VSC info packet.
5888                  */
5889                 stream->use_vsc_sdp_for_colorimetry = false;
5890                 if (aconnector->dc_sink->sink_signal == SIGNAL_TYPE_DISPLAY_PORT_MST) {
5891                         stream->use_vsc_sdp_for_colorimetry =
5892                                 aconnector->dc_sink->is_vsc_sdp_colorimetry_supported;
5893                 } else {
5894                         if (stream->link->dpcd_caps.dprx_feature.bits.VSC_SDP_COLORIMETRY_SUPPORTED)
5895                                 stream->use_vsc_sdp_for_colorimetry = true;
5896                 }
5897                 if (stream->out_transfer_func->tf == TRANSFER_FUNCTION_GAMMA22)
5898                         tf = TRANSFER_FUNC_GAMMA_22;
5899                 mod_build_vsc_infopacket(stream, &stream->vsc_infopacket, stream->output_color_space, tf);
5900                 aconnector->psr_skip_count = AMDGPU_DM_PSR_ENTRY_DELAY;
5901
5902         }
5903 finish:
5904         dc_sink_release(sink);
5905
5906         return stream;
5907 }
5908
5909 static enum drm_connector_status
5910 amdgpu_dm_connector_detect(struct drm_connector *connector, bool force)
5911 {
5912         bool connected;
5913         struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
5914
5915         /*
5916          * Notes:
5917          * 1. This interface is NOT called in context of HPD irq.
5918          * 2. This interface *is called* in the context of a user-mode ioctl,
5919          * which makes it a bad place for *any* MST-related activity.
5920          */
5921
5922         if (aconnector->base.force == DRM_FORCE_UNSPECIFIED &&
5923             !aconnector->fake_enable)
5924                 connected = (aconnector->dc_sink != NULL);
5925         else
5926                 connected = (aconnector->base.force == DRM_FORCE_ON ||
5927                                 aconnector->base.force == DRM_FORCE_ON_DIGITAL);
5928
5929         update_subconnector_property(aconnector);
5930
5931         return (connected ? connector_status_connected :
5932                         connector_status_disconnected);
5933 }
5934
5935 int amdgpu_dm_connector_atomic_set_property(struct drm_connector *connector,
5936                                             struct drm_connector_state *connector_state,
5937                                             struct drm_property *property,
5938                                             uint64_t val)
5939 {
5940         struct drm_device *dev = connector->dev;
5941         struct amdgpu_device *adev = drm_to_adev(dev);
5942         struct dm_connector_state *dm_old_state =
5943                 to_dm_connector_state(connector->state);
5944         struct dm_connector_state *dm_new_state =
5945                 to_dm_connector_state(connector_state);
5946
5947         int ret = -EINVAL;
5948
5949         if (property == dev->mode_config.scaling_mode_property) {
5950                 enum amdgpu_rmx_type rmx_type;
5951
5952                 switch (val) {
5953                 case DRM_MODE_SCALE_CENTER:
5954                         rmx_type = RMX_CENTER;
5955                         break;
5956                 case DRM_MODE_SCALE_ASPECT:
5957                         rmx_type = RMX_ASPECT;
5958                         break;
5959                 case DRM_MODE_SCALE_FULLSCREEN:
5960                         rmx_type = RMX_FULL;
5961                         break;
5962                 case DRM_MODE_SCALE_NONE:
5963                 default:
5964                         rmx_type = RMX_OFF;
5965                         break;
5966                 }
5967
5968                 if (dm_old_state->scaling == rmx_type)
5969                         return 0;
5970
5971                 dm_new_state->scaling = rmx_type;
5972                 ret = 0;
5973         } else if (property == adev->mode_info.underscan_hborder_property) {
5974                 dm_new_state->underscan_hborder = val;
5975                 ret = 0;
5976         } else if (property == adev->mode_info.underscan_vborder_property) {
5977                 dm_new_state->underscan_vborder = val;
5978                 ret = 0;
5979         } else if (property == adev->mode_info.underscan_property) {
5980                 dm_new_state->underscan_enable = val;
5981                 ret = 0;
5982         } else if (property == adev->mode_info.abm_level_property) {
5983                 dm_new_state->abm_level = val;
5984                 ret = 0;
5985         }
5986
5987         return ret;
5988 }
5989
5990 int amdgpu_dm_connector_atomic_get_property(struct drm_connector *connector,
5991                                             const struct drm_connector_state *state,
5992                                             struct drm_property *property,
5993                                             uint64_t *val)
5994 {
5995         struct drm_device *dev = connector->dev;
5996         struct amdgpu_device *adev = drm_to_adev(dev);
5997         struct dm_connector_state *dm_state =
5998                 to_dm_connector_state(state);
5999         int ret = -EINVAL;
6000
6001         if (property == dev->mode_config.scaling_mode_property) {
6002                 switch (dm_state->scaling) {
6003                 case RMX_CENTER:
6004                         *val = DRM_MODE_SCALE_CENTER;
6005                         break;
6006                 case RMX_ASPECT:
6007                         *val = DRM_MODE_SCALE_ASPECT;
6008                         break;
6009                 case RMX_FULL:
6010                         *val = DRM_MODE_SCALE_FULLSCREEN;
6011                         break;
6012                 case RMX_OFF:
6013                 default:
6014                         *val = DRM_MODE_SCALE_NONE;
6015                         break;
6016                 }
6017                 ret = 0;
6018         } else if (property == adev->mode_info.underscan_hborder_property) {
6019                 *val = dm_state->underscan_hborder;
6020                 ret = 0;
6021         } else if (property == adev->mode_info.underscan_vborder_property) {
6022                 *val = dm_state->underscan_vborder;
6023                 ret = 0;
6024         } else if (property == adev->mode_info.underscan_property) {
6025                 *val = dm_state->underscan_enable;
6026                 ret = 0;
6027         } else if (property == adev->mode_info.abm_level_property) {
6028                 *val = dm_state->abm_level;
6029                 ret = 0;
6030         }
6031
6032         return ret;
6033 }
6034
6035 static void amdgpu_dm_connector_unregister(struct drm_connector *connector)
6036 {
6037         struct amdgpu_dm_connector *amdgpu_dm_connector = to_amdgpu_dm_connector(connector);
6038
6039         drm_dp_aux_unregister(&amdgpu_dm_connector->dm_dp_aux.aux);
6040 }
6041
6042 static void amdgpu_dm_connector_destroy(struct drm_connector *connector)
6043 {
6044         struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
6045         const struct dc_link *link = aconnector->dc_link;
6046         struct amdgpu_device *adev = drm_to_adev(connector->dev);
6047         struct amdgpu_display_manager *dm = &adev->dm;
6048         int i;
6049
6050         /*
6051          * Only call this if mst_mgr was initialized before, since that is
6052          * not done for all connector types.
6053          */
6054         if (aconnector->mst_mgr.dev)
6055                 drm_dp_mst_topology_mgr_destroy(&aconnector->mst_mgr);
6056
6057 #if defined(CONFIG_BACKLIGHT_CLASS_DEVICE) ||\
6058         defined(CONFIG_BACKLIGHT_CLASS_DEVICE_MODULE)
6059         for (i = 0; i < dm->num_of_edps; i++) {
6060                 if ((link == dm->backlight_link[i]) && dm->backlight_dev[i]) {
6061                         backlight_device_unregister(dm->backlight_dev[i]);
6062                         dm->backlight_dev[i] = NULL;
6063                 }
6064         }
6065 #endif
6066
6067         if (aconnector->dc_em_sink)
6068                 dc_sink_release(aconnector->dc_em_sink);
6069         aconnector->dc_em_sink = NULL;
6070         if (aconnector->dc_sink)
6071                 dc_sink_release(aconnector->dc_sink);
6072         aconnector->dc_sink = NULL;
6073
6074         drm_dp_cec_unregister_connector(&aconnector->dm_dp_aux.aux);
6075         drm_connector_unregister(connector);
6076         drm_connector_cleanup(connector);
6077         if (aconnector->i2c) {
6078                 i2c_del_adapter(&aconnector->i2c->base);
6079                 kfree(aconnector->i2c);
6080         }
6081         kfree(aconnector->dm_dp_aux.aux.name);
6082
6083         kfree(connector);
6084 }
6085
6086 void amdgpu_dm_connector_funcs_reset(struct drm_connector *connector)
6087 {
6088         struct dm_connector_state *state =
6089                 to_dm_connector_state(connector->state);
6090
6091         if (connector->state)
6092                 __drm_atomic_helper_connector_destroy_state(connector->state);
6093
6094         kfree(state);
6095
6096         state = kzalloc(sizeof(*state), GFP_KERNEL);
6097
6098         if (state) {
6099                 state->scaling = RMX_OFF;
6100                 state->underscan_enable = false;
6101                 state->underscan_hborder = 0;
6102                 state->underscan_vborder = 0;
6103                 state->base.max_requested_bpc = 8;
6104                 state->vcpi_slots = 0;
6105                 state->pbn = 0;
6106
6107                 if (connector->connector_type == DRM_MODE_CONNECTOR_eDP)
6108                         state->abm_level = amdgpu_dm_abm_level;
6109
6110                 __drm_atomic_helper_connector_reset(connector, &state->base);
6111         }
6112 }
6113
6114 struct drm_connector_state *
6115 amdgpu_dm_connector_atomic_duplicate_state(struct drm_connector *connector)
6116 {
6117         struct dm_connector_state *state =
6118                 to_dm_connector_state(connector->state);
6119
6120         struct dm_connector_state *new_state =
6121                         kmemdup(state, sizeof(*state), GFP_KERNEL);
6122
6123         if (!new_state)
6124                 return NULL;
6125
6126         __drm_atomic_helper_connector_duplicate_state(connector, &new_state->base);
6127
6128         new_state->freesync_capable = state->freesync_capable;
6129         new_state->abm_level = state->abm_level;
6130         new_state->scaling = state->scaling;
6131         new_state->underscan_enable = state->underscan_enable;
6132         new_state->underscan_hborder = state->underscan_hborder;
6133         new_state->underscan_vborder = state->underscan_vborder;
6134         new_state->vcpi_slots = state->vcpi_slots;
6135         new_state->pbn = state->pbn;
6136         return &new_state->base;
6137 }
6138
6139 static int
6140 amdgpu_dm_connector_late_register(struct drm_connector *connector)
6141 {
6142         struct amdgpu_dm_connector *amdgpu_dm_connector =
6143                 to_amdgpu_dm_connector(connector);
6144         int r;
6145
6146         if ((connector->connector_type == DRM_MODE_CONNECTOR_DisplayPort) ||
6147             (connector->connector_type == DRM_MODE_CONNECTOR_eDP)) {
6148                 amdgpu_dm_connector->dm_dp_aux.aux.dev = connector->kdev;
6149                 r = drm_dp_aux_register(&amdgpu_dm_connector->dm_dp_aux.aux);
6150                 if (r)
6151                         return r;
6152         }
6153
6154 #if defined(CONFIG_DEBUG_FS)
6155         connector_debugfs_init(amdgpu_dm_connector);
6156 #endif
6157
6158         return 0;
6159 }
6160
6161 static const struct drm_connector_funcs amdgpu_dm_connector_funcs = {
6162         .reset = amdgpu_dm_connector_funcs_reset,
6163         .detect = amdgpu_dm_connector_detect,
6164         .fill_modes = drm_helper_probe_single_connector_modes,
6165         .destroy = amdgpu_dm_connector_destroy,
6166         .atomic_duplicate_state = amdgpu_dm_connector_atomic_duplicate_state,
6167         .atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
6168         .atomic_set_property = amdgpu_dm_connector_atomic_set_property,
6169         .atomic_get_property = amdgpu_dm_connector_atomic_get_property,
6170         .late_register = amdgpu_dm_connector_late_register,
6171         .early_unregister = amdgpu_dm_connector_unregister
6172 };
6173
6174 static int get_modes(struct drm_connector *connector)
6175 {
6176         return amdgpu_dm_connector_get_modes(connector);
6177 }
6178
6179 static void create_eml_sink(struct amdgpu_dm_connector *aconnector)
6180 {
6181         struct dc_sink_init_data init_params = {
6182                         .link = aconnector->dc_link,
6183                         .sink_signal = SIGNAL_TYPE_VIRTUAL
6184         };
6185         struct edid *edid;
6186
6187         if (!aconnector->base.edid_blob_ptr) {
6188                 DRM_ERROR("No EDID firmware found on connector: %s, forcing to OFF!\n",
6189                                 aconnector->base.name);
6190
6191                 aconnector->base.force = DRM_FORCE_OFF;
6192                 return;
6193         }
6194
6195         edid = (struct edid *) aconnector->base.edid_blob_ptr->data;
6196
6197         aconnector->edid = edid;
6198
6199         aconnector->dc_em_sink = dc_link_add_remote_sink(
6200                 aconnector->dc_link,
6201                 (uint8_t *)edid,
6202                 (edid->extensions + 1) * EDID_LENGTH,
6203                 &init_params);
6204
6205         if (aconnector->base.force == DRM_FORCE_ON) {
6206                 aconnector->dc_sink = aconnector->dc_link->local_sink ?
6207                 aconnector->dc_link->local_sink :
6208                 aconnector->dc_em_sink;
6209                 dc_sink_retain(aconnector->dc_sink);
6210         }
6211 }
6212
6213 static void handle_edid_mgmt(struct amdgpu_dm_connector *aconnector)
6214 {
6215         struct dc_link *link = (struct dc_link *)aconnector->dc_link;
6216
6217         /*
6218          * In case of a headless boot with force-on for a DP managed connector,
6219          * those settings have to be != 0 to get an initial modeset.
6220          */
6221         if (link->connector_signal == SIGNAL_TYPE_DISPLAY_PORT) {
6222                 link->verified_link_cap.lane_count = LANE_COUNT_FOUR;
6223                 link->verified_link_cap.link_rate = LINK_RATE_HIGH2;
6224         }
6225
6226         create_eml_sink(aconnector);
6227 }
6228
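/*
 * Validate the stream together with a minimal, full-stream-sized linear
 * ARGB8888 plane in a throwaway dc_state, so that validation also catches
 * failures that only show up once a plane is attached and the global
 * state is checked.
 */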
6229 static enum dc_status dm_validate_stream_and_context(struct dc *dc,
6230                                                 struct dc_stream_state *stream)
6231 {
6232         enum dc_status dc_result = DC_ERROR_UNEXPECTED;
6233         struct dc_plane_state *dc_plane_state = NULL;
6234         struct dc_state *dc_state = NULL;
6235
6236         if (!stream)
6237                 goto cleanup;
6238
6239         dc_plane_state = dc_create_plane_state(dc);
6240         if (!dc_plane_state)
6241                 goto cleanup;
6242
6243         dc_state = dc_create_state(dc);
6244         if (!dc_state)
6245                 goto cleanup;
6246
6247         /* populate stream to plane */
6248         dc_plane_state->src_rect.height  = stream->src.height;
6249         dc_plane_state->src_rect.width   = stream->src.width;
6250         dc_plane_state->dst_rect.height  = stream->src.height;
6251         dc_plane_state->dst_rect.width   = stream->src.width;
6252         dc_plane_state->clip_rect.height = stream->src.height;
6253         dc_plane_state->clip_rect.width  = stream->src.width;
6254         dc_plane_state->plane_size.surface_pitch = ((stream->src.width + 255) / 256) * 256;
6255         dc_plane_state->plane_size.surface_size.height = stream->src.height;
6256         dc_plane_state->plane_size.surface_size.width  = stream->src.width;
6257         dc_plane_state->plane_size.chroma_size.height  = stream->src.height;
6258         dc_plane_state->plane_size.chroma_size.width   = stream->src.width;
6259         dc_plane_state->tiling_info.gfx9.swizzle = DC_SW_UNKNOWN;
6260         dc_plane_state->format = SURFACE_PIXEL_FORMAT_GRPH_ARGB8888;
6262         dc_plane_state->rotation = ROTATION_ANGLE_0;
6263         dc_plane_state->is_tiling_rotated = false;
6264         dc_plane_state->tiling_info.gfx8.array_mode = DC_ARRAY_LINEAR_GENERAL;
6265
6266         dc_result = dc_validate_stream(dc, stream);
6267         if (dc_result == DC_OK)
6268                 dc_result = dc_validate_plane(dc, dc_plane_state);
6269
6270         if (dc_result == DC_OK)
6271                 dc_result = dc_add_stream_to_ctx(dc, dc_state, stream);
6272
6273         if (dc_result == DC_OK && !dc_add_plane_to_context(
6274                                                 dc,
6275                                                 stream,
6276                                                 dc_plane_state,
6277                                                 dc_state))
6278                 dc_result = DC_FAIL_ATTACH_SURFACES;
6279
6280         if (dc_result == DC_OK)
6281                 dc_result = dc_validate_global_state(dc, dc_state, true);
6282
6283 cleanup:
6284         if (dc_state)
6285                 dc_release_state(dc_state);
6286
6287         if (dc_plane_state)
6288                 dc_plane_state_release(dc_plane_state);
6289
6290         return dc_result;
6291 }
6292
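/*
 * Create a stream for the sink and validate it against DC. On a validation
 * failure, the requested color depth is lowered in steps of 2 bpc (down to
 * a minimum of 6) and the stream is rebuilt; if encoder validation still
 * fails, one more attempt is made forcing YCbCr420 output.
 */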
6293 struct dc_stream_state *
6294 create_validate_stream_for_sink(struct amdgpu_dm_connector *aconnector,
6295                                 const struct drm_display_mode *drm_mode,
6296                                 const struct dm_connector_state *dm_state,
6297                                 const struct dc_stream_state *old_stream)
6298 {
6299         struct drm_connector *connector = &aconnector->base;
6300         struct amdgpu_device *adev = drm_to_adev(connector->dev);
6301         struct dc_stream_state *stream;
6302         const struct drm_connector_state *drm_state = dm_state ? &dm_state->base : NULL;
6303         int requested_bpc = drm_state ? drm_state->max_requested_bpc : 8;
6304         enum dc_status dc_result = DC_OK;
6305
6306         do {
6307                 stream = create_stream_for_sink(aconnector, drm_mode,
6308                                                 dm_state, old_stream,
6309                                                 requested_bpc);
6310                 if (stream == NULL) {
6311                         DRM_ERROR("Failed to create stream for sink!\n");
6312                         break;
6313                 }
6314
6315                 dc_result = dc_validate_stream(adev->dm.dc, stream);
6316                 if (dc_result == DC_OK && stream->signal == SIGNAL_TYPE_DISPLAY_PORT_MST)
6317                         dc_result = dm_dp_mst_is_port_support_mode(aconnector, stream);
6318
6319                 if (dc_result == DC_OK)
6320                         dc_result = dm_validate_stream_and_context(adev->dm.dc, stream);
6321
6322                 if (dc_result != DC_OK) {
6323                         DRM_DEBUG_KMS("Mode %dx%d (clk %d) failed DC validation with error %d (%s)\n",
6324                                       drm_mode->hdisplay,
6325                                       drm_mode->vdisplay,
6326                                       drm_mode->clock,
6327                                       dc_result,
6328                                       dc_status_to_str(dc_result));
6329
6330                         dc_stream_release(stream);
6331                         stream = NULL;
6332                         requested_bpc -= 2; /* lower bpc to retry validation */
6333                 }
6334
6335         } while (stream == NULL && requested_bpc >= 6);
6336
6337         if (dc_result == DC_FAIL_ENC_VALIDATE && !aconnector->force_yuv420_output) {
6338                 DRM_DEBUG_KMS("Retry forcing YCbCr420 encoding\n");
6339
6340                 aconnector->force_yuv420_output = true;
6341                 stream = create_validate_stream_for_sink(aconnector, drm_mode,
6342                                                 dm_state, old_stream);
6343                 aconnector->force_yuv420_output = false;
6344         }
6345
6346         return stream;
6347 }
6348
6349 enum drm_mode_status amdgpu_dm_connector_mode_valid(struct drm_connector *connector,
6350                                    struct drm_display_mode *mode)
6351 {
6352         int result = MODE_ERROR;
6353         struct dc_sink *dc_sink;
6354         /* TODO: Unhardcode stream count */
6355         struct dc_stream_state *stream;
6356         struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
6357
6358         if ((mode->flags & DRM_MODE_FLAG_INTERLACE) ||
6359                         (mode->flags & DRM_MODE_FLAG_DBLSCAN))
6360                 return result;
6361
6362         /*
6363          * Only run this the first time mode_valid is called, to initialize
6364          * EDID management.
6365          */
6366         if (aconnector->base.force != DRM_FORCE_UNSPECIFIED &&
6367                 !aconnector->dc_em_sink)
6368                 handle_edid_mgmt(aconnector);
6369
6370         dc_sink = to_amdgpu_dm_connector(connector)->dc_sink;
6371
6372         if (dc_sink == NULL && aconnector->base.force != DRM_FORCE_ON_DIGITAL &&
6373                                 aconnector->base.force != DRM_FORCE_ON) {
6374                 DRM_ERROR("dc_sink is NULL!\n");
6375                 goto fail;
6376         }
6377
6378         stream = create_validate_stream_for_sink(aconnector, mode, NULL, NULL);
6379         if (stream) {
6380                 dc_stream_release(stream);
6381                 result = MODE_OK;
6382         }
6383
6384 fail:
6385         /* TODO: error handling */
6386         return result;
6387 }
6388
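/*
 * Pack the connector's HDR output metadata into a DC info packet. The HDMI
 * Dynamic Range and Mastering infoframe is a fixed 30 bytes: a 4 byte
 * header (type 0x87, version 0x01, length 0x1A, checksum) followed by
 * 26 bytes of static metadata. For DP/eDP the same 26 byte payload is
 * carried in an SDP, so the header layout differs and the checksum byte
 * is dropped.
 */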
6389 static int fill_hdr_info_packet(const struct drm_connector_state *state,
6390                                 struct dc_info_packet *out)
6391 {
6392         struct hdmi_drm_infoframe frame;
6393         unsigned char buf[30]; /* 26 + 4 */
6394         ssize_t len;
6395         int ret, i;
6396
6397         memset(out, 0, sizeof(*out));
6398
6399         if (!state->hdr_output_metadata)
6400                 return 0;
6401
6402         ret = drm_hdmi_infoframe_set_hdr_metadata(&frame, state);
6403         if (ret)
6404                 return ret;
6405
6406         len = hdmi_drm_infoframe_pack_only(&frame, buf, sizeof(buf));
6407         if (len < 0)
6408                 return (int)len;
6409
6410         /* Static metadata is a fixed 26 bytes + 4 byte header. */
6411         if (len != 30)
6412                 return -EINVAL;
6413
6414         /* Prepare the infopacket for DC. */
6415         switch (state->connector->connector_type) {
6416         case DRM_MODE_CONNECTOR_HDMIA:
6417                 out->hb0 = 0x87; /* type */
6418                 out->hb1 = 0x01; /* version */
6419                 out->hb2 = 0x1A; /* length */
6420                 out->sb[0] = buf[3]; /* checksum */
6421                 i = 1;
6422                 break;
6423
6424         case DRM_MODE_CONNECTOR_DisplayPort:
6425         case DRM_MODE_CONNECTOR_eDP:
6426                 out->hb0 = 0x00; /* sdp id, zero */
6427                 out->hb1 = 0x87; /* type */
6428                 out->hb2 = 0x1D; /* payload len - 1 */
6429                 out->hb3 = (0x13 << 2); /* sdp version */
6430                 out->sb[0] = 0x01; /* version */
6431                 out->sb[1] = 0x1A; /* length */
6432                 i = 2;
6433                 break;
6434
6435         default:
6436                 return -EINVAL;
6437         }
6438
6439         memcpy(&out->sb[i], &buf[4], 26);
6440         out->valid = true;
6441
6442         print_hex_dump(KERN_DEBUG, "HDR SB:", DUMP_PREFIX_NONE, 16, 1, out->sb,
6443                        sizeof(out->sb), false);
6444
6445         return 0;
6446 }
6447
6448 static int
6449 amdgpu_dm_connector_atomic_check(struct drm_connector *conn,
6450                                  struct drm_atomic_state *state)
6451 {
6452         struct drm_connector_state *new_con_state =
6453                 drm_atomic_get_new_connector_state(state, conn);
6454         struct drm_connector_state *old_con_state =
6455                 drm_atomic_get_old_connector_state(state, conn);
6456         struct drm_crtc *crtc = new_con_state->crtc;
6457         struct drm_crtc_state *new_crtc_state;
6458         struct amdgpu_dm_connector *aconn = to_amdgpu_dm_connector(conn);
6459         int ret;
6460
6461         trace_amdgpu_dm_connector_atomic_check(new_con_state);
6462
6463         if (conn->connector_type == DRM_MODE_CONNECTOR_DisplayPort) {
6464                 ret = drm_dp_mst_root_conn_atomic_check(new_con_state, &aconn->mst_mgr);
6465                 if (ret < 0)
6466                         return ret;
6467         }
6468
6469         if (!crtc)
6470                 return 0;
6471
6472         if (!drm_connector_atomic_hdr_metadata_equal(old_con_state, new_con_state)) {
6473                 struct dc_info_packet hdr_infopacket;
6474
6475                 ret = fill_hdr_info_packet(new_con_state, &hdr_infopacket);
6476                 if (ret)
6477                         return ret;
6478
6479                 new_crtc_state = drm_atomic_get_crtc_state(state, crtc);
6480                 if (IS_ERR(new_crtc_state))
6481                         return PTR_ERR(new_crtc_state);
6482
6483                 /*
6484                  * DC considers the stream backends changed if the
6485                  * static metadata changes. Forcing the modeset also
6486                  * gives a simple way for userspace to switch from
6487                  * 8bpc to 10bpc when setting the metadata to enter
6488                  * or exit HDR.
6489                  *
6490                  * Changing the static metadata after it's been
6491                  * set is permissible, however. So only force a
6492                  * modeset if we're entering or exiting HDR.
6493                  */
6494                 new_crtc_state->mode_changed =
6495                         !old_con_state->hdr_output_metadata ||
6496                         !new_con_state->hdr_output_metadata;
6497         }
6498
6499         return 0;
6500 }
6501
6502 static const struct drm_connector_helper_funcs
6503 amdgpu_dm_connector_helper_funcs = {
6504         /*
6505          * If a second, bigger display is hotplugged in fbcon mode, its bigger
6506          * resolution modes will be filtered out by drm_mode_validate_size() and
6507          * end up missing after the user starts lightdm. So we need to rebuild
6508          * the mode list in the get_modes callback, not just return the mode count.
6509          */
6510         .get_modes = get_modes,
6511         .mode_valid = amdgpu_dm_connector_mode_valid,
6512         .atomic_check = amdgpu_dm_connector_atomic_check,
6513 };
6514
6515 static void dm_encoder_helper_disable(struct drm_encoder *encoder)
6516 {
6517
6518 }
6519
6520 int convert_dc_color_depth_into_bpc(enum dc_color_depth display_color_depth)
6521 {
6522         switch (display_color_depth) {
6523         case COLOR_DEPTH_666:
6524                 return 6;
6525         case COLOR_DEPTH_888:
6526                 return 8;
6527         case COLOR_DEPTH_101010:
6528                 return 10;
6529         case COLOR_DEPTH_121212:
6530                 return 12;
6531         case COLOR_DEPTH_141414:
6532                 return 14;
6533         case COLOR_DEPTH_161616:
6534                 return 16;
6535         default:
6536                 break;
6537         }
6538         return 0;
6539 }
6540
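/*
 * For MST connectors, derive the stream bandwidth from the adjusted mode
 * and the negotiated color depth (bpp = 3 * bpc), convert it into a payload
 * bandwidth number (PBN), and reserve the corresponding number of MST time
 * slots in the atomic state.
 */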
6541 static int dm_encoder_helper_atomic_check(struct drm_encoder *encoder,
6542                                           struct drm_crtc_state *crtc_state,
6543                                           struct drm_connector_state *conn_state)
6544 {
6545         struct drm_atomic_state *state = crtc_state->state;
6546         struct drm_connector *connector = conn_state->connector;
6547         struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
6548         struct dm_connector_state *dm_new_connector_state = to_dm_connector_state(conn_state);
6549         const struct drm_display_mode *adjusted_mode = &crtc_state->adjusted_mode;
6550         struct drm_dp_mst_topology_mgr *mst_mgr;
6551         struct drm_dp_mst_port *mst_port;
6552         struct drm_dp_mst_topology_state *mst_state;
6553         enum dc_color_depth color_depth;
6554         int clock, bpp = 0;
6555         bool is_y420 = false;
6556
6557         if (!aconnector->port || !aconnector->dc_sink)
6558                 return 0;
6559
6560         mst_port = aconnector->port;
6561         mst_mgr = &aconnector->mst_port->mst_mgr;
6562
6563         if (!crtc_state->connectors_changed && !crtc_state->mode_changed)
6564                 return 0;
6565
6566         mst_state = drm_atomic_get_mst_topology_state(state, mst_mgr);
6567         if (IS_ERR(mst_state))
6568                 return PTR_ERR(mst_state);
6569
6570         if (!mst_state->pbn_div)
6571                 mst_state->pbn_div = dm_mst_get_pbn_divider(aconnector->mst_port->dc_link);
6572
6573         if (!state->duplicated) {
6574                 int max_bpc = conn_state->max_requested_bpc;
6575                 is_y420 = drm_mode_is_420_also(&connector->display_info, adjusted_mode) &&
6576                           aconnector->force_yuv420_output;
6577                 color_depth = convert_color_depth_from_display_info(connector,
6578                                                                     is_y420,
6579                                                                     max_bpc);
6580                 bpp = convert_dc_color_depth_into_bpc(color_depth) * 3;
6581                 clock = adjusted_mode->clock;
6582                 dm_new_connector_state->pbn = drm_dp_calc_pbn_mode(clock, bpp, false);
6583         }
6584
6585         dm_new_connector_state->vcpi_slots =
6586                 drm_dp_atomic_find_time_slots(state, mst_mgr, mst_port,
6587                                               dm_new_connector_state->pbn);
6588         if (dm_new_connector_state->vcpi_slots < 0) {
6589                 DRM_DEBUG_ATOMIC("failed finding vcpi slots: %d\n", (int)dm_new_connector_state->vcpi_slots);
6590                 return dm_new_connector_state->vcpi_slots;
6591         }
6592         return 0;
6593 }
6594
6595 const struct drm_encoder_helper_funcs amdgpu_dm_encoder_helper_funcs = {
6596         .disable = dm_encoder_helper_disable,
6597         .atomic_check = dm_encoder_helper_atomic_check
6598 };
6599
6600 #if defined(CONFIG_DRM_AMD_DC_DCN)
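/*
 * Translate the per-stream PBN values computed by the MST DSC fairness
 * algorithm (compute_mst_dsc_configs_for_state()) into VCPI time slots,
 * enabling DSC on the MST port for streams whose timing has the DSC
 * flag set.
 */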
6601 static int dm_update_mst_vcpi_slots_for_dsc(struct drm_atomic_state *state,
6602                                             struct dc_state *dc_state,
6603                                             struct dsc_mst_fairness_vars *vars)
6604 {
6605         struct dc_stream_state *stream = NULL;
6606         struct drm_connector *connector;
6607         struct drm_connector_state *new_con_state;
6608         struct amdgpu_dm_connector *aconnector;
6609         struct dm_connector_state *dm_conn_state;
6610         int i, j, ret;
6611         int vcpi, pbn_div, pbn, slot_num = 0;
6612
6613         for_each_new_connector_in_state(state, connector, new_con_state, i) {
6614
6615                 aconnector = to_amdgpu_dm_connector(connector);
6616
6617                 if (!aconnector->port)
6618                         continue;
6619
6620                 if (!new_con_state || !new_con_state->crtc)
6621                         continue;
6622
6623                 dm_conn_state = to_dm_connector_state(new_con_state);
6624
6625                 for (j = 0; j < dc_state->stream_count; j++) {
6626                         stream = dc_state->streams[j];
6627                         if (!stream)
6628                                 continue;
6629
6630                         if ((struct amdgpu_dm_connector *)stream->dm_stream_context == aconnector)
6631                                 break;
6632
6633                         stream = NULL;
6634                 }
6635
6636                 if (!stream)
6637                         continue;
6638
6639                 pbn_div = dm_mst_get_pbn_divider(stream->link);
6640                 /* pbn is calculated by compute_mst_dsc_configs_for_state */
6641                 for (j = 0; j < dc_state->stream_count; j++) {
6642                         if (vars[j].aconnector == aconnector) {
6643                                 pbn = vars[j].pbn;
6644                                 break;
6645                         }
6646                 }
6647
6648                 if (j == dc_state->stream_count)
6649                         continue;
6650
6651                 slot_num = DIV_ROUND_UP(pbn, pbn_div);
6652
6653                 if (stream->timing.flags.DSC != 1) {
6654                         dm_conn_state->pbn = pbn;
6655                         dm_conn_state->vcpi_slots = slot_num;
6656
6657                         ret = drm_dp_mst_atomic_enable_dsc(state, aconnector->port,
6658                                                            dm_conn_state->pbn, false);
6659                         if (ret < 0)
6660                                 return ret;
6661
6662                         continue;
6663                 }
6664
6665                 vcpi = drm_dp_mst_atomic_enable_dsc(state, aconnector->port, pbn, true);
6666                 if (vcpi < 0)
6667                         return vcpi;
6668
6669                 dm_conn_state->pbn = pbn;
6670                 dm_conn_state->vcpi_slots = vcpi;
6671         }
6672         return 0;
6673 }
6674 #endif
6675
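/* Map a DC signal type onto the DRM connector type reported to userspace. */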
6676 static int to_drm_connector_type(enum signal_type st)
6677 {
6678         switch (st) {
6679         case SIGNAL_TYPE_HDMI_TYPE_A:
6680                 return DRM_MODE_CONNECTOR_HDMIA;
6681         case SIGNAL_TYPE_EDP:
6682                 return DRM_MODE_CONNECTOR_eDP;
6683         case SIGNAL_TYPE_LVDS:
6684                 return DRM_MODE_CONNECTOR_LVDS;
6685         case SIGNAL_TYPE_RGB:
6686                 return DRM_MODE_CONNECTOR_VGA;
6687         case SIGNAL_TYPE_DISPLAY_PORT:
6688         case SIGNAL_TYPE_DISPLAY_PORT_MST:
6689                 return DRM_MODE_CONNECTOR_DisplayPort;
6690         case SIGNAL_TYPE_DVI_DUAL_LINK:
6691         case SIGNAL_TYPE_DVI_SINGLE_LINK:
6692                 return DRM_MODE_CONNECTOR_DVID;
6693         case SIGNAL_TYPE_VIRTUAL:
6694                 return DRM_MODE_CONNECTOR_VIRTUAL;
6695
6696         default:
6697                 return DRM_MODE_CONNECTOR_Unknown;
6698         }
6699 }
6700
6701 static struct drm_encoder *amdgpu_dm_connector_to_encoder(struct drm_connector *connector)
6702 {
6703         struct drm_encoder *encoder;
6704
6705         /* There is only one encoder per connector */
6706         drm_connector_for_each_possible_encoder(connector, encoder)
6707                 return encoder;
6708
6709         return NULL;
6710 }
6711
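/*
 * Cache the panel's native mode on the encoder. The probed list has
 * already been sorted, so only its first entry is examined: it becomes
 * the native mode if flagged DRM_MODE_TYPE_PREFERRED (clock stays 0 as
 * the "no native mode" marker otherwise).
 */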
6712 static void amdgpu_dm_get_native_mode(struct drm_connector *connector)
6713 {
6714         struct drm_encoder *encoder;
6715         struct amdgpu_encoder *amdgpu_encoder;
6716
6717         encoder = amdgpu_dm_connector_to_encoder(connector);
6718
6719         if (encoder == NULL)
6720                 return;
6721
6722         amdgpu_encoder = to_amdgpu_encoder(encoder);
6723
6724         amdgpu_encoder->native_mode.clock = 0;
6725
6726         if (!list_empty(&connector->probed_modes)) {
6727                 struct drm_display_mode *preferred_mode = NULL;
6728
6729                 list_for_each_entry(preferred_mode,
6730                                     &connector->probed_modes,
6731                                     head) {
6732                         if (preferred_mode->type & DRM_MODE_TYPE_PREFERRED)
6733                                 amdgpu_encoder->native_mode = *preferred_mode;
6734
6735                         break;
6736                 }
6737
6738         }
6739 }
6740
6741 static struct drm_display_mode *
6742 amdgpu_dm_create_common_mode(struct drm_encoder *encoder,
6743                              char *name,
6744                              int hdisplay, int vdisplay)
6745 {
6746         struct drm_device *dev = encoder->dev;
6747         struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
6748         struct drm_display_mode *mode = NULL;
6749         struct drm_display_mode *native_mode = &amdgpu_encoder->native_mode;
6750
6751         mode = drm_mode_duplicate(dev, native_mode);
6752
6753         if (mode == NULL)
6754                 return NULL;
6755
6756         mode->hdisplay = hdisplay;
6757         mode->vdisplay = vdisplay;
6758         mode->type &= ~DRM_MODE_TYPE_PREFERRED;
6759         strscpy(mode->name, name, DRM_DISPLAY_MODE_LEN);
6760
6761         return mode;
6762
6763 }
6764
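/*
 * Offer the usual fixed resolutions (640x480 up to 1920x1200) as extra
 * scaled modes derived from the native mode's timing. Anything larger
 * than the native mode, equal to it, or already probed is skipped.
 */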
6765 static void amdgpu_dm_connector_add_common_modes(struct drm_encoder *encoder,
6766                                                  struct drm_connector *connector)
6767 {
6768         struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
6769         struct drm_display_mode *mode = NULL;
6770         struct drm_display_mode *native_mode = &amdgpu_encoder->native_mode;
6771         struct amdgpu_dm_connector *amdgpu_dm_connector =
6772                                 to_amdgpu_dm_connector(connector);
6773         int i;
6774         int n;
6775         struct mode_size {
6776                 char name[DRM_DISPLAY_MODE_LEN];
6777                 int w;
6778                 int h;
6779         } common_modes[] = {
6780                 {  "640x480",  640,  480},
6781                 {  "800x600",  800,  600},
6782                 { "1024x768", 1024,  768},
6783                 { "1280x720", 1280,  720},
6784                 { "1280x800", 1280,  800},
6785                 {"1280x1024", 1280, 1024},
6786                 { "1440x900", 1440,  900},
6787                 {"1680x1050", 1680, 1050},
6788                 {"1600x1200", 1600, 1200},
6789                 {"1920x1080", 1920, 1080},
6790                 {"1920x1200", 1920, 1200}
6791         };
6792
6793         n = ARRAY_SIZE(common_modes);
6794
6795         for (i = 0; i < n; i++) {
6796                 struct drm_display_mode *curmode = NULL;
6797                 bool mode_existed = false;
6798
6799                 if (common_modes[i].w > native_mode->hdisplay ||
6800                     common_modes[i].h > native_mode->vdisplay ||
6801                    (common_modes[i].w == native_mode->hdisplay &&
6802                     common_modes[i].h == native_mode->vdisplay))
6803                         continue;
6804
6805                 list_for_each_entry(curmode, &connector->probed_modes, head) {
6806                         if (common_modes[i].w == curmode->hdisplay &&
6807                             common_modes[i].h == curmode->vdisplay) {
6808                                 mode_existed = true;
6809                                 break;
6810                         }
6811                 }
6812
6813                 if (mode_existed)
6814                         continue;
6815
6816                 mode = amdgpu_dm_create_common_mode(encoder,
6817                                 common_modes[i].name, common_modes[i].w,
6818                                 common_modes[i].h);
6819                 if (!mode)
6820                         continue;
6821
6822                 drm_mode_probed_add(connector, mode);
6823                 amdgpu_dm_connector->num_modes++;
6824         }
6825 }
6826
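/*
 * Panel-orientation quirks are looked up by native mode dimensions, so
 * the modes must be probed first (under mode_config.mutex) to populate
 * native_mode before the quirk table can be consulted.
 */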
6827 static void amdgpu_set_panel_orientation(struct drm_connector *connector)
6828 {
6829         struct drm_encoder *encoder;
6830         struct amdgpu_encoder *amdgpu_encoder;
6831         const struct drm_display_mode *native_mode;
6832
6833         if (connector->connector_type != DRM_MODE_CONNECTOR_eDP &&
6834             connector->connector_type != DRM_MODE_CONNECTOR_LVDS)
6835                 return;
6836
6837         mutex_lock(&connector->dev->mode_config.mutex);
6838         amdgpu_dm_connector_get_modes(connector);
6839         mutex_unlock(&connector->dev->mode_config.mutex);
6840
6841         encoder = amdgpu_dm_connector_to_encoder(connector);
6842         if (!encoder)
6843                 return;
6844
6845         amdgpu_encoder = to_amdgpu_encoder(encoder);
6846
6847         native_mode = &amdgpu_encoder->native_mode;
6848         if (native_mode->hdisplay == 0 || native_mode->vdisplay == 0)
6849                 return;
6850
6851         drm_connector_set_panel_orientation_with_quirk(connector,
6852                                                        DRM_MODE_PANEL_ORIENTATION_UNKNOWN,
6853                                                        native_mode->hdisplay,
6854                                                        native_mode->vdisplay);
6855 }
6856
6857 static void amdgpu_dm_connector_ddc_get_modes(struct drm_connector *connector,
6858                                               struct edid *edid)
6859 {
6860         struct amdgpu_dm_connector *amdgpu_dm_connector =
6861                         to_amdgpu_dm_connector(connector);
6862
6863         if (edid) {
6864                 /* empty probed_modes */
6865                 INIT_LIST_HEAD(&connector->probed_modes);
6866                 amdgpu_dm_connector->num_modes =
6867                                 drm_add_edid_modes(connector, edid);
6868
6869                 /* Sort the probed modes before calling
6870                  * amdgpu_dm_get_native_mode(), since an EDID can contain
6871                  * more than one preferred mode. Modes later in the probed
6872                  * mode list may have a higher, preferred resolution.
6873                  * For example: 3840x2160 in the base EDID preferred
6874                  * timing, and 4096x2160 as the preferred resolution in a
6875                  * DisplayID (DID) extension block later in the EDID.
6876                  */
6877                 drm_mode_sort(&connector->probed_modes);
6878                 amdgpu_dm_get_native_mode(connector);
6879
6880                 /* Freesync capabilities are reset by calling
6881                  * drm_add_edid_modes() and need to be
6882                  * restored here.
6883                  */
6884                 amdgpu_dm_update_freesync_caps(connector, edid);
6885         } else {
6886                 amdgpu_dm_connector->num_modes = 0;
6887         }
6888 }
6889
6890 static bool is_duplicate_mode(struct amdgpu_dm_connector *aconnector,
6891                               struct drm_display_mode *mode)
6892 {
6893         struct drm_display_mode *m;
6894
6895         list_for_each_entry(m, &aconnector->base.probed_modes, head) {
6896                 if (drm_mode_equal(m, mode))
6897                         return true;
6898         }
6899
6900         return false;
6901 }
6902
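/*
 * Synthesize fixed-refresh "FreeSync video" modes: take the mode with
 * the highest refresh rate at the preferred resolution and re-target it
 * to common film/TV rates (see common_rates[] below) by stretching the
 * vertical total within the panel's VRR range.
 */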
6903 static uint add_fs_modes(struct amdgpu_dm_connector *aconnector)
6904 {
6905         const struct drm_display_mode *m;
6906         struct drm_display_mode *new_mode;
6907         uint i;
6908         uint32_t new_modes_count = 0;
6909
6910         /* Standard FPS values
6911          *
6912          * 23.976       - TV/NTSC
6913          * 24           - Cinema
6914          * 25           - TV/PAL
6915          * 29.97        - TV/NTSC
6916          * 30           - TV/NTSC
6917          * 48           - Cinema HFR
6918          * 50           - TV/PAL
6919          * 60           - Commonly used
6920          * 48,72,96,120 - Multiples of 24
6921          */
6922         static const uint32_t common_rates[] = {
6923                 23976, 24000, 25000, 29970, 30000,
6924                 48000, 50000, 60000, 72000, 96000, 120000
6925         };
6926
6927         /*
6928          * Find mode with highest refresh rate with the same resolution
6929          * as the preferred mode. Some monitors report a preferred mode
6930          * with lower resolution than the highest refresh rate supported.
6931          */
6932
6933         m = get_highest_refresh_rate_mode(aconnector, true);
6934         if (!m)
6935                 return 0;
6936
6937         for (i = 0; i < ARRAY_SIZE(common_rates); i++) {
6938                 uint64_t target_vtotal, target_vtotal_diff;
6939                 uint64_t num, den;
6940
6941                 if (drm_mode_vrefresh(m) * 1000 < common_rates[i])
6942                         continue;
6943
6944                 if (common_rates[i] < aconnector->min_vfreq * 1000 ||
6945                     common_rates[i] > aconnector->max_vfreq * 1000)
6946                         continue;
6947
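                /*
                 * Stretch vtotal to hit the target rate at the mode's pixel
                 * clock: vtotal = clock[kHz] * 1000 * 1000 / (rate[mHz] * htotal).
                 * E.g. a 148500 kHz mode with htotal 2200 (1080p60, vtotal
                 * 1125), re-targeted to 24000 mHz, yields
                 * 148500000000 / 52800000 = 2812.
                 */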
6948                 num = (unsigned long long)m->clock * 1000 * 1000;
6949                 den = common_rates[i] * (unsigned long long)m->htotal;
6950                 target_vtotal = div_u64(num, den);
6951                 target_vtotal_diff = target_vtotal - m->vtotal;
6952
6953                 /* Check for illegal modes */
6954                 if (m->vsync_start + target_vtotal_diff < m->vdisplay ||
6955                     m->vsync_end + target_vtotal_diff < m->vsync_start ||
6956                     m->vtotal + target_vtotal_diff < m->vsync_end)
6957                         continue;
6958
6959                 new_mode = drm_mode_duplicate(aconnector->base.dev, m);
6960                 if (!new_mode)
6961                         goto out;
6962
6963                 new_mode->vtotal += (u16)target_vtotal_diff;
6964                 new_mode->vsync_start += (u16)target_vtotal_diff;
6965                 new_mode->vsync_end += (u16)target_vtotal_diff;
6966                 new_mode->type &= ~DRM_MODE_TYPE_PREFERRED;
6967                 new_mode->type |= DRM_MODE_TYPE_DRIVER;
6968
6969                 if (!is_duplicate_mode(aconnector, new_mode)) {
6970                         drm_mode_probed_add(&aconnector->base, new_mode);
6971                         new_modes_count += 1;
6972                 } else
6973                         drm_mode_destroy(aconnector->base.dev, new_mode);
6974         }
6975  out:
6976         return new_modes_count;
6977 }
6978
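/*
 * FreeSync video modes are only added when the amdgpu_freesync_vid_mode
 * module option is set, an EDID is present, and the panel reports a VRR
 * range wider than 10 Hz.
 */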
6979 static void amdgpu_dm_connector_add_freesync_modes(struct drm_connector *connector,
6980                                                    struct edid *edid)
6981 {
6982         struct amdgpu_dm_connector *amdgpu_dm_connector =
6983                 to_amdgpu_dm_connector(connector);
6984
6985         if (!(amdgpu_freesync_vid_mode && edid))
6986                 return;
6987
6988         if (amdgpu_dm_connector->max_vfreq - amdgpu_dm_connector->min_vfreq > 10)
6989                 amdgpu_dm_connector->num_modes +=
6990                         add_fs_modes(amdgpu_dm_connector);
6991 }
6992
6993 static int amdgpu_dm_connector_get_modes(struct drm_connector *connector)
6994 {
6995         struct amdgpu_dm_connector *amdgpu_dm_connector =
6996                         to_amdgpu_dm_connector(connector);
6997         struct drm_encoder *encoder;
6998         struct edid *edid = amdgpu_dm_connector->edid;
6999
7000         encoder = amdgpu_dm_connector_to_encoder(connector);
7001
7002         if (!drm_edid_is_valid(edid)) {
7003                 amdgpu_dm_connector->num_modes =
7004                                 drm_add_modes_noedid(connector, 640, 480);
7005         } else {
7006                 amdgpu_dm_connector_ddc_get_modes(connector, edid);
7007                 amdgpu_dm_connector_add_common_modes(encoder, connector);
7008                 amdgpu_dm_connector_add_freesync_modes(connector, edid);
7009         }
7010         amdgpu_dm_fbc_init(connector);
7011
7012         return amdgpu_dm_connector->num_modes;
7013 }
7014
7015 void amdgpu_dm_connector_init_helper(struct amdgpu_display_manager *dm,
7016                                      struct amdgpu_dm_connector *aconnector,
7017                                      int connector_type,
7018                                      struct dc_link *link,
7019                                      int link_index)
7020 {
7021         struct amdgpu_device *adev = drm_to_adev(dm->ddev);
7022
7023         /*
7024          * Some of the properties below require access to state, like bpc.
7025          * Allocate some default initial connector state with our reset helper.
7026          */
7027         if (aconnector->base.funcs->reset)
7028                 aconnector->base.funcs->reset(&aconnector->base);
7029
7030         aconnector->connector_id = link_index;
7031         aconnector->dc_link = link;
7032         aconnector->base.interlace_allowed = false;
7033         aconnector->base.doublescan_allowed = false;
7034         aconnector->base.stereo_allowed = false;
7035         aconnector->base.dpms = DRM_MODE_DPMS_OFF;
7036         aconnector->hpd.hpd = AMDGPU_HPD_NONE; /* not used */
7037         aconnector->audio_inst = -1;
7038         mutex_init(&aconnector->hpd_lock);
7039
7040         /*
7041          * Configure HPD hot-plug support: connector->polled defaults to 0,
7042          * which means HPD hot plug is not supported.
7043          */
7044         switch (connector_type) {
7045         case DRM_MODE_CONNECTOR_HDMIA:
7046                 aconnector->base.polled = DRM_CONNECTOR_POLL_HPD;
7047                 aconnector->base.ycbcr_420_allowed =
7048                         link->link_enc->features.hdmi_ycbcr420_supported ? true : false;
7049                 break;
7050         case DRM_MODE_CONNECTOR_DisplayPort:
7051                 aconnector->base.polled = DRM_CONNECTOR_POLL_HPD;
7052                 link->link_enc = link_enc_cfg_get_link_enc(link);
7053                 ASSERT(link->link_enc);
7054                 if (link->link_enc)
7055                         aconnector->base.ycbcr_420_allowed =
7056                         link->link_enc->features.dp_ycbcr420_supported ? true : false;
7057                 break;
7058         case DRM_MODE_CONNECTOR_DVID:
7059                 aconnector->base.polled = DRM_CONNECTOR_POLL_HPD;
7060                 break;
7061         default:
7062                 break;
7063         }
7064
7065         drm_object_attach_property(&aconnector->base.base,
7066                                 dm->ddev->mode_config.scaling_mode_property,
7067                                 DRM_MODE_SCALE_NONE);
7068
7069         drm_object_attach_property(&aconnector->base.base,
7070                                 adev->mode_info.underscan_property,
7071                                 UNDERSCAN_OFF);
7072         drm_object_attach_property(&aconnector->base.base,
7073                                 adev->mode_info.underscan_hborder_property,
7074                                 0);
7075         drm_object_attach_property(&aconnector->base.base,
7076                                 adev->mode_info.underscan_vborder_property,
7077                                 0);
7078
7079         if (!aconnector->mst_port)
7080                 drm_connector_attach_max_bpc_property(&aconnector->base, 8, 16);
7081
7082         /* This defaults to the max in the range, but we want 8 bpc for non-eDP. */
7083         aconnector->base.state->max_bpc = (connector_type == DRM_MODE_CONNECTOR_eDP) ? 16 : 8;
7084         aconnector->base.state->max_requested_bpc = aconnector->base.state->max_bpc;
7085
7086         if (connector_type == DRM_MODE_CONNECTOR_eDP &&
7087             (dc_is_dmcu_initialized(adev->dm.dc) || adev->dm.dc->ctx->dmub_srv)) {
7088                 drm_object_attach_property(&aconnector->base.base,
7089                                 adev->mode_info.abm_level_property, 0);
7090         }
7091
7092         if (connector_type == DRM_MODE_CONNECTOR_HDMIA ||
7093             connector_type == DRM_MODE_CONNECTOR_DisplayPort ||
7094             connector_type == DRM_MODE_CONNECTOR_eDP) {
7095                 drm_connector_attach_hdr_output_metadata_property(&aconnector->base);
7096
7097                 if (!aconnector->mst_port)
7098                         drm_connector_attach_vrr_capable_property(&aconnector->base);
7099
7100 #ifdef CONFIG_DRM_AMD_DC_HDCP
7101                 if (adev->dm.hdcp_workqueue)
7102                         drm_connector_attach_content_protection_property(&aconnector->base, true);
7103 #endif
7104         }
7105 }
7106
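/*
 * i2c transfer hook: repack the i2c_msg array into a DC i2c_command and
 * submit it over the link's DDC hardware. Returns the number of messages
 * on success, or -EIO if DC rejects the command (or on allocation failure).
 */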
7107 static int amdgpu_dm_i2c_xfer(struct i2c_adapter *i2c_adap,
7108                               struct i2c_msg *msgs, int num)
7109 {
7110         struct amdgpu_i2c_adapter *i2c = i2c_get_adapdata(i2c_adap);
7111         struct ddc_service *ddc_service = i2c->ddc_service;
7112         struct i2c_command cmd;
7113         int i;
7114         int result = -EIO;
7115
7116         cmd.payloads = kcalloc(num, sizeof(struct i2c_payload), GFP_KERNEL);
7117
7118         if (!cmd.payloads)
7119                 return result;
7120
7121         cmd.number_of_payloads = num;
7122         cmd.engine = I2C_COMMAND_ENGINE_DEFAULT;
7123         cmd.speed = 100;
7124
7125         for (i = 0; i < num; i++) {
7126                 cmd.payloads[i].write = !(msgs[i].flags & I2C_M_RD);
7127                 cmd.payloads[i].address = msgs[i].addr;
7128                 cmd.payloads[i].length = msgs[i].len;
7129                 cmd.payloads[i].data = msgs[i].buf;
7130         }
7131
7132         if (dc_submit_i2c(
7133                         ddc_service->ctx->dc,
7134                         ddc_service->link->link_index,
7135                         &cmd))
7136                 result = num;
7137
7138         kfree(cmd.payloads);
7139         return result;
7140 }
7141
7142 static u32 amdgpu_dm_i2c_func(struct i2c_adapter *adap)
7143 {
7144         return I2C_FUNC_I2C | I2C_FUNC_SMBUS_EMUL;
7145 }
7146
7147 static const struct i2c_algorithm amdgpu_dm_i2c_algo = {
7148         .master_xfer = amdgpu_dm_i2c_xfer,
7149         .functionality = amdgpu_dm_i2c_func,
7150 };
7151
7152 static struct amdgpu_i2c_adapter *
7153 create_i2c(struct ddc_service *ddc_service,
7154            int link_index,
7155            int *res)
7156 {
7157         struct amdgpu_device *adev = ddc_service->ctx->driver_context;
7158         struct amdgpu_i2c_adapter *i2c;
7159
7160         i2c = kzalloc(sizeof(struct amdgpu_i2c_adapter), GFP_KERNEL);
7161         if (!i2c)
7162                 return NULL;
7163         i2c->base.owner = THIS_MODULE;
7164         i2c->base.class = I2C_CLASS_DDC;
7165         i2c->base.dev.parent = &adev->pdev->dev;
7166         i2c->base.algo = &amdgpu_dm_i2c_algo;
7167         snprintf(i2c->base.name, sizeof(i2c->base.name), "AMDGPU DM i2c hw bus %d", link_index);
7168         i2c_set_adapdata(&i2c->base, i2c);
7169         i2c->ddc_service = ddc_service;
7170
7171         return i2c;
7172 }
7173
7174
7175 /*
7176  * Note: this function assumes that dc_link_detect() was called for the
7177  * dc_link which will be represented by this aconnector.
7178  */
7179 static int amdgpu_dm_connector_init(struct amdgpu_display_manager *dm,
7180                                     struct amdgpu_dm_connector *aconnector,
7181                                     uint32_t link_index,
7182                                     struct amdgpu_encoder *aencoder)
7183 {
7184         int res = 0;
7185         int connector_type;
7186         struct dc *dc = dm->dc;
7187         struct dc_link *link = dc_get_link_at_index(dc, link_index);
7188         struct amdgpu_i2c_adapter *i2c;
7189
7190         link->priv = aconnector;
7191
7192         DRM_DEBUG_DRIVER("%s()\n", __func__);
7193
7194         i2c = create_i2c(link->ddc, link->link_index, &res);
7195         if (!i2c) {
7196                 DRM_ERROR("Failed to create i2c adapter data\n");
7197                 return -ENOMEM;
7198         }
7199
7200         aconnector->i2c = i2c;
7201         res = i2c_add_adapter(&i2c->base);
7202
7203         if (res) {
7204                 DRM_ERROR("Failed to register hw i2c %d\n", link->link_index);
7205                 goto out_free;
7206         }
7207
7208         connector_type = to_drm_connector_type(link->connector_signal);
7209
7210         res = drm_connector_init_with_ddc(
7211                         dm->ddev,
7212                         &aconnector->base,
7213                         &amdgpu_dm_connector_funcs,
7214                         connector_type,
7215                         &i2c->base);
7216
7217         if (res) {
7218                 DRM_ERROR("connector_init failed\n");
7219                 aconnector->connector_id = -1;
7220                 goto out_free;
7221         }
7222
7223         drm_connector_helper_add(
7224                         &aconnector->base,
7225                         &amdgpu_dm_connector_helper_funcs);
7226
7227         amdgpu_dm_connector_init_helper(
7228                 dm,
7229                 aconnector,
7230                 connector_type,
7231                 link,
7232                 link_index);
7233
7234         drm_connector_attach_encoder(
7235                 &aconnector->base, &aencoder->base);
7236
7237         if (connector_type == DRM_MODE_CONNECTOR_DisplayPort
7238                 || connector_type == DRM_MODE_CONNECTOR_eDP)
7239                 amdgpu_dm_initialize_dp_connector(dm, aconnector, link->link_index);
7240
7241 out_free:
7242         if (res) {
7243                 kfree(i2c);
7244                 aconnector->i2c = NULL;
7245         }
7246         return res;
7247 }
7248
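/*
 * Build a possible_crtcs mask with one bit per CRTC, e.g. num_crtc = 4
 * gives 0xf; any other count falls back to the 6-CRTC mask 0x3f.
 */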
7249 int amdgpu_dm_get_encoder_crtc_mask(struct amdgpu_device *adev)
7250 {
7251         switch (adev->mode_info.num_crtc) {
7252         case 1:
7253                 return 0x1;
7254         case 2:
7255                 return 0x3;
7256         case 3:
7257                 return 0x7;
7258         case 4:
7259                 return 0xf;
7260         case 5:
7261                 return 0x1f;
7262         case 6:
7263         default:
7264                 return 0x3f;
7265         }
7266 }
7267
7268 static int amdgpu_dm_encoder_init(struct drm_device *dev,
7269                                   struct amdgpu_encoder *aencoder,
7270                                   uint32_t link_index)
7271 {
7272         struct amdgpu_device *adev = drm_to_adev(dev);
7273
7274         int res = drm_encoder_init(dev,
7275                                    &aencoder->base,
7276                                    &amdgpu_dm_encoder_funcs,
7277                                    DRM_MODE_ENCODER_TMDS,
7278                                    NULL);
7279
7280         aencoder->base.possible_crtcs = amdgpu_dm_get_encoder_crtc_mask(adev);
7281
7282         if (!res)
7283                 aencoder->encoder_id = link_index;
7284         else
7285                 aencoder->encoder_id = -1;
7286
7287         drm_encoder_helper_add(&aencoder->base, &amdgpu_dm_encoder_helper_funcs);
7288
7289         return res;
7290 }
7291
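/*
 * Enable or disable the CRTC's display-related interrupts. Ordering
 * matters: vblank is switched on before taking the pageflip (and, with
 * secure display, vline0) irq references, and those references are
 * dropped before vblank is switched off.
 */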
7292 static void manage_dm_interrupts(struct amdgpu_device *adev,
7293                                  struct amdgpu_crtc *acrtc,
7294                                  bool enable)
7295 {
7296         /*
7297          * We have no guarantee that the frontend index maps to the same
7298          * backend index - some even map to more than one.
7299          *
7300          * TODO: Use a different interrupt or check DC itself for the mapping.
7301          */
7302         int irq_type =
7303                 amdgpu_display_crtc_idx_to_irq_type(
7304                         adev,
7305                         acrtc->crtc_id);
7306
7307         if (enable) {
7308                 drm_crtc_vblank_on(&acrtc->base);
7309                 amdgpu_irq_get(
7310                         adev,
7311                         &adev->pageflip_irq,
7312                         irq_type);
7313 #if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
7314                 amdgpu_irq_get(
7315                         adev,
7316                         &adev->vline0_irq,
7317                         irq_type);
7318 #endif
7319         } else {
7320 #if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
7321                 amdgpu_irq_put(
7322                         adev,
7323                         &adev->vline0_irq,
7324                         irq_type);
7325 #endif
7326                 amdgpu_irq_put(
7327                         adev,
7328                         &adev->pageflip_irq,
7329                         irq_type);
7330                 drm_crtc_vblank_off(&acrtc->base);
7331         }
7332 }
7333
7334 static void dm_update_pflip_irq_state(struct amdgpu_device *adev,
7335                                       struct amdgpu_crtc *acrtc)
7336 {
7337         int irq_type =
7338                 amdgpu_display_crtc_idx_to_irq_type(adev, acrtc->crtc_id);
7339
7340         /*
7341          * Read the current IRQ state and force a reapply of the
7342          * setting to hardware.
7343          */
7344         amdgpu_irq_update(adev, &adev->pageflip_irq, irq_type);
7345 }
7346
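/*
 * Report whether the scaling mode or underscan borders changed between
 * the old and new connector state, so the caller can reprogram the
 * stream's scaling parameters.
 */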
7347 static bool
7348 is_scaling_state_different(const struct dm_connector_state *dm_state,
7349                            const struct dm_connector_state *old_dm_state)
7350 {
7351         if (dm_state->scaling != old_dm_state->scaling)
7352                 return true;
7353         if (!dm_state->underscan_enable && old_dm_state->underscan_enable) {
7354                 if (old_dm_state->underscan_hborder != 0 && old_dm_state->underscan_vborder != 0)
7355                         return true;
7356         } else if (dm_state->underscan_enable && !old_dm_state->underscan_enable) {
7357                 if (dm_state->underscan_hborder != 0 && dm_state->underscan_vborder != 0)
7358                         return true;
7359         } else if (dm_state->underscan_hborder != old_dm_state->underscan_hborder ||
7360                    dm_state->underscan_vborder != old_dm_state->underscan_vborder)
7361                 return true;
7362         return false;
7363 }
7364
7365 #ifdef CONFIG_DRM_AMD_DC_HDCP
7366 static bool is_content_protection_different(struct drm_connector_state *state,
7367                                             const struct drm_connector_state *old_state,
7368                                             const struct drm_connector *connector, struct hdcp_workqueue *hdcp_w)
7369 {
7370         struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
7371         struct dm_connector_state *dm_con_state = to_dm_connector_state(connector->state);
7372
7373         /* Handle: Type0/1 change */
7374         if (old_state->hdcp_content_type != state->hdcp_content_type &&
7375             state->content_protection != DRM_MODE_CONTENT_PROTECTION_UNDESIRED) {
7376                 state->content_protection = DRM_MODE_CONTENT_PROTECTION_DESIRED;
7377                 return true;
7378         }
7379
7380         /* Content protection is being re-enabled; ignore this.
7381          *
7382          * Handles:     ENABLED -> DESIRED
7383          */
7384         if (old_state->content_protection == DRM_MODE_CONTENT_PROTECTION_ENABLED &&
7385             state->content_protection == DRM_MODE_CONTENT_PROTECTION_DESIRED) {
7386                 state->content_protection = DRM_MODE_CONTENT_PROTECTION_ENABLED;
7387                 return false;
7388         }
7389
7390         /* S3 resume case: the old state will always be 0 (UNDESIRED) and the restored state will be ENABLED.
7391          *
7392          * Handles:     UNDESIRED -> ENABLED
7393          */
7394         if (old_state->content_protection == DRM_MODE_CONTENT_PROTECTION_UNDESIRED &&
7395             state->content_protection == DRM_MODE_CONTENT_PROTECTION_ENABLED)
7396                 state->content_protection = DRM_MODE_CONTENT_PROTECTION_DESIRED;
7397
7398         /* Stream removed and re-enabled
7399          *
7400          * Can sometimes overlap with the HPD case,
7401          * thus set update_hdcp to false to avoid
7402          * setting HDCP multiple times.
7403          *
7404          * Handles:     DESIRED -> DESIRED (Special case)
7405          */
7406         if (!(old_state->crtc && old_state->crtc->enabled) &&
7407                 state->crtc && state->crtc->enabled &&
7408                 connector->state->content_protection == DRM_MODE_CONTENT_PROTECTION_DESIRED) {
7409                 dm_con_state->update_hdcp = false;
7410                 return true;
7411         }
7412
7413         /* Hot-plug, headless S3, DPMS
7414          *
7415          * Only start HDCP if the display is connected/enabled.
7416          * update_hdcp flag will be set to false until the next
7417          * HPD comes in.
7418          *
7419          * Handles:     DESIRED -> DESIRED (Special case)
7420          */
7421         if (dm_con_state->update_hdcp && state->content_protection == DRM_MODE_CONTENT_PROTECTION_DESIRED &&
7422             connector->dpms == DRM_MODE_DPMS_ON && aconnector->dc_sink != NULL) {
7423                 dm_con_state->update_hdcp = false;
7424                 return true;
7425         }
7426
7427         /*
7428          * Handles:     UNDESIRED -> UNDESIRED
7429          *              DESIRED -> DESIRED
7430          *              ENABLED -> ENABLED
7431          */
7432         if (old_state->content_protection == state->content_protection)
7433                 return false;
7434
7435         /*
7436          * Handles:     UNDESIRED -> DESIRED
7437          *              DESIRED -> UNDESIRED
7438          *              ENABLED -> UNDESIRED
7439          */
7440         if (state->content_protection != DRM_MODE_CONTENT_PROTECTION_ENABLED)
7441                 return true;
7442
7443         /*
7444          * Handles:     DESIRED -> ENABLED
7445          */
7446         return false;
7447 }
7448
7449 #endif
7450 static void remove_stream(struct amdgpu_device *adev,
7451                           struct amdgpu_crtc *acrtc,
7452                           struct dc_stream_state *stream)
7453 {
7454         /* This is the update-mode case. */
7455
7456         acrtc->otg_inst = -1;
7457         acrtc->enabled = false;
7458 }
7459
7460 static void prepare_flip_isr(struct amdgpu_crtc *acrtc)
7461 {
7462
7463         assert_spin_locked(&acrtc->base.dev->event_lock);
7464         WARN_ON(acrtc->event);
7465
7466         acrtc->event = acrtc->base.state->event;
7467
7468         /* Set the flip status */
7469         acrtc->pflip_status = AMDGPU_FLIP_SUBMITTED;
7470
7471         /* Mark this event as consumed */
7472         acrtc->base.state->event = NULL;
7473
7474         DC_LOG_PFLIP("crtc:%d, pflip_stat:AMDGPU_FLIP_SUBMITTED\n",
7475                      acrtc->crtc_id);
7476 }
7477
7478 static void update_freesync_state_on_stream(
7479         struct amdgpu_display_manager *dm,
7480         struct dm_crtc_state *new_crtc_state,
7481         struct dc_stream_state *new_stream,
7482         struct dc_plane_state *surface,
7483         u32 flip_timestamp_in_us)
7484 {
7485         struct mod_vrr_params vrr_params;
7486         struct dc_info_packet vrr_infopacket = {0};
7487         struct amdgpu_device *adev = dm->adev;
7488         struct amdgpu_crtc *acrtc = to_amdgpu_crtc(new_crtc_state->base.crtc);
7489         unsigned long flags;
7490         bool pack_sdp_v1_3 = false;
7491
7492         if (!new_stream)
7493                 return;
7494
7495         /*
7496          * TODO: Determine why min/max totals and vrefresh can be 0 here.
7497          * For now it's sufficient to just guard against these conditions.
7498          */
7499
7500         if (!new_stream->timing.h_total || !new_stream->timing.v_total)
7501                 return;
7502
7503         spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);
7504         vrr_params = acrtc->dm_irq_params.vrr_params;
7505
7506         if (surface) {
7507                 mod_freesync_handle_preflip(
7508                         dm->freesync_module,
7509                         surface,
7510                         new_stream,
7511                         flip_timestamp_in_us,
7512                         &vrr_params);
7513
7514                 if (adev->family < AMDGPU_FAMILY_AI &&
7515                     amdgpu_dm_vrr_active(new_crtc_state)) {
7516                         mod_freesync_handle_v_update(dm->freesync_module,
7517                                                      new_stream, &vrr_params);
7518
7519                         /* Need to call this before the frame ends. */
7520                         dc_stream_adjust_vmin_vmax(dm->dc,
7521                                                    new_crtc_state->stream,
7522                                                    &vrr_params.adjust);
7523                 }
7524         }
7525
7526         mod_freesync_build_vrr_infopacket(
7527                 dm->freesync_module,
7528                 new_stream,
7529                 &vrr_params,
7530                 PACKET_TYPE_VRR,
7531                 TRANSFER_FUNC_UNKNOWN,
7532                 &vrr_infopacket,
7533                 pack_sdp_v1_3);
7534
7535         new_crtc_state->freesync_vrr_info_changed |=
7536                 (memcmp(&new_crtc_state->vrr_infopacket,
7537                         &vrr_infopacket,
7538                         sizeof(vrr_infopacket)) != 0);
7539
7540         acrtc->dm_irq_params.vrr_params = vrr_params;
7541         new_crtc_state->vrr_infopacket = vrr_infopacket;
7542
7543         new_stream->vrr_infopacket = vrr_infopacket;
7544
7545         if (new_crtc_state->freesync_vrr_info_changed)
7546                 DRM_DEBUG_KMS("VRR packet update: crtc=%u enabled=%d state=%d",
7547                               new_crtc_state->base.crtc->base.id,
7548                               (int)new_crtc_state->base.vrr_enabled,
7549                               (int)vrr_params.state);
7550
7551         spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
7552 }
7553
7554 static void update_stream_irq_parameters(
7555         struct amdgpu_display_manager *dm,
7556         struct dm_crtc_state *new_crtc_state)
7557 {
7558         struct dc_stream_state *new_stream = new_crtc_state->stream;
7559         struct mod_vrr_params vrr_params;
7560         struct mod_freesync_config config = new_crtc_state->freesync_config;
7561         struct amdgpu_device *adev = dm->adev;
7562         struct amdgpu_crtc *acrtc = to_amdgpu_crtc(new_crtc_state->base.crtc);
7563         unsigned long flags;
7564
7565         if (!new_stream)
7566                 return;
7567
7568         /*
7569          * TODO: Determine why min/max totals and vrefresh can be 0 here.
7570          * For now it's sufficient to just guard against these conditions.
7571          */
7572         if (!new_stream->timing.h_total || !new_stream->timing.v_total)
7573                 return;
7574
7575         spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);
7576         vrr_params = acrtc->dm_irq_params.vrr_params;
7577
7578         if (new_crtc_state->vrr_supported &&
7579             config.min_refresh_in_uhz &&
7580             config.max_refresh_in_uhz) {
7581                 /*
7582                  * if freesync compatible mode was set, config.state will be set
7583                  * in atomic check
7584                  */
7585                 if (config.state == VRR_STATE_ACTIVE_FIXED && config.fixed_refresh_in_uhz &&
7586                     (!drm_atomic_crtc_needs_modeset(&new_crtc_state->base) ||
7587                      new_crtc_state->freesync_config.state == VRR_STATE_ACTIVE_FIXED)) {
7588                         vrr_params.max_refresh_in_uhz = config.max_refresh_in_uhz;
7589                         vrr_params.min_refresh_in_uhz = config.min_refresh_in_uhz;
7590                         vrr_params.fixed_refresh_in_uhz = config.fixed_refresh_in_uhz;
7591                         vrr_params.state = VRR_STATE_ACTIVE_FIXED;
7592                 } else {
7593                         config.state = new_crtc_state->base.vrr_enabled ?
7594                                                      VRR_STATE_ACTIVE_VARIABLE :
7595                                                      VRR_STATE_INACTIVE;
7596                 }
7597         } else {
7598                 config.state = VRR_STATE_UNSUPPORTED;
7599         }
7600
7601         mod_freesync_build_vrr_params(dm->freesync_module,
7602                                       new_stream,
7603                                       &config, &vrr_params);
7604
7605         new_crtc_state->freesync_config = config;
7606         /* Copy state for access from DM IRQ handler */
7607         acrtc->dm_irq_params.freesync_config = config;
7608         acrtc->dm_irq_params.active_planes = new_crtc_state->active_planes;
7609         acrtc->dm_irq_params.vrr_params = vrr_params;
7610         spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
7611 }
7612
7613 static void amdgpu_dm_handle_vrr_transition(struct dm_crtc_state *old_state,
7614                                             struct dm_crtc_state *new_state)
7615 {
7616         bool old_vrr_active = amdgpu_dm_vrr_active(old_state);
7617         bool new_vrr_active = amdgpu_dm_vrr_active(new_state);
7618
7619         if (!old_vrr_active && new_vrr_active) {
7620                 /* Transition VRR inactive -> active:
7621                  * While VRR is active, we must not disable the vblank irq, as a
7622                  * reenable after a disable would compute bogus vblank/pflip
7623                  * timestamps if the disable landed inside the display front porch.
7624                  *
7625                  * We also need vupdate irq for the actual core vblank handling
7626                  * at end of vblank.
7627                  */
7628                 WARN_ON(dm_set_vupdate_irq(new_state->base.crtc, true) != 0);
7629                 WARN_ON(drm_crtc_vblank_get(new_state->base.crtc) != 0);
7630                 DRM_DEBUG_DRIVER("%s: crtc=%u VRR off->on: Get vblank ref\n",
7631                                  __func__, new_state->base.crtc->base.id);
7632         } else if (old_vrr_active && !new_vrr_active) {
7633                 /* Transition VRR active -> inactive:
7634                  * Allow vblank irq disable again for fixed refresh rate.
7635                  */
7636                 WARN_ON(dm_set_vupdate_irq(new_state->base.crtc, false) != 0);
7637                 drm_crtc_vblank_put(new_state->base.crtc);
7638                 DRM_DEBUG_DRIVER("%s: crtc=%u VRR on->off: Drop vblank ref\n",
7639                                  __func__, new_state->base.crtc->base.id);
7640         }
7641 }
7642
7643 static void amdgpu_dm_commit_cursors(struct drm_atomic_state *state)
7644 {
7645         struct drm_plane *plane;
7646         struct drm_plane_state *old_plane_state;
7647         int i;
7648
7649         /*
7650          * TODO: Make this per-stream so we don't issue redundant updates for
7651          * commits with multiple streams.
7652          */
7653         for_each_old_plane_in_state(state, plane, old_plane_state, i)
7654                 if (plane->type == DRM_PLANE_TYPE_CURSOR)
7655                         handle_cursor_update(plane, old_plane_state);
7656 }
7657
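/*
 * Commit all plane updates for one CRTC as a single DC update bundle:
 * collect surface, scaling and flip-address data for each plane on
 * pcrtc, throttle page flips against the target vblank, then program
 * everything through dc_commit_updates_for_stream() under dc_lock.
 */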
7658 static void amdgpu_dm_commit_planes(struct drm_atomic_state *state,
7659                                     struct dc_state *dc_state,
7660                                     struct drm_device *dev,
7661                                     struct amdgpu_display_manager *dm,
7662                                     struct drm_crtc *pcrtc,
7663                                     bool wait_for_vblank)
7664 {
7665         uint32_t i;
7666         uint64_t timestamp_ns;
7667         struct drm_plane *plane;
7668         struct drm_plane_state *old_plane_state, *new_plane_state;
7669         struct amdgpu_crtc *acrtc_attach = to_amdgpu_crtc(pcrtc);
7670         struct drm_crtc_state *new_pcrtc_state =
7671                         drm_atomic_get_new_crtc_state(state, pcrtc);
7672         struct dm_crtc_state *acrtc_state = to_dm_crtc_state(new_pcrtc_state);
7673         struct dm_crtc_state *dm_old_crtc_state =
7674                         to_dm_crtc_state(drm_atomic_get_old_crtc_state(state, pcrtc));
7675         int planes_count = 0, vpos, hpos;
7676         unsigned long flags;
7677         uint32_t target_vblank, last_flip_vblank;
7678         bool vrr_active = amdgpu_dm_vrr_active(acrtc_state);
7679         bool cursor_update = false;
7680         bool pflip_present = false;
7681         struct {
7682                 struct dc_surface_update surface_updates[MAX_SURFACES];
7683                 struct dc_plane_info plane_infos[MAX_SURFACES];
7684                 struct dc_scaling_info scaling_infos[MAX_SURFACES];
7685                 struct dc_flip_addrs flip_addrs[MAX_SURFACES];
7686                 struct dc_stream_update stream_update;
7687         } *bundle;
7688
7689         bundle = kzalloc(sizeof(*bundle), GFP_KERNEL);
7690
7691         if (!bundle) {
7692                 dm_error("Failed to allocate update bundle\n");
7693                 goto cleanup;
7694         }
7695
7696         /*
7697          * Disable the cursor first if we're disabling all the planes.
7698          * It'll remain on the screen after the planes are re-enabled
7699          * if we don't.
7700          */
7701         if (acrtc_state->active_planes == 0)
7702                 amdgpu_dm_commit_cursors(state);
7703
7704         /* update planes when needed */
7705         for_each_oldnew_plane_in_state(state, plane, old_plane_state, new_plane_state, i) {
7706                 struct drm_crtc *crtc = new_plane_state->crtc;
7707                 struct drm_crtc_state *new_crtc_state;
7708                 struct drm_framebuffer *fb = new_plane_state->fb;
7709                 struct amdgpu_framebuffer *afb = (struct amdgpu_framebuffer *)fb;
7710                 bool plane_needs_flip;
7711                 struct dc_plane_state *dc_plane;
7712                 struct dm_plane_state *dm_new_plane_state = to_dm_plane_state(new_plane_state);
7713
7714                 /* Cursor plane is handled after stream updates */
7715                 if (plane->type == DRM_PLANE_TYPE_CURSOR) {
7716                         if ((fb && crtc == pcrtc) ||
7717                             (old_plane_state->fb && old_plane_state->crtc == pcrtc))
7718                                 cursor_update = true;
7719
7720                         continue;
7721                 }
7722
7723                 if (!fb || !crtc || pcrtc != crtc)
7724                         continue;
7725
7726                 new_crtc_state = drm_atomic_get_new_crtc_state(state, crtc);
7727                 if (!new_crtc_state->active)
7728                         continue;
7729
7730                 dc_plane = dm_new_plane_state->dc_state;
7731
7732                 bundle->surface_updates[planes_count].surface = dc_plane;
7733                 if (new_pcrtc_state->color_mgmt_changed) {
7734                         bundle->surface_updates[planes_count].gamma = dc_plane->gamma_correction;
7735                         bundle->surface_updates[planes_count].in_transfer_func = dc_plane->in_transfer_func;
7736                         bundle->surface_updates[planes_count].gamut_remap_matrix = &dc_plane->gamut_remap_matrix;
7737                 }
7738
7739                 fill_dc_scaling_info(dm->adev, new_plane_state,
7740                                      &bundle->scaling_infos[planes_count]);
7741
7742                 bundle->surface_updates[planes_count].scaling_info =
7743                         &bundle->scaling_infos[planes_count];
7744
7745                 plane_needs_flip = old_plane_state->fb && new_plane_state->fb;
7746
7747                 pflip_present = pflip_present || plane_needs_flip;
7748
7749                 if (!plane_needs_flip) {
7750                         planes_count += 1;
7751                         continue;
7752                 }
7753
7754                 fill_dc_plane_info_and_addr(
7755                         dm->adev, new_plane_state,
7756                         afb->tiling_flags,
7757                         &bundle->plane_infos[planes_count],
7758                         &bundle->flip_addrs[planes_count].address,
7759                         afb->tmz_surface, false);
7760
7761                 drm_dbg_state(state->dev, "plane: id=%d dcc_en=%d\n",
7762                                  new_plane_state->plane->index,
7763                                  bundle->plane_infos[planes_count].dcc.enable);
7764
7765                 bundle->surface_updates[planes_count].plane_info =
7766                         &bundle->plane_infos[planes_count];
7767
7768                 if (acrtc_state->stream->link->psr_settings.psr_feature_enabled)
7769                         fill_dc_dirty_rects(plane, old_plane_state,
7770                                             new_plane_state, new_crtc_state,
7771                                             &bundle->flip_addrs[planes_count]);
7772
7773                 /*
7774                  * Only allow immediate flips for fast updates that don't
7775                  * change FB pitch, DCC state, rotation or mirroring.
7776                  */
7777                 bundle->flip_addrs[planes_count].flip_immediate =
7778                         crtc->state->async_flip &&
7779                         acrtc_state->update_type == UPDATE_TYPE_FAST;
7780
7781                 timestamp_ns = ktime_get_ns();
7782                 bundle->flip_addrs[planes_count].flip_timestamp_in_us = div_u64(timestamp_ns, 1000);
7783                 bundle->surface_updates[planes_count].flip_addr = &bundle->flip_addrs[planes_count];
7784                 bundle->surface_updates[planes_count].surface = dc_plane;
7785
7786                 if (!bundle->surface_updates[planes_count].surface) {
7787                         DRM_ERROR("No surface for CRTC: id=%d\n",
7788                                         acrtc_attach->crtc_id);
7789                         continue;
7790                 }
7791
7792                 if (plane == pcrtc->primary)
7793                         update_freesync_state_on_stream(
7794                                 dm,
7795                                 acrtc_state,
7796                                 acrtc_state->stream,
7797                                 dc_plane,
7798                                 bundle->flip_addrs[planes_count].flip_timestamp_in_us);
7799
7800                 drm_dbg_state(state->dev, "%s Flipping to hi: 0x%x, low: 0x%x\n",
7801                                  __func__,
7802                                  bundle->flip_addrs[planes_count].address.grph.addr.high_part,
7803                                  bundle->flip_addrs[planes_count].address.grph.addr.low_part);
7804
7805                 planes_count += 1;
7806
7807         }
7808
7809         if (pflip_present) {
7810                 if (!vrr_active) {
7811                         /* Use old throttling in non-vrr fixed refresh rate mode
7812                          * to keep flip scheduling based on target vblank counts
7813                          * working in a backwards compatible way, e.g., for
7814                          * clients using the GLX_OML_sync_control extension or
7815                          * DRI3/Present extension with defined target_msc.
7816                          */
7817                         last_flip_vblank = amdgpu_get_vblank_counter_kms(pcrtc);
7818                 } else {
7820                         /* For variable refresh rate mode only:
7821                          * Get vblank of last completed flip to avoid > 1 vrr
7822                          * flips per video frame by use of throttling, but allow
7823                          * flip programming anywhere in the possibly large
7824                          * variable vrr vblank interval for fine-grained flip
7825                          * timing control and more opportunity to avoid stutter
7826                          * on late submission of flips.
7827                          */
7828                         spin_lock_irqsave(&pcrtc->dev->event_lock, flags);
7829                         last_flip_vblank = acrtc_attach->dm_irq_params.last_flip_vblank;
7830                         spin_unlock_irqrestore(&pcrtc->dev->event_lock, flags);
7831                 }
7832
7833                 target_vblank = last_flip_vblank + wait_for_vblank;
7834
7835                 /*
7836                  * Wait until we're out of the vertical blank period before the one
7837                  * targeted by the flip
7838                  */
7839                 while ((acrtc_attach->enabled &&
7840                         (amdgpu_display_get_crtc_scanoutpos(dm->ddev, acrtc_attach->crtc_id,
7841                                                             0, &vpos, &hpos, NULL,
7842                                                             NULL, &pcrtc->hwmode)
7843                          & (DRM_SCANOUTPOS_VALID | DRM_SCANOUTPOS_IN_VBLANK)) ==
7844                         (DRM_SCANOUTPOS_VALID | DRM_SCANOUTPOS_IN_VBLANK) &&
7845                         (int)(target_vblank -
7846                           amdgpu_get_vblank_counter_kms(pcrtc)) > 0)) {
7847                         usleep_range(1000, 1100);
7848                 }
7849
7850                 /**
7851                  * Prepare the flip event for the pageflip interrupt to handle.
7852                  *
7853                  * This only works in the case where we've already turned on the
7854                  * appropriate hardware blocks (e.g. HUBP) so in the transition case
7855                  * from 0 -> n planes we have to skip a hardware generated event
7856                  * and rely on sending it from software.
7857                  */
7858                 if (acrtc_attach->base.state->event &&
7859                     acrtc_state->active_planes > 0) {
7860                         drm_crtc_vblank_get(pcrtc);
7861
7862                         spin_lock_irqsave(&pcrtc->dev->event_lock, flags);
7863
7864                         WARN_ON(acrtc_attach->pflip_status != AMDGPU_FLIP_NONE);
7865                         prepare_flip_isr(acrtc_attach);
7866
7867                         spin_unlock_irqrestore(&pcrtc->dev->event_lock, flags);
7868                 }
7869
7870                 if (acrtc_state->stream) {
7871                         if (acrtc_state->freesync_vrr_info_changed)
7872                                 bundle->stream_update.vrr_infopacket =
7873                                         &acrtc_state->stream->vrr_infopacket;
7874                 }
7875         } else if (cursor_update && acrtc_state->active_planes > 0 &&
7876                    acrtc_attach->base.state->event) {
7877                 drm_crtc_vblank_get(pcrtc);
7878
7879                 spin_lock_irqsave(&pcrtc->dev->event_lock, flags);
7880
                acrtc_attach->event = acrtc_attach->base.state->event;
                acrtc_attach->base.state->event = NULL;

                spin_unlock_irqrestore(&pcrtc->dev->event_lock, flags);
        }

        /* Update the planes if changed or disable if we don't have any. */
        if ((planes_count || acrtc_state->active_planes == 0) &&
                acrtc_state->stream) {
                /*
                 * If PSR or idle optimizations are enabled then flush out
                 * any pending work before hardware programming.
                 */
                if (dm->vblank_control_workqueue)
                        flush_workqueue(dm->vblank_control_workqueue);

                bundle->stream_update.stream = acrtc_state->stream;
                if (new_pcrtc_state->mode_changed) {
                        bundle->stream_update.src = acrtc_state->stream->src;
                        bundle->stream_update.dst = acrtc_state->stream->dst;
                }

                if (new_pcrtc_state->color_mgmt_changed) {
                        /*
                         * TODO: This isn't fully correct since we've actually
                         * already modified the stream in place.
                         */
                        bundle->stream_update.gamut_remap =
                                &acrtc_state->stream->gamut_remap_matrix;
                        bundle->stream_update.output_csc_transform =
                                &acrtc_state->stream->csc_color_matrix;
                        bundle->stream_update.out_transfer_func =
                                acrtc_state->stream->out_transfer_func;
                }

                acrtc_state->stream->abm_level = acrtc_state->abm_level;
                if (acrtc_state->abm_level != dm_old_crtc_state->abm_level)
                        bundle->stream_update.abm_level = &acrtc_state->abm_level;

                /*
                 * If FreeSync state on the stream has changed then we need to
                 * re-adjust the min/max bounds now that DC doesn't handle this
                 * as part of commit.
                 */
                if (is_dc_timing_adjust_needed(dm_old_crtc_state, acrtc_state)) {
                        spin_lock_irqsave(&pcrtc->dev->event_lock, flags);
                        dc_stream_adjust_vmin_vmax(
                                dm->dc, acrtc_state->stream,
                                &acrtc_attach->dm_irq_params.vrr_params.adjust);
                        spin_unlock_irqrestore(&pcrtc->dev->event_lock, flags);
                }
                mutex_lock(&dm->dc_lock);
                if ((acrtc_state->update_type > UPDATE_TYPE_FAST) &&
                                acrtc_state->stream->link->psr_settings.psr_allow_active)
                        amdgpu_dm_psr_disable(acrtc_state->stream);

                dc_commit_updates_for_stream(dm->dc,
                                                     bundle->surface_updates,
                                                     planes_count,
                                                     acrtc_state->stream,
                                                     &bundle->stream_update,
                                                     dc_state);

                /*
                 * Enable or disable the interrupts on the backend.
                 *
                 * Most pipes are put into power gating when unused.
                 *
                 * When power gating is enabled on a pipe we lose the
                 * interrupt enablement state when power gating is disabled.
                 *
                 * So we need to update the IRQ control state in hardware
                 * whenever the pipe turns on (since it could be previously
                 * power gated) or off (since some pipes can't be power gated
                 * on some ASICs).
                 */
                if (dm_old_crtc_state->active_planes != acrtc_state->active_planes)
                        dm_update_pflip_irq_state(drm_to_adev(dev),
                                                  acrtc_attach);

                if ((acrtc_state->update_type > UPDATE_TYPE_FAST) &&
                                acrtc_state->stream->link->psr_settings.psr_version != DC_PSR_VERSION_UNSUPPORTED &&
                                !acrtc_state->stream->link->psr_settings.psr_feature_enabled)
                        amdgpu_dm_link_setup_psr(acrtc_state->stream);

                /* Decrement skip count when PSR is enabled and we're doing fast updates. */
                if (acrtc_state->update_type == UPDATE_TYPE_FAST &&
                    acrtc_state->stream->link->psr_settings.psr_feature_enabled) {
                        struct amdgpu_dm_connector *aconn =
                                (struct amdgpu_dm_connector *)acrtc_state->stream->dm_stream_context;

                        if (aconn->psr_skip_count > 0)
                                aconn->psr_skip_count--;

                        /* Allow PSR when the skip count reaches 0. */
                        acrtc_attach->dm_irq_params.allow_psr_entry = !aconn->psr_skip_count;

                        /*
                         * If the sink supports PSR SU, there is no need to rely on
                         * a vblank event disable request to enable PSR. PSR SU
                         * can be enabled immediately once the OS demonstrates an
                         * adequate number of fast atomic commits to notify KMD
                         * of update events. See `vblank_control_worker()`.
                         */
                        if (acrtc_state->stream->link->psr_settings.psr_version >= DC_PSR_VERSION_SU_1 &&
                            acrtc_attach->dm_irq_params.allow_psr_entry &&
#ifdef CONFIG_DRM_AMD_SECURE_DISPLAY
                            !amdgpu_dm_crc_window_is_activated(acrtc_state->base.crtc) &&
#endif
                            !acrtc_state->stream->link->psr_settings.psr_allow_active)
                                amdgpu_dm_psr_enable(acrtc_state->stream);
                } else {
                        acrtc_attach->dm_irq_params.allow_psr_entry = false;
                }

                mutex_unlock(&dm->dc_lock);
        }

        /*
         * Update cursor state *after* programming all the planes.
         * This avoids redundant programming in the case where we're going
         * to be disabling a single plane - those pipes are being disabled.
         */
        if (acrtc_state->active_planes)
                amdgpu_dm_commit_cursors(state);

cleanup:
        kfree(bundle);
}

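/*
 * amdgpu_dm_commit_audio() - notify the audio subsystem of connector changes.
 * @dev: DRM device
 * @state: the atomic state being committed
 *
 * First tears down audio (audio_inst = -1) for every connector whose CRTC
 * changed or went through a modeset, then registers the audio instance from
 * the DC stream status for every connector that still has an active stream,
 * calling amdgpu_dm_audio_eld_notify() for each transition.
 */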
static void amdgpu_dm_commit_audio(struct drm_device *dev,
                                   struct drm_atomic_state *state)
{
        struct amdgpu_device *adev = drm_to_adev(dev);
        struct amdgpu_dm_connector *aconnector;
        struct drm_connector *connector;
        struct drm_connector_state *old_con_state, *new_con_state;
        struct drm_crtc_state *new_crtc_state;
        struct dm_crtc_state *new_dm_crtc_state;
        const struct dc_stream_status *status;
        int i, inst;

        /* Notify device removals. */
        for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) {
                if (old_con_state->crtc != new_con_state->crtc) {
                        /* CRTC changes require notification. */
                        goto notify;
                }

                if (!new_con_state->crtc)
                        continue;

                new_crtc_state = drm_atomic_get_new_crtc_state(
                        state, new_con_state->crtc);

                if (!new_crtc_state)
                        continue;

                if (!drm_atomic_crtc_needs_modeset(new_crtc_state))
                        continue;

        notify:
                aconnector = to_amdgpu_dm_connector(connector);

                mutex_lock(&adev->dm.audio_lock);
                inst = aconnector->audio_inst;
                aconnector->audio_inst = -1;
                mutex_unlock(&adev->dm.audio_lock);

                amdgpu_dm_audio_eld_notify(adev, inst);
        }

        /* Notify audio device additions. */
        for_each_new_connector_in_state(state, connector, new_con_state, i) {
                if (!new_con_state->crtc)
                        continue;

                new_crtc_state = drm_atomic_get_new_crtc_state(
                        state, new_con_state->crtc);

                if (!new_crtc_state)
                        continue;

                if (!drm_atomic_crtc_needs_modeset(new_crtc_state))
                        continue;

                new_dm_crtc_state = to_dm_crtc_state(new_crtc_state);
                if (!new_dm_crtc_state->stream)
                        continue;

                status = dc_stream_get_status(new_dm_crtc_state->stream);
                if (!status)
                        continue;

                aconnector = to_amdgpu_dm_connector(connector);

                mutex_lock(&adev->dm.audio_lock);
                inst = status->audio_inst;
                aconnector->audio_inst = inst;
                mutex_unlock(&adev->dm.audio_lock);

                amdgpu_dm_audio_eld_notify(adev, inst);
        }
}

/*
 * amdgpu_dm_crtc_copy_transient_flags - copy mirrored flags from DRM to DC
 * @crtc_state: the DRM CRTC state
 * @stream_state: the DC stream state.
 *
 * Copy the mirrored transient state flags from DRM to DC. It is used to bring
 * a dc_stream_state's flags in sync with a drm_crtc_state's flags.
 */
static void amdgpu_dm_crtc_copy_transient_flags(struct drm_crtc_state *crtc_state,
                                                struct dc_stream_state *stream_state)
{
        stream_state->mode_changed = drm_atomic_crtc_needs_modeset(crtc_state);
}

/**
 * amdgpu_dm_atomic_commit_tail() - AMDgpu DM's commit tail implementation.
 * @state: The atomic state to commit
 *
 * This will tell DC to commit the constructed DC state from atomic_check,
 * programming the hardware. Any failure here implies a hardware failure, since
 * atomic check should have filtered anything non-kosher.
 */
static void amdgpu_dm_atomic_commit_tail(struct drm_atomic_state *state)
{
        struct drm_device *dev = state->dev;
        struct amdgpu_device *adev = drm_to_adev(dev);
        struct amdgpu_display_manager *dm = &adev->dm;
        struct dm_atomic_state *dm_state;
        struct dc_state *dc_state = NULL, *dc_state_temp = NULL;
        uint32_t i, j;
        struct drm_crtc *crtc;
        struct drm_crtc_state *old_crtc_state, *new_crtc_state;
        unsigned long flags;
        bool wait_for_vblank = true;
        struct drm_connector *connector;
        struct drm_connector_state *old_con_state, *new_con_state;
        struct dm_crtc_state *dm_old_crtc_state, *dm_new_crtc_state;
        int crtc_disable_count = 0;
        bool mode_set_reset_required = false;
        int r;

        trace_amdgpu_dm_atomic_commit_tail_begin(state);

        r = drm_atomic_helper_wait_for_fences(dev, state, false);
        if (unlikely(r))
                DRM_ERROR("Waiting for fences timed out!");

        drm_atomic_helper_update_legacy_modeset_state(dev, state);
        drm_dp_mst_atomic_wait_for_dependencies(state);

        dm_state = dm_atomic_get_new_state(state);
        if (dm_state && dm_state->context) {
                dc_state = dm_state->context;
        } else {
                /* No state changes, retain the current state. */
                dc_state_temp = dc_create_state(dm->dc);
                ASSERT(dc_state_temp);
                dc_state = dc_state_temp;
                dc_resource_state_copy_construct_current(dm->dc, dc_state);
        }

        for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state,
                                      new_crtc_state, i) {
                struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);

                dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);

                if (old_crtc_state->active &&
                    (!new_crtc_state->active ||
                     drm_atomic_crtc_needs_modeset(new_crtc_state))) {
                        manage_dm_interrupts(adev, acrtc, false);
                        dc_stream_release(dm_old_crtc_state->stream);
                }
        }

        drm_atomic_helper_calc_timestamping_constants(state);

        /* Update changed items. */
        for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
                struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);

                dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
                dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);

                drm_dbg_state(state->dev,
                        "amdgpu_crtc id:%d crtc_state_flags: enable:%d, active:%d, "
                        "planes_changed:%d, mode_changed:%d, active_changed:%d, "
                        "connectors_changed:%d\n",
                        acrtc->crtc_id,
                        new_crtc_state->enable,
                        new_crtc_state->active,
                        new_crtc_state->planes_changed,
                        new_crtc_state->mode_changed,
                        new_crtc_state->active_changed,
                        new_crtc_state->connectors_changed);

                /* Disable cursor if disabling crtc */
                if (old_crtc_state->active && !new_crtc_state->active) {
                        struct dc_cursor_position position;

                        memset(&position, 0, sizeof(position));
                        mutex_lock(&dm->dc_lock);
                        dc_stream_set_cursor_position(dm_old_crtc_state->stream, &position);
                        mutex_unlock(&dm->dc_lock);
                }

                /* Copy all transient state flags into dc state */
                if (dm_new_crtc_state->stream) {
                        amdgpu_dm_crtc_copy_transient_flags(&dm_new_crtc_state->base,
                                                            dm_new_crtc_state->stream);
                }

                /*
                 * Handles the headless hotplug case, updating new_state and
                 * aconnector as needed.
                 */

                if (modeset_required(new_crtc_state, dm_new_crtc_state->stream, dm_old_crtc_state->stream)) {

                        DRM_DEBUG_ATOMIC("Atomic commit: SET crtc id %d: [%p]\n", acrtc->crtc_id, acrtc);

                        if (!dm_new_crtc_state->stream) {
                                /*
                                 * This could happen because of issues with the
                                 * delivery of userspace notifications.
                                 * In this case userspace tries to set a mode on
                                 * a display which is in fact disconnected;
                                 * dc_sink is NULL on the aconnector here.
                                 * We expect a mode reset to come soon.
                                 *
                                 * This can also happen when an unplug occurs
                                 * while the resume sequence is still running.
                                 *
                                 * In this case, we want to pretend we still
                                 * have a sink to keep the pipe running so that
                                 * hw state is consistent with the sw state.
                                 */
                                DRM_DEBUG_DRIVER("%s: Failed to create new stream for crtc %d\n",
                                                __func__, acrtc->base.base.id);
                                continue;
                        }

                        if (dm_old_crtc_state->stream)
                                remove_stream(adev, acrtc, dm_old_crtc_state->stream);

                        pm_runtime_get_noresume(dev->dev);

                        acrtc->enabled = true;
                        acrtc->hw_mode = new_crtc_state->mode;
                        crtc->hwmode = new_crtc_state->mode;
                        mode_set_reset_required = true;
                } else if (modereset_required(new_crtc_state)) {
                        DRM_DEBUG_ATOMIC("Atomic commit: RESET. crtc id %d:[%p]\n", acrtc->crtc_id, acrtc);
                        /* i.e. reset mode */
                        if (dm_old_crtc_state->stream)
                                remove_stream(adev, acrtc, dm_old_crtc_state->stream);

                        mode_set_reset_required = true;
                }
        } /* for_each_crtc_in_state() */

        if (dc_state) {
                /* If there is a mode set or reset, disable eDP PSR. */
                if (mode_set_reset_required) {
                        if (dm->vblank_control_workqueue)
                                flush_workqueue(dm->vblank_control_workqueue);

                        amdgpu_dm_psr_disable_all(dm);
                }

                dm_enable_per_frame_crtc_master_sync(dc_state);
                mutex_lock(&dm->dc_lock);
                WARN_ON(!dc_commit_state(dm->dc, dc_state));

                /* Allow idle optimization when vblank count is 0 for display off */
                if (dm->active_vblank_irq_count == 0)
                        dc_allow_idle_optimizations(dm->dc, true);
                mutex_unlock(&dm->dc_lock);
        }

        for_each_new_crtc_in_state(state, crtc, new_crtc_state, i) {
                struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);

                dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);

                if (dm_new_crtc_state->stream != NULL) {
                        const struct dc_stream_status *status =
                                        dc_stream_get_status(dm_new_crtc_state->stream);

                        if (!status)
                                status = dc_stream_get_status_from_state(dc_state,
                                                                         dm_new_crtc_state->stream);
                        if (!status)
                                DC_ERR("got no status for stream %p on acrtc%p\n", dm_new_crtc_state->stream, acrtc);
                        else
                                acrtc->otg_inst = status->primary_otg_inst;
                }
        }
#ifdef CONFIG_DRM_AMD_DC_HDCP
        for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) {
                struct dm_connector_state *dm_new_con_state = to_dm_connector_state(new_con_state);
                struct amdgpu_crtc *acrtc = to_amdgpu_crtc(dm_new_con_state->base.crtc);
                struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);

                new_crtc_state = NULL;

                if (acrtc)
                        new_crtc_state = drm_atomic_get_new_crtc_state(state, &acrtc->base);

                dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);

                if (dm_new_crtc_state && dm_new_crtc_state->stream == NULL &&
                    connector->state->content_protection == DRM_MODE_CONTENT_PROTECTION_ENABLED) {
                        hdcp_reset_display(adev->dm.hdcp_workqueue, aconnector->dc_link->link_index);
                        new_con_state->content_protection = DRM_MODE_CONTENT_PROTECTION_DESIRED;
                        dm_new_con_state->update_hdcp = true;
                        continue;
                }

                if (is_content_protection_different(new_con_state, old_con_state, connector, adev->dm.hdcp_workqueue))
                        hdcp_update_display(
                                adev->dm.hdcp_workqueue, aconnector->dc_link->link_index, aconnector,
                                new_con_state->hdcp_content_type,
                                new_con_state->content_protection == DRM_MODE_CONTENT_PROTECTION_DESIRED);
        }
#endif

        /* Handle connector state changes */
        for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) {
                struct dm_connector_state *dm_new_con_state = to_dm_connector_state(new_con_state);
                struct dm_connector_state *dm_old_con_state = to_dm_connector_state(old_con_state);
                struct amdgpu_crtc *acrtc = to_amdgpu_crtc(dm_new_con_state->base.crtc);
                struct dc_surface_update dummy_updates[MAX_SURFACES];
                struct dc_stream_update stream_update;
                struct dc_info_packet hdr_packet;
                struct dc_stream_status *status = NULL;
                bool abm_changed, hdr_changed, scaling_changed;

                memset(&dummy_updates, 0, sizeof(dummy_updates));
                memset(&stream_update, 0, sizeof(stream_update));

                if (acrtc) {
                        new_crtc_state = drm_atomic_get_new_crtc_state(state, &acrtc->base);
                        old_crtc_state = drm_atomic_get_old_crtc_state(state, &acrtc->base);
                }

                /* Skip any modesets/resets */
                if (!acrtc || drm_atomic_crtc_needs_modeset(new_crtc_state))
                        continue;

                dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
                dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);

                scaling_changed = is_scaling_state_different(dm_new_con_state,
                                                             dm_old_con_state);

                abm_changed = dm_new_crtc_state->abm_level !=
                              dm_old_crtc_state->abm_level;

                hdr_changed =
                        !drm_connector_atomic_hdr_metadata_equal(old_con_state, new_con_state);

                if (!scaling_changed && !abm_changed && !hdr_changed)
                        continue;

                stream_update.stream = dm_new_crtc_state->stream;
                if (scaling_changed) {
                        update_stream_scaling_settings(&dm_new_con_state->base.crtc->mode,
                                        dm_new_con_state, dm_new_crtc_state->stream);

                        stream_update.src = dm_new_crtc_state->stream->src;
                        stream_update.dst = dm_new_crtc_state->stream->dst;
                }

                if (abm_changed) {
                        dm_new_crtc_state->stream->abm_level = dm_new_crtc_state->abm_level;

                        stream_update.abm_level = &dm_new_crtc_state->abm_level;
                }

                if (hdr_changed) {
                        fill_hdr_info_packet(new_con_state, &hdr_packet);
                        stream_update.hdr_static_metadata = &hdr_packet;
                }

                status = dc_stream_get_status(dm_new_crtc_state->stream);

                if (WARN_ON(!status))
                        continue;

                WARN_ON(!status->plane_count);

                /*
                 * TODO: DC refuses to perform stream updates without a dc_surface_update.
                 * Here we create an empty update on each plane.
                 * To fix this, DC should permit updating only stream properties.
                 */
                for (j = 0; j < status->plane_count; j++)
                        dummy_updates[j].surface = status->plane_states[0];

                mutex_lock(&dm->dc_lock);
                dc_commit_updates_for_stream(dm->dc,
                                                     dummy_updates,
                                                     status->plane_count,
                                                     dm_new_crtc_state->stream,
                                                     &stream_update,
                                                     dc_state);
                mutex_unlock(&dm->dc_lock);
        }

        /*
         * Enable interrupts for CRTCs that are newly enabled or went through
         * a modeset. It was intentionally deferred until after the front end
         * state was modified to wait until the OTG was on and so the IRQ
         * handlers didn't access stale or invalid state.
         */
        for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
                struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
#ifdef CONFIG_DEBUG_FS
                enum amdgpu_dm_pipe_crc_source cur_crc_src;
#if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
                struct crc_rd_work *crc_rd_wrk;
#endif
#endif
                /* Count number of newly disabled CRTCs for dropping PM refs later. */
                if (old_crtc_state->active && !new_crtc_state->active)
                        crtc_disable_count++;

                dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
                dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);

                /* For freesync config update on crtc state and params for irq */
                update_stream_irq_parameters(dm, dm_new_crtc_state);

#ifdef CONFIG_DEBUG_FS
#if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
                crc_rd_wrk = dm->crc_rd_wrk;
#endif
                spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);
                cur_crc_src = acrtc->dm_irq_params.crc_src;
                spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
#endif

                if (new_crtc_state->active &&
                    (!old_crtc_state->active ||
                     drm_atomic_crtc_needs_modeset(new_crtc_state))) {
                        dc_stream_retain(dm_new_crtc_state->stream);
                        acrtc->dm_irq_params.stream = dm_new_crtc_state->stream;
                        manage_dm_interrupts(adev, acrtc, true);
                }
                /* Handle vrr on->off / off->on transitions */
                amdgpu_dm_handle_vrr_transition(dm_old_crtc_state, dm_new_crtc_state);

#ifdef CONFIG_DEBUG_FS
                if (new_crtc_state->active &&
                    (!old_crtc_state->active ||
                     drm_atomic_crtc_needs_modeset(new_crtc_state))) {
                        /*
                         * Frontend may have changed so reapply the CRC capture
                         * settings for the stream.
                         */
                        if (amdgpu_dm_is_valid_crc_source(cur_crc_src)) {
#if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
                                if (amdgpu_dm_crc_window_is_activated(crtc)) {
                                        spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);
                                        acrtc->dm_irq_params.window_param.update_win = true;
                                        acrtc->dm_irq_params.window_param.skip_frame_cnt = 2;
                                        spin_lock_irq(&crc_rd_wrk->crc_rd_work_lock);
                                        crc_rd_wrk->crtc = crtc;
                                        spin_unlock_irq(&crc_rd_wrk->crc_rd_work_lock);
                                        spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
                                }
#endif
                                if (amdgpu_dm_crtc_configure_crc_source(
                                        crtc, dm_new_crtc_state, cur_crc_src))
                                        DRM_DEBUG_DRIVER("Failed to configure crc source");
                        }
                }
#endif
        }

        for_each_new_crtc_in_state(state, crtc, new_crtc_state, j)
                if (new_crtc_state->async_flip)
                        wait_for_vblank = false;

        /* Update planes when needed, per crtc. */
        for_each_new_crtc_in_state(state, crtc, new_crtc_state, j) {
                dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);

                if (dm_new_crtc_state->stream)
                        amdgpu_dm_commit_planes(state, dc_state, dev,
                                                dm, crtc, wait_for_vblank);
        }

        /* Update audio instances for each connector. */
        amdgpu_dm_commit_audio(dev, state);

        /* Restore the backlight level. */
        for (i = 0; i < dm->num_of_edps; i++) {
                if (dm->backlight_dev[i] &&
                    (dm->actual_brightness[i] != dm->brightness[i]))
                        amdgpu_dm_backlight_set_level(dm, i, dm->brightness[i]);
        }

        /*
         * Send a vblank event for every event not handled in a flip and
         * mark the event consumed for drm_atomic_helper_commit_hw_done.
         */
        spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);
        for_each_new_crtc_in_state(state, crtc, new_crtc_state, i) {

                if (new_crtc_state->event)
                        drm_send_event_locked(dev, &new_crtc_state->event->base);

                new_crtc_state->event = NULL;
        }
        spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);

        /* Signal HW programming completion */
        drm_atomic_helper_commit_hw_done(state);

        if (wait_for_vblank)
                drm_atomic_helper_wait_for_flip_done(dev, state);

        drm_atomic_helper_cleanup_planes(dev, state);

        /* Return the stolen VGA memory back to VRAM. */
        if (!adev->mman.keep_stolen_vga_memory)
                amdgpu_bo_free_kernel(&adev->mman.stolen_vga_memory, NULL, NULL);
        amdgpu_bo_free_kernel(&adev->mman.stolen_extended_memory, NULL, NULL);

        /*
         * Finally, drop a runtime PM reference for each newly disabled CRTC,
         * so we can put the GPU into runtime suspend if we're not driving any
         * displays anymore.
         */
        for (i = 0; i < crtc_disable_count; i++)
                pm_runtime_put_autosuspend(dev->dev);
        pm_runtime_mark_last_busy(dev->dev);

        if (dc_state_temp)
                dc_release_state(dc_state_temp);
}

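/*
 * dm_force_atomic_commit() - build and commit a minimal atomic state that
 * re-programs the CRTC, plane and connector currently driving @connector.
 * Setting crtc_state->mode_changed forces a full modeset even though nothing
 * in the state actually changed, which restores the previous display setting.
 */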
static int dm_force_atomic_commit(struct drm_connector *connector)
{
        int ret = 0;
        struct drm_device *ddev = connector->dev;
        struct drm_atomic_state *state = drm_atomic_state_alloc(ddev);
        struct amdgpu_crtc *disconnected_acrtc = to_amdgpu_crtc(connector->encoder->crtc);
        struct drm_plane *plane = disconnected_acrtc->base.primary;
        struct drm_connector_state *conn_state;
        struct drm_crtc_state *crtc_state;
        struct drm_plane_state *plane_state;

        if (!state)
                return -ENOMEM;

        state->acquire_ctx = ddev->mode_config.acquire_ctx;

        /* Construct an atomic state to restore the previous display setting. */

        /* Attach connector to drm_atomic_state. */
        conn_state = drm_atomic_get_connector_state(state, connector);

        ret = PTR_ERR_OR_ZERO(conn_state);
        if (ret)
                goto out;

        /* Attach crtc to drm_atomic_state. */
        crtc_state = drm_atomic_get_crtc_state(state, &disconnected_acrtc->base);

        ret = PTR_ERR_OR_ZERO(crtc_state);
        if (ret)
                goto out;

        /* Force a restore. */
        crtc_state->mode_changed = true;

        /* Attach plane to drm_atomic_state. */
        plane_state = drm_atomic_get_plane_state(state, plane);

        ret = PTR_ERR_OR_ZERO(plane_state);
        if (ret)
                goto out;

        /* Call commit internally with the state we just constructed. */
        ret = drm_atomic_commit(state);

out:
        drm_atomic_state_put(state);
        if (ret)
                DRM_ERROR("Restoring old state failed with %i\n", ret);

        return ret;
}

/*
 * This function handles all cases when a set mode does not come upon hotplug.
 * This includes when a display is unplugged and then plugged back into the
 * same port, and when running without usermode desktop manager support.
 */
void dm_restore_drm_connector_state(struct drm_device *dev,
                                    struct drm_connector *connector)
{
        struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
        struct amdgpu_crtc *disconnected_acrtc;
        struct dm_crtc_state *acrtc_state;

        if (!aconnector->dc_sink || !connector->state || !connector->encoder)
                return;

        disconnected_acrtc = to_amdgpu_crtc(connector->encoder->crtc);
        if (!disconnected_acrtc)
                return;

        acrtc_state = to_dm_crtc_state(disconnected_acrtc->base.state);
        if (!acrtc_state->stream)
                return;

        /*
         * If the previous sink is not released and is different from the
         * current one, we deduce that we are in a state where we cannot rely
         * on a usermode call to turn on the display, so we do it here.
         */
        if (acrtc_state->stream->sink != aconnector->dc_sink)
                dm_force_atomic_commit(&aconnector->base);
}

/*
 * Grabs all modesetting locks to serialize against any blocking commits,
 * and waits for completion of all non-blocking commits.
 */
static int do_aquire_global_lock(struct drm_device *dev,
                                 struct drm_atomic_state *state)
{
        struct drm_crtc *crtc;
        struct drm_crtc_commit *commit;
        long ret;

        /*
         * Adding all modeset locks to acquire_ctx will ensure that when the
         * framework releases it, the extra locks we are taking here will also
         * get released.
         */
        ret = drm_modeset_lock_all_ctx(dev, state->acquire_ctx);
        if (ret)
                return ret;

        list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
                spin_lock(&crtc->commit_lock);
                commit = list_first_entry_or_null(&crtc->commit_list,
                                struct drm_crtc_commit, commit_entry);
                if (commit)
                        drm_crtc_commit_get(commit);
                spin_unlock(&crtc->commit_lock);

                if (!commit)
                        continue;

                /*
                 * Make sure all pending HW programming is completed and
                 * page flips are done.
                 */
                ret = wait_for_completion_interruptible_timeout(&commit->hw_done, 10*HZ);

                if (ret > 0)
                        ret = wait_for_completion_interruptible_timeout(
                                        &commit->flip_done, 10*HZ);

                if (ret == 0)
                        DRM_ERROR("[CRTC:%d:%s] hw_done or flip_done timed out\n",
                                  crtc->base.id, crtc->name);

                drm_crtc_commit_put(commit);
        }

        return ret < 0 ? ret : 0;
}

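/*
 * get_freesync_config_for_crtc() - derive the VRR/FreeSync configuration for
 * a CRTC from its connector state.
 *
 * A stream supports VRR when the connector reports FreeSync capability and
 * the mode's nominal refresh rate falls within the sink's [min_vfreq,
 * max_vfreq] range. mod_freesync_config works in micro-Hz, hence the
 * "* 1000000" conversions below (e.g. a 48 Hz minimum becomes 48000000 uHz).
 */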
static void get_freesync_config_for_crtc(
        struct dm_crtc_state *new_crtc_state,
        struct dm_connector_state *new_con_state)
{
        struct mod_freesync_config config = {0};
        struct amdgpu_dm_connector *aconnector =
                        to_amdgpu_dm_connector(new_con_state->base.connector);
        struct drm_display_mode *mode = &new_crtc_state->base.mode;
        int vrefresh = drm_mode_vrefresh(mode);
        bool fs_vid_mode = false;

        new_crtc_state->vrr_supported = new_con_state->freesync_capable &&
                                        vrefresh >= aconnector->min_vfreq &&
                                        vrefresh <= aconnector->max_vfreq;

        if (new_crtc_state->vrr_supported) {
                new_crtc_state->stream->ignore_msa_timing_param = true;
                fs_vid_mode = new_crtc_state->freesync_config.state == VRR_STATE_ACTIVE_FIXED;

                config.min_refresh_in_uhz = aconnector->min_vfreq * 1000000;
                config.max_refresh_in_uhz = aconnector->max_vfreq * 1000000;
                config.vsif_supported = true;
                config.btr = true;

                if (fs_vid_mode) {
                        config.state = VRR_STATE_ACTIVE_FIXED;
                        config.fixed_refresh_in_uhz = new_crtc_state->freesync_config.fixed_refresh_in_uhz;
                        goto out;
                } else if (new_crtc_state->base.vrr_enabled) {
                        config.state = VRR_STATE_ACTIVE_VARIABLE;
                } else {
                        config.state = VRR_STATE_INACTIVE;
                }
        }
out:
        new_crtc_state->freesync_config = config;
}

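/*
 * reset_freesync_config_for_crtc() - drop VRR support and clear the cached
 * VRR infopacket, used when a CRTC's stream is removed from the DC context.
 */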
static void reset_freesync_config_for_crtc(
        struct dm_crtc_state *new_crtc_state)
{
        new_crtc_state->vrr_supported = false;

        memset(&new_crtc_state->vrr_infopacket, 0,
               sizeof(new_crtc_state->vrr_infopacket));
}

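/*
 * is_timing_unchanged_for_freesync() - check that two modes differ only in
 * their vertical front porch.
 *
 * Note the mix of == and != below is intentional: the clock, the horizontal
 * timing and the vsync pulse width (vsync_end - vsync_start) must match
 * exactly, while vtotal and the vsync position are expected to differ. Such
 * a pair of modes can be served by a fixed-refresh FreeSync configuration
 * (see set_freesync_fixed_config()) instead of a full modeset.
 */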
static bool
is_timing_unchanged_for_freesync(struct drm_crtc_state *old_crtc_state,
                                 struct drm_crtc_state *new_crtc_state)
{
        const struct drm_display_mode *old_mode, *new_mode;

        if (!old_crtc_state || !new_crtc_state)
                return false;

        old_mode = &old_crtc_state->mode;
        new_mode = &new_crtc_state->mode;

        if (old_mode->clock       == new_mode->clock &&
            old_mode->hdisplay    == new_mode->hdisplay &&
            old_mode->vdisplay    == new_mode->vdisplay &&
            old_mode->htotal      == new_mode->htotal &&
            old_mode->vtotal      != new_mode->vtotal &&
            old_mode->hsync_start == new_mode->hsync_start &&
            old_mode->vsync_start != new_mode->vsync_start &&
            old_mode->hsync_end   == new_mode->hsync_end &&
            old_mode->vsync_end   != new_mode->vsync_end &&
            old_mode->hskew       == new_mode->hskew &&
            old_mode->vscan       == new_mode->vscan &&
            (old_mode->vsync_end - old_mode->vsync_start) ==
            (new_mode->vsync_end - new_mode->vsync_start))
                return true;

        return false;
}

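/*
 * set_freesync_fixed_config() - pin the CRTC to a fixed refresh rate.
 *
 * The fixed refresh rate in micro-Hz is derived from the mode timing:
 *
 *   refresh_uhz = (clock_khz * 1000 * 1000000) / (htotal * vtotal)
 *
 * For example, a 1920x1080 mode with clock = 148500 kHz, htotal = 2200 and
 * vtotal = 1125 yields 148500000000000 / 2475000 = 60000000 uHz, i.e. 60 Hz.
 */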
static void set_freesync_fixed_config(struct dm_crtc_state *dm_new_crtc_state)
{
        uint64_t num, den, res;
        struct drm_crtc_state *new_crtc_state = &dm_new_crtc_state->base;

        dm_new_crtc_state->freesync_config.state = VRR_STATE_ACTIVE_FIXED;

        num = (unsigned long long)new_crtc_state->mode.clock * 1000 * 1000000;
        den = (unsigned long long)new_crtc_state->mode.htotal *
              (unsigned long long)new_crtc_state->mode.vtotal;

        res = div_u64(num, den);
        dm_new_crtc_state->freesync_config.fixed_refresh_in_uhz = res;
}

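/*
 * dm_update_crtc_state() - atomic-check helper that adds or removes the DC
 * stream backing a CRTC.
 *
 * With @enable == false this removes the old stream from the DC context for
 * any changed or disabled CRTC; with @enable == true it creates, validates
 * and adds the new stream. *lock_and_validation_needed is set whenever the
 * DC context was modified, signalling the caller that a full validation pass
 * under the global lock is required.
 */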
static int dm_update_crtc_state(struct amdgpu_display_manager *dm,
                         struct drm_atomic_state *state,
                         struct drm_crtc *crtc,
                         struct drm_crtc_state *old_crtc_state,
                         struct drm_crtc_state *new_crtc_state,
                         bool enable,
                         bool *lock_and_validation_needed)
{
        struct dm_atomic_state *dm_state = NULL;
        struct dm_crtc_state *dm_old_crtc_state, *dm_new_crtc_state;
        struct dc_stream_state *new_stream;
        int ret = 0;

        /*
         * TODO: Move this code into dm_crtc_atomic_check once we get rid of
         * dc_validation_set; update changed items here.
         */
        struct amdgpu_crtc *acrtc = NULL;
        struct amdgpu_dm_connector *aconnector = NULL;
        struct drm_connector_state *drm_new_conn_state = NULL, *drm_old_conn_state = NULL;
        struct dm_connector_state *dm_new_conn_state = NULL, *dm_old_conn_state = NULL;

        new_stream = NULL;

        dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
        dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
        acrtc = to_amdgpu_crtc(crtc);
        aconnector = amdgpu_dm_find_first_crtc_matching_connector(state, crtc);

        /* TODO: This hack should go away. */
        if (aconnector && enable) {
                /* Make sure a fake sink is created in the plug-in scenario. */
                drm_new_conn_state = drm_atomic_get_new_connector_state(state,
                                                            &aconnector->base);
                drm_old_conn_state = drm_atomic_get_old_connector_state(state,
                                                            &aconnector->base);

                if (IS_ERR(drm_new_conn_state)) {
                        ret = PTR_ERR_OR_ZERO(drm_new_conn_state);
                        goto fail;
                }

                dm_new_conn_state = to_dm_connector_state(drm_new_conn_state);
                dm_old_conn_state = to_dm_connector_state(drm_old_conn_state);

                if (!drm_atomic_crtc_needs_modeset(new_crtc_state))
                        goto skip_modeset;

                new_stream = create_validate_stream_for_sink(aconnector,
                                                             &new_crtc_state->mode,
                                                             dm_new_conn_state,
                                                             dm_old_crtc_state->stream);

                /*
                 * We can have no stream on ACTION_SET if a display
                 * was disconnected during S3; in this case it is not an
                 * error, the OS will be updated after detection and
                 * will do the right thing on the next atomic commit.
                 */

                if (!new_stream) {
                        DRM_DEBUG_DRIVER("%s: Failed to create new stream for crtc %d\n",
                                        __func__, acrtc->base.base.id);
                        ret = -ENOMEM;
                        goto fail;
                }

                /*
                 * TODO: Check VSDB bits to decide whether this should
                 * be enabled or not.
                 */
                new_stream->triggered_crtc_reset.enabled =
                        dm->force_timing_sync;

                dm_new_crtc_state->abm_level = dm_new_conn_state->abm_level;

                ret = fill_hdr_info_packet(drm_new_conn_state,
                                           &new_stream->hdr_static_metadata);
                if (ret)
                        goto fail;

                /*
                 * If we already removed the old stream from the context
                 * (and set the new stream to NULL) then we can't reuse
                 * the old stream even if the stream and scaling are unchanged.
                 * We'll hit the BUG_ON and get a black screen.
                 *
                 * TODO: Refactor this function to allow this check to work
                 * in all conditions.
                 */
                if (amdgpu_freesync_vid_mode &&
                    dm_new_crtc_state->stream &&
                    is_timing_unchanged_for_freesync(new_crtc_state, old_crtc_state))
                        goto skip_modeset;

                if (dm_new_crtc_state->stream &&
                    dc_is_stream_unchanged(new_stream, dm_old_crtc_state->stream) &&
                    dc_is_stream_scaling_unchanged(new_stream, dm_old_crtc_state->stream)) {
                        new_crtc_state->mode_changed = false;
                        DRM_DEBUG_DRIVER("Mode change not required, setting mode_changed to %d",
                                         new_crtc_state->mode_changed);
                }
        }

        /* The mode_changed flag may get updated above, so check it again. */
        if (!drm_atomic_crtc_needs_modeset(new_crtc_state))
                goto skip_modeset;

        drm_dbg_state(state->dev,
                "amdgpu_crtc id:%d crtc_state_flags: enable:%d, active:%d, "
                "planes_changed:%d, mode_changed:%d, active_changed:%d, "
                "connectors_changed:%d\n",
                acrtc->crtc_id,
                new_crtc_state->enable,
                new_crtc_state->active,
                new_crtc_state->planes_changed,
                new_crtc_state->mode_changed,
                new_crtc_state->active_changed,
                new_crtc_state->connectors_changed);

        /* Remove stream for any changed/disabled CRTC */
        if (!enable) {

                if (!dm_old_crtc_state->stream)
                        goto skip_modeset;

                if (amdgpu_freesync_vid_mode && dm_new_crtc_state->stream &&
                    is_timing_unchanged_for_freesync(new_crtc_state,
                                                     old_crtc_state)) {
                        new_crtc_state->mode_changed = false;
                        DRM_DEBUG_DRIVER(
                                "Mode change not required for front porch change, "
                                "setting mode_changed to %d",
                                new_crtc_state->mode_changed);

                        set_freesync_fixed_config(dm_new_crtc_state);

                        goto skip_modeset;
                } else if (amdgpu_freesync_vid_mode && aconnector &&
                           is_freesync_video_mode(&new_crtc_state->mode,
                                                  aconnector)) {
                        struct drm_display_mode *high_mode;

                        high_mode = get_highest_refresh_rate_mode(aconnector, false);
                        if (!drm_mode_equal(&new_crtc_state->mode, high_mode))
                                set_freesync_fixed_config(dm_new_crtc_state);
                }

                ret = dm_atomic_get_state(state, &dm_state);
                if (ret)
                        goto fail;

                DRM_DEBUG_DRIVER("Disabling DRM crtc: %d\n",
                                crtc->base.id);

                /* i.e. reset mode */
                if (dc_remove_stream_from_ctx(
                                dm->dc,
                                dm_state->context,
                                dm_old_crtc_state->stream) != DC_OK) {
                        ret = -EINVAL;
                        goto fail;
                }

                dc_stream_release(dm_old_crtc_state->stream);
                dm_new_crtc_state->stream = NULL;

                reset_freesync_config_for_crtc(dm_new_crtc_state);

                *lock_and_validation_needed = true;

        } else { /* Add stream for any updated/enabled CRTC */
                /*
                 * Quick fix to prevent a NULL pointer dereference on
                 * new_stream when newly added MST connectors are not found
                 * in the existing crtc_state in chained (daisy-chain) mode.
                 * TODO: need to dig out the root cause of that.
                 */
                if (!aconnector)
                        goto skip_modeset;

                if (modereset_required(new_crtc_state))
                        goto skip_modeset;

                if (modeset_required(new_crtc_state, new_stream,
                                     dm_old_crtc_state->stream)) {

                        WARN_ON(dm_new_crtc_state->stream);

                        ret = dm_atomic_get_state(state, &dm_state);
                        if (ret)
                                goto fail;

                        dm_new_crtc_state->stream = new_stream;

                        dc_stream_retain(new_stream);

                        DRM_DEBUG_ATOMIC("Enabling DRM crtc: %d\n",
                                         crtc->base.id);

                        if (dc_add_stream_to_ctx(
                                        dm->dc,
                                        dm_state->context,
                                        dm_new_crtc_state->stream) != DC_OK) {
                                ret = -EINVAL;
                                goto fail;
                        }

                        *lock_and_validation_needed = true;
                }
        }

skip_modeset:
        /* Release the extra reference. */
        if (new_stream)
                dc_stream_release(new_stream);

        /*
         * We want to do dc stream updates that do not require a
         * full modeset below.
         */
        if (!(enable && aconnector && new_crtc_state->active))
                return 0;
        /*
         * Given the above conditions, the dc state cannot be NULL because:
         * 1. We're in the process of enabling the CRTC (it has just been
         *    added to the dc context, or is already on the context),
         * 2. It has a valid connector attached, and
         * 3. It is currently active and enabled.
         * => The dc stream state currently exists.
         */
        BUG_ON(dm_new_crtc_state->stream == NULL);

        /* Scaling or underscan settings */
        if (is_scaling_state_different(dm_old_conn_state, dm_new_conn_state) ||
                                drm_atomic_crtc_needs_modeset(new_crtc_state))
                update_stream_scaling_settings(
                        &new_crtc_state->mode, dm_new_conn_state, dm_new_crtc_state->stream);

        /* ABM settings */
        dm_new_crtc_state->abm_level = dm_new_conn_state->abm_level;

        /*
         * Color management settings. We also update color properties
         * when a modeset is needed, to ensure it gets reprogrammed.
         */
        if (dm_new_crtc_state->base.color_mgmt_changed ||
            drm_atomic_crtc_needs_modeset(new_crtc_state)) {
                ret = amdgpu_dm_update_crtc_color_mgmt(dm_new_crtc_state);
                if (ret)
                        goto fail;
        }

        /* Update FreeSync settings. */
        get_freesync_config_for_crtc(dm_new_crtc_state,
                                     dm_new_conn_state);

        return ret;

fail:
        if (new_stream)
                dc_stream_release(new_stream);
        return ret;
}

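/*
 * should_reset_plane() - decide whether a plane update requires a full
 * remove/recreate of all planes on the stream.
 *
 * Returns true for anything that can affect pipe allocation or z-order:
 * modesets, CRTC changes, color management updates, and scaling, rotation,
 * blending, alpha, colorspace, pixel format, tiling or DCC changes on any
 * plane sharing the CRTC. If none of those apply, the update can be handled
 * as a fast update without recreating the planes.
 */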
static bool should_reset_plane(struct drm_atomic_state *state,
                               struct drm_plane *plane,
                               struct drm_plane_state *old_plane_state,
                               struct drm_plane_state *new_plane_state)
{
        struct drm_plane *other;
        struct drm_plane_state *old_other_state, *new_other_state;
        struct drm_crtc_state *new_crtc_state;
        int i;

        /*
         * TODO: Remove this hack once the checks below are sufficient to
         * determine when we need to reset all the planes on the stream.
         */
        if (state->allow_modeset)
                return true;

        /* Exit early if we know that we're adding or removing the plane. */
        if (old_plane_state->crtc != new_plane_state->crtc)
                return true;

        /* old crtc == new_crtc == NULL, plane not in context. */
        if (!new_plane_state->crtc)
                return false;

        new_crtc_state =
                drm_atomic_get_new_crtc_state(state, new_plane_state->crtc);

        if (!new_crtc_state)
                return true;

        /* CRTC Degamma changes currently require us to recreate planes. */
        if (new_crtc_state->color_mgmt_changed)
                return true;

        if (drm_atomic_crtc_needs_modeset(new_crtc_state))
                return true;

        /*
         * If there are any new primary or overlay planes being added or
         * removed then the z-order can potentially change. To ensure
         * correct z-order and pipe acquisition the current DC architecture
         * requires us to remove and recreate all existing planes.
         *
         * TODO: Come up with a more elegant solution for this.
         */
        for_each_oldnew_plane_in_state(state, other, old_other_state, new_other_state, i) {
                struct amdgpu_framebuffer *old_afb, *new_afb;

                if (other->type == DRM_PLANE_TYPE_CURSOR)
                        continue;

                if (old_other_state->crtc != new_plane_state->crtc &&
                    new_other_state->crtc != new_plane_state->crtc)
                        continue;

                if (old_other_state->crtc != new_other_state->crtc)
                        return true;

                /* Src/dst size and scaling updates. */
                if (old_other_state->src_w != new_other_state->src_w ||
                    old_other_state->src_h != new_other_state->src_h ||
                    old_other_state->crtc_w != new_other_state->crtc_w ||
                    old_other_state->crtc_h != new_other_state->crtc_h)
                        return true;

                /* Rotation / mirroring updates. */
                if (old_other_state->rotation != new_other_state->rotation)
                        return true;

                /* Blending updates. */
                if (old_other_state->pixel_blend_mode !=
                    new_other_state->pixel_blend_mode)
                        return true;

                /* Alpha updates. */
                if (old_other_state->alpha != new_other_state->alpha)
                        return true;

                /* Colorspace changes. */
                if (old_other_state->color_range != new_other_state->color_range ||
                    old_other_state->color_encoding != new_other_state->color_encoding)
                        return true;

                /* Framebuffer checks fall at the end. */
                if (!old_other_state->fb || !new_other_state->fb)
                        continue;

                /* Pixel format changes can require bandwidth updates. */
                if (old_other_state->fb->format != new_other_state->fb->format)
                        return true;

                old_afb = (struct amdgpu_framebuffer *)old_other_state->fb;
                new_afb = (struct amdgpu_framebuffer *)new_other_state->fb;

                /* Tiling and DCC changes also require bandwidth updates. */
                if (old_afb->tiling_flags != new_afb->tiling_flags ||
                    old_afb->base.modifier != new_afb->base.modifier)
                        return true;
        }

        return false;
}

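/*
 * dm_check_cursor_fb() - validate a framebuffer attached to the cursor plane.
 *
 * The hardware cursor only supports unscaled, linear surfaces whose width in
 * pixels equals the FB pitch and is one of 64, 128 or 256. For example, a
 * 64x64 ARGB8888 cursor has pitches[0] = 256 bytes and cpp[0] = 4, giving a
 * pitch of 64 pixels, which passes these checks.
 */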
9128 static int dm_check_cursor_fb(struct amdgpu_crtc *new_acrtc,
9129                               struct drm_plane_state *new_plane_state,
9130                               struct drm_framebuffer *fb)
9131 {
9132         struct amdgpu_device *adev = drm_to_adev(new_acrtc->base.dev);
9133         struct amdgpu_framebuffer *afb = to_amdgpu_framebuffer(fb);
9134         unsigned int pitch;
9135         bool linear;
9136
9137         if (fb->width > new_acrtc->max_cursor_width ||
9138             fb->height > new_acrtc->max_cursor_height) {
9139                 DRM_DEBUG_ATOMIC("Bad cursor FB size %dx%d\n",
9140                                  new_plane_state->fb->width,
9141                                  new_plane_state->fb->height);
9142                 return -EINVAL;
9143         }
9144         if (new_plane_state->src_w != fb->width << 16 ||
9145             new_plane_state->src_h != fb->height << 16) {
9146                 DRM_DEBUG_ATOMIC("Cropping not supported for cursor plane\n");
9147                 return -EINVAL;
9148         }
9149
9150         /* Pitch in pixels */
9151         pitch = fb->pitches[0] / fb->format->cpp[0];
9152
9153         if (fb->width != pitch) {
9154                 DRM_DEBUG_ATOMIC("Cursor FB width %d doesn't match pitch %d\n",
9155                                  fb->width, pitch);
9156                 return -EINVAL;
9157         }
9158
9159         switch (pitch) {
9160         case 64:
9161         case 128:
9162         case 256:
9163                 /* FB pitch is supported by cursor plane */
9164                 break;
9165         default:
9166                 DRM_DEBUG_ATOMIC("Bad cursor FB pitch %d px\n", pitch);
9167                 return -EINVAL;
9168         }
9169
9170         /* Core DRM takes care of checking FB modifiers, so we only need to
9171          * check tiling flags when the FB doesn't have a modifier. */
9172         if (!(fb->flags & DRM_MODE_FB_MODIFIERS)) {
9173                 if (adev->family < AMDGPU_FAMILY_AI) {
9174                         linear = AMDGPU_TILING_GET(afb->tiling_flags, ARRAY_MODE) != DC_ARRAY_2D_TILED_THIN1 &&
9175                                  AMDGPU_TILING_GET(afb->tiling_flags, ARRAY_MODE) != DC_ARRAY_1D_TILED_THIN1 &&
9176                                  AMDGPU_TILING_GET(afb->tiling_flags, MICRO_TILE_MODE) == 0;
9177                 } else {
9178                         linear = AMDGPU_TILING_GET(afb->tiling_flags, SWIZZLE_MODE) == 0;
9179                 }
9180                 if (!linear) {
9181                         DRM_DEBUG_ATOMIC("Cursor FB not linear\n");
9182                         return -EINVAL;
9183                 }
9184         }
9185
9186         return 0;
9187 }
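/*
 * A short worked example of the checks above (hypothetical cursor FB): a
 * 64x64 ARGB8888 cursor has cpp[0] == 4, so a tightly packed framebuffer
 * has pitches[0] == 64 * 4 == 256 bytes and a pixel pitch of 256 / 4 == 64,
 * which is one of the accepted values (64/128/256). A 100-pixel-wide cursor
 * FB would compute a pixel pitch of 100 and be rejected with -EINVAL by the
 * switch statement.
 */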
9188
9189 static int dm_update_plane_state(struct dc *dc,
9190                                  struct drm_atomic_state *state,
9191                                  struct drm_plane *plane,
9192                                  struct drm_plane_state *old_plane_state,
9193                                  struct drm_plane_state *new_plane_state,
9194                                  bool enable,
9195                                  bool *lock_and_validation_needed)
9196 {
9197
9198         struct dm_atomic_state *dm_state = NULL;
9199         struct drm_crtc *new_plane_crtc, *old_plane_crtc;
9200         struct drm_crtc_state *old_crtc_state, *new_crtc_state;
9201         struct dm_crtc_state *dm_new_crtc_state, *dm_old_crtc_state;
9202         struct dm_plane_state *dm_new_plane_state, *dm_old_plane_state;
9203         struct amdgpu_crtc *new_acrtc;
9204         bool needs_reset;
9205         int ret = 0;
9206
9207
9208         new_plane_crtc = new_plane_state->crtc;
9209         old_plane_crtc = old_plane_state->crtc;
9210         dm_new_plane_state = to_dm_plane_state(new_plane_state);
9211         dm_old_plane_state = to_dm_plane_state(old_plane_state);
9212
9213         if (plane->type == DRM_PLANE_TYPE_CURSOR) {
9214                 if (!enable || !new_plane_crtc ||
9215                         drm_atomic_plane_disabling(plane->state, new_plane_state))
9216                         return 0;
9217
9218                 new_acrtc = to_amdgpu_crtc(new_plane_crtc);
9219
9220                 if (new_plane_state->src_x != 0 || new_plane_state->src_y != 0) {
9221                         DRM_DEBUG_ATOMIC("Cropping not supported for cursor plane\n");
9222                         return -EINVAL;
9223                 }
9224
9225                 if (new_plane_state->fb) {
9226                         ret = dm_check_cursor_fb(new_acrtc, new_plane_state,
9227                                                  new_plane_state->fb);
9228                         if (ret)
9229                                 return ret;
9230                 }
9231
9232                 return 0;
9233         }
9234
9235         needs_reset = should_reset_plane(state, plane, old_plane_state,
9236                                          new_plane_state);
9237
9238         /* Remove any changed/removed planes */
9239         if (!enable) {
9240                 if (!needs_reset)
9241                         return 0;
9242
9243                 if (!old_plane_crtc)
9244                         return 0;
9245
9246                 old_crtc_state = drm_atomic_get_old_crtc_state(
9247                                 state, old_plane_crtc);
9248                 dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
9249
9250                 if (!dm_old_crtc_state->stream)
9251                         return 0;
9252
9253                 DRM_DEBUG_ATOMIC("Disabling DRM plane: %d on DRM crtc %d\n",
9254                                 plane->base.id, old_plane_crtc->base.id);
9255
9256                 ret = dm_atomic_get_state(state, &dm_state);
9257                 if (ret)
9258                         return ret;
9259
9260                 if (!dc_remove_plane_from_context(
9261                                 dc,
9262                                 dm_old_crtc_state->stream,
9263                                 dm_old_plane_state->dc_state,
9264                                 dm_state->context)) {
9265
9266                         return -EINVAL;
9267                 }
9268
9269
9270                 dc_plane_state_release(dm_old_plane_state->dc_state);
9271                 dm_new_plane_state->dc_state = NULL;
9272
9273                 *lock_and_validation_needed = true;
9274
9275         } else { /* Add new planes */
9276                 struct dc_plane_state *dc_new_plane_state;
9277
9278                 if (drm_atomic_plane_disabling(plane->state, new_plane_state))
9279                         return 0;
9280
9281                 if (!new_plane_crtc)
9282                         return 0;
9283
9284                 new_crtc_state = drm_atomic_get_new_crtc_state(state, new_plane_crtc);
9285                 dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
9286
9287                 if (!dm_new_crtc_state->stream)
9288                         return 0;
9289
9290                 if (!needs_reset)
9291                         return 0;
9292
9293                 ret = dm_plane_helper_check_state(new_plane_state, new_crtc_state);
9294                 if (ret)
9295                         return ret;
9296
9297                 WARN_ON(dm_new_plane_state->dc_state);
9298
9299                 dc_new_plane_state = dc_create_plane_state(dc);
9300                 if (!dc_new_plane_state)
9301                         return -ENOMEM;
9302
9303                 DRM_DEBUG_ATOMIC("Enabling DRM plane: %d on DRM crtc %d\n",
9304                                  plane->base.id, new_plane_crtc->base.id);
9305
9306                 ret = fill_dc_plane_attributes(
9307                         drm_to_adev(new_plane_crtc->dev),
9308                         dc_new_plane_state,
9309                         new_plane_state,
9310                         new_crtc_state);
9311                 if (ret) {
9312                         dc_plane_state_release(dc_new_plane_state);
9313                         return ret;
9314                 }
9315
9316                 ret = dm_atomic_get_state(state, &dm_state);
9317                 if (ret) {
9318                         dc_plane_state_release(dc_new_plane_state);
9319                         return ret;
9320                 }
9321
9322                 /*
9323                  * Any atomic check errors that occur after this will
9324                  * not need a release. The plane state will be attached
9325                  * to the stream, and therefore part of the atomic
9326                  * state. It'll be released when the atomic state is
9327                  * cleaned.
9328                  */
9329                 if (!dc_add_plane_to_context(
9330                                 dc,
9331                                 dm_new_crtc_state->stream,
9332                                 dc_new_plane_state,
9333                                 dm_state->context)) {
9334
9335                         dc_plane_state_release(dc_new_plane_state);
9336                         return -EINVAL;
9337                 }
9338
9339                 dm_new_plane_state->dc_state = dc_new_plane_state;
9340
9341                 dm_new_crtc_state->mpo_requested |= (plane->type == DRM_PLANE_TYPE_OVERLAY);
9342
9343                 /* Tell DC to do a full surface update every time there
9344                  * is a plane change. Inefficient, but works for now.
9345                  */
9346                 dm_new_plane_state->dc_state->update_flags.bits.full_update = 1;
9347
9348                 *lock_and_validation_needed = true;
9349         }
9350
9351
9352         return ret;
9353 }
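/*
 * A sketch of how dm_update_plane_state() is driven from atomic check (see
 * amdgpu_dm_atomic_check() below): each plane is visited twice, first with
 * enable == false to strip changed/removed planes from the DC context, then
 * with enable == true to re-add them (error handling elided):
 *
 *   for_each_oldnew_plane_in_state_reverse(state, plane, old_ps, new_ps, i)
 *           ret = dm_update_plane_state(dc, state, plane, old_ps, new_ps,
 *                                       false, &lock_and_validation_needed);
 *   // ...the CRTC disable/enable passes run in between...
 *   for_each_oldnew_plane_in_state_reverse(state, plane, old_ps, new_ps, i)
 *           ret = dm_update_plane_state(dc, state, plane, old_ps, new_ps,
 *                                       true, &lock_and_validation_needed);
 */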
9354
9355 static void dm_get_oriented_plane_size(struct drm_plane_state *plane_state,
9356                                        int *src_w, int *src_h)
9357 {
9358         switch (plane_state->rotation & DRM_MODE_ROTATE_MASK) {
9359         case DRM_MODE_ROTATE_90:
9360         case DRM_MODE_ROTATE_270:
9361                 *src_w = plane_state->src_h >> 16;
9362                 *src_h = plane_state->src_w >> 16;
9363                 break;
9364         case DRM_MODE_ROTATE_0:
9365         case DRM_MODE_ROTATE_180:
9366         default:
9367                 *src_w = plane_state->src_w >> 16;
9368                 *src_h = plane_state->src_h >> 16;
9369                 break;
9370         }
9371 }
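/*
 * Usage sketch: DRM keeps src_w/src_h in 16.16 fixed point, hence the
 * ">> 16" above. Assuming a hypothetical 256x128 source rotated by 90
 * degrees:
 *
 *   int w, h;
 *   // plane_state->src_w == 256 << 16, plane_state->src_h == 128 << 16
 *   dm_get_oriented_plane_size(plane_state, &w, &h);
 *   // w == 128, h == 256: the axes swap for 90/270 degree rotations
 */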
9372
9373 static int dm_check_crtc_cursor(struct drm_atomic_state *state,
9374                                 struct drm_crtc *crtc,
9375                                 struct drm_crtc_state *new_crtc_state)
9376 {
9377         struct drm_plane *cursor = crtc->cursor, *underlying;
9378         struct drm_plane_state *new_cursor_state, *new_underlying_state;
9379         int i;
9380         int cursor_scale_w, cursor_scale_h, underlying_scale_w, underlying_scale_h;
9381         int cursor_src_w, cursor_src_h;
9382         int underlying_src_w, underlying_src_h;
9383
9384         /* On DCE and DCN there is no dedicated hardware cursor plane. We get a
9385          * cursor per pipe, but it inherits the scaling and positioning from the
9386          * underlying pipe. Check that the cursor plane's scaling matches the
9387          * underlying planes'. */
9388
9389         new_cursor_state = drm_atomic_get_new_plane_state(state, cursor);
9390         if (!new_cursor_state || !new_cursor_state->fb) {
9391                 return 0;
9392         }
9393
9394         dm_get_oriented_plane_size(new_cursor_state, &cursor_src_w, &cursor_src_h);
9395         cursor_scale_w = new_cursor_state->crtc_w * 1000 / cursor_src_w;
9396         cursor_scale_h = new_cursor_state->crtc_h * 1000 / cursor_src_h;
9397
9398         for_each_new_plane_in_state_reverse(state, underlying, new_underlying_state, i) {
9399                 /* Narrow down to non-cursor planes on the same CRTC as the cursor */
9400                 if (new_underlying_state->crtc != crtc || underlying == crtc->cursor)
9401                         continue;
9402
9403                 /* Ignore disabled planes */
9404                 if (!new_underlying_state->fb)
9405                         continue;
9406
9407                 dm_get_oriented_plane_size(new_underlying_state,
9408                                            &underlying_src_w, &underlying_src_h);
9409                 underlying_scale_w = new_underlying_state->crtc_w * 1000 / underlying_src_w;
9410                 underlying_scale_h = new_underlying_state->crtc_h * 1000 / underlying_src_h;
9411
9412                 if (cursor_scale_w != underlying_scale_w ||
9413                     cursor_scale_h != underlying_scale_h) {
9414                         drm_dbg_atomic(crtc->dev,
9415                                        "Cursor [PLANE:%d:%s] scaling doesn't match underlying [PLANE:%d:%s]\n",
9416                                        cursor->base.id, cursor->name, underlying->base.id, underlying->name);
9417                         return -EINVAL;
9418                 }
9419
9420                 /* If this plane covers the whole CRTC, no need to check planes underneath */
9421                 if (new_underlying_state->crtc_x <= 0 &&
9422                     new_underlying_state->crtc_y <= 0 &&
9423                     new_underlying_state->crtc_x + new_underlying_state->crtc_w >= new_crtc_state->mode.hdisplay &&
9424                     new_underlying_state->crtc_y + new_underlying_state->crtc_h >= new_crtc_state->mode.vdisplay)
9425                         break;
9426         }
9427
9428         return 0;
9429 }
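/*
 * Worked example of the per-mille scale comparison above (hypothetical
 * numbers): a 64x64 cursor shown at 64x64 gives
 * cursor_scale_w == 64 * 1000 / 64 == 1000, while an underlying plane
 * scanning a 1920-pixel source out to 3840 pixels gives
 * underlying_scale_w == 3840 * 1000 / 1920 == 2000. The factors differ, so
 * the state is rejected with -EINVAL: the shared cursor would otherwise be
 * scaled along with the underlying pipe.
 */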
9430
9431 #if defined(CONFIG_DRM_AMD_DC_DCN)
9432 static int add_affected_mst_dsc_crtcs(struct drm_atomic_state *state, struct drm_crtc *crtc)
9433 {
9434         struct drm_connector *connector;
9435         struct drm_connector_state *conn_state, *old_conn_state;
9436         struct amdgpu_dm_connector *aconnector = NULL;
9437         int i;
9438         for_each_oldnew_connector_in_state(state, connector, old_conn_state, conn_state, i) {
9439                 if (!conn_state->crtc)
9440                         conn_state = old_conn_state;
9441
9442                 if (conn_state->crtc != crtc)
9443                         continue;
9444
9445                 aconnector = to_amdgpu_dm_connector(connector);
9446                 if (!aconnector->port || !aconnector->mst_port)
9447                         aconnector = NULL;
9448                 else
9449                         break;
9450         }
9451
9452         if (!aconnector)
9453                 return 0;
9454
9455         return drm_dp_mst_add_affected_dsc_crtcs(state, &aconnector->mst_port->mst_mgr);
9456 }
9457 #endif
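/*
 * Call pattern, as used from amdgpu_dm_atomic_check() below: every CRTC
 * that needs a modeset pulls its MST/DSC siblings into the atomic state,
 * since recomputing DSC for one stream can change the bandwidth available
 * to the other streams on the same topology:
 *
 *   if (drm_atomic_crtc_needs_modeset(new_crtc_state)) {
 *           ret = add_affected_mst_dsc_crtcs(state, crtc);
 *           if (ret)
 *                   goto fail;
 *   }
 */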
9458
9459 /**
9460  * amdgpu_dm_atomic_check() - Atomic check implementation for AMDgpu DM.
9461  *
9462  * @dev: The DRM device
9463  * @state: The atomic state to commit
9464  *
9465  * Validate that the given atomic state is programmable by DC into hardware.
9466  * This involves constructing a &struct dc_state reflecting the new hardware
9467  * state we wish to commit, then querying DC to see if it is programmable. It's
9468  * important not to modify the existing DC state. Otherwise, atomic_check
9469  * may unexpectedly commit hardware changes.
9470  *
9471  * When validating the DC state, it's important that the right locks are
9472  * acquired. For a full update, which removes/adds/updates streams on one
9473  * CRTC while flipping on another CRTC, acquiring the global lock guarantees
9474  * that any such full-update commit will wait for completion of any
9475  * outstanding flip using DRM's synchronization events.
9476  *
9477  * Note that DM adds the affected connectors for all CRTCs in state, even
9478  * when that might not seem necessary. This is because DC stream creation
9479  * requires the DC sink, which is tied to the DRM connector state. Cleaning
9480  * this up should be possible but non-trivial - a possible TODO item.
9481  *
9482  * Return: 0 on success, or a negative error code if validation failed.
9483  */
9484 static int amdgpu_dm_atomic_check(struct drm_device *dev,
9485                                   struct drm_atomic_state *state)
9486 {
9487         struct amdgpu_device *adev = drm_to_adev(dev);
9488         struct dm_atomic_state *dm_state = NULL;
9489         struct dc *dc = adev->dm.dc;
9490         struct drm_connector *connector;
9491         struct drm_connector_state *old_con_state, *new_con_state;
9492         struct drm_crtc *crtc;
9493         struct drm_crtc_state *old_crtc_state, *new_crtc_state;
9494         struct drm_plane *plane;
9495         struct drm_plane_state *old_plane_state, *new_plane_state;
9496         enum dc_status status;
9497         int ret, i;
9498         bool lock_and_validation_needed = false;
9499         struct dm_crtc_state *dm_old_crtc_state, *dm_new_crtc_state;
9500 #if defined(CONFIG_DRM_AMD_DC_DCN)
9501         struct dsc_mst_fairness_vars vars[MAX_PIPES];
9502 #endif
9503
9504         trace_amdgpu_dm_atomic_check_begin(state);
9505
9506         ret = drm_atomic_helper_check_modeset(dev, state);
9507         if (ret) {
9508                 DRM_DEBUG_DRIVER("drm_atomic_helper_check_modeset() failed\n");
9509                 goto fail;
9510         }
9511
9512         /* Check connector changes */
9513         for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) {
9514                 struct dm_connector_state *dm_old_con_state = to_dm_connector_state(old_con_state);
9515                 struct dm_connector_state *dm_new_con_state = to_dm_connector_state(new_con_state);
9516
9517                 /* Skip connectors that are disabled or part of modeset already. */
9518                 if (!new_con_state->crtc)
9519                         continue;
9520
9521                 new_crtc_state = drm_atomic_get_crtc_state(state, new_con_state->crtc);
9522                 if (IS_ERR(new_crtc_state)) {
9523                         DRM_DEBUG_DRIVER("drm_atomic_get_crtc_state() failed\n");
9524                         ret = PTR_ERR(new_crtc_state);
9525                         goto fail;
9526                 }
9527
9528                 if (dm_old_con_state->abm_level != dm_new_con_state->abm_level ||
9529                     dm_old_con_state->scaling != dm_new_con_state->scaling)
9530                         new_crtc_state->connectors_changed = true;
9531         }
9532
9533 #if defined(CONFIG_DRM_AMD_DC_DCN)
9534         if (dc_resource_is_dsc_encoding_supported(dc)) {
9535                 for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
9536                         if (drm_atomic_crtc_needs_modeset(new_crtc_state)) {
9537                                 ret = add_affected_mst_dsc_crtcs(state, crtc);
9538                                 if (ret) {
9539                                         DRM_DEBUG_DRIVER("add_affected_mst_dsc_crtcs() failed\n");
9540                                         goto fail;
9541                                 }
9542                         }
9543                 }
9544         }
9545 #endif
9546         for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
9547                 dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
9548
9549                 if (!drm_atomic_crtc_needs_modeset(new_crtc_state) &&
9550                     !new_crtc_state->color_mgmt_changed &&
9551                     old_crtc_state->vrr_enabled == new_crtc_state->vrr_enabled &&
9552                     !dm_old_crtc_state->dsc_force_changed)
9553                         continue;
9554
9555                 ret = amdgpu_dm_verify_lut_sizes(new_crtc_state);
9556                 if (ret) {
9557                         DRM_DEBUG_DRIVER("amdgpu_dm_verify_lut_sizes() failed\n");
9558                         goto fail;
9559                 }
9560
9561                 if (!new_crtc_state->enable)
9562                         continue;
9563
9564                 ret = drm_atomic_add_affected_connectors(state, crtc);
9565                 if (ret) {
9566                         DRM_DEBUG_DRIVER("drm_atomic_add_affected_connectors() failed\n");
9567                         goto fail;
9568                 }
9569
9570                 ret = drm_atomic_add_affected_planes(state, crtc);
9571                 if (ret) {
9572                         DRM_DEBUG_DRIVER("drm_atomic_add_affected_planes() failed\n");
9573                         goto fail;
9574                 }
9575
9576                 if (dm_old_crtc_state->dsc_force_changed)
9577                         new_crtc_state->mode_changed = true;
9578         }
9579
9580         /*
9581          * Add all primary and overlay planes on the CRTC to the state
9582          * whenever a plane is enabled to maintain correct z-ordering
9583          * and to enable fast surface updates.
9584          */
9585         drm_for_each_crtc(crtc, dev) {
9586                 bool modified = false;
9587
9588                 for_each_oldnew_plane_in_state(state, plane, old_plane_state, new_plane_state, i) {
9589                         if (plane->type == DRM_PLANE_TYPE_CURSOR)
9590                                 continue;
9591
9592                         if (new_plane_state->crtc == crtc ||
9593                             old_plane_state->crtc == crtc) {
9594                                 modified = true;
9595                                 break;
9596                         }
9597                 }
9598
9599                 if (!modified)
9600                         continue;
9601
9602                 drm_for_each_plane_mask(plane, state->dev, crtc->state->plane_mask) {
9603                         if (plane->type == DRM_PLANE_TYPE_CURSOR)
9604                                 continue;
9605
9606                         new_plane_state =
9607                                 drm_atomic_get_plane_state(state, plane);
9608
9609                         if (IS_ERR(new_plane_state)) {
9610                                 ret = PTR_ERR(new_plane_state);
9611                                 DRM_DEBUG_DRIVER("new_plane_state is BAD\n");
9612                                 goto fail;
9613                         }
9614                 }
9615         }
9616
9617         /*
9618          * DC consults the zpos (layer_index in DC terminology) to determine the
9619          * hw plane on which to enable the hw cursor (see
9620          * `dcn10_can_pipe_disable_cursor`). By now, all modified planes are in
9621          * atomic state, so call drm helper to normalize zpos.
9622          */
9623         drm_atomic_normalize_zpos(dev, state);
9624
9625         /* Remove existing planes if they are modified */
9626         for_each_oldnew_plane_in_state_reverse(state, plane, old_plane_state, new_plane_state, i) {
9627                 ret = dm_update_plane_state(dc, state, plane,
9628                                             old_plane_state,
9629                                             new_plane_state,
9630                                             false,
9631                                             &lock_and_validation_needed);
9632                 if (ret) {
9633                         DRM_DEBUG_DRIVER("dm_update_plane_state() failed\n");
9634                         goto fail;
9635                 }
9636         }
9637
9638         /* Disable all CRTCs that require disabling */
9639         for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
9640                 ret = dm_update_crtc_state(&adev->dm, state, crtc,
9641                                            old_crtc_state,
9642                                            new_crtc_state,
9643                                            false,
9644                                            &lock_and_validation_needed);
9645                 if (ret) {
9646                         DRM_DEBUG_DRIVER("DISABLE: dm_update_crtc_state() failed\n");
9647                         goto fail;
9648                 }
9649         }
9650
9651         /* Enable all CRTCs that require enabling */
9652         for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
9653                 ret = dm_update_crtc_state(&adev->dm, state, crtc,
9654                                            old_crtc_state,
9655                                            new_crtc_state,
9656                                            true,
9657                                            &lock_and_validation_needed);
9658                 if (ret) {
9659                         DRM_DEBUG_DRIVER("ENABLE: dm_update_crtc_state() failed\n");
9660                         goto fail;
9661                 }
9662         }
9663
9664         /* Add new/modified planes */
9665         for_each_oldnew_plane_in_state_reverse(state, plane, old_plane_state, new_plane_state, i) {
9666                 ret = dm_update_plane_state(dc, state, plane,
9667                                             old_plane_state,
9668                                             new_plane_state,
9669                                             true,
9670                                             &lock_and_validation_needed);
9671                 if (ret) {
9672                         DRM_DEBUG_DRIVER("dm_update_plane_state() failed\n");
9673                         goto fail;
9674                 }
9675         }
9676
9677 #if defined(CONFIG_DRM_AMD_DC_DCN)
9678         if (dc_resource_is_dsc_encoding_supported(dc)) {
9679                 ret = pre_validate_dsc(state, &dm_state, vars);
9680                 if (ret != 0)
9681                         goto fail;
9682         }
9683 #endif
9684
9685         /* Run this here since we want to validate the streams we created */
9686         ret = drm_atomic_helper_check_planes(dev, state);
9687         if (ret) {
9688                 DRM_DEBUG_DRIVER("drm_atomic_helper_check_planes() failed\n");
9689                 goto fail;
9690         }
9691
9692         for_each_new_crtc_in_state(state, crtc, new_crtc_state, i) {
9693                 dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
9694                 if (dm_new_crtc_state->mpo_requested)
9695                         DRM_DEBUG_DRIVER("MPO enablement requested on crtc:[%p]\n", crtc);
9696         }
9697
9698         /* Check cursor planes scaling */
9699         for_each_new_crtc_in_state(state, crtc, new_crtc_state, i) {
9700                 ret = dm_check_crtc_cursor(state, crtc, new_crtc_state);
9701                 if (ret) {
9702                         DRM_DEBUG_DRIVER("dm_check_crtc_cursor() failed\n");
9703                         goto fail;
9704                 }
9705         }
9706
9707         if (state->legacy_cursor_update) {
9708                 /*
9709                  * This is a fast cursor update coming from the plane update
9710                  * helper, check if it can be done asynchronously for better
9711                  * performance.
9712                  */
9713                 state->async_update =
9714                         !drm_atomic_helper_async_check(dev, state);
9715
9716                 /*
9717                  * Skip the remaining global validation if this is an async
9718                  * update. Cursor updates can be done without affecting
9719                  * state or bandwidth calcs and this avoids the performance
9720                  * penalty of locking the private state object and
9721                  * allocating a new dc_state.
9722                  */
9723                 if (state->async_update)
9724                         return 0;
9725         }
9726
9727         /* Check scaling and underscan changes */
9728         /* TODO: Scaling-change validation was removed due to the inability to
9729          * commit a new stream into the context w/o causing a full reset. Need
9730          * to decide how to handle this.
9731          */
9732         for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) {
9733                 struct dm_connector_state *dm_old_con_state = to_dm_connector_state(old_con_state);
9734                 struct dm_connector_state *dm_new_con_state = to_dm_connector_state(new_con_state);
9735                 struct amdgpu_crtc *acrtc = to_amdgpu_crtc(dm_new_con_state->base.crtc);
9736
9737                 /* Skip any modesets/resets */
9738                 if (!acrtc || drm_atomic_crtc_needs_modeset(
9739                                 drm_atomic_get_new_crtc_state(state, &acrtc->base)))
9740                         continue;
9741
9742                 /* Skip anything that isn't a scaling or underscan change */
9743                 if (!is_scaling_state_different(dm_new_con_state, dm_old_con_state))
9744                         continue;
9745
9746                 lock_and_validation_needed = true;
9747         }
9748
9749         /*
9750          * Streams and planes are reset when there are changes that affect
9751          * bandwidth. Anything that affects bandwidth needs to go through
9752          * DC global validation to ensure that the configuration can be applied
9753          * to hardware.
9754          *
9755          * We have to currently stall out here in atomic_check for outstanding
9756          * commits to finish in this case because our IRQ handlers reference
9757          * DRM state directly - we can end up disabling interrupts too early
9758          * if we don't.
9759          *
9760          * TODO: Remove this stall and drop DM state private objects.
9761          */
9762         if (lock_and_validation_needed) {
9763                 ret = dm_atomic_get_state(state, &dm_state);
9764                 if (ret) {
9765                         DRM_DEBUG_DRIVER("dm_atomic_get_state() failed\n");
9766                         goto fail;
9767                 }
9768
9769                 ret = do_aquire_global_lock(dev, state);
9770                 if (ret) {
9771                         DRM_DEBUG_DRIVER("do_aquire_global_lock() failed\n");
9772                         goto fail;
9773                 }
9774
9775 #if defined(CONFIG_DRM_AMD_DC_DCN)
9776                 ret = compute_mst_dsc_configs_for_state(state, dm_state->context, vars);
9777                 if (ret) {
9778                         DRM_DEBUG_DRIVER("compute_mst_dsc_configs_for_state() failed\n");
9779                         goto fail;
9780                 }
9781
9782                 ret = dm_update_mst_vcpi_slots_for_dsc(state, dm_state->context, vars);
9783                 if (ret) {
9784                         DRM_DEBUG_DRIVER("dm_update_mst_vcpi_slots_for_dsc() failed\n");
9785                         goto fail;
9786                 }
9787 #endif
9788
9789                 /*
9790                  * Perform validation of MST topology in the state:
9791                  * We need to perform MST atomic check before calling
9792                  * dc_validate_global_state(), or there is a chance
9793                  * of getting stuck in an infinite loop and eventually hanging.
9794                  */
9795                 ret = drm_dp_mst_atomic_check(state);
9796                 if (ret) {
9797                         DRM_DEBUG_DRIVER("drm_dp_mst_atomic_check() failed\n");
9798                         goto fail;
9799                 }
9800                 status = dc_validate_global_state(dc, dm_state->context, true);
9801                 if (status != DC_OK) {
9802                         DRM_DEBUG_DRIVER("DC global validation failure: %s (%d)",
9803                                        dc_status_to_str(status), status);
9804                         ret = -EINVAL;
9805                         goto fail;
9806                 }
9807         } else {
9808                 /*
9809                  * The commit is a fast update. Fast updates shouldn't change
9810                  * the DC context, affect global validation, and can have their
9811                  * commit work done in parallel with other commits not touching
9812                  * the same resource. If we have a new DC context as part of
9813                  * the DM atomic state from validation we need to free it and
9814                  * retain the existing one instead.
9815                  *
9816                  * Furthermore, since the DM atomic state only contains the DC
9817                  * context and can safely be annulled, we can free the state
9818                  * and clear the associated private object now to free
9819                  * some memory and avoid a possible use-after-free later.
9820                  */
9821
9822                 for (i = 0; i < state->num_private_objs; i++) {
9823                         struct drm_private_obj *obj = state->private_objs[i].ptr;
9824
9825                         if (obj->funcs == adev->dm.atomic_obj.funcs) {
9826                                 int j = state->num_private_objs-1;
9827
9828                                 dm_atomic_destroy_state(obj,
9829                                                 state->private_objs[i].state);
9830
9831                                 /* If i is not at the end of the array then the
9832                                  * last element needs to be moved to where i was
9833                                  * before the array can safely be truncated.
9834                                  */
9835                                 if (i != j)
9836                                         state->private_objs[i] =
9837                                                 state->private_objs[j];
9838
9839                                 state->private_objs[j].ptr = NULL;
9840                                 state->private_objs[j].state = NULL;
9841                                 state->private_objs[j].old_state = NULL;
9842                                 state->private_objs[j].new_state = NULL;
9843
9844                                 state->num_private_objs = j;
9845                                 break;
9846                         }
9847                 }
9848         }
9849
9850         /* Store the overall update type for use later in atomic check. */
9851         for_each_new_crtc_in_state(state, crtc, new_crtc_state, i) {
9852                 struct dm_crtc_state *dm_new_crtc_state =
9853                         to_dm_crtc_state(new_crtc_state);
9854
9855                 dm_new_crtc_state->update_type = lock_and_validation_needed ?
9856                                                          UPDATE_TYPE_FULL :
9857                                                          UPDATE_TYPE_FAST;
9858         }
9859
9860         /* Must be success */
9861         WARN_ON(ret);
9862
9863         trace_amdgpu_dm_atomic_check_finish(state, ret);
9864
9865         return ret;
9866
9867 fail:
9868         if (ret == -EDEADLK)
9869                 DRM_DEBUG_DRIVER("Atomic check stopped to avoid deadlock.\n");
9870         else if (ret == -EINTR || ret == -EAGAIN || ret == -ERESTARTSYS)
9871                 DRM_DEBUG_DRIVER("Atomic check stopped due to signal.\n");
9872         else
9873                 DRM_DEBUG_DRIVER("Atomic check failed with err: %d\n", ret);
9874
9875         trace_amdgpu_dm_atomic_check_finish(state, ret);
9876
9877         return ret;
9878 }
9879
9880 static bool is_dp_capable_without_timing_msa(struct dc *dc,
9881                                              struct amdgpu_dm_connector *amdgpu_dm_connector)
9882 {
9883         uint8_t dpcd_data;
9884         bool capable = false;
9885
9886         if (amdgpu_dm_connector->dc_link &&
9887                 dm_helpers_dp_read_dpcd(
9888                                 NULL,
9889                                 amdgpu_dm_connector->dc_link,
9890                                 DP_DOWN_STREAM_PORT_COUNT,
9891                                 &dpcd_data,
9892                                 sizeof(dpcd_data))) {
9893                 capable = (dpcd_data & DP_MSA_TIMING_PAR_IGNORED) ? true : false;
9894         }
9895
9896         return capable;
9897 }
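/*
 * DPCD sketch for the helper above: DP_DOWN_STREAM_PORT_COUNT is DPCD
 * address 0x007 and DP_MSA_TIMING_PAR_IGNORED is bit 6 (0x40). A
 * hypothetical sink reporting dpcd_data == 0x41 has that bit set
 * (0x41 & 0x40 != 0), so the sink may ignore the MSA timing parameters and
 * the caller goes on to parse the EDID range descriptors for FreeSync
 * limits.
 */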
9898
9899 static bool dm_edid_parser_send_cea(struct amdgpu_display_manager *dm,
9900                 unsigned int offset,
9901                 unsigned int total_length,
9902                 uint8_t *data,
9903                 unsigned int length,
9904                 struct amdgpu_hdmi_vsdb_info *vsdb)
9905 {
9906         bool res;
9907         union dmub_rb_cmd cmd;
9908         struct dmub_cmd_send_edid_cea *input;
9909         struct dmub_cmd_edid_cea_output *output;
9910
9911         if (length > DMUB_EDID_CEA_DATA_CHUNK_BYTES)
9912                 return false;
9913
9914         memset(&cmd, 0, sizeof(cmd));
9915
9916         input = &cmd.edid_cea.data.input;
9917
9918         cmd.edid_cea.header.type = DMUB_CMD__EDID_CEA;
9919         cmd.edid_cea.header.sub_type = 0;
9920         cmd.edid_cea.header.payload_bytes =
9921                 sizeof(cmd.edid_cea) - sizeof(cmd.edid_cea.header);
9922         input->offset = offset;
9923         input->length = length;
9924         input->cea_total_length = total_length;
9925         memcpy(input->payload, data, length);
9926
9927         res = dc_dmub_srv_cmd_with_reply_data(dm->dc->ctx->dmub_srv, &cmd);
9928         if (!res) {
9929                 DRM_ERROR("EDID CEA parser failed\n");
9930                 return false;
9931         }
9932
9933         output = &cmd.edid_cea.data.output;
9934
9935         if (output->type == DMUB_CMD__EDID_CEA_ACK) {
9936                 if (!output->ack.success) {
9937                         DRM_ERROR("EDID CEA ack failed at offset %d\n",
9938                                         output->ack.offset);
9939                 }
9940         } else if (output->type == DMUB_CMD__EDID_CEA_AMD_VSDB) {
9941                 if (!output->amd_vsdb.vsdb_found)
9942                         return false;
9943
9944                 vsdb->freesync_supported = output->amd_vsdb.freesync_supported;
9945                 vsdb->amd_vsdb_version = output->amd_vsdb.amd_vsdb_version;
9946                 vsdb->min_refresh_rate_hz = output->amd_vsdb.min_frame_rate;
9947                 vsdb->max_refresh_rate_hz = output->amd_vsdb.max_frame_rate;
9948         } else {
9949                 DRM_WARN("Unknown EDID CEA parser results\n");
9950                 return false;
9951         }
9952
9953         return true;
9954 }
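/*
 * The helper above follows the usual synchronous DMUB round trip: fill a
 * union dmub_rb_cmd, submit it, then read the reply back out of the same
 * union. A minimal sketch of the pattern, using only the fields used here:
 *
 *   union dmub_rb_cmd cmd;
 *   struct dmub_cmd_edid_cea_output *output;
 *
 *   memset(&cmd, 0, sizeof(cmd));
 *   cmd.edid_cea.header.type = DMUB_CMD__EDID_CEA;
 *   // ...fill cmd.edid_cea.data.input...
 *   if (dc_dmub_srv_cmd_with_reply_data(dm->dc->ctx->dmub_srv, &cmd))
 *           output = &cmd.edid_cea.data.output;
 */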
9955
9956 static bool parse_edid_cea_dmcu(struct amdgpu_display_manager *dm,
9957                 uint8_t *edid_ext, int len,
9958                 struct amdgpu_hdmi_vsdb_info *vsdb_info)
9959 {
9960         int i;
9961
9962         /* send extension block to DMCU for parsing */
9963         for (i = 0; i < len; i += 8) {
9964                 bool res;
9965                 int offset;
9966
9967                 /* send 8 bytes at a time */
9968                 if (!dc_edid_parser_send_cea(dm->dc, i, len, &edid_ext[i], 8))
9969                         return false;
9970
9971                 if (i+8 == len) {
9972                         /* EDID block send completed, expect result */
9973                         int version, min_rate, max_rate;
9974
9975                         res = dc_edid_parser_recv_amd_vsdb(dm->dc, &version, &min_rate, &max_rate);
9976                         if (res) {
9977                                 /* amd vsdb found */
9978                                 vsdb_info->freesync_supported = 1;
9979                                 vsdb_info->amd_vsdb_version = version;
9980                                 vsdb_info->min_refresh_rate_hz = min_rate;
9981                                 vsdb_info->max_refresh_rate_hz = max_rate;
9982                                 return true;
9983                         }
9984                         /* not amd vsdb */
9985                         return false;
9986                 }
9987
9988                 /* check for ack */
9989                 res = dc_edid_parser_recv_cea_ack(dm->dc, &offset);
9990                 if (!res)
9991                         return false;
9992         }
9993
9994         return false;
9995 }
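/*
 * Chunking note: a CEA extension block is EDID_LENGTH (128) bytes, so the
 * loop above and the one in parse_edid_cea_dmub() below issue
 * 128 / 8 == 16 eight-byte transfers, and the parser result is only
 * expected once the final chunk (i + 8 == len) has been sent.
 */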
9996
9997 static bool parse_edid_cea_dmub(struct amdgpu_display_manager *dm,
9998                 uint8_t *edid_ext, int len,
9999                 struct amdgpu_hdmi_vsdb_info *vsdb_info)
10000 {
10001         int i;
10002
10003         /* send extension block to DMUB for parsing */
10004         for (i = 0; i < len; i += 8) {
10005                 /* send 8 bytes at a time */
10006                 if (!dm_edid_parser_send_cea(dm, i, len, &edid_ext[i], 8, vsdb_info))
10007                         return false;
10008         }
10009
10010         return vsdb_info->freesync_supported;
10011 }
10012
10013 static bool parse_edid_cea(struct amdgpu_dm_connector *aconnector,
10014                 uint8_t *edid_ext, int len,
10015                 struct amdgpu_hdmi_vsdb_info *vsdb_info)
10016 {
10017         struct amdgpu_device *adev = drm_to_adev(aconnector->base.dev);
10018
10019         if (adev->dm.dmub_srv)
10020                 return parse_edid_cea_dmub(&adev->dm, edid_ext, len, vsdb_info);
10021         else
10022                 return parse_edid_cea_dmcu(&adev->dm, edid_ext, len, vsdb_info);
10023 }
10024
10025 static int parse_hdmi_amd_vsdb(struct amdgpu_dm_connector *aconnector,
10026                 struct edid *edid, struct amdgpu_hdmi_vsdb_info *vsdb_info)
10027 {
10028         uint8_t *edid_ext = NULL;
10029         int i;
10030         bool valid_vsdb_found = false;
10031
10032         /*----- drm_find_cea_extension() -----*/
10033         /* No EDID or EDID extensions */
10034         if (edid == NULL || edid->extensions == 0)
10035                 return -ENODEV;
10036
10037         /* Find CEA extension */
10038         for (i = 0; i < edid->extensions; i++) {
10039                 edid_ext = (uint8_t *)edid + EDID_LENGTH * (i + 1);
10040                 if (edid_ext[0] == CEA_EXT)
10041                         break;
10042         }
10043
10044         if (i == edid->extensions)
10045                 return -ENODEV;
10046
10047         /*----- cea_db_offsets() -----*/
10048         if (edid_ext[0] != CEA_EXT)
10049                 return -ENODEV;
10050
10051         valid_vsdb_found = parse_edid_cea(aconnector, edid_ext, EDID_LENGTH, vsdb_info);
10052
10053         return valid_vsdb_found ? i : -ENODEV;
10054 }
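/*
 * EDID layout behind the scan above: extension block i + 1 starts at byte
 * offset EDID_LENGTH * (i + 1), so a single-extension EDID is 256 bytes
 * with the CEA block (tag CEA_EXT, 0x02) at offset 128:
 *
 *   edid_ext = (uint8_t *)edid + EDID_LENGTH * (i + 1);
 */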
10055
10056 /**
10057  * amdgpu_dm_update_freesync_caps - Update Freesync capabilities
10058  *
10059  * @connector: Connector to query.
10060  * @edid: EDID from monitor
10061  *
10062  * Amdgpu supports FreeSync on DP and HDMI displays, and it is required to keep
10063  * track of some of the display information in the internal data struct used by
10064  * amdgpu_dm. This function checks the connector type to determine how to set
10065  * the FreeSync parameters.
10066  */
10067 void amdgpu_dm_update_freesync_caps(struct drm_connector *connector,
10068                                     struct edid *edid)
10069 {
10070         int i = 0;
10071         struct detailed_timing *timing;
10072         struct detailed_non_pixel *data;
10073         struct detailed_data_monitor_range *range;
10074         struct amdgpu_dm_connector *amdgpu_dm_connector =
10075                         to_amdgpu_dm_connector(connector);
10076         struct dm_connector_state *dm_con_state = NULL;
10077         struct dc_sink *sink;
10078
10079         struct drm_device *dev = connector->dev;
10080         struct amdgpu_device *adev = drm_to_adev(dev);
10081         struct amdgpu_hdmi_vsdb_info vsdb_info = {0};
10082         bool freesync_capable = false;
10083
10084         if (!connector->state) {
10085                 DRM_ERROR("%s - Connector has no state\n", __func__);
10086                 goto update;
10087         }
10088
10089         sink = amdgpu_dm_connector->dc_sink ?
10090                 amdgpu_dm_connector->dc_sink :
10091                 amdgpu_dm_connector->dc_em_sink;
10092
10093         if (!edid || !sink) {
10094                 dm_con_state = to_dm_connector_state(connector->state);
10095
10096                 amdgpu_dm_connector->min_vfreq = 0;
10097                 amdgpu_dm_connector->max_vfreq = 0;
10098                 amdgpu_dm_connector->pixel_clock_mhz = 0;
10099                 connector->display_info.monitor_range.min_vfreq = 0;
10100                 connector->display_info.monitor_range.max_vfreq = 0;
10101                 freesync_capable = false;
10102
10103                 goto update;
10104         }
10105
10106         dm_con_state = to_dm_connector_state(connector->state);
10107
10108         if (!adev->dm.freesync_module)
10109                 goto update;
10110
10111         if (sink->sink_signal == SIGNAL_TYPE_DISPLAY_PORT
10112                 || sink->sink_signal == SIGNAL_TYPE_EDP) {
10113                 bool edid_check_required = false;
10114
10115                 if (edid) {
10116                         edid_check_required = is_dp_capable_without_timing_msa(
10117                                                 adev->dm.dc,
10118                                                 amdgpu_dm_connector);
10119                 }
10120
10121                 if (edid_check_required && (edid->version > 1 ||
10122                    (edid->version == 1 && edid->revision > 1))) {
10123                         for (i = 0; i < 4; i++) {
10124
10125                                 timing  = &edid->detailed_timings[i];
10126                                 data    = &timing->data.other_data;
10127                                 range   = &data->data.range;
10128                                 /*
10129                                  * Check if monitor has continuous frequency mode
10130                                  */
10131                                 if (data->type != EDID_DETAIL_MONITOR_RANGE)
10132                                         continue;
10133                                 /*
10134                                  * Check for flag range limits only. If flag == 1 then
10135                                  * no additional timing information provided.
10136                                  * Default GTF, GTF Secondary curve and CVT are not
10137                                  * supported
10138                                  */
10139                                 if (range->flags != 1)
10140                                         continue;
10141
10142                                 amdgpu_dm_connector->min_vfreq = range->min_vfreq;
10143                                 amdgpu_dm_connector->max_vfreq = range->max_vfreq;
10144                                 amdgpu_dm_connector->pixel_clock_mhz =
10145                                         range->pixel_clock_mhz * 10;
10146
10147                                 connector->display_info.monitor_range.min_vfreq = range->min_vfreq;
10148                                 connector->display_info.monitor_range.max_vfreq = range->max_vfreq;
10149
10150                                 break;
10151                         }
10152
10153                         if (amdgpu_dm_connector->max_vfreq -
10154                             amdgpu_dm_connector->min_vfreq > 10) {
10155
10156                                 freesync_capable = true;
10157                         }
10158                 }
10159         } else if (edid && sink->sink_signal == SIGNAL_TYPE_HDMI_TYPE_A) {
10160                 i = parse_hdmi_amd_vsdb(amdgpu_dm_connector, edid, &vsdb_info);
10161                 if (i >= 0 && vsdb_info.freesync_supported) {
10162                         timing  = &edid->detailed_timings[i];
10163                         data    = &timing->data.other_data;
10164
10165                         amdgpu_dm_connector->min_vfreq = vsdb_info.min_refresh_rate_hz;
10166                         amdgpu_dm_connector->max_vfreq = vsdb_info.max_refresh_rate_hz;
10167                         if (amdgpu_dm_connector->max_vfreq - amdgpu_dm_connector->min_vfreq > 10)
10168                                 freesync_capable = true;
10169
10170                         connector->display_info.monitor_range.min_vfreq = vsdb_info.min_refresh_rate_hz;
10171                         connector->display_info.monitor_range.max_vfreq = vsdb_info.max_refresh_rate_hz;
10172                 }
10173         }
10174
10175 update:
10176         if (dm_con_state)
10177                 dm_con_state->freesync_capable = freesync_capable;
10178
10179         if (connector->vrr_capable_property)
10180                 drm_connector_set_vrr_capable_property(connector,
10181                                                        freesync_capable);
10182 }
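/*
 * Range gating example for the logic above (hypothetical monitor): a sink
 * advertising 48-144 Hz yields max_vfreq - min_vfreq == 96 > 10, so
 * freesync_capable is set and propagated to the connector's vrr_capable
 * property; a fixed 60-60 Hz range (delta 0) is treated as not VRR capable.
 */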
10183
10184 void amdgpu_dm_trigger_timing_sync(struct drm_device *dev)
10185 {
10186         struct amdgpu_device *adev = drm_to_adev(dev);
10187         struct dc *dc = adev->dm.dc;
10188         int i;
10189
10190         mutex_lock(&adev->dm.dc_lock);
10191         if (dc->current_state) {
10192                 for (i = 0; i < dc->current_state->stream_count; ++i)
10193                         dc->current_state->streams[i]
10194                                 ->triggered_crtc_reset.enabled =
10195                                 adev->dm.force_timing_sync;
10196
10197                 dm_enable_per_frame_crtc_master_sync(dc->current_state);
10198                 dc_trigger_sync(dc, dc->current_state);
10199         }
10200         mutex_unlock(&adev->dm.dc_lock);
10201 }
10202
10203 void dm_write_reg_func(const struct dc_context *ctx, uint32_t address,
10204                        uint32_t value, const char *func_name)
10205 {
10206 #ifdef DM_CHECK_ADDR_0
10207         if (address == 0) {
10208                 DC_ERR("invalid register write. address = 0");
10209                 return;
10210         }
10211 #endif
10212         cgs_write_register(ctx->cgs_device, address, value);
10213         trace_amdgpu_dc_wreg(&ctx->perf_trace->write_count, address, value);
10214 }
10215
10216 uint32_t dm_read_reg_func(const struct dc_context *ctx, uint32_t address,
10217                           const char *func_name)
10218 {
10219         uint32_t value;
10220 #ifdef DM_CHECK_ADDR_0
10221         if (address == 0) {
10222                 DC_ERR("invalid register read; address = 0\n");
10223                 return 0;
10224         }
10225 #endif
10226
10227         if (ctx->dmub_srv &&
10228             ctx->dmub_srv->reg_helper_offload.gather_in_progress &&
10229             !ctx->dmub_srv->reg_helper_offload.should_burst_write) {
10230                 ASSERT(false);
10231                 return 0;
10232         }
10233
10234         value = cgs_read_register(ctx->cgs_device, address);
10235
10236         trace_amdgpu_dc_rreg(&ctx->perf_trace->read_count, address, value);
10237
10238         return value;
10239 }
10240
10241 int amdgpu_dm_process_dmub_aux_transfer_sync(
10242                 struct dc_context *ctx,
10243                 unsigned int link_index,
10244                 struct aux_payload *payload,
10245                 enum aux_return_code_type *operation_result)
10246 {
10247         struct amdgpu_device *adev = ctx->driver_context;
10248         struct dmub_notification *p_notify = adev->dm.dmub_notify;
10249         int ret = -1;
10250
10251         mutex_lock(&adev->dm.dpia_aux_lock);
10252         if (!dc_process_dmub_aux_transfer_async(ctx->dc, link_index, payload)) {
10253                 *operation_result = AUX_RET_ERROR_ENGINE_ACQUIRE;
10254                 goto out;
10255         }
10256
10257         if (!wait_for_completion_timeout(&adev->dm.dmub_aux_transfer_done, 10 * HZ)) {
10258                 DRM_ERROR("wait_for_completion_timeout() timed out!\n");
10259                 *operation_result = AUX_RET_ERROR_TIMEOUT;
10260                 goto out;
10261         }
10262
10263         if (p_notify->result != AUX_RET_SUCCESS) {
10264                 /*
10265                  * Transient states before tunneling is enabled could
10266                  * lead to this error. We can ignore this for now.
10267                  */
10268                 if (p_notify->result != AUX_RET_ERROR_PROTOCOL_ERROR) {
10269                         DRM_WARN("DPIA AUX failed on 0x%x(%d), error %d\n",
10270                                         payload->address, payload->length,
10271                                         p_notify->result);
10272                 }
10273                 *operation_result = AUX_RET_ERROR_INVALID_REPLY;
10274                 goto out;
10275         }
10276
10277
10278         payload->reply[0] = adev->dm.dmub_notify->aux_reply.command;
10279         if (!payload->write && p_notify->aux_reply.length &&
10280                         (payload->reply[0] == AUX_TRANSACTION_REPLY_AUX_ACK)) {
10281
10282                 if (payload->length != p_notify->aux_reply.length) {
10283                         DRM_WARN("invalid read length %d from DPIA AUX 0x%x(%d)!\n",
10284                                  p_notify->aux_reply.length,
10285                                  payload->address, payload->length);
10286                         *operation_result = AUX_RET_ERROR_INVALID_REPLY;
10287                         goto out;
10288                 }
10289
10290                 memcpy(payload->data, p_notify->aux_reply.data,
10291                                 p_notify->aux_reply.length);
10292         }
10293
10294         /* success */
10295         ret = p_notify->aux_reply.length;
10296         *operation_result = p_notify->result;
10297 out:
10298         mutex_unlock(&adev->dm.dpia_aux_lock);
10299         return ret;
10300 }
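/*
 * Synchronization pattern shared with amdgpu_dm_process_dmub_set_config_sync()
 * below: serialize DPIA requests with dm.dpia_aux_lock, fire the asynchronous
 * DMUB request, then block on the dm.dmub_aux_transfer_done completion with a
 * 10 * HZ (ten second) timeout before reading the reply from dm.dmub_notify.
 * A condensed sketch (error paths elided):
 *
 *   mutex_lock(&adev->dm.dpia_aux_lock);
 *   if (dc_process_dmub_aux_transfer_async(ctx->dc, link_index, payload) &&
 *       wait_for_completion_timeout(&adev->dm.dmub_aux_transfer_done, 10 * HZ))
 *           ret = adev->dm.dmub_notify->aux_reply.length;
 *   mutex_unlock(&adev->dm.dpia_aux_lock);
 */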
10301
10302 int amdgpu_dm_process_dmub_set_config_sync(
10303                 struct dc_context *ctx,
10304                 unsigned int link_index,
10305                 struct set_config_cmd_payload *payload,
10306                 enum set_config_status *operation_result)
10307 {
10308         struct amdgpu_device *adev = ctx->driver_context;
10309         bool is_cmd_complete;
10310         int ret;
10311
10312         mutex_lock(&adev->dm.dpia_aux_lock);
10313         is_cmd_complete = dc_process_dmub_set_config_async(ctx->dc,
10314                         link_index, payload, adev->dm.dmub_notify);
10315
10316         if (is_cmd_complete || wait_for_completion_timeout(&adev->dm.dmub_aux_transfer_done, 10 * HZ)) {
10317                 ret = 0;
10318                 *operation_result = adev->dm.dmub_notify->sc_status;
10319         } else {
10320                 DRM_ERROR("wait_for_completion_timeout() timed out!\n");
10321                 ret = -1;
10322                 *operation_result = SET_CONFIG_UNKNOWN_ERROR;
10323         }
10324
10325         mutex_unlock(&adev->dm.dpia_aux_lock);
10326         return ret;
10327 }
10328
10329 /*
10330  * Check whether seamless boot is supported.
10331  *
10332  * So far we only support seamless boot on CHIP_VANGOGH.
10333  * If everything goes well, we may consider expanding
10334  * seamless boot to other ASICs.
10335  */
10336 bool check_seamless_boot_capability(struct amdgpu_device *adev)
10337 {
10338         switch (adev->ip_versions[DCE_HWIP][0]) {
10339         case IP_VERSION(3, 0, 1):
10340                 if (!adev->mman.keep_stolen_vga_memory)
10341                         return true;
10342                 break;
10343         default:
10344                 break;
10345         }
10346
10347         return false;
10348 }