drm/amd/display: Fix two cursor duplication when using overlay
drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
/*
 * Copyright 2015 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: AMD
 *
 */

/* The caprices of the preprocessor require that this be declared right here */
#define CREATE_TRACE_POINTS

#include "dm_services_types.h"
#include "dc.h"
#include "dc/inc/core_types.h"
#include "dal_asic_id.h"
#include "dmub/dmub_srv.h"
#include "dc/inc/hw/dmcu.h"
#include "dc/inc/hw/abm.h"
#include "dc/dc_dmub_srv.h"
#include "dc/dc_edid_parser.h"
#include "amdgpu_dm_trace.h"

#include "vid.h"
#include "amdgpu.h"
#include "amdgpu_display.h"
#include "amdgpu_ucode.h"
#include "atom.h"
#include "amdgpu_dm.h"
#ifdef CONFIG_DRM_AMD_DC_HDCP
#include "amdgpu_dm_hdcp.h"
#include <drm/drm_hdcp.h>
#endif
#include "amdgpu_pm.h"

#include "amd_shared.h"
#include "amdgpu_dm_irq.h"
#include "dm_helpers.h"
#include "amdgpu_dm_mst_types.h"
#if defined(CONFIG_DEBUG_FS)
#include "amdgpu_dm_debugfs.h"
#endif

#include "ivsrcid/ivsrcid_vislands30.h"

#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/types.h>
#include <linux/pm_runtime.h>
#include <linux/pci.h>
#include <linux/firmware.h>
#include <linux/component.h>

#include <drm/drm_atomic.h>
#include <drm/drm_atomic_uapi.h>
#include <drm/drm_atomic_helper.h>
#include <drm/drm_dp_mst_helper.h>
#include <drm/drm_fb_helper.h>
#include <drm/drm_fourcc.h>
#include <drm/drm_edid.h>
#include <drm/drm_vblank.h>
#include <drm/drm_audio_component.h>

#if defined(CONFIG_DRM_AMD_DC_DCN)
#include "ivsrcid/dcn/irqsrcs_dcn_1_0.h"

#include "dcn/dcn_1_0_offset.h"
#include "dcn/dcn_1_0_sh_mask.h"
#include "soc15_hw_ip.h"
#include "vega10_ip_offset.h"

#include "soc15_common.h"
#endif

#include "modules/inc/mod_freesync.h"
#include "modules/power/power_helpers.h"
#include "modules/inc/mod_info_packet.h"

#define FIRMWARE_RENOIR_DMUB "amdgpu/renoir_dmcub.bin"
MODULE_FIRMWARE(FIRMWARE_RENOIR_DMUB);
#define FIRMWARE_SIENNA_CICHLID_DMUB "amdgpu/sienna_cichlid_dmcub.bin"
MODULE_FIRMWARE(FIRMWARE_SIENNA_CICHLID_DMUB);
#define FIRMWARE_NAVY_FLOUNDER_DMUB "amdgpu/navy_flounder_dmcub.bin"
MODULE_FIRMWARE(FIRMWARE_NAVY_FLOUNDER_DMUB);
#define FIRMWARE_GREEN_SARDINE_DMUB "amdgpu/green_sardine_dmcub.bin"
MODULE_FIRMWARE(FIRMWARE_GREEN_SARDINE_DMUB);
#define FIRMWARE_VANGOGH_DMUB "amdgpu/vangogh_dmcub.bin"
MODULE_FIRMWARE(FIRMWARE_VANGOGH_DMUB);
#define FIRMWARE_DIMGREY_CAVEFISH_DMUB "amdgpu/dimgrey_cavefish_dmcub.bin"
MODULE_FIRMWARE(FIRMWARE_DIMGREY_CAVEFISH_DMUB);

#define FIRMWARE_RAVEN_DMCU             "amdgpu/raven_dmcu.bin"
MODULE_FIRMWARE(FIRMWARE_RAVEN_DMCU);

#define FIRMWARE_NAVI12_DMCU            "amdgpu/navi12_dmcu.bin"
MODULE_FIRMWARE(FIRMWARE_NAVI12_DMCU);

/* Number of bytes in PSP header for firmware. */
#define PSP_HEADER_BYTES 0x100

/* Number of bytes in PSP footer for firmware. */
#define PSP_FOOTER_BYTES 0x100

/**
 * DOC: overview
 *
 * The AMDgpu display manager, **amdgpu_dm** (or even simpler,
 * **dm**) sits between DRM and DC. It acts as a liaison, converting DRM
 * requests into DC requests, and DC responses into DRM responses.
 *
 * The root control structure is &struct amdgpu_display_manager.
 */

/* basic init/fini API */
static int amdgpu_dm_init(struct amdgpu_device *adev);
static void amdgpu_dm_fini(struct amdgpu_device *adev);
static bool is_freesync_video_mode(const struct drm_display_mode *mode, struct amdgpu_dm_connector *aconnector);

static enum drm_mode_subconnector get_subconnector_type(struct dc_link *link)
{
        switch (link->dpcd_caps.dongle_type) {
        case DISPLAY_DONGLE_NONE:
                return DRM_MODE_SUBCONNECTOR_Native;
        case DISPLAY_DONGLE_DP_VGA_CONVERTER:
                return DRM_MODE_SUBCONNECTOR_VGA;
        case DISPLAY_DONGLE_DP_DVI_CONVERTER:
        case DISPLAY_DONGLE_DP_DVI_DONGLE:
                return DRM_MODE_SUBCONNECTOR_DVID;
        case DISPLAY_DONGLE_DP_HDMI_CONVERTER:
        case DISPLAY_DONGLE_DP_HDMI_DONGLE:
                return DRM_MODE_SUBCONNECTOR_HDMIA;
        case DISPLAY_DONGLE_DP_HDMI_MISMATCHED_DONGLE:
        default:
                return DRM_MODE_SUBCONNECTOR_Unknown;
        }
}

static void update_subconnector_property(struct amdgpu_dm_connector *aconnector)
{
        struct dc_link *link = aconnector->dc_link;
        struct drm_connector *connector = &aconnector->base;
        enum drm_mode_subconnector subconnector = DRM_MODE_SUBCONNECTOR_Unknown;

        if (connector->connector_type != DRM_MODE_CONNECTOR_DisplayPort)
                return;

        if (aconnector->dc_sink)
                subconnector = get_subconnector_type(link);

        drm_object_property_set_value(&connector->base,
                        connector->dev->mode_config.dp_subconnector_property,
                        subconnector);
}

/*
 * Initializes drm_device display related structures, based on the information
 * provided by DAL. The drm structures are: drm_crtc, drm_connector,
 * drm_encoder and drm_mode_config.
 *
 * Returns 0 on success
 */
static int amdgpu_dm_initialize_drm_device(struct amdgpu_device *adev);
/* removes and deallocates the drm structures, created by the above function */
static void amdgpu_dm_destroy_drm_device(struct amdgpu_display_manager *dm);

static int amdgpu_dm_plane_init(struct amdgpu_display_manager *dm,
                                struct drm_plane *plane,
                                unsigned long possible_crtcs,
                                const struct dc_plane_cap *plane_cap);
static int amdgpu_dm_crtc_init(struct amdgpu_display_manager *dm,
                               struct drm_plane *plane,
                               uint32_t link_index);
static int amdgpu_dm_connector_init(struct amdgpu_display_manager *dm,
                                    struct amdgpu_dm_connector *amdgpu_dm_connector,
                                    uint32_t link_index,
                                    struct amdgpu_encoder *amdgpu_encoder);
static int amdgpu_dm_encoder_init(struct drm_device *dev,
                                  struct amdgpu_encoder *aencoder,
                                  uint32_t link_index);

static int amdgpu_dm_connector_get_modes(struct drm_connector *connector);

static void amdgpu_dm_atomic_commit_tail(struct drm_atomic_state *state);

static int amdgpu_dm_atomic_check(struct drm_device *dev,
                                  struct drm_atomic_state *state);

static void handle_cursor_update(struct drm_plane *plane,
                                 struct drm_plane_state *old_plane_state);

static void amdgpu_dm_set_psr_caps(struct dc_link *link);
static bool amdgpu_dm_psr_enable(struct dc_stream_state *stream);
static bool amdgpu_dm_link_setup_psr(struct dc_stream_state *stream);
static bool amdgpu_dm_psr_disable(struct dc_stream_state *stream);
static bool amdgpu_dm_psr_disable_all(struct amdgpu_display_manager *dm);

static const struct drm_format_info *
amd_get_format_info(const struct drm_mode_fb_cmd2 *cmd);

static bool
is_timing_unchanged_for_freesync(struct drm_crtc_state *old_crtc_state,
                                 struct drm_crtc_state *new_crtc_state);

/**
 * dm_vblank_get_counter() - Get counter for number of vertical blanks
 * @adev: [in] desired amdgpu device
 * @crtc: [in] which CRTC to get the counter from
 *
 * Return: Counter for vertical blanks
 */
static u32 dm_vblank_get_counter(struct amdgpu_device *adev, int crtc)
{
        if (crtc >= adev->mode_info.num_crtc)
                return 0;
        else {
                struct amdgpu_crtc *acrtc = adev->mode_info.crtcs[crtc];

                if (acrtc->dm_irq_params.stream == NULL) {
                        DRM_ERROR("dc_stream_state is NULL for crtc '%d'!\n",
                                  crtc);
                        return 0;
                }

                return dc_stream_get_vblank_counter(acrtc->dm_irq_params.stream);
        }
}

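/**
 * dm_crtc_get_scanoutpos() - Get the vblank range and current scanout position
 * @adev: amdgpu device
 * @crtc: CRTC index to query
 * @vbl: returns v_blank_start in bits 15:0 and v_blank_end in bits 31:16
 * @position: returns the vertical position in bits 15:0 and the horizontal
 *            position in bits 31:16, matching the packed register format
 *
 * Return: 0 on success, -EINVAL for an out-of-range CRTC index.
 */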
static int dm_crtc_get_scanoutpos(struct amdgpu_device *adev, int crtc,
                                  u32 *vbl, u32 *position)
{
        uint32_t v_blank_start, v_blank_end, h_position, v_position;

        if ((crtc < 0) || (crtc >= adev->mode_info.num_crtc))
                return -EINVAL;
        else {
                struct amdgpu_crtc *acrtc = adev->mode_info.crtcs[crtc];

                if (acrtc->dm_irq_params.stream == NULL) {
                        DRM_ERROR("dc_stream_state is NULL for crtc '%d'!\n",
                                  crtc);
                        return 0;
                }

                /*
                 * TODO rework base driver to use values directly.
                 * for now parse it back into reg-format
                 */
                dc_stream_get_scanoutpos(acrtc->dm_irq_params.stream,
                                         &v_blank_start,
                                         &v_blank_end,
                                         &h_position,
                                         &v_position);

                *position = v_position | (h_position << 16);
                *vbl = v_blank_start | (v_blank_end << 16);
        }

        return 0;
}

static bool dm_is_idle(void *handle)
{
        /* XXX todo */
        return true;
}

static int dm_wait_for_idle(void *handle)
{
        /* XXX todo */
        return 0;
}

static bool dm_check_soft_reset(void *handle)
{
        return false;
}

static int dm_soft_reset(void *handle)
{
        /* XXX todo */
        return 0;
}

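/**
 * get_crtc_by_otg_inst() - Look up the amdgpu_crtc driven by a given OTG
 * @adev: amdgpu device
 * @otg_inst: output timing generator instance reported by the IRQ source
 *
 * Return: the matching CRTC, or NULL if no CRTC uses this OTG instance.
 * An instance of -1 is unexpected and falls back to CRTC 0 with a warning.
 */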
static struct amdgpu_crtc *
get_crtc_by_otg_inst(struct amdgpu_device *adev,
                     int otg_inst)
{
        struct drm_device *dev = adev_to_drm(adev);
        struct drm_crtc *crtc;
        struct amdgpu_crtc *amdgpu_crtc;

        if (otg_inst == -1) {
                WARN_ON(1);
                return adev->mode_info.crtcs[0];
        }

        list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
                amdgpu_crtc = to_amdgpu_crtc(crtc);

                if (amdgpu_crtc->otg_inst == otg_inst)
                        return amdgpu_crtc;
        }

        return NULL;
}

static inline bool amdgpu_dm_vrr_active_irq(struct amdgpu_crtc *acrtc)
{
        return acrtc->dm_irq_params.freesync_config.state ==
                       VRR_STATE_ACTIVE_VARIABLE ||
               acrtc->dm_irq_params.freesync_config.state ==
                       VRR_STATE_ACTIVE_FIXED;
}

static inline bool amdgpu_dm_vrr_active(struct dm_crtc_state *dm_state)
{
        return dm_state->freesync_config.state == VRR_STATE_ACTIVE_VARIABLE ||
               dm_state->freesync_config.state == VRR_STATE_ACTIVE_FIXED;
}

static inline bool is_dc_timing_adjust_needed(struct dm_crtc_state *old_state,
                                              struct dm_crtc_state *new_state)
{
        if (new_state->freesync_config.state == VRR_STATE_ACTIVE_FIXED)
                return true;
        else if (amdgpu_dm_vrr_active(old_state) != amdgpu_dm_vrr_active(new_state))
                return true;
        else
                return false;
}

/**
 * dm_pflip_high_irq() - Handle pageflip interrupt
 * @interrupt_params: interrupt parameters, used to look up the CRTC
 *
 * Handles the pageflip interrupt by notifying all interested parties
 * that the pageflip has been completed.
 */
static void dm_pflip_high_irq(void *interrupt_params)
{
        struct amdgpu_crtc *amdgpu_crtc;
        struct common_irq_params *irq_params = interrupt_params;
        struct amdgpu_device *adev = irq_params->adev;
        unsigned long flags;
        struct drm_pending_vblank_event *e;
        uint32_t vpos, hpos, v_blank_start, v_blank_end;
        bool vrr_active;

        amdgpu_crtc = get_crtc_by_otg_inst(adev, irq_params->irq_src - IRQ_TYPE_PFLIP);

        /* IRQ could occur when in initial stage */
        /* TODO work and BO cleanup */
        if (amdgpu_crtc == NULL) {
                DC_LOG_PFLIP("CRTC is null, returning.\n");
                return;
        }

        spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);

        if (amdgpu_crtc->pflip_status != AMDGPU_FLIP_SUBMITTED) {
                DC_LOG_PFLIP("amdgpu_crtc->pflip_status = %d != AMDGPU_FLIP_SUBMITTED(%d) on crtc:%d[%p]\n",
                             amdgpu_crtc->pflip_status,
                             AMDGPU_FLIP_SUBMITTED,
                             amdgpu_crtc->crtc_id,
                             amdgpu_crtc);
                spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
                return;
        }

        /* page flip completed. */
        e = amdgpu_crtc->event;
        amdgpu_crtc->event = NULL;

        WARN_ON(!e);

        vrr_active = amdgpu_dm_vrr_active_irq(amdgpu_crtc);

        /* Fixed refresh rate, or VRR scanout position outside front-porch? */
        if (!vrr_active ||
            !dc_stream_get_scanoutpos(amdgpu_crtc->dm_irq_params.stream, &v_blank_start,
                                      &v_blank_end, &hpos, &vpos) ||
            (vpos < v_blank_start)) {
                /* Update to correct count and vblank timestamp if racing with
                 * vblank irq. This also updates to the correct vblank timestamp
                 * even in VRR mode, as scanout is past the front-porch atm.
                 */
                drm_crtc_accurate_vblank_count(&amdgpu_crtc->base);

                /* Wake up userspace by sending the pageflip event with proper
                 * count and timestamp of vblank of flip completion.
                 */
                if (e) {
                        drm_crtc_send_vblank_event(&amdgpu_crtc->base, e);

                        /* Event sent, so done with vblank for this flip */
                        drm_crtc_vblank_put(&amdgpu_crtc->base);
                }
        } else if (e) {
                /* VRR active and inside front-porch: vblank count and
                 * timestamp for pageflip event will only be up to date after
                 * drm_crtc_handle_vblank() has been executed from late vblank
                 * irq handler after start of back-porch (vline 0). We queue the
                 * pageflip event for send-out by drm_crtc_handle_vblank() with
                 * updated timestamp and count, once it runs after us.
                 *
                 * We need to open-code this instead of using the helper
                 * drm_crtc_arm_vblank_event(), as that helper would
                 * call drm_crtc_accurate_vblank_count(), which we must
                 * not call in VRR mode while we are in front-porch!
                 */

                /* sequence will be replaced by real count during send-out. */
                e->sequence = drm_crtc_vblank_count(&amdgpu_crtc->base);
                e->pipe = amdgpu_crtc->crtc_id;

                list_add_tail(&e->base.link, &adev_to_drm(adev)->vblank_event_list);
                e = NULL;
        }

        /* Keep track of vblank of this flip for flip throttling. We use the
         * cooked hw counter, as that one incremented at start of this vblank
         * of pageflip completion, so last_flip_vblank is the forbidden count
         * for queueing new pageflips if vsync + VRR is enabled.
         */
        amdgpu_crtc->dm_irq_params.last_flip_vblank =
                amdgpu_get_vblank_counter_kms(&amdgpu_crtc->base);

        amdgpu_crtc->pflip_status = AMDGPU_FLIP_NONE;
        spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);

        DC_LOG_PFLIP("crtc:%d[%p], pflip_stat:AMDGPU_FLIP_NONE, vrr[%d]-fp %d\n",
                     amdgpu_crtc->crtc_id, amdgpu_crtc,
                     vrr_active, (int) !e);
}

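/**
 * dm_vupdate_high_irq() - Handles VUPDATE interrupt
 * @interrupt_params: interrupt parameters, used to look up the CRTC
 *
 * In VRR mode this runs after the end of front-porch, so core vblank
 * handling (including delivery of any pageflip events queued by
 * dm_pflip_high_irq()) is done from here, where vblank timestamping
 * gives valid results. Also tracks the measured frame duration for
 * refresh-rate tracing and performs BTR processing on pre-DCE12 ASICs.
 */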
static void dm_vupdate_high_irq(void *interrupt_params)
{
        struct common_irq_params *irq_params = interrupt_params;
        struct amdgpu_device *adev = irq_params->adev;
        struct amdgpu_crtc *acrtc;
        struct drm_device *drm_dev;
        struct drm_vblank_crtc *vblank;
        ktime_t frame_duration_ns, previous_timestamp;
        unsigned long flags;
        int vrr_active;

        acrtc = get_crtc_by_otg_inst(adev, irq_params->irq_src - IRQ_TYPE_VUPDATE);

        if (acrtc) {
                vrr_active = amdgpu_dm_vrr_active_irq(acrtc);
                drm_dev = acrtc->base.dev;
                vblank = &drm_dev->vblank[acrtc->base.index];
                previous_timestamp = atomic64_read(&irq_params->previous_timestamp);
                frame_duration_ns = vblank->time - previous_timestamp;

                if (frame_duration_ns > 0) {
                        trace_amdgpu_refresh_rate_track(acrtc->base.index,
                                                frame_duration_ns,
                                                ktime_divns(NSEC_PER_SEC, frame_duration_ns));
                        atomic64_set(&irq_params->previous_timestamp, vblank->time);
                }

                DC_LOG_VBLANK("crtc:%d, vupdate-vrr:%d\n",
                              acrtc->crtc_id,
                              vrr_active);

                /* Core vblank handling is done here after end of front-porch in
                 * vrr mode, as vblank timestamping will give valid results
                 * now that scanout is past the front-porch. This will also
                 * deliver page-flip completion events that have been queued
                 * to us if a pageflip happened inside front-porch.
                 */
                if (vrr_active) {
                        drm_crtc_handle_vblank(&acrtc->base);

                        /* BTR processing for pre-DCE12 ASICs */
                        if (acrtc->dm_irq_params.stream &&
                            adev->family < AMDGPU_FAMILY_AI) {
                                spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);
                                mod_freesync_handle_v_update(
                                    adev->dm.freesync_module,
                                    acrtc->dm_irq_params.stream,
                                    &acrtc->dm_irq_params.vrr_params);

                                dc_stream_adjust_vmin_vmax(
                                    adev->dm.dc,
                                    acrtc->dm_irq_params.stream,
                                    &acrtc->dm_irq_params.vrr_params.adjust);
                                spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
                        }
                }
        }
}

/**
 * dm_crtc_high_irq() - Handles CRTC interrupt
 * @interrupt_params: used for determining the CRTC instance
 *
 * Handles the CRTC/VSYNC interrupt by notifying DRM's VBLANK
 * event handler.
 */
static void dm_crtc_high_irq(void *interrupt_params)
{
        struct common_irq_params *irq_params = interrupt_params;
        struct amdgpu_device *adev = irq_params->adev;
        struct amdgpu_crtc *acrtc;
        unsigned long flags;
        int vrr_active;

        acrtc = get_crtc_by_otg_inst(adev, irq_params->irq_src - IRQ_TYPE_VBLANK);
        if (!acrtc)
                return;

        vrr_active = amdgpu_dm_vrr_active_irq(acrtc);

        DC_LOG_VBLANK("crtc:%d, vupdate-vrr:%d, planes:%d\n", acrtc->crtc_id,
                      vrr_active, acrtc->dm_irq_params.active_planes);

        /*
         * Core vblank handling at start of front-porch is only possible
         * in non-vrr mode, as only there vblank timestamping will give
         * valid results while done in front-porch. Otherwise defer it
         * to dm_vupdate_high_irq after end of front-porch.
         */
        if (!vrr_active)
                drm_crtc_handle_vblank(&acrtc->base);

        /*
         * Following stuff must happen at start of vblank, for crc
         * computation and below-the-range btr support in vrr mode.
         */
        amdgpu_dm_crtc_handle_crc_irq(&acrtc->base);

        /* BTR updates need to happen before VUPDATE on Vega and above. */
        if (adev->family < AMDGPU_FAMILY_AI)
                return;

        spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);

        if (acrtc->dm_irq_params.stream &&
            acrtc->dm_irq_params.vrr_params.supported &&
            acrtc->dm_irq_params.freesync_config.state ==
                    VRR_STATE_ACTIVE_VARIABLE) {
                mod_freesync_handle_v_update(adev->dm.freesync_module,
                                             acrtc->dm_irq_params.stream,
                                             &acrtc->dm_irq_params.vrr_params);

                dc_stream_adjust_vmin_vmax(adev->dm.dc, acrtc->dm_irq_params.stream,
                                           &acrtc->dm_irq_params.vrr_params.adjust);
        }

        /*
         * If there aren't any active_planes then DCH HUBP may be clock-gated.
         * In that case, pageflip completion interrupts won't fire and pageflip
         * completion events won't get delivered. Prevent this by sending
         * pending pageflip events from here if a flip is still pending.
         *
         * If any planes are enabled, use dm_pflip_high_irq() instead, to
         * avoid race conditions between flip programming and completion,
         * which could cause too early flip completion events.
         */
        if (adev->family >= AMDGPU_FAMILY_RV &&
            acrtc->pflip_status == AMDGPU_FLIP_SUBMITTED &&
            acrtc->dm_irq_params.active_planes == 0) {
                if (acrtc->event) {
                        drm_crtc_send_vblank_event(&acrtc->base, acrtc->event);
                        acrtc->event = NULL;
                        drm_crtc_vblank_put(&acrtc->base);
                }
                acrtc->pflip_status = AMDGPU_FLIP_NONE;
        }

        spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
}

#if defined(CONFIG_DRM_AMD_DC_DCN)
/**
 * dm_dcn_vertical_interrupt0_high_irq() - Handles OTG Vertical interrupt0 for
 * DCN generation ASICs
 * @interrupt_params: interrupt parameters
 *
 * Used to set crc window/read out crc value at vertical line 0 position
 */
#if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
static void dm_dcn_vertical_interrupt0_high_irq(void *interrupt_params)
{
        struct common_irq_params *irq_params = interrupt_params;
        struct amdgpu_device *adev = irq_params->adev;
        struct amdgpu_crtc *acrtc;

        acrtc = get_crtc_by_otg_inst(adev, irq_params->irq_src - IRQ_TYPE_VLINE0);

        if (!acrtc)
                return;

        amdgpu_dm_crtc_handle_crc_window_irq(&acrtc->base);
}
#endif
#endif

static int dm_set_clockgating_state(void *handle,
                  enum amd_clockgating_state state)
{
        return 0;
}

static int dm_set_powergating_state(void *handle,
                  enum amd_powergating_state state)
{
        return 0;
}

/* Prototypes of private functions */
static int dm_early_init(void *handle);

/* Allocate memory for FBC compressed data */
static void amdgpu_dm_fbc_init(struct drm_connector *connector)
{
        struct drm_device *dev = connector->dev;
        struct amdgpu_device *adev = drm_to_adev(dev);
        struct dm_compressor_info *compressor = &adev->dm.compressor;
        struct amdgpu_dm_connector *aconn = to_amdgpu_dm_connector(connector);
        struct drm_display_mode *mode;
        unsigned long max_size = 0;

        if (adev->dm.dc->fbc_compressor == NULL)
                return;

        if (aconn->dc_link->connector_signal != SIGNAL_TYPE_EDP)
                return;

        if (compressor->bo_ptr)
                return;

        list_for_each_entry(mode, &connector->modes, head) {
                if (max_size < mode->htotal * mode->vtotal)
                        max_size = mode->htotal * mode->vtotal;
        }

        if (max_size) {
                int r = amdgpu_bo_create_kernel(adev, max_size * 4, PAGE_SIZE,
                            AMDGPU_GEM_DOMAIN_GTT, &compressor->bo_ptr,
                            &compressor->gpu_addr, &compressor->cpu_addr);

                if (r) {
                        DRM_ERROR("DM: Failed to initialize FBC\n");
                } else {
                        adev->dm.dc->ctx->fbc_gpu_addr = compressor->gpu_addr;
                        DRM_INFO("DM: FBC alloc %lu\n", max_size*4);
                }
        }
}

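/**
 * amdgpu_dm_audio_component_get_eld() - Fetch the ELD for an audio pin
 * @kdev: device backing the drm_device
 * @port: audio instance (pin) to look up
 * @pipe: unused
 * @enabled: set to true if a connector is attached to this audio instance
 * @buf: buffer that receives the ELD, truncated to @max_bytes
 * @max_bytes: size of @buf
 *
 * Called by the HDA driver through the audio component framework.
 *
 * Return: size of the connector's ELD, or 0 if no matching connector
 * was found.
 */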
static int amdgpu_dm_audio_component_get_eld(struct device *kdev, int port,
                                             int pipe, bool *enabled,
                                             unsigned char *buf, int max_bytes)
{
        struct drm_device *dev = dev_get_drvdata(kdev);
        struct amdgpu_device *adev = drm_to_adev(dev);
        struct drm_connector *connector;
        struct drm_connector_list_iter conn_iter;
        struct amdgpu_dm_connector *aconnector;
        int ret = 0;

        *enabled = false;

        mutex_lock(&adev->dm.audio_lock);

        drm_connector_list_iter_begin(dev, &conn_iter);
        drm_for_each_connector_iter(connector, &conn_iter) {
                aconnector = to_amdgpu_dm_connector(connector);
                if (aconnector->audio_inst != port)
                        continue;

                *enabled = true;
                ret = drm_eld_size(connector->eld);
                memcpy(buf, connector->eld, min(max_bytes, ret));

                break;
        }
        drm_connector_list_iter_end(&conn_iter);

        mutex_unlock(&adev->dm.audio_lock);

        DRM_DEBUG_KMS("Get ELD : idx=%d ret=%d en=%d\n", port, ret, *enabled);

        return ret;
}

static const struct drm_audio_component_ops amdgpu_dm_audio_component_ops = {
        .get_eld = amdgpu_dm_audio_component_get_eld,
};

static int amdgpu_dm_audio_component_bind(struct device *kdev,
                                          struct device *hda_kdev, void *data)
{
        struct drm_device *dev = dev_get_drvdata(kdev);
        struct amdgpu_device *adev = drm_to_adev(dev);
        struct drm_audio_component *acomp = data;

        acomp->ops = &amdgpu_dm_audio_component_ops;
        acomp->dev = kdev;
        adev->dm.audio_component = acomp;

        return 0;
}

static void amdgpu_dm_audio_component_unbind(struct device *kdev,
                                             struct device *hda_kdev, void *data)
{
        struct drm_device *dev = dev_get_drvdata(kdev);
        struct amdgpu_device *adev = drm_to_adev(dev);
        struct drm_audio_component *acomp = data;

        acomp->ops = NULL;
        acomp->dev = NULL;
        adev->dm.audio_component = NULL;
}

static const struct component_ops amdgpu_dm_audio_component_bind_ops = {
        .bind   = amdgpu_dm_audio_component_bind,
        .unbind = amdgpu_dm_audio_component_unbind,
};

static int amdgpu_dm_audio_init(struct amdgpu_device *adev)
{
        int i, ret;

        if (!amdgpu_audio)
                return 0;

        adev->mode_info.audio.enabled = true;

        adev->mode_info.audio.num_pins = adev->dm.dc->res_pool->audio_count;

        for (i = 0; i < adev->mode_info.audio.num_pins; i++) {
                adev->mode_info.audio.pin[i].channels = -1;
                adev->mode_info.audio.pin[i].rate = -1;
                adev->mode_info.audio.pin[i].bits_per_sample = -1;
                adev->mode_info.audio.pin[i].status_bits = 0;
                adev->mode_info.audio.pin[i].category_code = 0;
                adev->mode_info.audio.pin[i].connected = false;
                adev->mode_info.audio.pin[i].id =
                        adev->dm.dc->res_pool->audios[i]->inst;
                adev->mode_info.audio.pin[i].offset = 0;
        }

        ret = component_add(adev->dev, &amdgpu_dm_audio_component_bind_ops);
        if (ret < 0)
                return ret;

        adev->dm.audio_registered = true;

        return 0;
}

static void amdgpu_dm_audio_fini(struct amdgpu_device *adev)
{
        if (!amdgpu_audio)
                return;

        if (!adev->mode_info.audio.enabled)
                return;

        if (adev->dm.audio_registered) {
                component_del(adev->dev, &amdgpu_dm_audio_component_bind_ops);
                adev->dm.audio_registered = false;
        }

        /* TODO: Disable audio? */

        adev->mode_info.audio.enabled = false;
}

static void amdgpu_dm_audio_eld_notify(struct amdgpu_device *adev, int pin)
{
        struct drm_audio_component *acomp = adev->dm.audio_component;

        if (acomp && acomp->audio_ops && acomp->audio_ops->pin_eld_notify) {
                DRM_DEBUG_KMS("Notify ELD: %d\n", pin);

                acomp->audio_ops->pin_eld_notify(acomp->audio_ops->audio_ptr,
                                                 pin, -1);
        }
}

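/**
 * dm_dmub_hw_init() - Copy DMUB firmware into framebuffer memory and bring
 * up the DMUB hardware service
 * @adev: amdgpu device
 *
 * No-op (returns 0) on ASICs without DMUB support. Copies the inst_const
 * and bss/data firmware sections into their framebuffer windows (unless
 * the PSP already loaded inst_const), copies the VBIOS, clears the
 * mailbox, trace buffer and firmware-state windows, then hands the layout
 * to dmub_srv_hw_init() and waits for the firmware auto-load to finish.
 *
 * Return: 0 on success or when DMUB is absent/unsupported, negative errno
 * on failure.
 */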
static int dm_dmub_hw_init(struct amdgpu_device *adev)
{
        const struct dmcub_firmware_header_v1_0 *hdr;
        struct dmub_srv *dmub_srv = adev->dm.dmub_srv;
        struct dmub_srv_fb_info *fb_info = adev->dm.dmub_fb_info;
        const struct firmware *dmub_fw = adev->dm.dmub_fw;
        struct dmcu *dmcu = adev->dm.dc->res_pool->dmcu;
        struct abm *abm = adev->dm.dc->res_pool->abm;
        struct dmub_srv_hw_params hw_params;
        enum dmub_status status;
        const unsigned char *fw_inst_const, *fw_bss_data;
        uint32_t i, fw_inst_const_size, fw_bss_data_size;
        bool has_hw_support;

        if (!dmub_srv)
                /* DMUB isn't supported on the ASIC. */
                return 0;

        if (!fb_info) {
                DRM_ERROR("No framebuffer info for DMUB service.\n");
                return -EINVAL;
        }

        if (!dmub_fw) {
                /* Firmware required for DMUB support. */
                DRM_ERROR("No firmware provided for DMUB.\n");
                return -EINVAL;
        }

        status = dmub_srv_has_hw_support(dmub_srv, &has_hw_support);
        if (status != DMUB_STATUS_OK) {
                DRM_ERROR("Error checking HW support for DMUB: %d\n", status);
                return -EINVAL;
        }

        if (!has_hw_support) {
                DRM_INFO("DMUB unsupported on ASIC\n");
                return 0;
        }

        hdr = (const struct dmcub_firmware_header_v1_0 *)dmub_fw->data;

        fw_inst_const = dmub_fw->data +
                        le32_to_cpu(hdr->header.ucode_array_offset_bytes) +
                        PSP_HEADER_BYTES;

        fw_bss_data = dmub_fw->data +
                      le32_to_cpu(hdr->header.ucode_array_offset_bytes) +
                      le32_to_cpu(hdr->inst_const_bytes);

        /* Copy firmware and bios info into FB memory. */
        fw_inst_const_size = le32_to_cpu(hdr->inst_const_bytes) -
                             PSP_HEADER_BYTES - PSP_FOOTER_BYTES;

        fw_bss_data_size = le32_to_cpu(hdr->bss_data_bytes);

        /* If adev->firmware.load_type == AMDGPU_FW_LOAD_PSP,
         * amdgpu_ucode_init_single_fw will load dmub firmware
         * fw_inst_const part to cw0; otherwise, the firmware back door load
         * will be done by dm_dmub_hw_init
         */
        if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP) {
                memcpy(fb_info->fb[DMUB_WINDOW_0_INST_CONST].cpu_addr, fw_inst_const,
                       fw_inst_const_size);
        }

        if (fw_bss_data_size)
                memcpy(fb_info->fb[DMUB_WINDOW_2_BSS_DATA].cpu_addr,
                       fw_bss_data, fw_bss_data_size);

        /* Copy firmware bios info into FB memory. */
        memcpy(fb_info->fb[DMUB_WINDOW_3_VBIOS].cpu_addr, adev->bios,
               adev->bios_size);

        /* Reset regions that need to be reset. */
        memset(fb_info->fb[DMUB_WINDOW_4_MAILBOX].cpu_addr, 0,
               fb_info->fb[DMUB_WINDOW_4_MAILBOX].size);

        memset(fb_info->fb[DMUB_WINDOW_5_TRACEBUFF].cpu_addr, 0,
               fb_info->fb[DMUB_WINDOW_5_TRACEBUFF].size);

        memset(fb_info->fb[DMUB_WINDOW_6_FW_STATE].cpu_addr, 0,
               fb_info->fb[DMUB_WINDOW_6_FW_STATE].size);

        /* Initialize hardware. */
        memset(&hw_params, 0, sizeof(hw_params));
        hw_params.fb_base = adev->gmc.fb_start;
        hw_params.fb_offset = adev->gmc.aper_base;

        /* backdoor load firmware and trigger dmub running */
        if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP)
                hw_params.load_inst_const = true;

        if (dmcu)
                hw_params.psp_version = dmcu->psp_version;

        for (i = 0; i < fb_info->num_fb; ++i)
                hw_params.fb[i] = &fb_info->fb[i];

        status = dmub_srv_hw_init(dmub_srv, &hw_params);
        if (status != DMUB_STATUS_OK) {
                DRM_ERROR("Error initializing DMUB HW: %d\n", status);
                return -EINVAL;
        }

        /* Wait for firmware load to finish. */
        status = dmub_srv_wait_for_auto_load(dmub_srv, 100000);
        if (status != DMUB_STATUS_OK)
                DRM_WARN("Wait for DMUB auto-load failed: %d\n", status);

        /* Init DMCU and ABM if available. */
        if (dmcu && abm) {
                dmcu->funcs->dmcu_init(dmcu);
                abm->dmcu_is_running = dmcu->funcs->is_dmcu_initialized(dmcu);
        }

        adev->dm.dc->ctx->dmub_srv = dc_dmub_srv_create(adev->dm.dc, dmub_srv);
        if (!adev->dm.dc->ctx->dmub_srv) {
                DRM_ERROR("Couldn't allocate DC DMUB server!\n");
                return -ENOMEM;
        }

        DRM_INFO("DMUB hardware initialized: version=0x%08X\n",
                 adev->dm.dmcub_fw_version);

        return 0;
}

#if defined(CONFIG_DRM_AMD_DC_DCN)
#define DMUB_TRACE_MAX_READ 64
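/*
 * Drain up to DMUB_TRACE_MAX_READ trace entries per interrupt from the
 * DMUB outbox0 ring and forward them to the amdgpu_dmub_trace_high_irq
 * tracepoint.
 */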
static void dm_dmub_trace_high_irq(void *interrupt_params)
{
        struct common_irq_params *irq_params = interrupt_params;
        struct amdgpu_device *adev = irq_params->adev;
        struct amdgpu_display_manager *dm = &adev->dm;
        struct dmcub_trace_buf_entry entry = { 0 };
        uint32_t count = 0;

        do {
                if (!dc_dmub_srv_get_dmub_outbox0_msg(dm->dc, &entry))
                        break;

                trace_amdgpu_dmub_trace_high_irq(entry.trace_code, entry.tick_count,
                                                 entry.param0, entry.param1);

                DRM_DEBUG_DRIVER("trace_code:%u, tick_count:%u, param0:%u, param1:%u\n",
                                 entry.trace_code, entry.tick_count, entry.param0, entry.param1);

                count++;

        } while (count <= DMUB_TRACE_MAX_READ);

        ASSERT(count <= DMUB_TRACE_MAX_READ);
}

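/**
 * mmhub_read_system_context() - Fill a DC physical address space config
 * from the GMC view of memory
 * @adev: amdgpu device
 * @pa_config: configuration later handed to dc_setup_system_context()
 *
 * System aperture bounds are kept in 256KB units (hence the >> 18 / << 18
 * round trips), the AGP window in 16MB units (>> 24), and the GART page
 * table addresses in 4KB pages (>> 12). Raven2 gets its aperture high
 * address bumped by one unit as a hardware workaround, noted in the body.
 */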
static void mmhub_read_system_context(struct amdgpu_device *adev, struct dc_phy_addr_space_config *pa_config)
{
        uint64_t pt_base;
        uint32_t logical_addr_low;
        uint32_t logical_addr_high;
        uint32_t agp_base, agp_bot, agp_top;
        PHYSICAL_ADDRESS_LOC page_table_start, page_table_end, page_table_base;

        logical_addr_low  = min(adev->gmc.fb_start, adev->gmc.agp_start) >> 18;
        pt_base = amdgpu_gmc_pd_addr(adev->gart.bo);

        if (adev->apu_flags & AMD_APU_IS_RAVEN2)
                /*
                 * Raven2 has a HW issue that it is unable to use the vram which
                 * is out of MC_VM_SYSTEM_APERTURE_HIGH_ADDR. So here is the
                 * workaround that increase system aperture high address (add 1)
                 * to get rid of the VM fault and hardware hang.
                 */
                logical_addr_high = max((adev->gmc.fb_end >> 18) + 0x1, adev->gmc.agp_end >> 18);
        else
                logical_addr_high = max(adev->gmc.fb_end, adev->gmc.agp_end) >> 18;

        agp_base = 0;
        agp_bot = adev->gmc.agp_start >> 24;
        agp_top = adev->gmc.agp_end >> 24;

        page_table_start.high_part = (u32)(adev->gmc.gart_start >> 44) & 0xF;
        page_table_start.low_part = (u32)(adev->gmc.gart_start >> 12);
        page_table_end.high_part = (u32)(adev->gmc.gart_end >> 44) & 0xF;
        page_table_end.low_part = (u32)(adev->gmc.gart_end >> 12);
        page_table_base.high_part = upper_32_bits(pt_base) & 0xF;
        page_table_base.low_part = lower_32_bits(pt_base);

        pa_config->system_aperture.start_addr = (uint64_t)logical_addr_low << 18;
        pa_config->system_aperture.end_addr = (uint64_t)logical_addr_high << 18;

        pa_config->system_aperture.agp_base = (uint64_t)agp_base << 24;
        pa_config->system_aperture.agp_bot = (uint64_t)agp_bot << 24;
        pa_config->system_aperture.agp_top = (uint64_t)agp_top << 24;

        pa_config->system_aperture.fb_base = adev->gmc.fb_start;
        pa_config->system_aperture.fb_offset = adev->gmc.aper_base;
        pa_config->system_aperture.fb_top = adev->gmc.fb_end;

        pa_config->gart_config.page_table_start_addr = page_table_start.quad_part << 12;
        pa_config->gart_config.page_table_end_addr = page_table_end.quad_part << 12;
        pa_config->gart_config.page_table_base_addr = page_table_base.quad_part;

        pa_config->is_hvm_enabled = 0;
}
#endif
#if defined(CONFIG_DRM_AMD_DC_DCN)
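/**
 * event_mall_stutter() - Enable or disable DC idle optimizations (MALL)
 * @work: the enqueued vblank work item
 *
 * Keeps a count of CRTCs with an active vblank interrupt and only allows
 * DC idle optimizations, such as MALL stutter, while that count is zero.
 */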
static void event_mall_stutter(struct work_struct *work)
{
        struct vblank_workqueue *vblank_work = container_of(work, struct vblank_workqueue, mall_work);
        struct amdgpu_display_manager *dm = vblank_work->dm;

        mutex_lock(&dm->dc_lock);

        if (vblank_work->enable)
                dm->active_vblank_irq_count++;
        else if (dm->active_vblank_irq_count)
                dm->active_vblank_irq_count--;

        dc_allow_idle_optimizations(dm->dc, dm->active_vblank_irq_count == 0);

        DRM_DEBUG_KMS("Allow idle optimizations (MALL): %d\n", dm->active_vblank_irq_count == 0);

        mutex_unlock(&dm->dc_lock);
}

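/*
 * Allocate one vblank work item per DC link and point each at
 * event_mall_stutter(). Returns NULL if the allocation fails.
 */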
static struct vblank_workqueue *vblank_create_workqueue(struct amdgpu_device *adev, struct dc *dc)
{
        int max_caps = dc->caps.max_links;
        struct vblank_workqueue *vblank_work;
        int i = 0;

        vblank_work = kcalloc(max_caps, sizeof(*vblank_work), GFP_KERNEL);
        if (ZERO_OR_NULL_PTR(vblank_work)) {
                kfree(vblank_work);
                return NULL;
        }

        for (i = 0; i < max_caps; i++)
                INIT_WORK(&vblank_work[i].mall_work, event_mall_stutter);

        return vblank_work;
}
#endif
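/**
 * amdgpu_dm_init() - Create and bring up the display manager
 * @adev: amdgpu device
 *
 * Initializes the DM locks and IRQ support, creates the DC instance with
 * ASIC-specific feature flags, brings up the DMUB and DC hardware, and
 * then creates the freesync, vblank, HDCP and DRM device state that sits
 * on top. Any failure unwinds through amdgpu_dm_fini().
 *
 * Return: 0 on success, -EINVAL on failure.
 */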
static int amdgpu_dm_init(struct amdgpu_device *adev)
{
        struct dc_init_data init_data;
#ifdef CONFIG_DRM_AMD_DC_HDCP
        struct dc_callback_init init_params;
#endif
        int r;

        adev->dm.ddev = adev_to_drm(adev);
        adev->dm.adev = adev;

        /* Zero all the fields */
        memset(&init_data, 0, sizeof(init_data));
#ifdef CONFIG_DRM_AMD_DC_HDCP
        memset(&init_params, 0, sizeof(init_params));
#endif

        mutex_init(&adev->dm.dc_lock);
        mutex_init(&adev->dm.audio_lock);
#if defined(CONFIG_DRM_AMD_DC_DCN)
        spin_lock_init(&adev->dm.vblank_lock);
#endif

        if (amdgpu_dm_irq_init(adev)) {
                DRM_ERROR("amdgpu: failed to initialize DM IRQ support.\n");
                goto error;
        }

        init_data.asic_id.chip_family = adev->family;

        init_data.asic_id.pci_revision_id = adev->pdev->revision;
        init_data.asic_id.hw_internal_rev = adev->external_rev_id;

        init_data.asic_id.vram_width = adev->gmc.vram_width;
        /* TODO: initialize init_data.asic_id.vram_type here!!!! */
        init_data.asic_id.atombios_base_address =
                adev->mode_info.atom_context->bios;

        init_data.driver = adev;

        adev->dm.cgs_device = amdgpu_cgs_create_device(adev);

        if (!adev->dm.cgs_device) {
                DRM_ERROR("amdgpu: failed to create cgs device.\n");
                goto error;
        }

        init_data.cgs_device = adev->dm.cgs_device;

        init_data.dce_environment = DCE_ENV_PRODUCTION_DRV;

        switch (adev->asic_type) {
        case CHIP_CARRIZO:
        case CHIP_STONEY:
        case CHIP_RAVEN:
        case CHIP_RENOIR:
                init_data.flags.gpu_vm_support = true;
                if (ASICREV_IS_GREEN_SARDINE(adev->external_rev_id))
                        init_data.flags.disable_dmcu = true;
                break;
#if defined(CONFIG_DRM_AMD_DC_DCN)
        case CHIP_VANGOGH:
                init_data.flags.gpu_vm_support = true;
                break;
#endif
        default:
                break;
        }

        if (amdgpu_dc_feature_mask & DC_FBC_MASK)
                init_data.flags.fbc_support = true;

        if (amdgpu_dc_feature_mask & DC_MULTI_MON_PP_MCLK_SWITCH_MASK)
                init_data.flags.multi_mon_pp_mclk_switch = true;

        if (amdgpu_dc_feature_mask & DC_DISABLE_FRACTIONAL_PWM_MASK)
                init_data.flags.disable_fractional_pwm = true;

        init_data.flags.power_down_display_on_boot = true;

        INIT_LIST_HEAD(&adev->dm.da_list);
        /* Display Core create. */
        adev->dm.dc = dc_create(&init_data);

        if (adev->dm.dc) {
                DRM_INFO("Display Core initialized with v%s!\n", DC_VER);
        } else {
                DRM_ERROR("Display Core failed to initialize with v%s!\n", DC_VER);
                goto error;
        }

        if (amdgpu_dc_debug_mask & DC_DISABLE_PIPE_SPLIT) {
                adev->dm.dc->debug.force_single_disp_pipe_split = false;
                adev->dm.dc->debug.pipe_split_policy = MPC_SPLIT_AVOID;
        }

        if (adev->asic_type != CHIP_CARRIZO && adev->asic_type != CHIP_STONEY)
                adev->dm.dc->debug.disable_stutter = !(amdgpu_pp_feature_mask & PP_STUTTER_MODE);

        if (amdgpu_dc_debug_mask & DC_DISABLE_STUTTER)
                adev->dm.dc->debug.disable_stutter = true;

        if (amdgpu_dc_debug_mask & DC_DISABLE_DSC)
                adev->dm.dc->debug.disable_dsc = true;

        if (amdgpu_dc_debug_mask & DC_DISABLE_CLOCK_GATING)
                adev->dm.dc->debug.disable_clock_gate = true;

        r = dm_dmub_hw_init(adev);
        if (r) {
                DRM_ERROR("DMUB interface failed to initialize: status=%d\n", r);
                goto error;
        }

        dc_hardware_init(adev->dm.dc);

#if defined(CONFIG_DRM_AMD_DC_DCN)
        if (adev->apu_flags) {
                struct dc_phy_addr_space_config pa_config;

                mmhub_read_system_context(adev, &pa_config);

                /* Call the DC init_memory func */
                dc_setup_system_context(adev->dm.dc, &pa_config);
        }
#endif

        adev->dm.freesync_module = mod_freesync_create(adev->dm.dc);
        if (!adev->dm.freesync_module)
                DRM_ERROR("amdgpu: failed to initialize freesync_module.\n");
        else
                DRM_DEBUG_DRIVER("amdgpu: freesync_module init done %p.\n",
                                 adev->dm.freesync_module);

        amdgpu_dm_init_color_mod();

#if defined(CONFIG_DRM_AMD_DC_DCN)
        if (adev->dm.dc->caps.max_links > 0) {
                adev->dm.vblank_workqueue = vblank_create_workqueue(adev, adev->dm.dc);

                if (!adev->dm.vblank_workqueue)
                        DRM_ERROR("amdgpu: failed to initialize vblank_workqueue.\n");
                else
                        DRM_DEBUG_DRIVER("amdgpu: vblank_workqueue init done %p.\n", adev->dm.vblank_workqueue);
        }
#endif

#ifdef CONFIG_DRM_AMD_DC_HDCP
        if (adev->dm.dc->caps.max_links > 0 && adev->asic_type >= CHIP_RAVEN) {
                adev->dm.hdcp_workqueue = hdcp_create_workqueue(adev, &init_params.cp_psp, adev->dm.dc);

                if (!adev->dm.hdcp_workqueue)
                        DRM_ERROR("amdgpu: failed to initialize hdcp_workqueue.\n");
                else
                        DRM_DEBUG_DRIVER("amdgpu: hdcp_workqueue init done %p.\n", adev->dm.hdcp_workqueue);

                dc_init_callbacks(adev->dm.dc, &init_params);
        }
#endif
#if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
        adev->dm.crc_rd_wrk = amdgpu_dm_crtc_secure_display_create_work();
#endif
        if (amdgpu_dm_initialize_drm_device(adev)) {
                DRM_ERROR("amdgpu: failed to initialize sw for display support.\n");
                goto error;
        }

        /* create fake encoders for MST */
        dm_dp_create_fake_mst_encoders(adev);

        /* TODO: Add_display_info? */

        /* TODO use dynamic cursor width */
        adev_to_drm(adev)->mode_config.cursor_width = adev->dm.dc->caps.max_cursor_size;
        adev_to_drm(adev)->mode_config.cursor_height = adev->dm.dc->caps.max_cursor_size;

        if (drm_vblank_init(adev_to_drm(adev), adev->dm.display_indexes_num)) {
                DRM_ERROR("amdgpu: failed to initialize vblank support.\n");
                goto error;
        }

        DRM_DEBUG_DRIVER("KMS initialized.\n");

        return 0;
error:
        amdgpu_dm_fini(adev);

        return -EINVAL;
}

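/**
 * amdgpu_dm_fini() - Tear down the display manager
 * @adev: amdgpu device
 *
 * Mirror of amdgpu_dm_init(); it also serves as the error-unwind path of
 * init, so every step must tolerate partially-initialized state.
 */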
static void amdgpu_dm_fini(struct amdgpu_device *adev)
{
        int i;

        for (i = 0; i < adev->dm.display_indexes_num; i++)
                drm_encoder_cleanup(&adev->dm.mst_encoders[i].base);

        amdgpu_dm_audio_fini(adev);

        amdgpu_dm_destroy_drm_device(&adev->dm);

#if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
        if (adev->dm.crc_rd_wrk) {
                flush_work(&adev->dm.crc_rd_wrk->notify_ta_work);
                kfree(adev->dm.crc_rd_wrk);
                adev->dm.crc_rd_wrk = NULL;
        }
#endif
#ifdef CONFIG_DRM_AMD_DC_HDCP
        if (adev->dm.hdcp_workqueue) {
                hdcp_destroy(&adev->dev->kobj, adev->dm.hdcp_workqueue);
                adev->dm.hdcp_workqueue = NULL;
        }

        if (adev->dm.dc)
                dc_deinit_callbacks(adev->dm.dc);
#endif

#if defined(CONFIG_DRM_AMD_DC_DCN)
        if (adev->dm.vblank_workqueue) {
                adev->dm.vblank_workqueue->dm = NULL;
                kfree(adev->dm.vblank_workqueue);
                adev->dm.vblank_workqueue = NULL;
        }
#endif

        /* Guard against reaching here with a partially created DC instance
         * when used as the error-unwind path of amdgpu_dm_init().
         */
        if (adev->dm.dc && adev->dm.dc->ctx->dmub_srv) {
                dc_dmub_srv_destroy(&adev->dm.dc->ctx->dmub_srv);
                adev->dm.dc->ctx->dmub_srv = NULL;
        }

        if (adev->dm.dmub_bo)
                amdgpu_bo_free_kernel(&adev->dm.dmub_bo,
                                      &adev->dm.dmub_bo_gpu_addr,
                                      &adev->dm.dmub_bo_cpu_addr);

        /* DC Destroy TODO: Replace destroy DAL */
        if (adev->dm.dc)
                dc_destroy(&adev->dm.dc);
        /*
         * TODO: pageflip, vblank interrupt
         *
         * amdgpu_dm_irq_fini(adev);
         */

        if (adev->dm.cgs_device) {
                amdgpu_cgs_destroy_device(adev->dm.cgs_device);
                adev->dm.cgs_device = NULL;
        }
        if (adev->dm.freesync_module) {
                mod_freesync_destroy(adev->dm.freesync_module);
                adev->dm.freesync_module = NULL;
        }

        mutex_destroy(&adev->dm.audio_lock);
        mutex_destroy(&adev->dm.dc_lock);
}

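/**
 * load_dmcu_fw() - Request and register DMCU firmware where required
 * @adev: amdgpu device
 *
 * Most ASICs either have no DMCU or need no external image and return
 * early. For Raven (Picasso/Raven2) and Navi12 the external DMCU image
 * is requested, validated and queued for PSP loading. A missing firmware
 * file is not an error, since DMCU is optional.
 */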
1325 static int load_dmcu_fw(struct amdgpu_device *adev)
1326 {
1327         const char *fw_name_dmcu = NULL;
1328         int r;
1329         const struct dmcu_firmware_header_v1_0 *hdr;
1330
1331         switch(adev->asic_type) {
1332 #if defined(CONFIG_DRM_AMD_DC_SI)
1333         case CHIP_TAHITI:
1334         case CHIP_PITCAIRN:
1335         case CHIP_VERDE:
1336         case CHIP_OLAND:
1337 #endif
1338         case CHIP_BONAIRE:
1339         case CHIP_HAWAII:
1340         case CHIP_KAVERI:
1341         case CHIP_KABINI:
1342         case CHIP_MULLINS:
1343         case CHIP_TONGA:
1344         case CHIP_FIJI:
1345         case CHIP_CARRIZO:
1346         case CHIP_STONEY:
1347         case CHIP_POLARIS11:
1348         case CHIP_POLARIS10:
1349         case CHIP_POLARIS12:
1350         case CHIP_VEGAM:
1351         case CHIP_VEGA10:
1352         case CHIP_VEGA12:
1353         case CHIP_VEGA20:
1354         case CHIP_NAVI10:
1355         case CHIP_NAVI14:
1356         case CHIP_RENOIR:
1357         case CHIP_SIENNA_CICHLID:
1358         case CHIP_NAVY_FLOUNDER:
1359         case CHIP_DIMGREY_CAVEFISH:
1360         case CHIP_VANGOGH:
1361                 return 0;
1362         case CHIP_NAVI12:
1363                 fw_name_dmcu = FIRMWARE_NAVI12_DMCU;
1364                 break;
1365         case CHIP_RAVEN:
1366                 if (ASICREV_IS_PICASSO(adev->external_rev_id))
1367                         fw_name_dmcu = FIRMWARE_RAVEN_DMCU;
1368                 else if (ASICREV_IS_RAVEN2(adev->external_rev_id))
1369                         fw_name_dmcu = FIRMWARE_RAVEN_DMCU;
1370                 else
1371                         return 0;
1372                 break;
1373         default:
1374                 DRM_ERROR("Unsupported ASIC type: 0x%X\n", adev->asic_type);
1375                 return -EINVAL;
1376         }
1377
1378         if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP) {
1379                 DRM_DEBUG_KMS("dm: DMCU firmware not supported on direct or SMU loading\n");
1380                 return 0;
1381         }
1382
1383         r = request_firmware_direct(&adev->dm.fw_dmcu, fw_name_dmcu, adev->dev);
1384         if (r == -ENOENT) {
1385                 /* DMCU firmware is not necessary, so don't raise a fuss if it's missing */
1386                 DRM_DEBUG_KMS("dm: DMCU firmware not found\n");
1387                 adev->dm.fw_dmcu = NULL;
1388                 return 0;
1389         }
1390         if (r) {
1391                 dev_err(adev->dev, "amdgpu_dm: Can't load firmware \"%s\"\n",
1392                         fw_name_dmcu);
1393                 return r;
1394         }
1395
1396         r = amdgpu_ucode_validate(adev->dm.fw_dmcu);
1397         if (r) {
1398                 dev_err(adev->dev, "amdgpu_dm: Can't validate firmware \"%s\"\n",
1399                         fw_name_dmcu);
1400                 release_firmware(adev->dm.fw_dmcu);
1401                 adev->dm.fw_dmcu = NULL;
1402                 return r;
1403         }
1404
1405         hdr = (const struct dmcu_firmware_header_v1_0 *)adev->dm.fw_dmcu->data;
1406         adev->firmware.ucode[AMDGPU_UCODE_ID_DMCU_ERAM].ucode_id = AMDGPU_UCODE_ID_DMCU_ERAM;
1407         adev->firmware.ucode[AMDGPU_UCODE_ID_DMCU_ERAM].fw = adev->dm.fw_dmcu;
1408         adev->firmware.fw_size +=
1409                 ALIGN(le32_to_cpu(hdr->header.ucode_size_bytes) - le32_to_cpu(hdr->intv_size_bytes), PAGE_SIZE);
1410
1411         adev->firmware.ucode[AMDGPU_UCODE_ID_DMCU_INTV].ucode_id = AMDGPU_UCODE_ID_DMCU_INTV;
1412         adev->firmware.ucode[AMDGPU_UCODE_ID_DMCU_INTV].fw = adev->dm.fw_dmcu;
1413         adev->firmware.fw_size +=
1414                 ALIGN(le32_to_cpu(hdr->intv_size_bytes), PAGE_SIZE);
1415
1416         adev->dm.dmcu_fw_version = le32_to_cpu(hdr->header.ucode_version);
1417
1418         DRM_DEBUG_KMS("PSP loading DMCU firmware\n");
1419
1420         return 0;
1421 }
1422
1423 static uint32_t amdgpu_dm_dmub_reg_read(void *ctx, uint32_t address)
1424 {
1425         struct amdgpu_device *adev = ctx;
1426
1427         return dm_read_reg(adev->dm.dc->ctx, address);
1428 }
1429
1430 static void amdgpu_dm_dmub_reg_write(void *ctx, uint32_t address,
1431                                      uint32_t value)
1432 {
1433         struct amdgpu_device *adev = ctx;
1434
1435         dm_write_reg(adev->dm.dc->ctx, address, value);
1436 }
1437
1438 static int dm_dmub_sw_init(struct amdgpu_device *adev)
1439 {
1440         struct dmub_srv_create_params create_params;
1441         struct dmub_srv_region_params region_params;
1442         struct dmub_srv_region_info region_info;
1443         struct dmub_srv_fb_params fb_params;
1444         struct dmub_srv_fb_info *fb_info;
1445         struct dmub_srv *dmub_srv;
1446         const struct dmcub_firmware_header_v1_0 *hdr;
1447         const char *fw_name_dmub;
1448         enum dmub_asic dmub_asic;
1449         enum dmub_status status;
1450         int r;
1451
1452         switch (adev->asic_type) {
1453         case CHIP_RENOIR:
1454                 dmub_asic = DMUB_ASIC_DCN21;
1455                 fw_name_dmub = FIRMWARE_RENOIR_DMUB;
1456                 if (ASICREV_IS_GREEN_SARDINE(adev->external_rev_id))
1457                         fw_name_dmub = FIRMWARE_GREEN_SARDINE_DMUB;
1458                 break;
1459         case CHIP_SIENNA_CICHLID:
1460                 dmub_asic = DMUB_ASIC_DCN30;
1461                 fw_name_dmub = FIRMWARE_SIENNA_CICHLID_DMUB;
1462                 break;
1463         case CHIP_NAVY_FLOUNDER:
1464                 dmub_asic = DMUB_ASIC_DCN30;
1465                 fw_name_dmub = FIRMWARE_NAVY_FLOUNDER_DMUB;
1466                 break;
1467         case CHIP_VANGOGH:
1468                 dmub_asic = DMUB_ASIC_DCN301;
1469                 fw_name_dmub = FIRMWARE_VANGOGH_DMUB;
1470                 break;
1471         case CHIP_DIMGREY_CAVEFISH:
1472                 dmub_asic = DMUB_ASIC_DCN302;
1473                 fw_name_dmub = FIRMWARE_DIMGREY_CAVEFISH_DMUB;
1474                 break;
1475
1476         default:
1477                 /* ASIC doesn't support DMUB. */
1478                 return 0;
1479         }
1480
1481         r = request_firmware_direct(&adev->dm.dmub_fw, fw_name_dmub, adev->dev);
1482         if (r) {
1483                 DRM_ERROR("DMUB firmware loading failed: %d\n", r);
1484                 return 0;
1485         }
1486
1487         r = amdgpu_ucode_validate(adev->dm.dmub_fw);
1488         if (r) {
1489                 DRM_ERROR("Couldn't validate DMUB firmware: %d\n", r);
1490                 return 0;
1491         }
1492
1493         hdr = (const struct dmcub_firmware_header_v1_0 *)adev->dm.dmub_fw->data;
1494
1495         /* Read the firmware version before it is logged below. */
1496         adev->dm.dmcub_fw_version = le32_to_cpu(hdr->header.ucode_version);
1497
1498         if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) {
1499                 adev->firmware.ucode[AMDGPU_UCODE_ID_DMCUB].ucode_id =
1500                         AMDGPU_UCODE_ID_DMCUB;
1501                 adev->firmware.ucode[AMDGPU_UCODE_ID_DMCUB].fw =
1502                         adev->dm.dmub_fw;
1503                 adev->firmware.fw_size +=
1504                         ALIGN(le32_to_cpu(hdr->inst_const_bytes), PAGE_SIZE);
1505
1506                 DRM_INFO("Loading DMUB firmware via PSP: version=0x%08X\n",
1507                          adev->dm.dmcub_fw_version);
1508         }
1509         adev->dm.dmub_srv = kzalloc(sizeof(*adev->dm.dmub_srv), GFP_KERNEL);
1510         dmub_srv = adev->dm.dmub_srv;
1511
1512         if (!dmub_srv) {
1513                 DRM_ERROR("Failed to allocate DMUB service!\n");
1514                 return -ENOMEM;
1515         }
1516
1517         memset(&create_params, 0, sizeof(create_params));
1518         create_params.user_ctx = adev;
1519         create_params.funcs.reg_read = amdgpu_dm_dmub_reg_read;
1520         create_params.funcs.reg_write = amdgpu_dm_dmub_reg_write;
1521         create_params.asic = dmub_asic;
1522
1523         /* Create the DMUB service. */
1524         status = dmub_srv_create(dmub_srv, &create_params);
1525         if (status != DMUB_STATUS_OK) {
1526                 DRM_ERROR("Error creating DMUB service: %d\n", status);
1527                 return -EINVAL;
1528         }
1529
1530         /* Calculate the size of all the regions for the DMUB service. */
1531         memset(&region_params, 0, sizeof(region_params));
1532
1533         region_params.inst_const_size = le32_to_cpu(hdr->inst_const_bytes) -
1534                                         PSP_HEADER_BYTES - PSP_FOOTER_BYTES;
1535         region_params.bss_data_size = le32_to_cpu(hdr->bss_data_bytes);
1536         region_params.vbios_size = adev->bios_size;
1537         region_params.fw_bss_data = region_params.bss_data_size ?
1538                 adev->dm.dmub_fw->data +
1539                 le32_to_cpu(hdr->header.ucode_array_offset_bytes) +
1540                 le32_to_cpu(hdr->inst_const_bytes) : NULL;
1541         region_params.fw_inst_const =
1542                 adev->dm.dmub_fw->data +
1543                 le32_to_cpu(hdr->header.ucode_array_offset_bytes) +
1544                 PSP_HEADER_BYTES;
1545
1546         status = dmub_srv_calc_region_info(dmub_srv, &region_params,
1547                                            &region_info);
1548
1549         if (status != DMUB_STATUS_OK) {
1550                 DRM_ERROR("Error calculating DMUB region info: %d\n", status);
1551                 return -EINVAL;
1552         }
1553
1554         /*
1555          * Allocate a framebuffer based on the total size of all the regions.
1556          * TODO: Move this into GART.
1557          */
1558         r = amdgpu_bo_create_kernel(adev, region_info.fb_size, PAGE_SIZE,
1559                                     AMDGPU_GEM_DOMAIN_VRAM, &adev->dm.dmub_bo,
1560                                     &adev->dm.dmub_bo_gpu_addr,
1561                                     &adev->dm.dmub_bo_cpu_addr);
1562         if (r)
1563                 return r;
1564
1565         /* Rebase the regions on the framebuffer address. */
1566         memset(&fb_params, 0, sizeof(fb_params));
1567         fb_params.cpu_addr = adev->dm.dmub_bo_cpu_addr;
1568         fb_params.gpu_addr = adev->dm.dmub_bo_gpu_addr;
1569         fb_params.region_info = &region_info;
1570
1571         adev->dm.dmub_fb_info =
1572                 kzalloc(sizeof(*adev->dm.dmub_fb_info), GFP_KERNEL);
1573         fb_info = adev->dm.dmub_fb_info;
1574
1575         if (!fb_info) {
1576                 DRM_ERROR(
1577                         "Failed to allocate framebuffer info for DMUB service!\n");
1578                 return -ENOMEM;
1579         }
1580
1581         status = dmub_srv_calc_fb_info(dmub_srv, &fb_params, fb_info);
1582         if (status != DMUB_STATUS_OK) {
1583                 DRM_ERROR("Error calculating DMUB FB info: %d\n", status);
1584                 return -EINVAL;
1585         }
1586
1587         return 0;
1588 }
1589
1590 static int dm_sw_init(void *handle)
1591 {
1592         struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1593         int r;
1594
1595         r = dm_dmub_sw_init(adev);
1596         if (r)
1597                 return r;
1598
1599         return load_dmcu_fw(adev);
1600 }
1601
1602 static int dm_sw_fini(void *handle)
1603 {
1604         struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1605
1606         kfree(adev->dm.dmub_fb_info);
1607         adev->dm.dmub_fb_info = NULL;
1608
1609         if (adev->dm.dmub_srv) {
1610                 dmub_srv_destroy(adev->dm.dmub_srv);
1611                 adev->dm.dmub_srv = NULL;
1612         }
1613
1614         release_firmware(adev->dm.dmub_fw);
1615         adev->dm.dmub_fw = NULL;
1616
1617         release_firmware(adev->dm.fw_dmcu);
1618         adev->dm.fw_dmcu = NULL;
1619
1620         return 0;
1621 }
1622
1623 static int detect_mst_link_for_all_connectors(struct drm_device *dev)
1624 {
1625         struct amdgpu_dm_connector *aconnector;
1626         struct drm_connector *connector;
1627         struct drm_connector_list_iter iter;
1628         int ret = 0;
1629
1630         drm_connector_list_iter_begin(dev, &iter);
1631         drm_for_each_connector_iter(connector, &iter) {
1632                 aconnector = to_amdgpu_dm_connector(connector);
1633                 if (aconnector->dc_link->type == dc_connection_mst_branch &&
1634                     aconnector->mst_mgr.aux) {
1635                         DRM_DEBUG_DRIVER("DM_MST: starting TM on aconnector: %p [id: %d]\n",
1636                                          aconnector,
1637                                          aconnector->base.base.id);
1638
1639                         ret = drm_dp_mst_topology_mgr_set_mst(&aconnector->mst_mgr, true);
1640                         if (ret < 0) {
1641                                 DRM_ERROR("DM_MST: Failed to start MST\n");
1642                                 aconnector->dc_link->type =
1643                                         dc_connection_single;
1644                                 break;
1645                         }
1646                 }
1647         }
1648         drm_connector_list_iter_end(&iter);
1649
1650         return ret;
1651 }
1652
1653 static int dm_late_init(void *handle)
1654 {
1655         struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1656
1657         struct dmcu_iram_parameters params;
1658         unsigned int linear_lut[16];
1659         int i;
1660         struct dmcu *dmcu = NULL;
1661         bool ret = true;
1662
1663         dmcu = adev->dm.dc->res_pool->dmcu;
1664
1665         for (i = 0; i < 16; i++)
1666                 linear_lut[i] = 0xFFFF * i / 15;
1667
1668         params.set = 0;
1669         params.backlight_ramping_start = 0xCCCC;
1670         params.backlight_ramping_reduction = 0xCCCCCCCC;
1671         params.backlight_lut_array_size = 16;
1672         params.backlight_lut_array = linear_lut;
1673
1674         /* Min backlight level after ABM reduction; don't allow below 1%:
1675          * 0xFFFF * 0.01 = 0x28F
1676          */
1677         params.min_abm_backlight = 0x28F;
1678
1679         /* In the case where ABM is implemented in DMCUB firmware, the
1680          * dmcu object will be NULL.
1681          * ABM 2.4 and up are implemented in DMCUB.
1682          */
1683         if (dmcu)
1684                 ret = dmcu_load_iram(dmcu, params);
1685         else if (adev->dm.dc->ctx->dmub_srv)
1686                 ret = dmub_init_abm_config(adev->dm.dc->res_pool, params);
1687
1688         if (!ret)
1689                 return -EINVAL;
1690
1691         return detect_mst_link_for_all_connectors(adev_to_drm(adev));
1692 }
1693
1694 static void s3_handle_mst(struct drm_device *dev, bool suspend)
1695 {
1696         struct amdgpu_dm_connector *aconnector;
1697         struct drm_connector *connector;
1698         struct drm_connector_list_iter iter;
1699         struct drm_dp_mst_topology_mgr *mgr;
1700         int ret;
1701         bool need_hotplug = false;
1702
1703         drm_connector_list_iter_begin(dev, &iter);
1704         drm_for_each_connector_iter(connector, &iter) {
1705                 aconnector = to_amdgpu_dm_connector(connector);
1706                 if (aconnector->dc_link->type != dc_connection_mst_branch ||
1707                     aconnector->mst_port)
1708                         continue;
1709
1710                 mgr = &aconnector->mst_mgr;
1711
1712                 if (suspend) {
1713                         drm_dp_mst_topology_mgr_suspend(mgr);
1714                 } else {
1715                         ret = drm_dp_mst_topology_mgr_resume(mgr, true);
1716                         if (ret < 0) {
1717                                 drm_dp_mst_topology_mgr_set_mst(mgr, false);
1718                                 need_hotplug = true;
1719                         }
1720                 }
1721         }
1722         drm_connector_list_iter_end(&iter);
1723
1724         if (need_hotplug)
1725                 drm_kms_helper_hotplug_event(dev);
1726 }
1727
1728 static int amdgpu_dm_smu_write_watermarks_table(struct amdgpu_device *adev)
1729 {
1730         struct smu_context *smu = &adev->smu;
1731         int ret = 0;
1732
1733         if (!is_support_sw_smu(adev))
1734                 return 0;
1735
1736         /* This interface is for dGPU Navi1x. The Linux dc-pplib interface
1737          * depends on the Windows driver's dc implementation.
1738          * For Navi1x, the clock settings of the dcn watermarks are fixed and
1739          * should be passed to smu during boot up and on resume from s3.
1740          * Boot up: dc calculates the dcn watermark clock settings within
1741          * dc_create (dcn20_resource_construct) and
1742          * then calls the pplib functions below to pass the settings to smu:
1743          * smu_set_watermarks_for_clock_ranges
1744          * smu_set_watermarks_table
1745          * navi10_set_watermarks_table
1746          * smu_write_watermarks_table
1747          *
1748          * For Renoir, the clock settings of the dcn watermarks are also fixed.
1749          * dc implements a different flow for the Windows driver:
1750          * dc_hardware_init / dc_set_power_state
1751          * dcn10_init_hw
1752          * notify_wm_ranges
1753          * set_wm_ranges
1754          * -- Linux
1755          * smu_set_watermarks_for_clock_ranges
1756          * renoir_set_watermarks_table
1757          * smu_write_watermarks_table
1758          *
1759          * For Linux,
1760          * dc_hardware_init -> amdgpu_dm_init
1761          * dc_set_power_state --> dm_resume
1762          *
1763          * Therefore, this function applies to Navi10/12/14,
1764          * but not to Renoir.
1765          */
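        /* Concretely, in this file: dm_resume() ends by calling
         * amdgpu_dm_smu_write_watermarks_table(), which reaches
         * smu_write_watermarks_table() below once the Navi1x check passes.
         */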
1766         switch (adev->asic_type) {
1767         case CHIP_NAVI10:
1768         case CHIP_NAVI14:
1769         case CHIP_NAVI12:
1770                 break;
1771         default:
1772                 return 0;
1773         }
1774
1775         ret = smu_write_watermarks_table(smu);
1776         if (ret) {
1777                 DRM_ERROR("Failed to update WMTABLE!\n");
1778                 return ret;
1779         }
1780
1781         return 0;
1782 }
1783
1784 /**
1785  * dm_hw_init() - Initialize DC device
1786  * @handle: The base driver device containing the amdgpu_dm device.
1787  *
1788  * Initialize the &struct amdgpu_display_manager device. This involves calling
1789  * the initializers of each DM component, then populating the struct with them.
1790  *
1791  * Although the function implies hardware initialization, both hardware and
1792  * software are initialized here. Splitting them out to their relevant init
1793  * hooks is a future TODO item.
1794  *
1795  * Some notable things that are initialized here:
1796  *
1797  * - Display Core, both software and hardware
1798  * - DC modules that we need (freesync and color management)
1799  * - DRM software states
1800  * - Interrupt sources and handlers
1801  * - Vblank support
1802  * - Debug FS entries, if enabled
1803  */
1804 static int dm_hw_init(void *handle)
1805 {
1806         struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1807         /* Create DAL display manager */
1808         amdgpu_dm_init(adev);
1809         amdgpu_dm_hpd_init(adev);
1810
1811         return 0;
1812 }
1813
1814 /**
1815  * dm_hw_fini() - Teardown DC device
1816  * @handle: The base driver device containing the amdgpu_dm device.
1817  *
1818  * Teardown components within &struct amdgpu_display_manager that require
1819  * cleanup. This involves cleaning up the DRM device, DC, and any modules that
1820  * were loaded. Also flush IRQ workqueues and disable them.
1821  */
1822 static int dm_hw_fini(void *handle)
1823 {
1824         struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1825
1826         amdgpu_dm_hpd_fini(adev);
1827
1828         amdgpu_dm_irq_fini(adev);
1829         amdgpu_dm_fini(adev);
1830         return 0;
1831 }
1832
1833
1834 static int dm_enable_vblank(struct drm_crtc *crtc);
1835 static void dm_disable_vblank(struct drm_crtc *crtc);
1836
1837 static void dm_gpureset_toggle_interrupts(struct amdgpu_device *adev,
1838                                  struct dc_state *state, bool enable)
1839 {
1840         enum dc_irq_source irq_source;
1841         struct amdgpu_crtc *acrtc;
1842         int rc = -EBUSY;
1843         int i = 0;
1844
1845         for (i = 0; i < state->stream_count; i++) {
1846                 acrtc = get_crtc_by_otg_inst(
1847                                 adev, state->stream_status[i].primary_otg_inst);
1848
1849                 if (acrtc && state->stream_status[i].plane_count != 0) {
1850                         irq_source = IRQ_TYPE_PFLIP + acrtc->otg_inst;
1851                         rc = dc_interrupt_set(adev->dm.dc, irq_source, enable) ? 0 : -EBUSY;
1852                         DRM_DEBUG_VBL("crtc %d - pflip irq %sabling: r=%d\n",
1853                                       acrtc->crtc_id, enable ? "en" : "dis", rc);
1854                         if (rc)
1855                                 DRM_WARN("Failed to %s pflip interrupts\n",
1856                                          enable ? "enable" : "disable");
1857
1858                         if (enable) {
1859                                 rc = dm_enable_vblank(&acrtc->base);
1860                                 if (rc)
1861                                         DRM_WARN("Failed to enable vblank interrupts\n");
1862                         } else {
1863                                 dm_disable_vblank(&acrtc->base);
1864                         }
1865
1866                 }
1867         }
1868
1869 }
1870
1871 static enum dc_status amdgpu_dm_commit_zero_streams(struct dc *dc)
1872 {
1873         struct dc_state *context = NULL;
1874         enum dc_status res = DC_ERROR_UNEXPECTED;
1875         int i;
1876         struct dc_stream_state *del_streams[MAX_PIPES];
1877         int del_streams_count = 0;
1878
1879         memset(del_streams, 0, sizeof(del_streams));
1880
1881         context = dc_create_state(dc);
1882         if (context == NULL)
1883                 goto context_alloc_fail;
1884
1885         dc_resource_state_copy_construct_current(dc, context);
1886
1887         /* First, collect all streams currently in the context */
1888         for (i = 0; i < context->stream_count; i++) {
1889                 struct dc_stream_state *stream = context->streams[i];
1890
1891                 del_streams[del_streams_count++] = stream;
1892         }
1893
1894         /* Remove all planes for removed streams and then remove the streams */
1895         for (i = 0; i < del_streams_count; i++) {
1896                 if (!dc_rem_all_planes_for_stream(dc, del_streams[i], context)) {
1897                         res = DC_FAIL_DETACH_SURFACES;
1898                         goto fail;
1899                 }
1900
1901                 res = dc_remove_stream_from_ctx(dc, context, del_streams[i]);
1902                 if (res != DC_OK)
1903                         goto fail;
1904         }
1905
1906
1907         res = dc_validate_global_state(dc, context, false);
1908
1909         if (res != DC_OK) {
1910                 DRM_ERROR("%s:resource validation failed, dc_status:%d\n", __func__, res);
1911                 goto fail;
1912         }
1913
1914         res = dc_commit_state(dc, context);
1915
1916 fail:
1917         dc_release_state(context);
1918
1919 context_alloc_fail:
1920         return res;
1921 }
1922
1923 static int dm_suspend(void *handle)
1924 {
1925         struct amdgpu_device *adev = handle;
1926         struct amdgpu_display_manager *dm = &adev->dm;
1927         int ret = 0;
1928
1929         if (amdgpu_in_reset(adev)) {
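                /* Hold dc_lock across the GPU reset; the matching unlock is
                 * in dm_resume()'s amdgpu_in_reset() path.
                 */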
1930                 mutex_lock(&dm->dc_lock);
1931
1932 #if defined(CONFIG_DRM_AMD_DC_DCN)
1933                 dc_allow_idle_optimizations(adev->dm.dc, false);
1934 #endif
1935
1936                 dm->cached_dc_state = dc_copy_state(dm->dc->current_state);
1937
1938                 dm_gpureset_toggle_interrupts(adev, dm->cached_dc_state, false);
1939
1940                 amdgpu_dm_commit_zero_streams(dm->dc);
1941
1942                 amdgpu_dm_irq_suspend(adev);
1943
1944                 return ret;
1945         }
1946
1947 #ifdef CONFIG_DRM_AMD_SECURE_DISPLAY
1948         amdgpu_dm_crtc_secure_display_suspend(adev);
1949 #endif
1950         WARN_ON(adev->dm.cached_state);
1951         adev->dm.cached_state = drm_atomic_helper_suspend(adev_to_drm(adev));
1952
1953         s3_handle_mst(adev_to_drm(adev), true);
1954
1955         amdgpu_dm_irq_suspend(adev);
1956
1957
1958         dc_set_power_state(dm->dc, DC_ACPI_CM_POWER_STATE_D3);
1959
1960         return 0;
1961 }
1962
1963 static struct amdgpu_dm_connector *
1964 amdgpu_dm_find_first_crtc_matching_connector(struct drm_atomic_state *state,
1965                                              struct drm_crtc *crtc)
1966 {
1967         uint32_t i;
1968         struct drm_connector_state *new_con_state;
1969         struct drm_connector *connector;
1970         struct drm_crtc *crtc_from_state;
1971
1972         for_each_new_connector_in_state(state, connector, new_con_state, i) {
1973                 crtc_from_state = new_con_state->crtc;
1974
1975                 if (crtc_from_state == crtc)
1976                         return to_amdgpu_dm_connector(connector);
1977         }
1978
1979         return NULL;
1980 }
1981
1982 static void emulated_link_detect(struct dc_link *link)
1983 {
1984         struct dc_sink_init_data sink_init_data = { 0 };
1985         struct display_sink_capability sink_caps = { 0 };
1986         enum dc_edid_status edid_status;
1987         struct dc_context *dc_ctx = link->ctx;
1988         struct dc_sink *sink = NULL;
1989         struct dc_sink *prev_sink = NULL;
1990
1991         link->type = dc_connection_none;
1992         prev_sink = link->local_sink;
1993
1994         if (prev_sink)
1995                 dc_sink_release(prev_sink);
1996
1997         switch (link->connector_signal) {
1998         case SIGNAL_TYPE_HDMI_TYPE_A: {
1999                 sink_caps.transaction_type = DDC_TRANSACTION_TYPE_I2C;
2000                 sink_caps.signal = SIGNAL_TYPE_HDMI_TYPE_A;
2001                 break;
2002         }
2003
2004         case SIGNAL_TYPE_DVI_SINGLE_LINK: {
2005                 sink_caps.transaction_type = DDC_TRANSACTION_TYPE_I2C;
2006                 sink_caps.signal = SIGNAL_TYPE_DVI_SINGLE_LINK;
2007                 break;
2008         }
2009
2010         case SIGNAL_TYPE_DVI_DUAL_LINK: {
2011                 sink_caps.transaction_type = DDC_TRANSACTION_TYPE_I2C;
2012                 sink_caps.signal = SIGNAL_TYPE_DVI_DUAL_LINK;
2013                 break;
2014         }
2015
2016         case SIGNAL_TYPE_LVDS: {
2017                 sink_caps.transaction_type = DDC_TRANSACTION_TYPE_I2C;
2018                 sink_caps.signal = SIGNAL_TYPE_LVDS;
2019                 break;
2020         }
2021
2022         case SIGNAL_TYPE_EDP: {
2023                 sink_caps.transaction_type =
2024                         DDC_TRANSACTION_TYPE_I2C_OVER_AUX;
2025                 sink_caps.signal = SIGNAL_TYPE_EDP;
2026                 break;
2027         }
2028
2029         case SIGNAL_TYPE_DISPLAY_PORT: {
2030                 sink_caps.transaction_type =
2031                         DDC_TRANSACTION_TYPE_I2C_OVER_AUX;
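                /* Note: the emulated sink reports DP as a virtual signal */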
2032                 sink_caps.signal = SIGNAL_TYPE_VIRTUAL;
2033                 break;
2034         }
2035
2036         default:
2037                 DC_ERROR("Invalid connector type! signal:%d\n",
2038                         link->connector_signal);
2039                 return;
2040         }
2041
2042         sink_init_data.link = link;
2043         sink_init_data.sink_signal = sink_caps.signal;
2044
2045         sink = dc_sink_create(&sink_init_data);
2046         if (!sink) {
2047                 DC_ERROR("Failed to create sink!\n");
2048                 return;
2049         }
2050
2051         /* dc_sink_create returns a new reference */
2052         link->local_sink = sink;
2053
2054         edid_status = dm_helpers_read_local_edid(
2055                         link->ctx,
2056                         link,
2057                         sink);
2058
2059         if (edid_status != EDID_OK)
2060                 DC_ERROR("Failed to read EDID\n");
2061
2062 }
2063
2064 static void dm_gpureset_commit_state(struct dc_state *dc_state,
2065                                      struct amdgpu_display_manager *dm)
2066 {
2067         struct {
2068                 struct dc_surface_update surface_updates[MAX_SURFACES];
2069                 struct dc_plane_info plane_infos[MAX_SURFACES];
2070                 struct dc_scaling_info scaling_infos[MAX_SURFACES];
2071                 struct dc_flip_addrs flip_addrs[MAX_SURFACES];
2072                 struct dc_stream_update stream_update;
2073         } *bundle;
2074         int k, m;
2075
2076         bundle = kzalloc(sizeof(*bundle), GFP_KERNEL);
2077
2078         if (!bundle) {
2079                 dm_error("Failed to allocate update bundle\n");
2080                 goto cleanup;
2081         }
2082
2083         for (k = 0; k < dc_state->stream_count; k++) {
2084                 bundle->stream_update.stream = dc_state->streams[k];
2085
2086                 for (m = 0; m < dc_state->stream_status[k].plane_count; m++) {
2087                         bundle->surface_updates[m].surface =
2088                                 dc_state->stream_status[k].plane_states[m];
2089                         bundle->surface_updates[m].surface->force_full_update =
2090                                 true;
2091                 }
2092                 dc_commit_updates_for_stream(
2093                         dm->dc, bundle->surface_updates,
2094                         dc_state->stream_status[k].plane_count,
2095                         dc_state->streams[k], &bundle->stream_update, dc_state);
2096         }
2097
2098 cleanup:
2099         kfree(bundle);
2100
2101         return;
2102 }
2103
2104 static void dm_set_dpms_off(struct dc_link *link)
2105 {
2106         struct dc_stream_state *stream_state;
2107         struct amdgpu_dm_connector *aconnector = link->priv;
2108         struct amdgpu_device *adev = drm_to_adev(aconnector->base.dev);
2109         struct dc_stream_update stream_update;
2110         bool dpms_off = true;
2111
2112         memset(&stream_update, 0, sizeof(stream_update));
2113         stream_update.dpms_off = &dpms_off;
2114
2115         mutex_lock(&adev->dm.dc_lock);
2116         stream_state = dc_stream_find_from_link(link);
2117
2118         if (stream_state == NULL) {
2119                 DRM_DEBUG_DRIVER("Error finding stream state associated with link!\n");
2120                 mutex_unlock(&adev->dm.dc_lock);
2121                 return;
2122         }
2123
2124         stream_update.stream = stream_state;
2125         dc_commit_updates_for_stream(stream_state->ctx->dc, NULL, 0,
2126                                      stream_state, &stream_update,
2127                                      stream_state->ctx->dc->current_state);
2128         mutex_unlock(&adev->dm.dc_lock);
2129 }
2130
2131 static int dm_resume(void *handle)
2132 {
2133         struct amdgpu_device *adev = handle;
2134         struct drm_device *ddev = adev_to_drm(adev);
2135         struct amdgpu_display_manager *dm = &adev->dm;
2136         struct amdgpu_dm_connector *aconnector;
2137         struct drm_connector *connector;
2138         struct drm_connector_list_iter iter;
2139         struct drm_crtc *crtc;
2140         struct drm_crtc_state *new_crtc_state;
2141         struct dm_crtc_state *dm_new_crtc_state;
2142         struct drm_plane *plane;
2143         struct drm_plane_state *new_plane_state;
2144         struct dm_plane_state *dm_new_plane_state;
2145         struct dm_atomic_state *dm_state = to_dm_atomic_state(dm->atomic_obj.state);
2146         enum dc_connection_type new_connection_type = dc_connection_none;
2147         struct dc_state *dc_state;
2148         int i, r, j;
2149
2150         if (amdgpu_in_reset(adev)) {
2151                 dc_state = dm->cached_dc_state;
2152
2153                 r = dm_dmub_hw_init(adev);
2154                 if (r)
2155                         DRM_ERROR("DMUB interface failed to initialize: status=%d\n", r);
2156
2157                 dc_set_power_state(dm->dc, DC_ACPI_CM_POWER_STATE_D0);
2158                 dc_resume(dm->dc);
2159
2160                 amdgpu_dm_irq_resume_early(adev);
2161
2162                 for (i = 0; i < dc_state->stream_count; i++) {
2163                         dc_state->streams[i]->mode_changed = true;
2164                         for (j = 0; j < dc_state->stream_status[i].plane_count; j++) {
2165                                 dc_state->stream_status[i].plane_states[j]->update_flags.raw
2166                                         = 0xffffffff;
2167                         }
2168                 }
2169
2170                 WARN_ON(!dc_commit_state(dm->dc, dc_state));
2171
2172                 dm_gpureset_commit_state(dm->cached_dc_state, dm);
2173
2174                 dm_gpureset_toggle_interrupts(adev, dm->cached_dc_state, true);
2175
2176                 dc_release_state(dm->cached_dc_state);
2177                 dm->cached_dc_state = NULL;
2178
2179                 amdgpu_dm_irq_resume_late(adev);
2180
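                /* Drop the dc_lock taken in dm_suspend()'s reset path. */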
2181                 mutex_unlock(&dm->dc_lock);
2182
2183                 return 0;
2184         }
2185         /* Recreate dc_state - DC invalidates it when setting power state to S3. */
2186         dc_release_state(dm_state->context);
2187         dm_state->context = dc_create_state(dm->dc);
2188         /* TODO: Remove dc_state->dccg, use dc->dccg directly. */
2189         dc_resource_state_construct(dm->dc, dm_state->context);
2190
2191         /* Before powering on DC we need to re-initialize DMUB. */
2192         r = dm_dmub_hw_init(adev);
2193         if (r)
2194                 DRM_ERROR("DMUB interface failed to initialize: status=%d\n", r);
2195
2196         /* power on hardware */
2197         dc_set_power_state(dm->dc, DC_ACPI_CM_POWER_STATE_D0);
2198
2199         /* program HPD filter */
2200         dc_resume(dm->dc);
2201
2202         /*
2203          * early enable HPD Rx IRQ, should be done before set mode as short
2204          * pulse interrupts are used for MST
2205          */
2206         amdgpu_dm_irq_resume_early(adev);
2207
2208         /* On resume we need to rewrite the MSTM control bits to enable MST */
2209         s3_handle_mst(ddev, false);
2210
2211         /* Do detection */
2212         drm_connector_list_iter_begin(ddev, &iter);
2213         drm_for_each_connector_iter(connector, &iter) {
2214                 aconnector = to_amdgpu_dm_connector(connector);
2215
2216                 /*
2217                  * This is the case when traversing through already created
2218                  * MST connectors; they should be skipped.
2219                  */
2220                 if (aconnector->mst_port)
2221                         continue;
2222
2223                 mutex_lock(&aconnector->hpd_lock);
2224                 if (!dc_link_detect_sink(aconnector->dc_link, &new_connection_type))
2225                         DRM_ERROR("KMS: Failed to detect connector\n");
2226
2227                 if (aconnector->base.force && new_connection_type == dc_connection_none)
2228                         emulated_link_detect(aconnector->dc_link);
2229                 else
2230                         dc_link_detect(aconnector->dc_link, DETECT_REASON_HPD);
2231
2232                 if (aconnector->fake_enable && aconnector->dc_link->local_sink)
2233                         aconnector->fake_enable = false;
2234
2235                 if (aconnector->dc_sink)
2236                         dc_sink_release(aconnector->dc_sink);
2237                 aconnector->dc_sink = NULL;
2238                 amdgpu_dm_update_connector_after_detect(aconnector);
2239                 mutex_unlock(&aconnector->hpd_lock);
2240         }
2241         drm_connector_list_iter_end(&iter);
2242
2243         /* Force mode set in atomic commit */
2244         for_each_new_crtc_in_state(dm->cached_state, crtc, new_crtc_state, i)
2245                 new_crtc_state->active_changed = true;
2246
2247         /*
2248          * atomic_check is expected to create the dc states. We need to release
2249          * them here, since they were duplicated as part of the suspend
2250          * procedure.
2251          */
2252         for_each_new_crtc_in_state(dm->cached_state, crtc, new_crtc_state, i) {
2253                 dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
2254                 if (dm_new_crtc_state->stream) {
2255                         WARN_ON(kref_read(&dm_new_crtc_state->stream->refcount) > 1);
2256                         dc_stream_release(dm_new_crtc_state->stream);
2257                         dm_new_crtc_state->stream = NULL;
2258                 }
2259         }
2260
2261         for_each_new_plane_in_state(dm->cached_state, plane, new_plane_state, i) {
2262                 dm_new_plane_state = to_dm_plane_state(new_plane_state);
2263                 if (dm_new_plane_state->dc_state) {
2264                         WARN_ON(kref_read(&dm_new_plane_state->dc_state->refcount) > 1);
2265                         dc_plane_state_release(dm_new_plane_state->dc_state);
2266                         dm_new_plane_state->dc_state = NULL;
2267                 }
2268         }
2269
2270         drm_atomic_helper_resume(ddev, dm->cached_state);
2271
2272         dm->cached_state = NULL;
2273
2274 #ifdef CONFIG_DRM_AMD_SECURE_DISPLAY
2275         amdgpu_dm_crtc_secure_display_resume(adev);
2276 #endif
2277
2278         amdgpu_dm_irq_resume_late(adev);
2279
2280         amdgpu_dm_smu_write_watermarks_table(adev);
2281
2282         return 0;
2283 }
2284
2285 /**
2286  * DOC: DM Lifecycle
2287  *
2288  * DM (and consequently DC) is registered in the amdgpu base driver as a IP
2289  * block. When CONFIG_DRM_AMD_DC is enabled, the DM device IP block is added to
2290  * the base driver's device list to be initialized and torn down accordingly.
2291  *
2292  * The functions to do so are provided as hooks in &struct amd_ip_funcs.
2293  */
2294
2295 static const struct amd_ip_funcs amdgpu_dm_funcs = {
2296         .name = "dm",
2297         .early_init = dm_early_init,
2298         .late_init = dm_late_init,
2299         .sw_init = dm_sw_init,
2300         .sw_fini = dm_sw_fini,
2301         .hw_init = dm_hw_init,
2302         .hw_fini = dm_hw_fini,
2303         .suspend = dm_suspend,
2304         .resume = dm_resume,
2305         .is_idle = dm_is_idle,
2306         .wait_for_idle = dm_wait_for_idle,
2307         .check_soft_reset = dm_check_soft_reset,
2308         .soft_reset = dm_soft_reset,
2309         .set_clockgating_state = dm_set_clockgating_state,
2310         .set_powergating_state = dm_set_powergating_state,
2311 };
2312
2313 const struct amdgpu_ip_block_version dm_ip_block =
2314 {
2315         .type = AMD_IP_BLOCK_TYPE_DCE,
2316         .major = 1,
2317         .minor = 0,
2318         .rev = 0,
2319         .funcs = &amdgpu_dm_funcs,
2320 };
2321
2322
2323 /**
2324  * DOC: atomic
2325  *
2326  * *WIP*
2327  */
2328
2329 static const struct drm_mode_config_funcs amdgpu_dm_mode_funcs = {
2330         .fb_create = amdgpu_display_user_framebuffer_create,
2331         .get_format_info = amd_get_format_info,
2332         .output_poll_changed = drm_fb_helper_output_poll_changed,
2333         .atomic_check = amdgpu_dm_atomic_check,
2334         .atomic_commit = drm_atomic_helper_commit,
2335 };
2336
2337 static struct drm_mode_config_helper_funcs amdgpu_dm_mode_config_helperfuncs = {
2338         .atomic_commit_tail = amdgpu_dm_atomic_commit_tail
2339 };
2340
2341 static void update_connector_ext_caps(struct amdgpu_dm_connector *aconnector)
2342 {
2343         u32 max_cll, min_cll, max, min, q, r;
2344         struct amdgpu_dm_backlight_caps *caps;
2345         struct amdgpu_display_manager *dm;
2346         struct drm_connector *conn_base;
2347         struct amdgpu_device *adev;
2348         struct dc_link *link = NULL;
2349         static const u8 pre_computed_values[] = {
2350                 50, 51, 52, 53, 55, 56, 57, 58, 59, 61, 62, 63, 65, 66, 68, 69,
2351                 71, 72, 74, 75, 77, 79, 81, 82, 84, 86, 88, 90, 92, 94, 96, 98};
2352
2353         if (!aconnector || !aconnector->dc_link)
2354                 return;
2355
2356         link = aconnector->dc_link;
2357         if (link->connector_signal != SIGNAL_TYPE_EDP)
2358                 return;
2359
2360         conn_base = &aconnector->base;
2361         adev = drm_to_adev(conn_base->dev);
2362         dm = &adev->dm;
2363         caps = &dm->backlight_caps;
2364         caps->ext_caps = &aconnector->dc_link->dpcd_sink_ext_caps;
2365         caps->aux_support = false;
2366         max_cll = conn_base->hdr_sink_metadata.hdmi_type1.max_cll;
2367         min_cll = conn_base->hdr_sink_metadata.hdmi_type1.min_cll;
2368
2369         if (caps->ext_caps->bits.oled == 1 ||
2370             caps->ext_caps->bits.sdr_aux_backlight_control == 1 ||
2371             caps->ext_caps->bits.hdr_aux_backlight_control == 1)
2372                 caps->aux_support = true;
2373
2374         if (amdgpu_backlight == 0)
2375                 caps->aux_support = false;
2376         else if (amdgpu_backlight == 1)
2377                 caps->aux_support = true;
2378
2379         /* From the specification (CTA-861-G), for calculating the maximum
2380          * luminance we need to use:
2381          *      Luminance = 50*2**(CV/32)
2382          * where CV is a one-byte value.
2383          * Calculating this expression would require floating-point precision;
2384          * to avoid that complexity, we take advantage of the fact that CV is
2385          * divided by a constant. From Euclid's division algorithm, we know that
2386          * CV can be written as: CV = 32*q + r. Next, we replace CV in the
2387          * Luminance expression and get 50*(2**q)*(2**(r/32)), hence we just
2388          * need to pre-compute the values of 50*2**(r/32). For that we used
2389          * the following Ruby line:
2390          *      (0...32).each {|cv| puts (50*2**(cv/32.0)).round}
2391          * The results of the above expression can be verified against
2392          * pre_computed_values.
2393          */
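        /* Worked example (hypothetical sink values): max_cll = 100 gives
         * q = 100 >> 5 = 3 and r = 100 % 32 = 4, so
         * max = (1 << 3) * pre_computed_values[4] = 8 * 55 = 440,
         * close to the exact 50*2**(100/32) ~= 436.
         */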
2394         q = max_cll >> 5;
2395         r = max_cll % 32;
2396         max = (1 << q) * pre_computed_values[r];
2397
2398         /* min luminance: maxLum * (CV/255)^2 / 100 */
2399         q = DIV_ROUND_CLOSEST(min_cll, 255);
2400         min = max * DIV_ROUND_CLOSEST((q * q), 100);
2401
2402         caps->aux_max_input_signal = max;
2403         caps->aux_min_input_signal = min;
2404 }
2405
2406 void amdgpu_dm_update_connector_after_detect(
2407                 struct amdgpu_dm_connector *aconnector)
2408 {
2409         struct drm_connector *connector = &aconnector->base;
2410         struct drm_device *dev = connector->dev;
2411         struct dc_sink *sink;
2412
2413         /* MST handled by drm_mst framework */
2414         if (aconnector->mst_mgr.mst_state)
2415                 return;
2416
2417         sink = aconnector->dc_link->local_sink;
2418         if (sink)
2419                 dc_sink_retain(sink);
2420
2421         /*
2422          * An EDID-managed connector gets its first update only in the mode_valid
2423          * hook; the connector sink is then set to either a fake or a physical
2424          * sink, depending on the link status. Skip if already done during boot.
2425          */
2426         if (aconnector->base.force != DRM_FORCE_UNSPECIFIED
2427                         && aconnector->dc_em_sink) {
2428
2429                 /*
2430                  * For headless S3 resume, use the emulated sink (dc_em_sink) to
2431                  * fake a stream, because on resume connector->sink is set to NULL
2432                  */
2433                 mutex_lock(&dev->mode_config.mutex);
2434
2435                 if (sink) {
2436                         if (aconnector->dc_sink) {
2437                                 amdgpu_dm_update_freesync_caps(connector, NULL);
2438                                 /*
2439                                  * The retain and release below bump up the sink's
2440                                  * refcount because the link no longer points to it
2441                                  * after disconnect; otherwise the next crtc-to-connector
2442                                  * reshuffle by the UMD would cause an unwanted dc_sink release
2443                                  */
2444                                 dc_sink_release(aconnector->dc_sink);
2445                         }
2446                         aconnector->dc_sink = sink;
2447                         dc_sink_retain(aconnector->dc_sink);
2448                         amdgpu_dm_update_freesync_caps(connector,
2449                                         aconnector->edid);
2450                 } else {
2451                         amdgpu_dm_update_freesync_caps(connector, NULL);
2452                         if (!aconnector->dc_sink) {
2453                                 aconnector->dc_sink = aconnector->dc_em_sink;
2454                                 dc_sink_retain(aconnector->dc_sink);
2455                         }
2456                 }
2457
2458                 mutex_unlock(&dev->mode_config.mutex);
2459
2460                 if (sink)
2461                         dc_sink_release(sink);
2462                 return;
2463         }
2464
2465         /*
2466          * TODO: temporary guard to look for proper fix
2467          * if this sink is MST sink, we should not do anything
2468          */
2469         if (sink && sink->sink_signal == SIGNAL_TYPE_DISPLAY_PORT_MST) {
2470                 dc_sink_release(sink);
2471                 return;
2472         }
2473
2474         if (aconnector->dc_sink == sink) {
2475                 /*
2476                  * We got a DP short pulse (Link Loss, DP CTS, etc...).
2477                  * Do nothing!!
2478                  */
2479                 DRM_DEBUG_DRIVER("DCHPD: connector_id=%d: dc_sink didn't change.\n",
2480                                 aconnector->connector_id);
2481                 if (sink)
2482                         dc_sink_release(sink);
2483                 return;
2484         }
2485
2486         DRM_DEBUG_DRIVER("DCHPD: connector_id=%d: Old sink=%p New sink=%p\n",
2487                 aconnector->connector_id, aconnector->dc_sink, sink);
2488
2489         mutex_lock(&dev->mode_config.mutex);
2490
2491         /*
2492          * 1. Update status of the drm connector
2493          * 2. Send an event and let userspace tell us what to do
2494          */
2495         if (sink) {
2496                 /*
2497                  * TODO: check if we still need the S3 mode update workaround.
2498                  * If yes, put it here.
2499                  */
2500                 if (aconnector->dc_sink) {
2501                         amdgpu_dm_update_freesync_caps(connector, NULL);
2502                         dc_sink_release(aconnector->dc_sink);
2503                 }
2504
2505                 aconnector->dc_sink = sink;
2506                 dc_sink_retain(aconnector->dc_sink);
2507                 if (sink->dc_edid.length == 0) {
2508                         aconnector->edid = NULL;
2509                         if (aconnector->dc_link->aux_mode) {
2510                                 drm_dp_cec_unset_edid(
2511                                         &aconnector->dm_dp_aux.aux);
2512                         }
2513                 } else {
2514                         aconnector->edid =
2515                                 (struct edid *)sink->dc_edid.raw_edid;
2516
2517                         drm_connector_update_edid_property(connector,
2518                                                            aconnector->edid);
2519                         if (aconnector->dc_link->aux_mode)
2520                                 drm_dp_cec_set_edid(&aconnector->dm_dp_aux.aux,
2521                                                     aconnector->edid);
2522                 }
2523
2524                 amdgpu_dm_update_freesync_caps(connector, aconnector->edid);
2525                 update_connector_ext_caps(aconnector);
2526         } else {
2527                 drm_dp_cec_unset_edid(&aconnector->dm_dp_aux.aux);
2528                 amdgpu_dm_update_freesync_caps(connector, NULL);
2529                 drm_connector_update_edid_property(connector, NULL);
2530                 aconnector->num_modes = 0;
2531                 dc_sink_release(aconnector->dc_sink);
2532                 aconnector->dc_sink = NULL;
2533                 aconnector->edid = NULL;
2534 #ifdef CONFIG_DRM_AMD_DC_HDCP
2535                 /* Set CP to DESIRED if it was ENABLED, so we can re-enable it again on hotplug */
2536                 if (connector->state->content_protection == DRM_MODE_CONTENT_PROTECTION_ENABLED)
2537                         connector->state->content_protection = DRM_MODE_CONTENT_PROTECTION_DESIRED;
2538 #endif
2539         }
2540
2541         mutex_unlock(&dev->mode_config.mutex);
2542
2543         update_subconnector_property(aconnector);
2544
2545         if (sink)
2546                 dc_sink_release(sink);
2547 }
2548
2549 static void handle_hpd_irq(void *param)
2550 {
2551         struct amdgpu_dm_connector *aconnector = (struct amdgpu_dm_connector *)param;
2552         struct drm_connector *connector = &aconnector->base;
2553         struct drm_device *dev = connector->dev;
2554         enum dc_connection_type new_connection_type = dc_connection_none;
2555 #ifdef CONFIG_DRM_AMD_DC_HDCP
2556         struct amdgpu_device *adev = drm_to_adev(dev);
2557         struct dm_connector_state *dm_con_state = to_dm_connector_state(connector->state);
2558 #endif
2559
2560         /*
2561          * In case of failure, or for MST, there is no need to update the connector
2562          * status or notify the OS, since MST handles this in its own context.
2563          */
2564         mutex_lock(&aconnector->hpd_lock);
2565
2566 #ifdef CONFIG_DRM_AMD_DC_HDCP
2567         if (adev->dm.hdcp_workqueue) {
2568                 hdcp_reset_display(adev->dm.hdcp_workqueue, aconnector->dc_link->link_index);
2569                 dm_con_state->update_hdcp = true;
2570         }
2571 #endif
2572         if (aconnector->fake_enable)
2573                 aconnector->fake_enable = false;
2574
2575         if (!dc_link_detect_sink(aconnector->dc_link, &new_connection_type))
2576                 DRM_ERROR("KMS: Failed to detect connector\n");
2577
2578         if (aconnector->base.force && new_connection_type == dc_connection_none) {
2579                 emulated_link_detect(aconnector->dc_link);
2580
2581
2582                 drm_modeset_lock_all(dev);
2583                 dm_restore_drm_connector_state(dev, connector);
2584                 drm_modeset_unlock_all(dev);
2585
2586                 if (aconnector->base.force == DRM_FORCE_UNSPECIFIED)
2587                         drm_kms_helper_hotplug_event(dev);
2588
2589         } else if (dc_link_detect(aconnector->dc_link, DETECT_REASON_HPD)) {
2590                 if (new_connection_type == dc_connection_none &&
2591                     aconnector->dc_link->type == dc_connection_none)
2592                         dm_set_dpms_off(aconnector->dc_link);
2593
2594                 amdgpu_dm_update_connector_after_detect(aconnector);
2595
2596                 drm_modeset_lock_all(dev);
2597                 dm_restore_drm_connector_state(dev, connector);
2598                 drm_modeset_unlock_all(dev);
2599
2600                 if (aconnector->base.force == DRM_FORCE_UNSPECIFIED)
2601                         drm_kms_helper_hotplug_event(dev);
2602         }
2603         mutex_unlock(&aconnector->hpd_lock);
2604
2605 }
2606
2607 static void dm_handle_hpd_rx_irq(struct amdgpu_dm_connector *aconnector)
2608 {
2609         uint8_t esi[DP_PSR_ERROR_STATUS - DP_SINK_COUNT_ESI] = { 0 };
2610         uint8_t dret;
2611         bool new_irq_handled = false;
2612         int dpcd_addr;
2613         int dpcd_bytes_to_read;
2614
2615         const int max_process_count = 30;
2616         int process_count = 0;
2617
2618         const struct dc_link_status *link_status = dc_link_get_status(aconnector->dc_link);
2619
2620         if (link_status->dpcd_caps->dpcd_rev.raw < 0x12) {
2621                 dpcd_bytes_to_read = DP_LANE0_1_STATUS - DP_SINK_COUNT;
2622                 /* DPCD 0x200 - 0x201 for downstream IRQ */
2623                 dpcd_addr = DP_SINK_COUNT;
2624         } else {
2625                 dpcd_bytes_to_read = DP_PSR_ERROR_STATUS - DP_SINK_COUNT_ESI;
2626                 /* DPCD 0x2002 - 0x2005 for downstream IRQ */
2627                 dpcd_addr = DP_SINK_COUNT_ESI;
2628         }
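        /* In the ESI case, the ACK further below writes esi[1..] back to
         * dpcd_addr + 1 (DPCD 0x2003) to acknowledge the serviced request.
         */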
2629
2630         dret = drm_dp_dpcd_read(
2631                 &aconnector->dm_dp_aux.aux,
2632                 dpcd_addr,
2633                 esi,
2634                 dpcd_bytes_to_read);
2635
2636         while (dret == dpcd_bytes_to_read &&
2637                 process_count < max_process_count) {
2638                 uint8_t retry;
2639                 dret = 0;
2640
2641                 process_count++;
2642
2643                 DRM_DEBUG_DRIVER("ESI %02x %02x %02x\n", esi[0], esi[1], esi[2]);
2644                 /* handle HPD short pulse irq */
2645                 if (aconnector->mst_mgr.mst_state)
2646                         drm_dp_mst_hpd_irq(
2647                                 &aconnector->mst_mgr,
2648                                 esi,
2649                                 &new_irq_handled);
2650
2651                 if (new_irq_handled) {
2652                         /* ACK at DPCD to notify downstream */
2653                         const int ack_dpcd_bytes_to_write =
2654                                 dpcd_bytes_to_read - 1;
2655
2656                         for (retry = 0; retry < 3; retry++) {
2657                                 uint8_t wret;
2658
2659                                 wret = drm_dp_dpcd_write(
2660                                         &aconnector->dm_dp_aux.aux,
2661                                         dpcd_addr + 1,
2662                                         &esi[1],
2663                                         ack_dpcd_bytes_to_write);
2664                                 if (wret == ack_dpcd_bytes_to_write)
2665                                         break;
2666                         }
2667
2668                         /* check if there is new irq to be handled */
2669                         dret = drm_dp_dpcd_read(
2670                                 &aconnector->dm_dp_aux.aux,
2671                                 dpcd_addr,
2672                                 esi,
2673                                 dpcd_bytes_to_read);
2674
2675                         new_irq_handled = false;
2676                 } else {
2677                         break;
2678                 }
2679         }
2680
2681         if (process_count == max_process_count)
2682                 DRM_DEBUG_DRIVER("Loop exceeded max iterations\n");
2683 }
2684
2685 static void handle_hpd_rx_irq(void *param)
2686 {
2687         struct amdgpu_dm_connector *aconnector = (struct amdgpu_dm_connector *)param;
2688         struct drm_connector *connector = &aconnector->base;
2689         struct drm_device *dev = connector->dev;
2690         struct dc_link *dc_link = aconnector->dc_link;
2691         bool is_mst_root_connector = aconnector->mst_mgr.mst_state;
2692         bool result = false;
2693         enum dc_connection_type new_connection_type = dc_connection_none;
2694         struct amdgpu_device *adev = drm_to_adev(dev);
2695         union hpd_irq_data hpd_irq_data;
2696
2697         memset(&hpd_irq_data, 0, sizeof(hpd_irq_data));
2698
2699         /*
2700          * TODO: temporarily hold a mutex so the hpd interrupt does not run into
2701          * a gpio conflict; once an i2c helper is implemented, this mutex should
2702          * be retired.
2703          */
2704         if (dc_link->type != dc_connection_mst_branch)
2705                 mutex_lock(&aconnector->hpd_lock);
2706
2707         read_hpd_rx_irq_data(dc_link, &hpd_irq_data);
2708
2709         if ((dc_link->cur_link_settings.lane_count != LANE_COUNT_UNKNOWN) ||
2710                 (dc_link->type == dc_connection_mst_branch)) {
2711                 if (hpd_irq_data.bytes.device_service_irq.bits.UP_REQ_MSG_RDY) {
2712                         result = true;
2713                         dm_handle_hpd_rx_irq(aconnector);
2714                         goto out;
2715                 } else if (hpd_irq_data.bytes.device_service_irq.bits.DOWN_REP_MSG_RDY) {
2716                         result = false;
2717                         dm_handle_hpd_rx_irq(aconnector);
2718                         goto out;
2719                 }
2720         }
2721
2722         mutex_lock(&adev->dm.dc_lock);
2723 #ifdef CONFIG_DRM_AMD_DC_HDCP
2724         result = dc_link_handle_hpd_rx_irq(dc_link, &hpd_irq_data, NULL);
2725 #else
2726         result = dc_link_handle_hpd_rx_irq(dc_link, NULL, NULL);
2727 #endif
2728         mutex_unlock(&adev->dm.dc_lock);
2729
2730 out:
2731         if (result && !is_mst_root_connector) {
2732                 /* Downstream Port status changed. */
2733                 if (!dc_link_detect_sink(dc_link, &new_connection_type))
2734                         DRM_ERROR("KMS: Failed to detect connector\n");
2735
2736                 if (aconnector->base.force && new_connection_type == dc_connection_none) {
2737                         emulated_link_detect(dc_link);
2738
2739                         if (aconnector->fake_enable)
2740                                 aconnector->fake_enable = false;
2741
2742                         amdgpu_dm_update_connector_after_detect(aconnector);
2743
2744
2745                         drm_modeset_lock_all(dev);
2746                         dm_restore_drm_connector_state(dev, connector);
2747                         drm_modeset_unlock_all(dev);
2748
2749                         drm_kms_helper_hotplug_event(dev);
2750                 } else if (dc_link_detect(dc_link, DETECT_REASON_HPDRX)) {
2751
2752                         if (aconnector->fake_enable)
2753                                 aconnector->fake_enable = false;
2754
2755                         amdgpu_dm_update_connector_after_detect(aconnector);
2756
2757
2758                         drm_modeset_lock_all(dev);
2759                         dm_restore_drm_connector_state(dev, connector);
2760                         drm_modeset_unlock_all(dev);
2761
2762                         drm_kms_helper_hotplug_event(dev);
2763                 }
2764         }
2765 #ifdef CONFIG_DRM_AMD_DC_HDCP
2766         if (hpd_irq_data.bytes.device_service_irq.bits.CP_IRQ) {
2767                 if (adev->dm.hdcp_workqueue)
2768                         hdcp_handle_cpirq(adev->dm.hdcp_workqueue, aconnector->base.index);
2769         }
2770 #endif
2771
2772         if (dc_link->type != dc_connection_mst_branch) {
2773                 drm_dp_cec_irq(&aconnector->dm_dp_aux.aux);
2774                 mutex_unlock(&aconnector->hpd_lock);
2775         }
2776 }
2777
2778 static void register_hpd_handlers(struct amdgpu_device *adev)
2779 {
2780         struct drm_device *dev = adev_to_drm(adev);
2781         struct drm_connector *connector;
2782         struct amdgpu_dm_connector *aconnector;
2783         const struct dc_link *dc_link;
2784         struct dc_interrupt_params int_params = {0};
2785
2786         int_params.requested_polarity = INTERRUPT_POLARITY_DEFAULT;
2787         int_params.current_polarity = INTERRUPT_POLARITY_DEFAULT;
2788
2789         list_for_each_entry(connector,
2790                         &dev->mode_config.connector_list, head) {
2791
2792                 aconnector = to_amdgpu_dm_connector(connector);
2793                 dc_link = aconnector->dc_link;
2794
2795                 if (dc_link->irq_source_hpd != DC_IRQ_SOURCE_INVALID) {
2796                         int_params.int_context = INTERRUPT_LOW_IRQ_CONTEXT;
2797                         int_params.irq_source = dc_link->irq_source_hpd;
2798
2799                         amdgpu_dm_irq_register_interrupt(adev, &int_params,
2800                                         handle_hpd_irq,
2801                                         (void *) aconnector);
2802                 }
2803
2804                 if (dc_link->irq_source_hpd_rx != DC_IRQ_SOURCE_INVALID) {
2806                         /* Also register for DP short pulse (hpd_rx). */
2807                         int_params.int_context = INTERRUPT_LOW_IRQ_CONTEXT;
2808                         int_params.irq_source = dc_link->irq_source_hpd_rx;
2809
2810                         amdgpu_dm_irq_register_interrupt(adev, &int_params,
2811                                         handle_hpd_rx_irq,
2812                                         (void *) aconnector);
2813                 }
2814         }
2815 }
2816
2817 #if defined(CONFIG_DRM_AMD_DC_SI)
2818 /* Register IRQ sources and initialize IRQ callbacks */
2819 static int dce60_register_irq_handlers(struct amdgpu_device *adev)
2820 {
2821         struct dc *dc = adev->dm.dc;
2822         struct common_irq_params *c_irq_params;
2823         struct dc_interrupt_params int_params = {0};
2824         int r;
2825         int i;
2826         unsigned client_id = AMDGPU_IRQ_CLIENTID_LEGACY;
2827
2828         int_params.requested_polarity = INTERRUPT_POLARITY_DEFAULT;
2829         int_params.current_polarity = INTERRUPT_POLARITY_DEFAULT;
2830
2831         /*
2832          * Actions of amdgpu_irq_add_id():
2833          * 1. Register a set() function with base driver.
2834          *    Base driver will call set() function to enable/disable an
2835          *    interrupt in DC hardware.
2836          * 2. Register amdgpu_dm_irq_handler().
2837          *    Base driver will call amdgpu_dm_irq_handler() for ALL interrupts
2838          *    coming from DC hardware.
2839          *    amdgpu_dm_irq_handler() will re-direct the interrupt to DC
2840          *    for acknowledging and handling. */
2841
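        /*
         * Each interrupt registered below is mapped back to a per-instance
         * slot, e.g. vblank_params[irq_source - DC_IRQ_SOURCE_VBLANK1]
         * holds the common_irq_params for CRTC i.
         */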
2842         /* Use VBLANK interrupt */
2843         for (i = 0; i < adev->mode_info.num_crtc; i++) {
2844                 r = amdgpu_irq_add_id(adev, client_id, i + 1, &adev->crtc_irq);
2845                 if (r) {
2846                         DRM_ERROR("Failed to add crtc irq id!\n");
2847                         return r;
2848                 }
2849
2850                 int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
2851                 int_params.irq_source =
2852                         dc_interrupt_to_irq_source(dc, i + 1, 0);
2853
2854                 c_irq_params = &adev->dm.vblank_params[int_params.irq_source - DC_IRQ_SOURCE_VBLANK1];
2855
2856                 c_irq_params->adev = adev;
2857                 c_irq_params->irq_src = int_params.irq_source;
2858
2859                 amdgpu_dm_irq_register_interrupt(adev, &int_params,
2860                                 dm_crtc_high_irq, c_irq_params);
2861         }
2862
2863         /* Use GRPH_PFLIP interrupt */
2864         for (i = VISLANDS30_IV_SRCID_D1_GRPH_PFLIP;
2865                         i <= VISLANDS30_IV_SRCID_D6_GRPH_PFLIP; i += 2) {
2866                 r = amdgpu_irq_add_id(adev, client_id, i, &adev->pageflip_irq);
2867                 if (r) {
2868                         DRM_ERROR("Failed to add page flip irq id!\n");
2869                         return r;
2870                 }
2871
2872                 int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
2873                 int_params.irq_source =
2874                         dc_interrupt_to_irq_source(dc, i, 0);
2875
2876                 c_irq_params = &adev->dm.pflip_params[int_params.irq_source - DC_IRQ_SOURCE_PFLIP_FIRST];
2877
2878                 c_irq_params->adev = adev;
2879                 c_irq_params->irq_src = int_params.irq_source;
2880
2881                 amdgpu_dm_irq_register_interrupt(adev, &int_params,
2882                                 dm_pflip_high_irq, c_irq_params);
2884         }
2885
2886         /* HPD */
2887         r = amdgpu_irq_add_id(adev, client_id,
2888                         VISLANDS30_IV_SRCID_HOTPLUG_DETECT_A, &adev->hpd_irq);
2889         if (r) {
2890                 DRM_ERROR("Failed to add hpd irq id!\n");
2891                 return r;
2892         }
2893
2894         register_hpd_handlers(adev);
2895
2896         return 0;
2897 }
2898 #endif
2899
2900 /* Register IRQ sources and initialize IRQ callbacks */
2901 static int dce110_register_irq_handlers(struct amdgpu_device *adev)
2902 {
2903         struct dc *dc = adev->dm.dc;
2904         struct common_irq_params *c_irq_params;
2905         struct dc_interrupt_params int_params = {0};
2906         int r;
2907         int i;
2908         unsigned client_id = AMDGPU_IRQ_CLIENTID_LEGACY;
2909
2910         if (adev->asic_type >= CHIP_VEGA10)
2911                 client_id = SOC15_IH_CLIENTID_DCE;
2912
2913         int_params.requested_polarity = INTERRUPT_POLARITY_DEFAULT;
2914         int_params.current_polarity = INTERRUPT_POLARITY_DEFAULT;
2915
2916         /*
2917          * Actions of amdgpu_irq_add_id():
2918          * 1. Register a set() function with base driver.
2919          *    Base driver will call set() function to enable/disable an
2920          *    interrupt in DC hardware.
2921          * 2. Register amdgpu_dm_irq_handler().
2922          *    Base driver will call amdgpu_dm_irq_handler() for ALL interrupts
2923          *    coming from DC hardware.
2924          *    amdgpu_dm_irq_handler() will re-direct the interrupt to DC
2925          *    for acknowledging and handling. */
2926
2927         /* Use VBLANK interrupt */
2928         for (i = VISLANDS30_IV_SRCID_D1_VERTICAL_INTERRUPT0; i <= VISLANDS30_IV_SRCID_D6_VERTICAL_INTERRUPT0; i++) {
2929                 r = amdgpu_irq_add_id(adev, client_id, i, &adev->crtc_irq);
2930                 if (r) {
2931                         DRM_ERROR("Failed to add crtc irq id!\n");
2932                         return r;
2933                 }
2934
2935                 int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
2936                 int_params.irq_source =
2937                         dc_interrupt_to_irq_source(dc, i, 0);
2938
2939                 c_irq_params = &adev->dm.vblank_params[int_params.irq_source - DC_IRQ_SOURCE_VBLANK1];
2940
2941                 c_irq_params->adev = adev;
2942                 c_irq_params->irq_src = int_params.irq_source;
2943
2944                 amdgpu_dm_irq_register_interrupt(adev, &int_params,
2945                                 dm_crtc_high_irq, c_irq_params);
2946         }
2947
2948         /* Use VUPDATE interrupt */
2949         for (i = VISLANDS30_IV_SRCID_D1_V_UPDATE_INT; i <= VISLANDS30_IV_SRCID_D6_V_UPDATE_INT; i += 2) {
2950                 r = amdgpu_irq_add_id(adev, client_id, i, &adev->vupdate_irq);
2951                 if (r) {
2952                         DRM_ERROR("Failed to add vupdate irq id!\n");
2953                         return r;
2954                 }
2955
2956                 int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
2957                 int_params.irq_source =
2958                         dc_interrupt_to_irq_source(dc, i, 0);
2959
2960                 c_irq_params = &adev->dm.vupdate_params[int_params.irq_source - DC_IRQ_SOURCE_VUPDATE1];
2961
2962                 c_irq_params->adev = adev;
2963                 c_irq_params->irq_src = int_params.irq_source;
2964
2965                 amdgpu_dm_irq_register_interrupt(adev, &int_params,
2966                                 dm_vupdate_high_irq, c_irq_params);
2967         }
2968
2969         /* Use GRPH_PFLIP interrupt */
2970         for (i = VISLANDS30_IV_SRCID_D1_GRPH_PFLIP;
2971                         i <= VISLANDS30_IV_SRCID_D6_GRPH_PFLIP; i += 2) {
2972                 r = amdgpu_irq_add_id(adev, client_id, i, &adev->pageflip_irq);
2973                 if (r) {
2974                         DRM_ERROR("Failed to add page flip irq id!\n");
2975                         return r;
2976                 }
2977
2978                 int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
2979                 int_params.irq_source =
2980                         dc_interrupt_to_irq_source(dc, i, 0);
2981
2982                 c_irq_params = &adev->dm.pflip_params[int_params.irq_source - DC_IRQ_SOURCE_PFLIP_FIRST];
2983
2984                 c_irq_params->adev = adev;
2985                 c_irq_params->irq_src = int_params.irq_source;
2986
2987                 amdgpu_dm_irq_register_interrupt(adev, &int_params,
2988                                 dm_pflip_high_irq, c_irq_params);
2990         }
2991
2992         /* HPD */
2993         r = amdgpu_irq_add_id(adev, client_id,
2994                         VISLANDS30_IV_SRCID_HOTPLUG_DETECT_A, &adev->hpd_irq);
2995         if (r) {
2996                 DRM_ERROR("Failed to add hpd irq id!\n");
2997                 return r;
2998         }
2999
3000         register_hpd_handlers(adev);
3001
3002         return 0;
3003 }
3004
3005 #if defined(CONFIG_DRM_AMD_DC_DCN)
3006 /* Register IRQ sources and initialize IRQ callbacks */
3007 static int dcn10_register_irq_handlers(struct amdgpu_device *adev)
3008 {
3009         struct dc *dc = adev->dm.dc;
3010         struct common_irq_params *c_irq_params;
3011         struct dc_interrupt_params int_params = {0};
3012         int r;
3013         int i;
3014 #if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
3015         static const unsigned int vrtl_int_srcid[] = {
3016                 DCN_1_0__SRCID__OTG1_VERTICAL_INTERRUPT0_CONTROL,
3017                 DCN_1_0__SRCID__OTG2_VERTICAL_INTERRUPT0_CONTROL,
3018                 DCN_1_0__SRCID__OTG3_VERTICAL_INTERRUPT0_CONTROL,
3019                 DCN_1_0__SRCID__OTG4_VERTICAL_INTERRUPT0_CONTROL,
3020                 DCN_1_0__SRCID__OTG5_VERTICAL_INTERRUPT0_CONTROL,
3021                 DCN_1_0__SRCID__OTG6_VERTICAL_INTERRUPT0_CONTROL
3022         };
3023 #endif
3024
3025         int_params.requested_polarity = INTERRUPT_POLARITY_DEFAULT;
3026         int_params.current_polarity = INTERRUPT_POLARITY_DEFAULT;
3027
3028         /*
3029          * Actions of amdgpu_irq_add_id():
3030          * 1. Register a set() function with base driver.
3031          *    Base driver will call set() function to enable/disable an
3032          *    interrupt in DC hardware.
3033          * 2. Register amdgpu_dm_irq_handler().
3034          *    Base driver will call amdgpu_dm_irq_handler() for ALL interrupts
3035          *    coming from DC hardware.
3036          *    amdgpu_dm_irq_handler() will re-direct the interrupt to DC
3037          *    for acknowledging and handling.
3038          */
3039
3040         /* Use VSTARTUP interrupt */
3041         for (i = DCN_1_0__SRCID__DC_D1_OTG_VSTARTUP;
3042                         i <= DCN_1_0__SRCID__DC_D1_OTG_VSTARTUP + adev->mode_info.num_crtc - 1;
3043                         i++) {
3044                 r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_DCE, i, &adev->crtc_irq);
3045
3046                 if (r) {
3047                         DRM_ERROR("Failed to add crtc irq id!\n");
3048                         return r;
3049                 }
3050
3051                 int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
3052                 int_params.irq_source =
3053                         dc_interrupt_to_irq_source(dc, i, 0);
3054
3055                 c_irq_params = &adev->dm.vblank_params[int_params.irq_source - DC_IRQ_SOURCE_VBLANK1];
3056
3057                 c_irq_params->adev = adev;
3058                 c_irq_params->irq_src = int_params.irq_source;
3059
3060                 amdgpu_dm_irq_register_interrupt(
3061                         adev, &int_params, dm_crtc_high_irq, c_irq_params);
3062         }
3063
3064         /* Use otg vertical line interrupt */
3065 #if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
3066         for (i = 0; i <= adev->mode_info.num_crtc - 1; i++) {
3067                 r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_DCE,
3068                                 vrtl_int_srcid[i], &adev->vline0_irq);
3069
3070                 if (r) {
3071                         DRM_ERROR("Failed to add vline0 irq id!\n");
3072                         return r;
3073                 }
3074
3075                 int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
3076                 int_params.irq_source =
3077                         dc_interrupt_to_irq_source(dc, vrtl_int_srcid[i], 0);
3078
3079                 if (int_params.irq_source == DC_IRQ_SOURCE_INVALID) {
3080                         DRM_ERROR("Failed to register vline0 irq %d!\n", vrtl_int_srcid[i]);
3081                         break;
3082                 }
3083
3084                 c_irq_params = &adev->dm.vline0_params[int_params.irq_source
3085                                         - DC_IRQ_SOURCE_DC1_VLINE0];
3086
3087                 c_irq_params->adev = adev;
3088                 c_irq_params->irq_src = int_params.irq_source;
3089
3090                 amdgpu_dm_irq_register_interrupt(adev, &int_params,
3091                                 dm_dcn_vertical_interrupt0_high_irq, c_irq_params);
3092         }
3093 #endif
3094
3095         /* Use VUPDATE_NO_LOCK interrupt on DCN, which seems to correspond to
3096          * the regular VUPDATE interrupt on DCE. We want DC_IRQ_SOURCE_VUPDATEx
3097          * to trigger at end of each vblank, regardless of state of the lock,
3098          * matching DCE behaviour.
3099          */
3100         for (i = DCN_1_0__SRCID__OTG0_IHC_V_UPDATE_NO_LOCK_INTERRUPT;
3101              i <= DCN_1_0__SRCID__OTG0_IHC_V_UPDATE_NO_LOCK_INTERRUPT + adev->mode_info.num_crtc - 1;
3102              i++) {
3103                 r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_DCE, i, &adev->vupdate_irq);
3104
3105                 if (r) {
3106                         DRM_ERROR("Failed to add vupdate irq id!\n");
3107                         return r;
3108                 }
3109
3110                 int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
3111                 int_params.irq_source =
3112                         dc_interrupt_to_irq_source(dc, i, 0);
3113
3114                 c_irq_params = &adev->dm.vupdate_params[int_params.irq_source - DC_IRQ_SOURCE_VUPDATE1];
3115
3116                 c_irq_params->adev = adev;
3117                 c_irq_params->irq_src = int_params.irq_source;
3118
3119                 amdgpu_dm_irq_register_interrupt(adev, &int_params,
3120                                 dm_vupdate_high_irq, c_irq_params);
3121         }
3122
3123         /* Use GRPH_PFLIP interrupt */
3124         for (i = DCN_1_0__SRCID__HUBP0_FLIP_INTERRUPT;
3125                         i <= DCN_1_0__SRCID__HUBP0_FLIP_INTERRUPT + adev->mode_info.num_crtc - 1;
3126                         i++) {
3127                 r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_DCE, i, &adev->pageflip_irq);
3128                 if (r) {
3129                         DRM_ERROR("Failed to add page flip irq id!\n");
3130                         return r;
3131                 }
3132
3133                 int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
3134                 int_params.irq_source =
3135                         dc_interrupt_to_irq_source(dc, i, 0);
3136
3137                 c_irq_params = &adev->dm.pflip_params[int_params.irq_source - DC_IRQ_SOURCE_PFLIP_FIRST];
3138
3139                 c_irq_params->adev = adev;
3140                 c_irq_params->irq_src = int_params.irq_source;
3141
3142                 amdgpu_dm_irq_register_interrupt(adev, &int_params,
3143                                 dm_pflip_high_irq, c_irq_params);
3145         }
3146
3147         if (dc->ctx->dmub_srv) {
3148                 i = DCN_1_0__SRCID__DMCUB_OUTBOX_HIGH_PRIORITY_READY_INT;
3149                 r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_DCE, i, &adev->dmub_trace_irq);
3150
3151                 if (r) {
3152                         DRM_ERROR("Failed to add dmub trace irq id!\n");
3153                         return r;
3154                 }
3155
3156                 int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
3157                 int_params.irq_source =
3158                         dc_interrupt_to_irq_source(dc, i, 0);
3159
3160                 c_irq_params = &adev->dm.dmub_trace_params[0];
3161
3162                 c_irq_params->adev = adev;
3163                 c_irq_params->irq_src = int_params.irq_source;
3164
3165                 amdgpu_dm_irq_register_interrupt(adev, &int_params,
3166                                 dm_dmub_trace_high_irq, c_irq_params);
3167         }
3168
3169         /* HPD */
3170         r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_DCE, DCN_1_0__SRCID__DC_HPD1_INT,
3171                         &adev->hpd_irq);
3172         if (r) {
3173                 DRM_ERROR("Failed to add hpd irq id!\n");
3174                 return r;
3175         }
3176
3177         register_hpd_handlers(adev);
3178
3179         return 0;
3180 }
3181 #endif
3182
3183 /*
3184  * Acquires the lock for the atomic state object and returns
3185  * the new atomic state.
3186  *
3187  * This should only be called during atomic check.
3188  */
3189 static int dm_atomic_get_state(struct drm_atomic_state *state,
3190                                struct dm_atomic_state **dm_state)
3191 {
3192         struct drm_device *dev = state->dev;
3193         struct amdgpu_device *adev = drm_to_adev(dev);
3194         struct amdgpu_display_manager *dm = &adev->dm;
3195         struct drm_private_state *priv_state;
3196
3197         if (*dm_state)
3198                 return 0;
3199
3200         priv_state = drm_atomic_get_private_obj_state(state, &dm->atomic_obj);
3201         if (IS_ERR(priv_state))
3202                 return PTR_ERR(priv_state);
3203
3204         *dm_state = to_dm_atomic_state(priv_state);
3205
3206         return 0;
3207 }
3208
3209 static struct dm_atomic_state *
3210 dm_atomic_get_new_state(struct drm_atomic_state *state)
3211 {
3212         struct drm_device *dev = state->dev;
3213         struct amdgpu_device *adev = drm_to_adev(dev);
3214         struct amdgpu_display_manager *dm = &adev->dm;
3215         struct drm_private_obj *obj;
3216         struct drm_private_state *new_obj_state;
3217         int i;
3218
3219         for_each_new_private_obj_in_state(state, obj, new_obj_state, i) {
3220                 if (obj->funcs == dm->atomic_obj.funcs)
3221                         return to_dm_atomic_state(new_obj_state);
3222         }
3223
3224         return NULL;
3225 }
3226
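/*
 * Duplicate the DM private state for a new atomic transaction: the DRM
 * helper copies the base private object state, while the DC context is
 * copied with dc_copy_state() so atomic check can modify the new state
 * without touching the one currently applied in hardware.
 */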
3227 static struct drm_private_state *
3228 dm_atomic_duplicate_state(struct drm_private_obj *obj)
3229 {
3230         struct dm_atomic_state *old_state, *new_state;
3231
3232         new_state = kzalloc(sizeof(*new_state), GFP_KERNEL);
3233         if (!new_state)
3234                 return NULL;
3235
3236         __drm_atomic_helper_private_obj_duplicate_state(obj, &new_state->base);
3237
3238         old_state = to_dm_atomic_state(obj->state);
3239
3240         if (old_state && old_state->context)
3241                 new_state->context = dc_copy_state(old_state->context);
3242
3243         if (!new_state->context) {
3244                 kfree(new_state);
3245                 return NULL;
3246         }
3247
3248         return &new_state->base;
3249 }
3250
3251 static void dm_atomic_destroy_state(struct drm_private_obj *obj,
3252                                     struct drm_private_state *state)
3253 {
3254         struct dm_atomic_state *dm_state = to_dm_atomic_state(state);
3255
3256         if (dm_state && dm_state->context)
3257                 dc_release_state(dm_state->context);
3258
3259         kfree(dm_state);
3260 }
3261
3262 static struct drm_private_state_funcs dm_atomic_state_funcs = {
3263         .atomic_duplicate_state = dm_atomic_duplicate_state,
3264         .atomic_destroy_state = dm_atomic_destroy_state,
3265 };
3266
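/*
 * Set up the DRM mode config (size limits, async flip support) and
 * register the DM private atomic object, whose state wraps the global
 * DC state so it can be tracked through atomic check/commit.
 */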
3267 static int amdgpu_dm_mode_config_init(struct amdgpu_device *adev)
3268 {
3269         struct dm_atomic_state *state;
3270         int r;
3271
3272         adev->mode_info.mode_config_initialized = true;
3273
3274         adev_to_drm(adev)->mode_config.funcs = (void *)&amdgpu_dm_mode_funcs;
3275         adev_to_drm(adev)->mode_config.helper_private = &amdgpu_dm_mode_config_helperfuncs;
3276
3277         adev_to_drm(adev)->mode_config.max_width = 16384;
3278         adev_to_drm(adev)->mode_config.max_height = 16384;
3279
3280         adev_to_drm(adev)->mode_config.preferred_depth = 24;
3281         adev_to_drm(adev)->mode_config.prefer_shadow = 1;
3282         /* indicates support for immediate flip */
3283         adev_to_drm(adev)->mode_config.async_page_flip = true;
3284
3285         adev_to_drm(adev)->mode_config.fb_base = adev->gmc.aper_base;
3286
3287         state = kzalloc(sizeof(*state), GFP_KERNEL);
3288         if (!state)
3289                 return -ENOMEM;
3290
3291         state->context = dc_create_state(adev->dm.dc);
3292         if (!state->context) {
3293                 kfree(state);
3294                 return -ENOMEM;
3295         }
3296
3297         dc_resource_state_copy_construct_current(adev->dm.dc, state->context);
3298
3299         drm_atomic_private_obj_init(adev_to_drm(adev),
3300                                     &adev->dm.atomic_obj,
3301                                     &state->base,
3302                                     &dm_atomic_state_funcs);
3303
3304         r = amdgpu_display_modeset_create_props(adev);
3305         if (r) {
3306                 dc_release_state(state->context);
3307                 kfree(state);
3308                 return r;
3309         }
3310
3311         r = amdgpu_dm_audio_init(adev);
3312         if (r) {
3313                 dc_release_state(state->context);
3314                 kfree(state);
3315                 return r;
3316         }
3317
3318         return 0;
3319 }
3320
3321 #define AMDGPU_DM_DEFAULT_MIN_BACKLIGHT 12
3322 #define AMDGPU_DM_DEFAULT_MAX_BACKLIGHT 255
3323 #define AUX_BL_DEFAULT_TRANSITION_TIME_MS 50
3324
3325 #if defined(CONFIG_BACKLIGHT_CLASS_DEVICE) ||\
3326         defined(CONFIG_BACKLIGHT_CLASS_DEVICE_MODULE)
3327
3328 static void amdgpu_dm_update_backlight_caps(struct amdgpu_display_manager *dm)
3329 {
3330 #if defined(CONFIG_ACPI)
3331         struct amdgpu_dm_backlight_caps caps;
3332
3333         memset(&caps, 0, sizeof(caps));
3334
3335         if (dm->backlight_caps.caps_valid)
3336                 return;
3337
3338         amdgpu_acpi_get_backlight_caps(dm->adev, &caps);
3339         if (caps.caps_valid) {
3340                 dm->backlight_caps.caps_valid = true;
3341                 if (caps.aux_support)
3342                         return;
3343                 dm->backlight_caps.min_input_signal = caps.min_input_signal;
3344                 dm->backlight_caps.max_input_signal = caps.max_input_signal;
3345         } else {
3346                 dm->backlight_caps.min_input_signal =
3347                                 AMDGPU_DM_DEFAULT_MIN_BACKLIGHT;
3348                 dm->backlight_caps.max_input_signal =
3349                                 AMDGPU_DM_DEFAULT_MAX_BACKLIGHT;
3350         }
3351 #else
3352         if (dm->backlight_caps.aux_support)
3353                 return;
3354
3355         dm->backlight_caps.min_input_signal = AMDGPU_DM_DEFAULT_MIN_BACKLIGHT;
3356         dm->backlight_caps.max_input_signal = AMDGPU_DM_DEFAULT_MAX_BACKLIGHT;
3357 #endif
3358 }
3359
3360 static int get_brightness_range(const struct amdgpu_dm_backlight_caps *caps,
3361                                 unsigned *min, unsigned *max)
3362 {
3363         if (!caps)
3364                 return 0;
3365
3366         if (caps->aux_support) {
3367                 // Firmware limits are in nits, DC API wants millinits.
3368                 *max = 1000 * caps->aux_max_input_signal;
3369                 *min = 1000 * caps->aux_min_input_signal;
3370         } else {
3371                 // Firmware limits are 8-bit, PWM control is 16-bit.
3372                 *max = 0x101 * caps->max_input_signal;
3373                 *min = 0x101 * caps->min_input_signal;
3374         }
3375         return 1;
3376 }
3377
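/*
 * Illustrative example of the PWM path, assuming the defaults above and
 * AMDGPU_MAX_BL_LEVEL == 255: firmware limits of 12..255 scale to
 * min = 0x101 * 12 = 3084 and max = 0x101 * 255 = 65535, so a user
 * brightness of 128 maps to 3084 + (65535 - 3084) * 128 / 255 ~= 34432.
 */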
3378 static u32 convert_brightness_from_user(const struct amdgpu_dm_backlight_caps *caps,
3379                                         uint32_t brightness)
3380 {
3381         unsigned min, max;
3382
3383         if (!get_brightness_range(caps, &min, &max))
3384                 return brightness;
3385
3386         // Rescale 0..255 to min..max
3387         return min + DIV_ROUND_CLOSEST((max - min) * brightness,
3388                                        AMDGPU_MAX_BL_LEVEL);
3389 }
3390
3391 static u32 convert_brightness_to_user(const struct amdgpu_dm_backlight_caps *caps,
3392                                       uint32_t brightness)
3393 {
3394         unsigned min, max;
3395
3396         if (!get_brightness_range(caps, &min, &max))
3397                 return brightness;
3398
3399         if (brightness < min)
3400                 return 0;
3401         // Rescale min..max to 0..255
3402         return DIV_ROUND_CLOSEST(AMDGPU_MAX_BL_LEVEL * (brightness - min),
3403                                  max - min);
3404 }
3405
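/*
 * Backlight sysfs writes land here: the user level is rescaled to the
 * panel range and then programmed either in nits over DP AUX (when the
 * caps report aux_support) or via dc_link_set_backlight_level() on the
 * PWM path.
 */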
3406 static int amdgpu_dm_backlight_update_status(struct backlight_device *bd)
3407 {
3408         struct amdgpu_display_manager *dm = bl_get_data(bd);
3409         struct amdgpu_dm_backlight_caps caps;
3410         struct dc_link *link = NULL;
3411         u32 brightness;
3412         bool rc;
3413
3414         amdgpu_dm_update_backlight_caps(dm);
3415         caps = dm->backlight_caps;
3416
3417         link = (struct dc_link *)dm->backlight_link;
3418
3419         brightness = convert_brightness_from_user(&caps, bd->props.brightness);
3420         // Change brightness based on AUX property
3421         if (caps.aux_support)
3422                 rc = dc_link_set_backlight_level_nits(link, true, brightness,
3423                                                       AUX_BL_DEFAULT_TRANSITION_TIME_MS);
3424         else
3425                 rc = dc_link_set_backlight_level(dm->backlight_link, brightness, 0);
3426
3427         return rc ? 0 : 1;
3428 }
3429
3430 static int amdgpu_dm_backlight_get_brightness(struct backlight_device *bd)
3431 {
3432         struct amdgpu_display_manager *dm = bl_get_data(bd);
3433         struct amdgpu_dm_backlight_caps caps;
3434
3435         amdgpu_dm_update_backlight_caps(dm);
3436         caps = dm->backlight_caps;
3437
3438         if (caps.aux_support) {
3439                 struct dc_link *link = (struct dc_link *)dm->backlight_link;
3440                 u32 avg, peak;
3441                 bool rc;
3442
3443                 rc = dc_link_get_backlight_level_nits(link, &avg, &peak);
3444                 if (!rc)
3445                         return bd->props.brightness;
3446                 return convert_brightness_to_user(&caps, avg);
3447         } else {
3448                 int ret = dc_link_get_backlight_level(dm->backlight_link);
3449
3450                 if (ret == DC_ERROR_UNEXPECTED)
3451                         return bd->props.brightness;
3452                 return convert_brightness_to_user(&caps, ret);
3453         }
3454 }
3455
3456 static const struct backlight_ops amdgpu_dm_backlight_ops = {
3457         .options = BL_CORE_SUSPENDRESUME,
3458         .get_brightness = amdgpu_dm_backlight_get_brightness,
3459         .update_status  = amdgpu_dm_backlight_update_status,
3460 };
3461
3462 static void
3463 amdgpu_dm_register_backlight_device(struct amdgpu_display_manager *dm)
3464 {
3465         char bl_name[16];
3466         struct backlight_properties props = { 0 };
3467
3468         amdgpu_dm_update_backlight_caps(dm);
3469
3470         props.max_brightness = AMDGPU_MAX_BL_LEVEL;
3471         props.brightness = AMDGPU_MAX_BL_LEVEL;
3472         props.type = BACKLIGHT_RAW;
3473
3474         snprintf(bl_name, sizeof(bl_name), "amdgpu_bl%d",
3475                  adev_to_drm(dm->adev)->primary->index);
3476
3477         dm->backlight_dev = backlight_device_register(bl_name,
3478                                                       adev_to_drm(dm->adev)->dev,
3479                                                       dm,
3480                                                       &amdgpu_dm_backlight_ops,
3481                                                       &props);
3482
3483         if (IS_ERR(dm->backlight_dev))
3484                 DRM_ERROR("DM: Backlight registration failed!\n");
3485         else
3486                 DRM_DEBUG_DRIVER("DM: Registered Backlight device: %s\n", bl_name);
3487 }
3488
3489 #endif
3490
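/*
 * Allocate one DRM plane and initialize it from DC's reported plane
 * caps. mode_info may be NULL for planes that are not tracked per-CRTC,
 * e.g. the overlay plane created further below.
 */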
3491 static int initialize_plane(struct amdgpu_display_manager *dm,
3492                             struct amdgpu_mode_info *mode_info, int plane_id,
3493                             enum drm_plane_type plane_type,
3494                             const struct dc_plane_cap *plane_cap)
3495 {
3496         struct drm_plane *plane;
3497         unsigned long possible_crtcs;
3498         int ret = 0;
3499
3500         plane = kzalloc(sizeof(struct drm_plane), GFP_KERNEL);
3501         if (!plane) {
3502                 DRM_ERROR("KMS: Failed to allocate plane\n");
3503                 return -ENOMEM;
3504         }
3505         plane->type = plane_type;
3506
3507         /*
3508          * HACK: IGT tests expect that the primary plane for a CRTC
3509          * can only have one possible CRTC. Only expose support for
3510          * all CRTCs on planes that are never used as a primary plane
3511          * for a CRTC, i.e. overlay or underlay planes.
3512          */
3513         possible_crtcs = 1 << plane_id;
3514         if (plane_id >= dm->dc->caps.max_streams)
3515                 possible_crtcs = 0xff;
3516
3517         ret = amdgpu_dm_plane_init(dm, plane, possible_crtcs, plane_cap);
3518
3519         if (ret) {
3520                 DRM_ERROR("KMS: Failed to initialize plane\n");
3521                 kfree(plane);
3522                 return ret;
3523         }
3524
3525         if (mode_info)
3526                 mode_info->planes[plane_id] = plane;
3527
3528         return ret;
3529 }
3530
3531
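/*
 * Register the backlight device for internal panels (eDP/LVDS) once the
 * link is known to be connected; the link is then remembered as
 * dm->backlight_link for use by the backlight ops above.
 */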
3532 static void register_backlight_device(struct amdgpu_display_manager *dm,
3533                                       struct dc_link *link)
3534 {
3535 #if defined(CONFIG_BACKLIGHT_CLASS_DEVICE) ||\
3536         defined(CONFIG_BACKLIGHT_CLASS_DEVICE_MODULE)
3537
3538         if ((link->connector_signal & (SIGNAL_TYPE_EDP | SIGNAL_TYPE_LVDS)) &&
3539             link->type != dc_connection_none) {
3540                  * Even if registration fails, we should continue with
3541                  * DM initialization, because not having a backlight control
3542                  * is better than a black screen.
3543                  * is better then a black screen.
3544                  */
3545                 amdgpu_dm_register_backlight_device(dm);
3546
3547                 if (dm->backlight_dev)
3548                         dm->backlight_link = link;
3549         }
3550 #endif
3551 }
3552
3553
3554 /*
3555  * In this architecture, the association
3556  * connector -> encoder -> crtc
3557  * is not really required. The crtc and connector will hold the
3558  * display_index as an abstraction to use with the DAL component.
3559  *
3560  * Returns 0 on success
3561  */
3562 static int amdgpu_dm_initialize_drm_device(struct amdgpu_device *adev)
3563 {
3564         struct amdgpu_display_manager *dm = &adev->dm;
3565         int32_t i;
3566         struct amdgpu_dm_connector *aconnector = NULL;
3567         struct amdgpu_encoder *aencoder = NULL;
3568         struct amdgpu_mode_info *mode_info = &adev->mode_info;
3569         uint32_t link_cnt;
3570         int32_t primary_planes;
3571         enum dc_connection_type new_connection_type = dc_connection_none;
3572         const struct dc_plane_cap *plane;
3573
3574         dm->display_indexes_num = dm->dc->caps.max_streams;
3575         /* Update the actual number of CRTCs in use */
3576         adev->mode_info.num_crtc = adev->dm.display_indexes_num;
3577
3578         link_cnt = dm->dc->caps.max_links;
3579         if (amdgpu_dm_mode_config_init(dm->adev)) {
3580                 DRM_ERROR("DM: Failed to initialize mode config\n");
3581                 return -EINVAL;
3582         }
3583
3584         /* There is one primary plane per CRTC */
3585         primary_planes = dm->dc->caps.max_streams;
3586         ASSERT(primary_planes <= AMDGPU_MAX_PLANES);
3587
3588         /*
3589          * Initialize primary planes, implicit planes for legacy IOCTLS.
3590          * Order is reversed to match iteration order in atomic check.
3591          */
3592         for (i = (primary_planes - 1); i >= 0; i--) {
3593                 plane = &dm->dc->caps.planes[i];
3594
3595                 if (initialize_plane(dm, mode_info, i,
3596                                      DRM_PLANE_TYPE_PRIMARY, plane)) {
3597                         DRM_ERROR("KMS: Failed to initialize primary plane\n");
3598                         goto fail;
3599                 }
3600         }
3601
3602         /*
3603          * Initialize overlay planes, index starting after primary planes.
3604          * These planes have a higher DRM index than the primary planes since
3605          * they should be considered as having a higher z-order.
3606          * Order is reversed to match iteration order in atomic check.
3607          *
3608          * Only support DCN for now, and only expose one so we don't encourage
3609          * userspace to use up all the pipes.
3610          */
3611         for (i = 0; i < dm->dc->caps.max_planes; ++i) {
3612                 struct dc_plane_cap *plane = &dm->dc->caps.planes[i];
3613
3614                 if (plane->type != DC_PLANE_TYPE_DCN_UNIVERSAL)
3615                         continue;
3616
3617                 if (!plane->blends_with_above || !plane->blends_with_below)
3618                         continue;
3619
3620                 if (!plane->pixel_format_support.argb8888)
3621                         continue;
3622
3623                 if (initialize_plane(dm, NULL, primary_planes + i,
3624                                      DRM_PLANE_TYPE_OVERLAY, plane)) {
3625                         DRM_ERROR("KMS: Failed to initialize overlay plane\n");
3626                         goto fail;
3627                 }
3628
3629                 /* Only create one overlay plane. */
3630                 break;
3631         }
3632
3633         for (i = 0; i < dm->dc->caps.max_streams; i++)
3634                 if (amdgpu_dm_crtc_init(dm, mode_info->planes[i], i)) {
3635                         DRM_ERROR("KMS: Failed to initialize crtc\n");
3636                         goto fail;
3637                 }
3638
3639         /* Loop over all connectors on the board */
3640         for (i = 0; i < link_cnt; i++) {
3641                 struct dc_link *link = NULL;
3642
3643                 if (i > AMDGPU_DM_MAX_DISPLAY_INDEX) {
3644                         DRM_ERROR(
3645                                 "KMS: Cannot support more than %d display indexes\n",
3646                                         AMDGPU_DM_MAX_DISPLAY_INDEX);
3647                         continue;
3648                 }
3649
3650                 aconnector = kzalloc(sizeof(*aconnector), GFP_KERNEL);
3651                 if (!aconnector)
3652                         goto fail;
3653
3654                 aencoder = kzalloc(sizeof(*aencoder), GFP_KERNEL);
3655                 if (!aencoder)
3656                         goto fail;
3657
3658                 if (amdgpu_dm_encoder_init(dm->ddev, aencoder, i)) {
3659                         DRM_ERROR("KMS: Failed to initialize encoder\n");
3660                         goto fail;
3661                 }
3662
3663                 if (amdgpu_dm_connector_init(dm, aconnector, i, aencoder)) {
3664                         DRM_ERROR("KMS: Failed to initialize connector\n");
3665                         goto fail;
3666                 }
3667
3668                 link = dc_get_link_at_index(dm->dc, i);
3669
3670                 if (!dc_link_detect_sink(link, &new_connection_type))
3671                         DRM_ERROR("KMS: Failed to detect connector\n");
3672
3673                 if (aconnector->base.force && new_connection_type == dc_connection_none) {
3674                         emulated_link_detect(link);
3675                         amdgpu_dm_update_connector_after_detect(aconnector);
3676
3677                 } else if (dc_link_detect(link, DETECT_REASON_BOOT)) {
3678                         amdgpu_dm_update_connector_after_detect(aconnector);
3679                         register_backlight_device(dm, link);
3680                         if (amdgpu_dc_feature_mask & DC_PSR_MASK)
3681                                 amdgpu_dm_set_psr_caps(link);
3682                 }
3683
3684
3685         }
3686
3687         /* Software is initialized. Now we can register interrupt handlers. */
3688         switch (adev->asic_type) {
3689 #if defined(CONFIG_DRM_AMD_DC_SI)
3690         case CHIP_TAHITI:
3691         case CHIP_PITCAIRN:
3692         case CHIP_VERDE:
3693         case CHIP_OLAND:
3694                 if (dce60_register_irq_handlers(dm->adev)) {
3695                         DRM_ERROR("DM: Failed to initialize IRQ\n");
3696                         goto fail;
3697                 }
3698                 break;
3699 #endif
3700         case CHIP_BONAIRE:
3701         case CHIP_HAWAII:
3702         case CHIP_KAVERI:
3703         case CHIP_KABINI:
3704         case CHIP_MULLINS:
3705         case CHIP_TONGA:
3706         case CHIP_FIJI:
3707         case CHIP_CARRIZO:
3708         case CHIP_STONEY:
3709         case CHIP_POLARIS11:
3710         case CHIP_POLARIS10:
3711         case CHIP_POLARIS12:
3712         case CHIP_VEGAM:
3713         case CHIP_VEGA10:
3714         case CHIP_VEGA12:
3715         case CHIP_VEGA20:
3716                 if (dce110_register_irq_handlers(dm->adev)) {
3717                         DRM_ERROR("DM: Failed to initialize IRQ\n");
3718                         goto fail;
3719                 }
3720                 break;
3721 #if defined(CONFIG_DRM_AMD_DC_DCN)
3722         case CHIP_RAVEN:
3723         case CHIP_NAVI12:
3724         case CHIP_NAVI10:
3725         case CHIP_NAVI14:
3726         case CHIP_RENOIR:
3727         case CHIP_SIENNA_CICHLID:
3728         case CHIP_NAVY_FLOUNDER:
3729         case CHIP_DIMGREY_CAVEFISH:
3730         case CHIP_VANGOGH:
3731                 if (dcn10_register_irq_handlers(dm->adev)) {
3732                         DRM_ERROR("DM: Failed to initialize IRQ\n");
3733                         goto fail;
3734                 }
3735                 break;
3736 #endif
3737         default:
3738                 DRM_ERROR("Unsupported ASIC type: 0x%X\n", adev->asic_type);
3739                 goto fail;
3740         }
3741
3742         return 0;
3743 fail:
3744         kfree(aencoder);
3745         kfree(aconnector);
3746
3747         return -EINVAL;
3748 }
3749
3750 static void amdgpu_dm_destroy_drm_device(struct amdgpu_display_manager *dm)
3751 {
3752         drm_mode_config_cleanup(dm->ddev);
3753         drm_atomic_private_obj_fini(&dm->atomic_obj);
3755 }
3756
3757 /******************************************************************************
3758  * amdgpu_display_funcs functions
3759  *****************************************************************************/
3760
3761 /*
3762  * dm_bandwidth_update - program display watermarks
3763  *
3764  * @adev: amdgpu_device pointer
3765  *
3766  * Calculate and program the display watermarks and line buffer allocation.
3767  */
3768 static void dm_bandwidth_update(struct amdgpu_device *adev)
3769 {
3770         /* TODO: implement later */
3771 }
3772
3773 static const struct amdgpu_display_funcs dm_display_funcs = {
3774         .bandwidth_update = dm_bandwidth_update, /* called unconditionally */
3775         .vblank_get_counter = dm_vblank_get_counter,/* called unconditionally */
3776         .backlight_set_level = NULL, /* never called for DC */
3777         .backlight_get_level = NULL, /* never called for DC */
3778         .hpd_sense = NULL,/* called unconditionally */
3779         .hpd_set_polarity = NULL, /* called unconditionally */
3780         .hpd_get_gpio_reg = NULL, /* VBIOS parsing. DAL does it. */
3781         .page_flip_get_scanoutpos =
3782                 dm_crtc_get_scanoutpos,/* called unconditionally */
3783         .add_encoder = NULL, /* VBIOS parsing. DAL does it. */
3784         .add_connector = NULL, /* VBIOS parsing. DAL does it. */
3785 };
3786
3787 #if defined(CONFIG_DEBUG_KERNEL_DC)
3788
3789 static ssize_t s3_debug_store(struct device *device,
3790                               struct device_attribute *attr,
3791                               const char *buf,
3792                               size_t count)
3793 {
3794         int ret;
3795         int s3_state;
3796         struct drm_device *drm_dev = dev_get_drvdata(device);
3797         struct amdgpu_device *adev = drm_to_adev(drm_dev);
3798
3799         ret = kstrtoint(buf, 0, &s3_state);
3800
3801         if (ret == 0) {
3802                 if (s3_state) {
3803                         dm_resume(adev);
3804                         drm_kms_helper_hotplug_event(adev_to_drm(adev));
3805                 } else {
3806                         dm_suspend(adev);
                }
3807         }
3808
3809         return ret == 0 ? count : 0;
3810 }
3811
3812 DEVICE_ATTR_WO(s3_debug);
3813
3814 #endif
3815
3816 static int dm_early_init(void *handle)
3817 {
3818         struct amdgpu_device *adev = (struct amdgpu_device *)handle;
3819
3820         switch (adev->asic_type) {
3821 #if defined(CONFIG_DRM_AMD_DC_SI)
3822         case CHIP_TAHITI:
3823         case CHIP_PITCAIRN:
3824         case CHIP_VERDE:
3825                 adev->mode_info.num_crtc = 6;
3826                 adev->mode_info.num_hpd = 6;
3827                 adev->mode_info.num_dig = 6;
3828                 break;
3829         case CHIP_OLAND:
3830                 adev->mode_info.num_crtc = 2;
3831                 adev->mode_info.num_hpd = 2;
3832                 adev->mode_info.num_dig = 2;
3833                 break;
3834 #endif
3835         case CHIP_BONAIRE:
3836         case CHIP_HAWAII:
3837                 adev->mode_info.num_crtc = 6;
3838                 adev->mode_info.num_hpd = 6;
3839                 adev->mode_info.num_dig = 6;
3840                 break;
3841         case CHIP_KAVERI:
3842                 adev->mode_info.num_crtc = 4;
3843                 adev->mode_info.num_hpd = 6;
3844                 adev->mode_info.num_dig = 7;
3845                 break;
3846         case CHIP_KABINI:
3847         case CHIP_MULLINS:
3848                 adev->mode_info.num_crtc = 2;
3849                 adev->mode_info.num_hpd = 6;
3850                 adev->mode_info.num_dig = 6;
3851                 break;
3852         case CHIP_FIJI:
3853         case CHIP_TONGA:
3854                 adev->mode_info.num_crtc = 6;
3855                 adev->mode_info.num_hpd = 6;
3856                 adev->mode_info.num_dig = 7;
3857                 break;
3858         case CHIP_CARRIZO:
3859                 adev->mode_info.num_crtc = 3;
3860                 adev->mode_info.num_hpd = 6;
3861                 adev->mode_info.num_dig = 9;
3862                 break;
3863         case CHIP_STONEY:
3864                 adev->mode_info.num_crtc = 2;
3865                 adev->mode_info.num_hpd = 6;
3866                 adev->mode_info.num_dig = 9;
3867                 break;
3868         case CHIP_POLARIS11:
3869         case CHIP_POLARIS12:
3870                 adev->mode_info.num_crtc = 5;
3871                 adev->mode_info.num_hpd = 5;
3872                 adev->mode_info.num_dig = 5;
3873                 break;
3874         case CHIP_POLARIS10:
3875         case CHIP_VEGAM:
3876                 adev->mode_info.num_crtc = 6;
3877                 adev->mode_info.num_hpd = 6;
3878                 adev->mode_info.num_dig = 6;
3879                 break;
3880         case CHIP_VEGA10:
3881         case CHIP_VEGA12:
3882         case CHIP_VEGA20:
3883                 adev->mode_info.num_crtc = 6;
3884                 adev->mode_info.num_hpd = 6;
3885                 adev->mode_info.num_dig = 6;
3886                 break;
3887 #if defined(CONFIG_DRM_AMD_DC_DCN)
3888         case CHIP_RAVEN:
3889         case CHIP_RENOIR:
3890         case CHIP_VANGOGH:
3891                 adev->mode_info.num_crtc = 4;
3892                 adev->mode_info.num_hpd = 4;
3893                 adev->mode_info.num_dig = 4;
3894                 break;
3895         case CHIP_NAVI10:
3896         case CHIP_NAVI12:
3897         case CHIP_SIENNA_CICHLID:
3898         case CHIP_NAVY_FLOUNDER:
3899                 adev->mode_info.num_crtc = 6;
3900                 adev->mode_info.num_hpd = 6;
3901                 adev->mode_info.num_dig = 6;
3902                 break;
3903         case CHIP_NAVI14:
3904         case CHIP_DIMGREY_CAVEFISH:
3905                 adev->mode_info.num_crtc = 5;
3906                 adev->mode_info.num_hpd = 5;
3907                 adev->mode_info.num_dig = 5;
3908                 break;
3909 #endif
3910         default:
3911                 DRM_ERROR("Unsupported ASIC type: 0x%X\n", adev->asic_type);
3912                 return -EINVAL;
3913         }
3914
3915         amdgpu_dm_set_irq_funcs(adev);
3916
3917         if (adev->mode_info.funcs == NULL)
3918                 adev->mode_info.funcs = &dm_display_funcs;
3919
3920         /*
3921          * Note: Do NOT change adev->audio_endpt_rreg and
3922          * adev->audio_endpt_wreg because they are initialised in
3923          * amdgpu_device_init()
3924          */
3925 #if defined(CONFIG_DEBUG_KERNEL_DC)
3926         device_create_file(
3927                 adev_to_drm(adev)->dev,
3928                 &dev_attr_s3_debug);
3929 #endif
3930
3931         return 0;
3932 }
3933
3934 static bool modeset_required(struct drm_crtc_state *crtc_state,
3935                              struct dc_stream_state *new_stream,
3936                              struct dc_stream_state *old_stream)
3937 {
3938         return crtc_state->active && drm_atomic_crtc_needs_modeset(crtc_state);
3939 }
3940
3941 static bool modereset_required(struct drm_crtc_state *crtc_state)
3942 {
3943         return !crtc_state->active && drm_atomic_crtc_needs_modeset(crtc_state);
3944 }
3945
3946 static void amdgpu_dm_encoder_destroy(struct drm_encoder *encoder)
3947 {
3948         drm_encoder_cleanup(encoder);
3949         kfree(encoder);
3950 }
3951
3952 static const struct drm_encoder_funcs amdgpu_dm_encoder_funcs = {
3953         .destroy = amdgpu_dm_encoder_destroy,
3954 };
3955
3956
3957 static void get_min_max_dc_plane_scaling(struct drm_device *dev,
3958                                          struct drm_framebuffer *fb,
3959                                          int *min_downscale, int *max_upscale)
3960 {
3961         struct amdgpu_device *adev = drm_to_adev(dev);
3962         struct dc *dc = adev->dm.dc;
3963         /* Caps for all supported planes are the same on DCE and DCN 1 - 3 */
3964         struct dc_plane_cap *plane_cap = &dc->caps.planes[0];
3965
3966         switch (fb->format->format) {
3967         case DRM_FORMAT_P010:
3968         case DRM_FORMAT_NV12:
3969         case DRM_FORMAT_NV21:
3970                 *max_upscale = plane_cap->max_upscale_factor.nv12;
3971                 *min_downscale = plane_cap->max_downscale_factor.nv12;
3972                 break;
3973
3974         case DRM_FORMAT_XRGB16161616F:
3975         case DRM_FORMAT_ARGB16161616F:
3976         case DRM_FORMAT_XBGR16161616F:
3977         case DRM_FORMAT_ABGR16161616F:
3978                 *max_upscale = plane_cap->max_upscale_factor.fp16;
3979                 *min_downscale = plane_cap->max_downscale_factor.fp16;
3980                 break;
3981
3982         default:
3983                 *max_upscale = plane_cap->max_upscale_factor.argb8888;
3984                 *min_downscale = plane_cap->max_downscale_factor.argb8888;
3985                 break;
3986         }
3987
3988         /*
3989          * A factor of 1 in the plane_cap means to not allow scaling, i.e. use a
3990          * scaling factor of 1.0 == 1000 units.
3991          */
3992         if (*max_upscale == 1)
3993                 *max_upscale = 1000;
3994
3995         if (*min_downscale == 1)
3996                 *min_downscale = 1000;
3997 }
3998
3999
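/*
 * Scaling factors below are in units of 0.001: for example, a 1920-wide
 * source shown at 960 pixels gives scale_w = 960 * 1000 / 1920 = 500,
 * which passes validation only if the plane caps allow at least a 2:1
 * downscale (min_downscale <= 500).
 */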
4000 static int fill_dc_scaling_info(const struct drm_plane_state *state,
4001                                 struct dc_scaling_info *scaling_info)
4002 {
4003         int scale_w, scale_h, min_downscale, max_upscale;
4004
4005         memset(scaling_info, 0, sizeof(*scaling_info));
4006
4007         /* Source is fixed 16.16 but we ignore the fractional part for now... */
4008         scaling_info->src_rect.x = state->src_x >> 16;
4009         scaling_info->src_rect.y = state->src_y >> 16;
4010
4011         /*
4012          * For reasons we don't (yet) fully understand, a non-zero
4013          * src_y coordinate into an NV12 buffer can cause a
4014          * system hang. To avoid hangs (and maybe be overly cautious)
4015          * let's reject both non-zero src_x and src_y.
4016          *
4017          * We currently know of only one use-case to reproduce a
4018          * scenario with non-zero src_x and src_y for NV12, which
4019          * is to gesture the YouTube Android app into full screen
4020          * on ChromeOS.
4021          */
4022         if (state->fb &&
4023             state->fb->format->format == DRM_FORMAT_NV12 &&
4024             (scaling_info->src_rect.x != 0 ||
4025              scaling_info->src_rect.y != 0))
4026                 return -EINVAL;
4027
4028         scaling_info->src_rect.width = state->src_w >> 16;
4029         if (scaling_info->src_rect.width == 0)
4030                 return -EINVAL;
4031
4032         scaling_info->src_rect.height = state->src_h >> 16;
4033         if (scaling_info->src_rect.height == 0)
4034                 return -EINVAL;
4035
4036         scaling_info->dst_rect.x = state->crtc_x;
4037         scaling_info->dst_rect.y = state->crtc_y;
4038
4039         if (state->crtc_w == 0)
4040                 return -EINVAL;
4041
4042         scaling_info->dst_rect.width = state->crtc_w;
4043
4044         if (state->crtc_h == 0)
4045                 return -EINVAL;
4046
4047         scaling_info->dst_rect.height = state->crtc_h;
4048
4049         /* DRM doesn't specify clipping on destination output. */
4050         scaling_info->clip_rect = scaling_info->dst_rect;
4051
4052         /* Validate scaling per-format with DC plane caps */
4053         if (state->plane && state->plane->dev && state->fb) {
4054                 get_min_max_dc_plane_scaling(state->plane->dev, state->fb,
4055                                              &min_downscale, &max_upscale);
4056         } else {
4057                 min_downscale = 250;
4058                 max_upscale = 16000;
4059         }
4060
4061         scale_w = scaling_info->dst_rect.width * 1000 /
4062                   scaling_info->src_rect.width;
4063
4064         if (scale_w < min_downscale || scale_w > max_upscale)
4065                 return -EINVAL;
4066
4067         scale_h = scaling_info->dst_rect.height * 1000 /
4068                   scaling_info->src_rect.height;
4069
4070         if (scale_h < min_downscale || scale_h > max_upscale)
4071                 return -EINVAL;
4072
4073         /*
4074          * The "scaling_quality" can be ignored for now, quality = 0 has DC
4075          * assume reasonable defaults based on the format.
4076          */
4077
4078         return 0;
4079 }
4080
4081 static void
4082 fill_gfx8_tiling_info_from_flags(union dc_tiling_info *tiling_info,
4083                                  uint64_t tiling_flags)
4084 {
4085         /* Fill GFX8 params */
4086         if (AMDGPU_TILING_GET(tiling_flags, ARRAY_MODE) == DC_ARRAY_2D_TILED_THIN1) {
4087                 unsigned int bankw, bankh, mtaspect, tile_split, num_banks;
4088
4089                 bankw = AMDGPU_TILING_GET(tiling_flags, BANK_WIDTH);
4090                 bankh = AMDGPU_TILING_GET(tiling_flags, BANK_HEIGHT);
4091                 mtaspect = AMDGPU_TILING_GET(tiling_flags, MACRO_TILE_ASPECT);
4092                 tile_split = AMDGPU_TILING_GET(tiling_flags, TILE_SPLIT);
4093                 num_banks = AMDGPU_TILING_GET(tiling_flags, NUM_BANKS);
4094
4095                 /* XXX fix me for VI */
4096                 tiling_info->gfx8.num_banks = num_banks;
4097                 tiling_info->gfx8.array_mode =
4098                                 DC_ARRAY_2D_TILED_THIN1;
4099                 tiling_info->gfx8.tile_split = tile_split;
4100                 tiling_info->gfx8.bank_width = bankw;
4101                 tiling_info->gfx8.bank_height = bankh;
4102                 tiling_info->gfx8.tile_aspect = mtaspect;
4103                 tiling_info->gfx8.tile_mode =
4104                                 DC_ADDR_SURF_MICRO_TILING_DISPLAY;
4105         } else if (AMDGPU_TILING_GET(tiling_flags, ARRAY_MODE)
4106                         == DC_ARRAY_1D_TILED_THIN1) {
4107                 tiling_info->gfx8.array_mode = DC_ARRAY_1D_TILED_THIN1;
4108         }
4109
4110         tiling_info->gfx8.pipe_config =
4111                         AMDGPU_TILING_GET(tiling_flags, PIPE_CONFIG);
4112 }
4113
4114 static void
4115 fill_gfx9_tiling_info_from_device(const struct amdgpu_device *adev,
4116                                   union dc_tiling_info *tiling_info)
4117 {
4118         tiling_info->gfx9.num_pipes =
4119                 adev->gfx.config.gb_addr_config_fields.num_pipes;
4120         tiling_info->gfx9.num_banks =
4121                 adev->gfx.config.gb_addr_config_fields.num_banks;
4122         tiling_info->gfx9.pipe_interleave =
4123                 adev->gfx.config.gb_addr_config_fields.pipe_interleave_size;
4124         tiling_info->gfx9.num_shader_engines =
4125                 adev->gfx.config.gb_addr_config_fields.num_se;
4126         tiling_info->gfx9.max_compressed_frags =
4127                 adev->gfx.config.gb_addr_config_fields.max_compress_frags;
4128         tiling_info->gfx9.num_rb_per_se =
4129                 adev->gfx.config.gb_addr_config_fields.num_rb_per_se;
4130         tiling_info->gfx9.shaderEnable = 1;
4131         if (adev->asic_type == CHIP_SIENNA_CICHLID ||
4132             adev->asic_type == CHIP_NAVY_FLOUNDER ||
4133             adev->asic_type == CHIP_DIMGREY_CAVEFISH ||
4134             adev->asic_type == CHIP_VANGOGH)
4135                 tiling_info->gfx9.num_pkrs = adev->gfx.config.gb_addr_config_fields.num_pkrs;
4136 }
4137
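/*
 * Ask DC whether the surface as described (format, size, swizzle and
 * scan direction) can be DCC compressed, and reject the configuration
 * if the requested DCC parameters exceed the reported capability.
 */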
4138 static int
4139 validate_dcc(struct amdgpu_device *adev,
4140              const enum surface_pixel_format format,
4141              const enum dc_rotation_angle rotation,
4142              const union dc_tiling_info *tiling_info,
4143              const struct dc_plane_dcc_param *dcc,
4144              const struct dc_plane_address *address,
4145              const struct plane_size *plane_size)
4146 {
4147         struct dc *dc = adev->dm.dc;
4148         struct dc_dcc_surface_param input;
4149         struct dc_surface_dcc_cap output;
4150
4151         memset(&input, 0, sizeof(input));
4152         memset(&output, 0, sizeof(output));
4153
4154         if (!dcc->enable)
4155                 return 0;
4156
4157         if (format >= SURFACE_PIXEL_FORMAT_VIDEO_BEGIN ||
4158             !dc->cap_funcs.get_dcc_compression_cap)
4159                 return -EINVAL;
4160
4161         input.format = format;
4162         input.surface_size.width = plane_size->surface_size.width;
4163         input.surface_size.height = plane_size->surface_size.height;
4164         input.swizzle_mode = tiling_info->gfx9.swizzle;
4165
4166         if (rotation == ROTATION_ANGLE_0 || rotation == ROTATION_ANGLE_180)
4167                 input.scan = SCAN_DIRECTION_HORIZONTAL;
4168         else if (rotation == ROTATION_ANGLE_90 || rotation == ROTATION_ANGLE_270)
4169                 input.scan = SCAN_DIRECTION_VERTICAL;
4170
4171         if (!dc->cap_funcs.get_dcc_compression_cap(dc, &input, &output))
4172                 return -EINVAL;
4173
4174         if (!output.capable)
4175                 return -EINVAL;
4176
4177         if (dcc->independent_64b_blks == 0 &&
4178             output.grph.rgb.independent_64b_blks != 0)
4179                 return -EINVAL;
4180
4181         return 0;
4182 }
4183
4184 static bool
4185 modifier_has_dcc(uint64_t modifier)
4186 {
4187         return IS_AMD_FMT_MOD(modifier) && AMD_FMT_MOD_GET(DCC, modifier);
4188 }
4189
4190 static unsigned
4191 modifier_gfx9_swizzle_mode(uint64_t modifier)
4192 {
4193         if (modifier == DRM_FORMAT_MOD_LINEAR)
4194                 return 0;
4195
4196         return AMD_FMT_MOD_GET(TILE, modifier);
4197 }
4198
4199 static const struct drm_format_info *
4200 amd_get_format_info(const struct drm_mode_fb_cmd2 *cmd)
4201 {
4202         return amdgpu_lookup_format_info(cmd->pixel_format, cmd->modifier[0]);
4203 }
4204
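     /*
      * Derive GFX9+ tiling parameters from a DRM format modifier: start
      * from the device defaults, then, for AMD modifiers, override the
      * pipe, shader-engine, packer and bank counts from the XOR-bit fields
      * encoded in the modifier itself.
      */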
4205 static void
4206 fill_gfx9_tiling_info_from_modifier(const struct amdgpu_device *adev,
4207                                     union dc_tiling_info *tiling_info,
4208                                     uint64_t modifier)
4209 {
4210         unsigned int mod_bank_xor_bits = AMD_FMT_MOD_GET(BANK_XOR_BITS, modifier);
4211         unsigned int mod_pipe_xor_bits = AMD_FMT_MOD_GET(PIPE_XOR_BITS, modifier);
4212         unsigned int pkrs_log2 = AMD_FMT_MOD_GET(PACKERS, modifier);
4213         unsigned int pipes_log2 = min(4u, mod_pipe_xor_bits);
4214
4215         fill_gfx9_tiling_info_from_device(adev, tiling_info);
4216
4217         if (!IS_AMD_FMT_MOD(modifier))
4218                 return;
4219
4220         tiling_info->gfx9.num_pipes = 1u << pipes_log2;
4221         tiling_info->gfx9.num_shader_engines = 1u << (mod_pipe_xor_bits - pipes_log2);
4222
4223         if (adev->family >= AMDGPU_FAMILY_NV) {
4224                 tiling_info->gfx9.num_pkrs = 1u << pkrs_log2;
4225         } else {
4226                 tiling_info->gfx9.num_banks = 1u << mod_bank_xor_bits;
4227
4228                 /* for DCC we know it isn't rb aligned, so rb_per_se doesn't matter. */
4229         }
4230 }
4231
4232 enum dm_micro_swizzle {
4233         MICRO_SWIZZLE_Z = 0,
4234         MICRO_SWIZZLE_S = 1,
4235         MICRO_SWIZZLE_D = 2,
4236         MICRO_SWIZZLE_R = 3
4237 };
4238
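     /*
      * Plane callback: report whether a format/modifier pair is supported
      * for scanout. LINEAR must always be accepted because core DRM checks
      * it when userspace provides no modifiers; multi-plane formats,
      * D-swizzle at unsupported bpp, and DCC on anything other than 32 bpp
      * are rejected.
      */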
4239 static bool dm_plane_format_mod_supported(struct drm_plane *plane,
4240                                           uint32_t format,
4241                                           uint64_t modifier)
4242 {
4243         struct amdgpu_device *adev = drm_to_adev(plane->dev);
4244         const struct drm_format_info *info = drm_format_info(format);
4245
4246         enum dm_micro_swizzle microtile = modifier_gfx9_swizzle_mode(modifier) & 3;
4247
4248         if (!info)
4249                 return false;
4250
4251         /*
4252          * We always have to allow this modifier, because core DRM still
4253          * checks LINEAR support if userspace does not provide modifiers.
4254          */
4255         if (modifier == DRM_FORMAT_MOD_LINEAR)
4256                 return true;
4257
4258         /*
4259          * Arbitrary tiling support for multi-plane formats has not been
4260          * hooked up yet.
4261          */
4262         if (info->num_planes > 1)
4263                 return false;
4264
4265         /*
4266          * For D swizzle the canonical modifier depends on the bpp, so check
4267          * it here.
4268          */
4269         if (AMD_FMT_MOD_GET(TILE_VERSION, modifier) == AMD_FMT_MOD_TILE_VER_GFX9 &&
4270             adev->family >= AMDGPU_FAMILY_NV) {
4271                 if (microtile == MICRO_SWIZZLE_D && info->cpp[0] == 4)
4272                         return false;
4273         }
4274
4275         if (adev->family >= AMDGPU_FAMILY_RV && microtile == MICRO_SWIZZLE_D &&
4276             info->cpp[0] < 8)
4277                 return false;
4278
4279         if (modifier_has_dcc(modifier)) {
4280                 /* Per radeonsi comments, 16/64 bpp are more complicated. */
4281                 if (info->cpp[0] != 4)
4282                         return false;
4283         }
4284
4285         return true;
4286 }
4287
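     /*
      * Append one modifier to a dynamically grown array, doubling the
      * backing allocation when it runs out of space. On allocation failure
      * the array is freed and *mods becomes NULL, which later calls treat
      * as a no-op and get_plane_modifiers() turns into -ENOMEM.
      */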
4288 static void
4289 add_modifier(uint64_t **mods, uint64_t *size, uint64_t *cap, uint64_t mod)
4290 {
4291         if (!*mods)
4292                 return;
4293
4294         if (*cap - *size < 1) {
4295                 uint64_t new_cap = *cap * 2;
4296                 uint64_t *new_mods = kmalloc(new_cap * sizeof(uint64_t), GFP_KERNEL);
4297
4298                 if (!new_mods) {
4299                         kfree(*mods);
4300                         *mods = NULL;
4301                         return;
4302                 }
4303
4304                 memcpy(new_mods, *mods, sizeof(uint64_t) * *size);
4305                 kfree(*mods);
4306                 *mods = new_mods;
4307                 *cap = new_cap;
4308         }
4309
4310         (*mods)[*size] = mod;
4311         *size += 1;
4312 }
4313
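     /*
      * Build the GFX9 (Vega/Raven) modifier list, most capable first: DCC
      * variants where supported, then plain S/D swizzles. The XOR-bit
      * fields are capped so pipe_xor_bits + bank_xor_bits never exceeds 8.
      */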
4314 static void
4315 add_gfx9_modifiers(const struct amdgpu_device *adev,
4316                    uint64_t **mods, uint64_t *size, uint64_t *capacity)
4317 {
4318         int pipes = ilog2(adev->gfx.config.gb_addr_config_fields.num_pipes);
4319         int pipe_xor_bits = min(8, pipes +
4320                                 ilog2(adev->gfx.config.gb_addr_config_fields.num_se));
4321         int bank_xor_bits = min(8 - pipe_xor_bits,
4322                                 ilog2(adev->gfx.config.gb_addr_config_fields.num_banks));
4323         int rb = ilog2(adev->gfx.config.gb_addr_config_fields.num_se) +
4324                  ilog2(adev->gfx.config.gb_addr_config_fields.num_rb_per_se);
4325
4327         if (adev->family == AMDGPU_FAMILY_RV) {
4328                 /* Raven2 and later */
4329                 bool has_constant_encode = adev->asic_type > CHIP_RAVEN || adev->external_rev_id >= 0x81;
4330
4331                 /*
4332                  * No _D DCC swizzles yet because we only allow 32bpp, which
4333                  * doesn't support _D on DCN
4334                  */
4335
4336                 if (has_constant_encode) {
4337                         add_modifier(mods, size, capacity, AMD_FMT_MOD |
4338                                     AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S_X) |
4339                                     AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9) |
4340                                     AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
4341                                     AMD_FMT_MOD_SET(BANK_XOR_BITS, bank_xor_bits) |
4342                                     AMD_FMT_MOD_SET(DCC, 1) |
4343                                     AMD_FMT_MOD_SET(DCC_INDEPENDENT_64B, 1) |
4344                                     AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_64B) |
4345                                     AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 1));
4346                 }
4347
4348                 add_modifier(mods, size, capacity, AMD_FMT_MOD |
4349                             AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S_X) |
4350                             AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9) |
4351                             AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
4352                             AMD_FMT_MOD_SET(BANK_XOR_BITS, bank_xor_bits) |
4353                             AMD_FMT_MOD_SET(DCC, 1) |
4354                             AMD_FMT_MOD_SET(DCC_INDEPENDENT_64B, 1) |
4355                             AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_64B) |
4356                             AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 0));
4357
4358                 if (has_constant_encode) {
4359                         add_modifier(mods, size, capacity, AMD_FMT_MOD |
4360                                     AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S_X) |
4361                                     AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9) |
4362                                     AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
4363                                     AMD_FMT_MOD_SET(BANK_XOR_BITS, bank_xor_bits) |
4364                                     AMD_FMT_MOD_SET(DCC, 1) |
4365                                     AMD_FMT_MOD_SET(DCC_RETILE, 1) |
4366                                     AMD_FMT_MOD_SET(DCC_INDEPENDENT_64B, 1) |
4367                                     AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_64B) |
4369                                     AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 1) |
4370                                     AMD_FMT_MOD_SET(RB, rb) |
4371                                     AMD_FMT_MOD_SET(PIPE, pipes));
4372                 }
4373
4374                 add_modifier(mods, size, capacity, AMD_FMT_MOD |
4375                             AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S_X) |
4376                             AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9) |
4377                             AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
4378                             AMD_FMT_MOD_SET(BANK_XOR_BITS, bank_xor_bits) |
4379                             AMD_FMT_MOD_SET(DCC, 1) |
4380                             AMD_FMT_MOD_SET(DCC_RETILE, 1) |
4381                             AMD_FMT_MOD_SET(DCC_INDEPENDENT_64B, 1) |
4382                             AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_64B) |
4383                             AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 0) |
4384                             AMD_FMT_MOD_SET(RB, rb) |
4385                             AMD_FMT_MOD_SET(PIPE, pipes));
4386         }
4387
4388         /*
4389          * Only supported for 64 bpp on Raven; filtered by format in
4390          * dm_plane_format_mod_supported().
4391          */
4392         add_modifier(mods, size, capacity, AMD_FMT_MOD |
4393                     AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_D_X) |
4394                     AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9) |
4395                     AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
4396                     AMD_FMT_MOD_SET(BANK_XOR_BITS, bank_xor_bits));
4397
4398         if (adev->family == AMDGPU_FAMILY_RV) {
4399                 add_modifier(mods, size, capacity, AMD_FMT_MOD |
4400                             AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S_X) |
4401                             AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9) |
4402                             AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
4403                             AMD_FMT_MOD_SET(BANK_XOR_BITS, bank_xor_bits));
4404         }
4405
4406         /*
4407          * Only supported for 64 bpp on Raven; filtered by format in
4408          * dm_plane_format_mod_supported().
4409          */
4410         add_modifier(mods, size, capacity, AMD_FMT_MOD |
4411                     AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_D) |
4412                     AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9));
4413
4414         if (adev->family == AMDGPU_FAMILY_RV) {
4415                 add_modifier(mods, size, capacity, AMD_FMT_MOD |
4416                             AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S) |
4417                             AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9));
4418         }
4419 }
4420
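     /*
      * GFX10.1 (Navi 1x) list: R_X swizzles with optional DCC plus the
      * GFX9 S/D fallbacks. Unlike GFX9 there are no bank XOR bits, only
      * pipe XOR bits.
      */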
4421 static void
4422 add_gfx10_1_modifiers(const struct amdgpu_device *adev,
4423                       uint64_t **mods, uint64_t *size, uint64_t *capacity)
4424 {
4425         int pipe_xor_bits = ilog2(adev->gfx.config.gb_addr_config_fields.num_pipes);
4426
4427         add_modifier(mods, size, capacity, AMD_FMT_MOD |
4428                     AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_R_X) |
4429                     AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10) |
4430                     AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
4431                     AMD_FMT_MOD_SET(DCC, 1) |
4432                     AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 1) |
4433                     AMD_FMT_MOD_SET(DCC_INDEPENDENT_64B, 1) |
4434                     AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_64B));
4435
4436         add_modifier(mods, size, capacity, AMD_FMT_MOD |
4437                     AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_R_X) |
4438                     AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10) |
4439                     AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
4440                     AMD_FMT_MOD_SET(DCC, 1) |
4441                     AMD_FMT_MOD_SET(DCC_RETILE, 1) |
4442                     AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 1) |
4443                     AMD_FMT_MOD_SET(DCC_INDEPENDENT_64B, 1) |
4444                     AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_64B));
4445
4446         add_modifier(mods, size, capacity, AMD_FMT_MOD |
4447                     AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_R_X) |
4448                     AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10) |
4449                     AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits));
4450
4451         add_modifier(mods, size, capacity, AMD_FMT_MOD |
4452                     AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S_X) |
4453                     AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10) |
4454                     AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits));
4455
4457         /* Only supported for 64 bpp; filtered in dm_plane_format_mod_supported(). */
4458         add_modifier(mods, size, capacity, AMD_FMT_MOD |
4459                     AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_D) |
4460                     AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9));
4461
4462         add_modifier(mods, size, capacity, AMD_FMT_MOD |
4463                     AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S) |
4464                     AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9));
4465 }
4466
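     /*
      * GFX10.3 (Sienna Cichlid and friends) list: adds the PACKERS field
      * and requires both independent 64B and 128B DCC blocks with a 128B
      * max compressed block, matching the RB+ addressing scheme.
      */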
4467 static void
4468 add_gfx10_3_modifiers(const struct amdgpu_device *adev,
4469                       uint64_t **mods, uint64_t *size, uint64_t *capacity)
4470 {
4471         int pipe_xor_bits = ilog2(adev->gfx.config.gb_addr_config_fields.num_pipes);
4472         int pkrs = ilog2(adev->gfx.config.gb_addr_config_fields.num_pkrs);
4473
4474         add_modifier(mods, size, capacity, AMD_FMT_MOD |
4475                     AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_R_X) |
4476                     AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10_RBPLUS) |
4477                     AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
4478                     AMD_FMT_MOD_SET(PACKERS, pkrs) |
4479                     AMD_FMT_MOD_SET(DCC, 1) |
4480                     AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 1) |
4481                     AMD_FMT_MOD_SET(DCC_INDEPENDENT_64B, 1) |
4482                     AMD_FMT_MOD_SET(DCC_INDEPENDENT_128B, 1) |
4483                     AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_128B));
4484
4485         add_modifier(mods, size, capacity, AMD_FMT_MOD |
4486                     AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_R_X) |
4487                     AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10_RBPLUS) |
4488                     AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
4489                     AMD_FMT_MOD_SET(PACKERS, pkrs) |
4490                     AMD_FMT_MOD_SET(DCC, 1) |
4491                     AMD_FMT_MOD_SET(DCC_RETILE, 1) |
4492                     AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 1) |
4493                     AMD_FMT_MOD_SET(DCC_INDEPENDENT_64B, 1) |
4494                     AMD_FMT_MOD_SET(DCC_INDEPENDENT_128B, 1) |
4495                     AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_128B));
4496
4497         add_modifier(mods, size, capacity, AMD_FMT_MOD |
4498                     AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_R_X) |
4499                     AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10_RBPLUS) |
4500                     AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
4501                     AMD_FMT_MOD_SET(PACKERS, pkrs));
4502
4503         add_modifier(mods, size, capacity, AMD_FMT_MOD |
4504                     AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S_X) |
4505                     AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10_RBPLUS) |
4506                     AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
4507                     AMD_FMT_MOD_SET(PACKERS, pkrs));
4508
4509         /* Only supported for 64 bpp; filtered in dm_plane_format_mod_supported(). */
4510         add_modifier(mods, size, capacity, AMD_FMT_MOD |
4511                     AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_D) |
4512                     AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9));
4513
4514         add_modifier(mods, size, capacity, AMD_FMT_MOD |
4515                     AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S) |
4516                     AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9));
4517 }
4518
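     /*
      * Produce the modifier list advertised for a plane. Cursor planes are
      * limited to LINEAR; otherwise the per-family helpers above populate
      * the list, LINEAR is appended, and DRM_FORMAT_MOD_INVALID terminates
      * it. Pre-GFX9 parts advertise nothing and keep using legacy tiling
      * flags.
      */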
4519 static int
4520 get_plane_modifiers(const struct amdgpu_device *adev, unsigned int plane_type, uint64_t **mods)
4521 {
4522         uint64_t size = 0, capacity = 128;
4523         *mods = NULL;
4524
4525         /* We have not hooked up any pre-GFX9 modifiers. */
4526         if (adev->family < AMDGPU_FAMILY_AI)
4527                 return 0;
4528
4529         *mods = kmalloc(capacity * sizeof(uint64_t), GFP_KERNEL);
4530
4531         if (plane_type == DRM_PLANE_TYPE_CURSOR) {
4532                 add_modifier(mods, &size, &capacity, DRM_FORMAT_MOD_LINEAR);
4533                 add_modifier(mods, &size, &capacity, DRM_FORMAT_MOD_INVALID);
4534                 return *mods ? 0 : -ENOMEM;
4535         }
4536
4537         switch (adev->family) {
4538         case AMDGPU_FAMILY_AI:
4539         case AMDGPU_FAMILY_RV:
4540                 add_gfx9_modifiers(adev, mods, &size, &capacity);
4541                 break;
4542         case AMDGPU_FAMILY_NV:
4543         case AMDGPU_FAMILY_VGH:
4544                 if (adev->asic_type >= CHIP_SIENNA_CICHLID)
4545                         add_gfx10_3_modifiers(adev, mods, &size, &capacity);
4546                 else
4547                         add_gfx10_1_modifiers(adev, mods, &size, &capacity);
4548                 break;
4549         }
4550
4551         add_modifier(mods, &size, &capacity, DRM_FORMAT_MOD_LINEAR);
4552
4553         /* INVALID marks the end of the list. */
4554         add_modifier(mods, &size, &capacity, DRM_FORMAT_MOD_INVALID);
4555
4556         if (!*mods)
4557                 return -ENOMEM;
4558
4559         return 0;
4560 }
4561
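     /*
      * Translate the framebuffer's modifier into DC tiling and DCC state.
      * When the modifier carries DCC and it is not force-disabled,
      * framebuffer plane 1 supplies the metadata address and pitch, and
      * the result is validated against the DC caps.
      */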
4562 static int
4563 fill_gfx9_plane_attributes_from_modifiers(struct amdgpu_device *adev,
4564                                           const struct amdgpu_framebuffer *afb,
4565                                           const enum surface_pixel_format format,
4566                                           const enum dc_rotation_angle rotation,
4567                                           const struct plane_size *plane_size,
4568                                           union dc_tiling_info *tiling_info,
4569                                           struct dc_plane_dcc_param *dcc,
4570                                           struct dc_plane_address *address,
4571                                           const bool force_disable_dcc)
4572 {
4573         const uint64_t modifier = afb->base.modifier;
4574         int ret;
4575
4576         fill_gfx9_tiling_info_from_modifier(adev, tiling_info, modifier);
4577         tiling_info->gfx9.swizzle = modifier_gfx9_swizzle_mode(modifier);
4578
4579         if (modifier_has_dcc(modifier) && !force_disable_dcc) {
4580                 uint64_t dcc_address = afb->address + afb->base.offsets[1];
4581
4582                 dcc->enable = 1;
4583                 dcc->meta_pitch = afb->base.pitches[1];
4584                 dcc->independent_64b_blks = AMD_FMT_MOD_GET(DCC_INDEPENDENT_64B, modifier);
4585
4586                 address->grph.meta_addr.low_part = lower_32_bits(dcc_address);
4587                 address->grph.meta_addr.high_part = upper_32_bits(dcc_address);
4588         }
4589
4590         ret = validate_dcc(adev, format, rotation, tiling_info, dcc, address, plane_size);
4591         if (ret)
4592                 return ret;
4593
4594         return 0;
4595 }
4596
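     /*
      * Fill surface size, pitch and scanout addresses for DC. RGB formats
      * program a single graphics address; YUV formats program separate
      * luma and chroma addresses from framebuffer planes 0 and 1.
      */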
4597 static int
4598 fill_plane_buffer_attributes(struct amdgpu_device *adev,
4599                              const struct amdgpu_framebuffer *afb,
4600                              const enum surface_pixel_format format,
4601                              const enum dc_rotation_angle rotation,
4602                              const uint64_t tiling_flags,
4603                              union dc_tiling_info *tiling_info,
4604                              struct plane_size *plane_size,
4605                              struct dc_plane_dcc_param *dcc,
4606                              struct dc_plane_address *address,
4607                              bool tmz_surface,
4608                              bool force_disable_dcc)
4609 {
4610         const struct drm_framebuffer *fb = &afb->base;
4611         int ret;
4612
4613         memset(tiling_info, 0, sizeof(*tiling_info));
4614         memset(plane_size, 0, sizeof(*plane_size));
4615         memset(dcc, 0, sizeof(*dcc));
4616         memset(address, 0, sizeof(*address));
4617
4618         address->tmz_surface = tmz_surface;
4619
4620         if (format < SURFACE_PIXEL_FORMAT_VIDEO_BEGIN) {
4621                 uint64_t addr = afb->address + fb->offsets[0];
4622
4623                 plane_size->surface_size.x = 0;
4624                 plane_size->surface_size.y = 0;
4625                 plane_size->surface_size.width = fb->width;
4626                 plane_size->surface_size.height = fb->height;
4627                 plane_size->surface_pitch =
4628                         fb->pitches[0] / fb->format->cpp[0];
4629
4630                 address->type = PLN_ADDR_TYPE_GRAPHICS;
4631                 address->grph.addr.low_part = lower_32_bits(addr);
4632                 address->grph.addr.high_part = upper_32_bits(addr);
4633         } else if (format < SURFACE_PIXEL_FORMAT_INVALID) {
4634                 uint64_t luma_addr = afb->address + fb->offsets[0];
4635                 uint64_t chroma_addr = afb->address + fb->offsets[1];
4636
4637                 plane_size->surface_size.x = 0;
4638                 plane_size->surface_size.y = 0;
4639                 plane_size->surface_size.width = fb->width;
4640                 plane_size->surface_size.height = fb->height;
4641                 plane_size->surface_pitch =
4642                         fb->pitches[0] / fb->format->cpp[0];
4643
4644                 plane_size->chroma_size.x = 0;
4645                 plane_size->chroma_size.y = 0;
4646                 /* TODO: set these based on surface format */
4647                 plane_size->chroma_size.width = fb->width / 2;
4648                 plane_size->chroma_size.height = fb->height / 2;
4649
4650                 plane_size->chroma_pitch =
4651                         fb->pitches[1] / fb->format->cpp[1];
4652
4653                 address->type = PLN_ADDR_TYPE_VIDEO_PROGRESSIVE;
4654                 address->video_progressive.luma_addr.low_part =
4655                         lower_32_bits(luma_addr);
4656                 address->video_progressive.luma_addr.high_part =
4657                         upper_32_bits(luma_addr);
4658                 address->video_progressive.chroma_addr.low_part =
4659                         lower_32_bits(chroma_addr);
4660                 address->video_progressive.chroma_addr.high_part =
4661                         upper_32_bits(chroma_addr);
4662         }
4663
4664         if (adev->family >= AMDGPU_FAMILY_AI) {
4665                 ret = fill_gfx9_plane_attributes_from_modifiers(adev, afb, format,
4666                                                                 rotation, plane_size,
4667                                                                 tiling_info, dcc,
4668                                                                 address,
4669                                                                 force_disable_dcc);
4670                 if (ret)
4671                         return ret;
4672         } else {
4673                 fill_gfx8_tiling_info_from_flags(tiling_info, tiling_flags);
4674         }
4675
4676         return 0;
4677 }
4678
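     /*
      * Derive DC blending state from the DRM plane state. Only overlay
      * planes blend here: per-pixel alpha requires the pre-multiplied
      * blend mode and an alpha-capable 32 bpp format, and the 16-bit DRM
      * plane alpha (0x0..0xffff) is narrowed to the 8-bit global alpha DC
      * expects, e.g. 0x8000 >> 8 == 0x80.
      */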
4679 static void
4680 fill_blending_from_plane_state(const struct drm_plane_state *plane_state,
4681                                bool *per_pixel_alpha, bool *global_alpha,
4682                                int *global_alpha_value)
4683 {
4684         *per_pixel_alpha = false;
4685         *global_alpha = false;
4686         *global_alpha_value = 0xff;
4687
4688         if (plane_state->plane->type != DRM_PLANE_TYPE_OVERLAY)
4689                 return;
4690
4691         if (plane_state->pixel_blend_mode == DRM_MODE_BLEND_PREMULTI) {
4692                 static const uint32_t alpha_formats[] = {
4693                         DRM_FORMAT_ARGB8888,
4694                         DRM_FORMAT_RGBA8888,
4695                         DRM_FORMAT_ABGR8888,
4696                 };
4697                 uint32_t format = plane_state->fb->format->format;
4698                 unsigned int i;
4699
4700                 for (i = 0; i < ARRAY_SIZE(alpha_formats); ++i) {
4701                         if (format == alpha_formats[i]) {
4702                                 *per_pixel_alpha = true;
4703                                 break;
4704                         }
4705                 }
4706         }
4707
4708         if (plane_state->alpha < 0xffff) {
4709                 *global_alpha = true;
4710                 *global_alpha_value = plane_state->alpha >> 8;
4711         }
4712 }
4713
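     /*
      * Map the DRM color encoding/range properties onto a DC color space.
      * RGB surfaces stay sRGB; YCbCr surfaces pick BT.601/709/2020, using
      * the limited-range variant where one exists. Limited-range BT.2020
      * is rejected.
      */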
4714 static int
4715 fill_plane_color_attributes(const struct drm_plane_state *plane_state,
4716                             const enum surface_pixel_format format,
4717                             enum dc_color_space *color_space)
4718 {
4719         bool full_range;
4720
4721         *color_space = COLOR_SPACE_SRGB;
4722
4723         /* DRM color properties only affect non-RGB formats. */
4724         if (format < SURFACE_PIXEL_FORMAT_VIDEO_BEGIN)
4725                 return 0;
4726
4727         full_range = (plane_state->color_range == DRM_COLOR_YCBCR_FULL_RANGE);
4728
4729         switch (plane_state->color_encoding) {
4730         case DRM_COLOR_YCBCR_BT601:
4731                 if (full_range)
4732                         *color_space = COLOR_SPACE_YCBCR601;
4733                 else
4734                         *color_space = COLOR_SPACE_YCBCR601_LIMITED;
4735                 break;
4736
4737         case DRM_COLOR_YCBCR_BT709:
4738                 if (full_range)
4739                         *color_space = COLOR_SPACE_YCBCR709;
4740                 else
4741                         *color_space = COLOR_SPACE_YCBCR709_LIMITED;
4742                 break;
4743
4744         case DRM_COLOR_YCBCR_BT2020:
4745                 if (full_range)
4746                         *color_space = COLOR_SPACE_2020_YCBCR;
4747                 else
4748                         return -EINVAL;
4749                 break;
4750
4751         default:
4752                 return -EINVAL;
4753         }
4754
4755         return 0;
4756 }
4757
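     /*
      * Central translation from a DRM plane state to dc_plane_info: pixel
      * format, rotation, color space, tiling/DCC and blending, plus the
      * scanout address.
      */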
4758 static int
4759 fill_dc_plane_info_and_addr(struct amdgpu_device *adev,
4760                             const struct drm_plane_state *plane_state,
4761                             const uint64_t tiling_flags,
4762                             struct dc_plane_info *plane_info,
4763                             struct dc_plane_address *address,
4764                             bool tmz_surface,
4765                             bool force_disable_dcc)
4766 {
4767         const struct drm_framebuffer *fb = plane_state->fb;
4768         const struct amdgpu_framebuffer *afb =
4769                 to_amdgpu_framebuffer(plane_state->fb);
4770         int ret;
4771
4772         memset(plane_info, 0, sizeof(*plane_info));
4773
4774         switch (fb->format->format) {
4775         case DRM_FORMAT_C8:
4776                 plane_info->format =
4777                         SURFACE_PIXEL_FORMAT_GRPH_PALETA_256_COLORS;
4778                 break;
4779         case DRM_FORMAT_RGB565:
4780                 plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_RGB565;
4781                 break;
4782         case DRM_FORMAT_XRGB8888:
4783         case DRM_FORMAT_ARGB8888:
4784                 plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ARGB8888;
4785                 break;
4786         case DRM_FORMAT_XRGB2101010:
4787         case DRM_FORMAT_ARGB2101010:
4788                 plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ARGB2101010;
4789                 break;
4790         case DRM_FORMAT_XBGR2101010:
4791         case DRM_FORMAT_ABGR2101010:
4792                 plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ABGR2101010;
4793                 break;
4794         case DRM_FORMAT_XBGR8888:
4795         case DRM_FORMAT_ABGR8888:
4796                 plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ABGR8888;
4797                 break;
4798         case DRM_FORMAT_NV21:
4799                 plane_info->format = SURFACE_PIXEL_FORMAT_VIDEO_420_YCbCr;
4800                 break;
4801         case DRM_FORMAT_NV12:
4802                 plane_info->format = SURFACE_PIXEL_FORMAT_VIDEO_420_YCrCb;
4803                 break;
4804         case DRM_FORMAT_P010:
4805                 plane_info->format = SURFACE_PIXEL_FORMAT_VIDEO_420_10bpc_YCrCb;
4806                 break;
4807         case DRM_FORMAT_XRGB16161616F:
4808         case DRM_FORMAT_ARGB16161616F:
4809                 plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ARGB16161616F;
4810                 break;
4811         case DRM_FORMAT_XBGR16161616F:
4812         case DRM_FORMAT_ABGR16161616F:
4813                 plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ABGR16161616F;
4814                 break;
4815         default:
4816                 DRM_ERROR(
4817                         "Unsupported screen format %p4cc\n",
4818                         &fb->format->format);
4819                 return -EINVAL;
4820         }
4821
4822         switch (plane_state->rotation & DRM_MODE_ROTATE_MASK) {
4823         case DRM_MODE_ROTATE_0:
4824                 plane_info->rotation = ROTATION_ANGLE_0;
4825                 break;
4826         case DRM_MODE_ROTATE_90:
4827                 plane_info->rotation = ROTATION_ANGLE_90;
4828                 break;
4829         case DRM_MODE_ROTATE_180:
4830                 plane_info->rotation = ROTATION_ANGLE_180;
4831                 break;
4832         case DRM_MODE_ROTATE_270:
4833                 plane_info->rotation = ROTATION_ANGLE_270;
4834                 break;
4835         default:
4836                 plane_info->rotation = ROTATION_ANGLE_0;
4837                 break;
4838         }
4839
4840         plane_info->visible = true;
4841         plane_info->stereo_format = PLANE_STEREO_FORMAT_NONE;
4842
4843         plane_info->layer_index = 0;
4844
4845         ret = fill_plane_color_attributes(plane_state, plane_info->format,
4846                                           &plane_info->color_space);
4847         if (ret)
4848                 return ret;
4849
4850         ret = fill_plane_buffer_attributes(adev, afb, plane_info->format,
4851                                            plane_info->rotation, tiling_flags,
4852                                            &plane_info->tiling_info,
4853                                            &plane_info->plane_size,
4854                                            &plane_info->dcc, address, tmz_surface,
4855                                            force_disable_dcc);
4856         if (ret)
4857                 return ret;
4858
4859         fill_blending_from_plane_state(
4860                 plane_state, &plane_info->per_pixel_alpha,
4861                 &plane_info->global_alpha, &plane_info->global_alpha_value);
4862
4863         return 0;
4864 }
4865
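     /*
      * Populate a dc_plane_state from DRM state: scaling rectangles first,
      * then format/tiling/address via fill_dc_plane_info_and_addr() (with
      * DCC force-disabled on Raven while suspended), and finally the input
      * transfer function for color management.
      */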
4866 static int fill_dc_plane_attributes(struct amdgpu_device *adev,
4867                                     struct dc_plane_state *dc_plane_state,
4868                                     struct drm_plane_state *plane_state,
4869                                     struct drm_crtc_state *crtc_state)
4870 {
4871         struct dm_crtc_state *dm_crtc_state = to_dm_crtc_state(crtc_state);
4872         struct amdgpu_framebuffer *afb = (struct amdgpu_framebuffer *)plane_state->fb;
4873         struct dc_scaling_info scaling_info;
4874         struct dc_plane_info plane_info;
4875         int ret;
4876         bool force_disable_dcc = false;
4877
4878         ret = fill_dc_scaling_info(plane_state, &scaling_info);
4879         if (ret)
4880                 return ret;
4881
4882         dc_plane_state->src_rect = scaling_info.src_rect;
4883         dc_plane_state->dst_rect = scaling_info.dst_rect;
4884         dc_plane_state->clip_rect = scaling_info.clip_rect;
4885         dc_plane_state->scaling_quality = scaling_info.scaling_quality;
4886
4887         force_disable_dcc = adev->asic_type == CHIP_RAVEN && adev->in_suspend;
4888         ret = fill_dc_plane_info_and_addr(adev, plane_state,
4889                                           afb->tiling_flags,
4890                                           &plane_info,
4891                                           &dc_plane_state->address,
4892                                           afb->tmz_surface,
4893                                           force_disable_dcc);
4894         if (ret)
4895                 return ret;
4896
4897         dc_plane_state->format = plane_info.format;
4898         dc_plane_state->color_space = plane_info.color_space;
4900         dc_plane_state->plane_size = plane_info.plane_size;
4901         dc_plane_state->rotation = plane_info.rotation;
4902         dc_plane_state->horizontal_mirror = plane_info.horizontal_mirror;
4903         dc_plane_state->stereo_format = plane_info.stereo_format;
4904         dc_plane_state->tiling_info = plane_info.tiling_info;
4905         dc_plane_state->visible = plane_info.visible;
4906         dc_plane_state->per_pixel_alpha = plane_info.per_pixel_alpha;
4907         dc_plane_state->global_alpha = plane_info.global_alpha;
4908         dc_plane_state->global_alpha_value = plane_info.global_alpha_value;
4909         dc_plane_state->dcc = plane_info.dcc;
4910         dc_plane_state->layer_index = plane_info.layer_index; /* always 0 */
4911         dc_plane_state->flip_int_enabled = true;
4912
4913         /*
4914          * Always set input transfer function, since plane state is refreshed
4915          * every time.
4916          */
4917         ret = amdgpu_dm_update_plane_color_mgmt(dm_crtc_state, dc_plane_state);
4918         if (ret)
4919                 return ret;
4920
4921         return 0;
4922 }
4923
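     /*
      * Compute the stream's source and destination rectangles from the
      * connector's scaling property: RMX_ASPECT/RMX_OFF shrink one
      * destination axis to preserve the source aspect ratio, RMX_CENTER
      * maps the source 1:1, and underscan borders are then carved out of
      * the result.
      */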
4924 static void update_stream_scaling_settings(const struct drm_display_mode *mode,
4925                                            const struct dm_connector_state *dm_state,
4926                                            struct dc_stream_state *stream)
4927 {
4928         enum amdgpu_rmx_type rmx_type;
4929
4930         struct rect src = { 0 }; /* viewport in composition space */
4931         struct rect dst = { 0 }; /* stream addressable area */
4932
4933         /* No mode; nothing to be done. */
4934         if (!mode)
4935                 return;
4936
4937         /* Full screen scaling by default */
4938         src.width = mode->hdisplay;
4939         src.height = mode->vdisplay;
4940         dst.width = stream->timing.h_addressable;
4941         dst.height = stream->timing.v_addressable;
4942
4943         if (dm_state) {
4944                 rmx_type = dm_state->scaling;
4945                 if (rmx_type == RMX_ASPECT || rmx_type == RMX_OFF) {
4946                         if (src.width * dst.height <
4947                                         src.height * dst.width) {
4948                                 /* height needs less upscaling/more downscaling */
4949                                 dst.width = src.width *
4950                                                 dst.height / src.height;
4951                         } else {
4952                                 /* width needs less upscaling/more downscaling */
4953                                 dst.height = src.height *
4954                                                 dst.width / src.width;
4955                         }
4956                 } else if (rmx_type == RMX_CENTER) {
4957                         dst = src;
4958                 }
4959
4960                 dst.x = (stream->timing.h_addressable - dst.width) / 2;
4961                 dst.y = (stream->timing.v_addressable - dst.height) / 2;
4962
4963                 if (dm_state->underscan_enable) {
4964                         dst.x += dm_state->underscan_hborder / 2;
4965                         dst.y += dm_state->underscan_vborder / 2;
4966                         dst.width -= dm_state->underscan_hborder;
4967                         dst.height -= dm_state->underscan_vborder;
4968                 }
4969         }
4970
4971         stream->src = src;
4972         stream->dst = dst;
4973
4974         DRM_DEBUG_KMS("Destination Rectangle x:%d  y:%d  width:%d  height:%d\n",
4975                       dst.x, dst.y, dst.width, dst.height);
4976
4977 }
4978
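     /*
      * Decide the display color depth. For YCbCr 4:2:0 the depth comes
      * from the HDMI 2.0 HF-VSDB deep-color bits; otherwise the EDID bpc
      * is used, capped by the connector's requested max bpc and rounded
      * down to an even value.
      */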
4979 static enum dc_color_depth
4980 convert_color_depth_from_display_info(const struct drm_connector *connector,
4981                                       bool is_y420, int requested_bpc)
4982 {
4983         uint8_t bpc;
4984
4985         if (is_y420) {
4986                 bpc = 8;
4987
4988                 /* Cap display bpc based on HDMI 2.0 HF-VSDB */
4989                 if (connector->display_info.hdmi.y420_dc_modes & DRM_EDID_YCBCR420_DC_48)
4990                         bpc = 16;
4991                 else if (connector->display_info.hdmi.y420_dc_modes & DRM_EDID_YCBCR420_DC_36)
4992                         bpc = 12;
4993                 else if (connector->display_info.hdmi.y420_dc_modes & DRM_EDID_YCBCR420_DC_30)
4994                         bpc = 10;
4995         } else {
4996                 bpc = (uint8_t)connector->display_info.bpc;
4997                 /* Assume 8 bpc by default if no bpc is specified. */
4998                 bpc = bpc ? bpc : 8;
4999         }
5000
5001         if (requested_bpc > 0) {
5002                 /*
5003                  * Cap display bpc based on the user requested value.
5004                  *
5005                  * The value for state->max_bpc may not be correctly updated
5006                  * depending on when the connector gets added to the state
5007                  * or if this was called outside of atomic check, so it
5008                  * can't be used directly.
5009                  */
5010                 bpc = min_t(u8, bpc, requested_bpc);
5011
5012                 /* Round down to the nearest even number. */
5013                 bpc = bpc - (bpc & 1);
5014         }
5015
5016         switch (bpc) {
5017         case 0:
5018                 /*
5019                  * Temporary workaround: DRM doesn't parse color depth for
5020                  * EDID revisions before 1.4.
5021                  * TODO: Fix EDID parsing.
5022                  */
5023                 return COLOR_DEPTH_888;
5024         case 6:
5025                 return COLOR_DEPTH_666;
5026         case 8:
5027                 return COLOR_DEPTH_888;
5028         case 10:
5029                 return COLOR_DEPTH_101010;
5030         case 12:
5031                 return COLOR_DEPTH_121212;
5032         case 14:
5033                 return COLOR_DEPTH_141414;
5034         case 16:
5035                 return COLOR_DEPTH_161616;
5036         default:
5037                 return COLOR_DEPTH_UNDEFINED;
5038         }
5039 }
5040
5041 static enum dc_aspect_ratio
5042 get_aspect_ratio(const struct drm_display_mode *mode_in)
5043 {
5044         /* 1-1 mapping, since both enums follow the HDMI spec. */
5045         return (enum dc_aspect_ratio) mode_in->picture_aspect_ratio;
5046 }
5047
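     /*
      * Pick the stream output color space from the timing: YCbCr
      * encodings use BT.709 above 27.03 MHz and BT.601 below, with the
      * limited-range variants for Y-only signals; RGB stays sRGB.
      */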
5048 static enum dc_color_space
5049 get_output_color_space(const struct dc_crtc_timing *dc_crtc_timing)
5050 {
5051         enum dc_color_space color_space = COLOR_SPACE_SRGB;
5052
5053         switch (dc_crtc_timing->pixel_encoding) {
5054         case PIXEL_ENCODING_YCBCR422:
5055         case PIXEL_ENCODING_YCBCR444:
5056         case PIXEL_ENCODING_YCBCR420:
5057         {
5058                 /*
5059                  * 27.03 MHz is the separation point between HDTV and SDTV
5060                  * according to the HDMI spec, so use YCbCr709 above it and
5061                  * YCbCr601 below it.
5062                  */
5063                 if (dc_crtc_timing->pix_clk_100hz > 270300) {
5064                         if (dc_crtc_timing->flags.Y_ONLY)
5065                                 color_space =
5066                                         COLOR_SPACE_YCBCR709_LIMITED;
5067                         else
5068                                 color_space = COLOR_SPACE_YCBCR709;
5069                 } else {
5070                         if (dc_crtc_timing->flags.Y_ONLY)
5071                                 color_space =
5072                                         COLOR_SPACE_YCBCR601_LIMITED;
5073                         else
5074                                 color_space = COLOR_SPACE_YCBCR601;
5075                 }
5076
5077         }
5078         break;
5079         case PIXEL_ENCODING_RGB:
5080                 color_space = COLOR_SPACE_SRGB;
5081                 break;
5082
5083         default:
5084                 WARN_ON(1);
5085                 break;
5086         }
5087
5088         return color_space;
5089 }
5090
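     /*
      * Walk the colour depth down until the resulting TMDS character rate
      * fits the sink's max_tmds_clock. The rate scales with depth, e.g. a
      * 594 MHz 4k@60 mode becomes 594 * 30 / 24 = 742.5 MHz at 10 bpc,
      * which a 600 MHz sink cannot carry, so 8 bpc is chosen instead.
      */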
5091 static bool adjust_colour_depth_from_display_info(
5092         struct dc_crtc_timing *timing_out,
5093         const struct drm_display_info *info)
5094 {
5095         enum dc_color_depth depth = timing_out->display_color_depth;
5096         int normalized_clk;
5097         do {
5098                 normalized_clk = timing_out->pix_clk_100hz / 10;
5099                 /* YCbCr 4:2:0 requires additional adjustment of 1/2 */
5100                 if (timing_out->pixel_encoding == PIXEL_ENCODING_YCBCR420)
5101                         normalized_clk /= 2;
5102                 /* Adjust the pixel clock per the HDMI spec for the given colour depth. */
5103                 switch (depth) {
5104                 case COLOR_DEPTH_888:
5105                         break;
5106                 case COLOR_DEPTH_101010:
5107                         normalized_clk = (normalized_clk * 30) / 24;
5108                         break;
5109                 case COLOR_DEPTH_121212:
5110                         normalized_clk = (normalized_clk * 36) / 24;
5111                         break;
5112                 case COLOR_DEPTH_161616:
5113                         normalized_clk = (normalized_clk * 48) / 24;
5114                         break;
5115                 default:
5116                         /* The above depths are the only ones valid for HDMI. */
5117                         return false;
5118                 }
5119                 if (normalized_clk <= info->max_tmds_clock) {
5120                         timing_out->display_color_depth = depth;
5121                         return true;
5122                 }
5123         } while (--depth > COLOR_DEPTH_666);
5124         return false;
5125 }
5126
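     /*
      * Convert a drm_display_mode plus connector capabilities into DC
      * stream timing: pixel encoding (forcing YCbCr 4:2:0 where the mode
      * or sink demands it), color depth, sync polarity, VIC codes and the
      * addressable/total timing values.
      */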
5127 static void fill_stream_properties_from_drm_display_mode(
5128         struct dc_stream_state *stream,
5129         const struct drm_display_mode *mode_in,
5130         const struct drm_connector *connector,
5131         const struct drm_connector_state *connector_state,
5132         const struct dc_stream_state *old_stream,
5133         int requested_bpc)
5134 {
5135         struct dc_crtc_timing *timing_out = &stream->timing;
5136         const struct drm_display_info *info = &connector->display_info;
5137         struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
5138         struct hdmi_vendor_infoframe hv_frame;
5139         struct hdmi_avi_infoframe avi_frame;
5140
5141         memset(&hv_frame, 0, sizeof(hv_frame));
5142         memset(&avi_frame, 0, sizeof(avi_frame));
5143
5144         timing_out->h_border_left = 0;
5145         timing_out->h_border_right = 0;
5146         timing_out->v_border_top = 0;
5147         timing_out->v_border_bottom = 0;
5148         /* TODO: un-hardcode */
5149         if (drm_mode_is_420_only(info, mode_in)
5150                         && stream->signal == SIGNAL_TYPE_HDMI_TYPE_A)
5151                 timing_out->pixel_encoding = PIXEL_ENCODING_YCBCR420;
5152         else if (drm_mode_is_420_also(info, mode_in)
5153                         && aconnector->force_yuv420_output)
5154                 timing_out->pixel_encoding = PIXEL_ENCODING_YCBCR420;
5155         else if ((connector->display_info.color_formats & DRM_COLOR_FORMAT_YCRCB444)
5156                         && stream->signal == SIGNAL_TYPE_HDMI_TYPE_A)
5157                 timing_out->pixel_encoding = PIXEL_ENCODING_YCBCR444;
5158         else
5159                 timing_out->pixel_encoding = PIXEL_ENCODING_RGB;
5160
5161         timing_out->timing_3d_format = TIMING_3D_FORMAT_NONE;
5162         timing_out->display_color_depth = convert_color_depth_from_display_info(
5163                 connector,
5164                 (timing_out->pixel_encoding == PIXEL_ENCODING_YCBCR420),
5165                 requested_bpc);
5166         timing_out->scan_type = SCANNING_TYPE_NODATA;
5167         timing_out->hdmi_vic = 0;
5168
5169         if (old_stream) {
5170                 timing_out->vic = old_stream->timing.vic;
5171                 timing_out->flags.HSYNC_POSITIVE_POLARITY = old_stream->timing.flags.HSYNC_POSITIVE_POLARITY;
5172                 timing_out->flags.VSYNC_POSITIVE_POLARITY = old_stream->timing.flags.VSYNC_POSITIVE_POLARITY;
5173         } else {
5174                 timing_out->vic = drm_match_cea_mode(mode_in);
5175                 if (mode_in->flags & DRM_MODE_FLAG_PHSYNC)
5176                         timing_out->flags.HSYNC_POSITIVE_POLARITY = 1;
5177                 if (mode_in->flags & DRM_MODE_FLAG_PVSYNC)
5178                         timing_out->flags.VSYNC_POSITIVE_POLARITY = 1;
5179         }
5180
5181         if (stream->signal == SIGNAL_TYPE_HDMI_TYPE_A) {
5182                 drm_hdmi_avi_infoframe_from_display_mode(&avi_frame, (struct drm_connector *)connector, mode_in);
5183                 timing_out->vic = avi_frame.video_code;
5184                 drm_hdmi_vendor_infoframe_from_display_mode(&hv_frame, (struct drm_connector *)connector, mode_in);
5185                 timing_out->hdmi_vic = hv_frame.vic;
5186         }
5187
5188         if (is_freesync_video_mode(mode_in, aconnector)) {
5189                 timing_out->h_addressable = mode_in->hdisplay;
5190                 timing_out->h_total = mode_in->htotal;
5191                 timing_out->h_sync_width = mode_in->hsync_end - mode_in->hsync_start;
5192                 timing_out->h_front_porch = mode_in->hsync_start - mode_in->hdisplay;
5193                 timing_out->v_total = mode_in->vtotal;
5194                 timing_out->v_addressable = mode_in->vdisplay;
5195                 timing_out->v_front_porch = mode_in->vsync_start - mode_in->vdisplay;
5196                 timing_out->v_sync_width = mode_in->vsync_end - mode_in->vsync_start;
5197                 timing_out->pix_clk_100hz = mode_in->clock * 10;
5198         } else {
5199                 timing_out->h_addressable = mode_in->crtc_hdisplay;
5200                 timing_out->h_total = mode_in->crtc_htotal;
5201                 timing_out->h_sync_width = mode_in->crtc_hsync_end - mode_in->crtc_hsync_start;
5202                 timing_out->h_front_porch = mode_in->crtc_hsync_start - mode_in->crtc_hdisplay;
5203                 timing_out->v_total = mode_in->crtc_vtotal;
5204                 timing_out->v_addressable = mode_in->crtc_vdisplay;
5205                 timing_out->v_front_porch = mode_in->crtc_vsync_start - mode_in->crtc_vdisplay;
5206                 timing_out->v_sync_width = mode_in->crtc_vsync_end - mode_in->crtc_vsync_start;
5207                 timing_out->pix_clk_100hz = mode_in->crtc_clock * 10;
5208         }
5209
5210         timing_out->aspect_ratio = get_aspect_ratio(mode_in);
5211
5212         stream->output_color_space = get_output_color_space(timing_out);
5213
5214         stream->out_transfer_func->type = TF_TYPE_PREDEFINED;
5215         stream->out_transfer_func->tf = TRANSFER_FUNCTION_SRGB;
5216         if (stream->signal == SIGNAL_TYPE_HDMI_TYPE_A) {
5217                 if (!adjust_colour_depth_from_display_info(timing_out, info) &&
5218                     drm_mode_is_420_also(info, mode_in) &&
5219                     timing_out->pixel_encoding != PIXEL_ENCODING_YCBCR420) {
5220                         timing_out->pixel_encoding = PIXEL_ENCODING_YCBCR420;
5221                         adjust_colour_depth_from_display_info(timing_out, info);
5222                 }
5223         }
5224 }
5225
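     /*
      * Copy the audio capabilities parsed from the EDID into DC's
      * audio_info; per-mode data is only present for CEA revision 3+
      * extensions.
      */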
5226 static void fill_audio_info(struct audio_info *audio_info,
5227                             const struct drm_connector *drm_connector,
5228                             const struct dc_sink *dc_sink)
5229 {
5230         int i = 0;
5231         int cea_revision = 0;
5232         const struct dc_edid_caps *edid_caps = &dc_sink->edid_caps;
5233
5234         audio_info->manufacture_id = edid_caps->manufacturer_id;
5235         audio_info->product_id = edid_caps->product_id;
5236
5237         cea_revision = drm_connector->display_info.cea_rev;
5238
5239         strscpy(audio_info->display_name,
5240                 edid_caps->display_name,
5241                 AUDIO_INFO_DISPLAY_NAME_SIZE_IN_CHARS);
5242
5243         if (cea_revision >= 3) {
5244                 audio_info->mode_count = edid_caps->audio_mode_count;
5245
5246                 for (i = 0; i < audio_info->mode_count; ++i) {
5247                         audio_info->modes[i].format_code =
5248                                         (enum audio_format_code)
5249                                         (edid_caps->audio_modes[i].format_code);
5250                         audio_info->modes[i].channel_count =
5251                                         edid_caps->audio_modes[i].channel_count;
5252                         audio_info->modes[i].sample_rates.all =
5253                                         edid_caps->audio_modes[i].sample_rate;
5254                         audio_info->modes[i].sample_size =
5255                                         edid_caps->audio_modes[i].sample_size;
5256                 }
5257         }
5258
5259         audio_info->flags.all = edid_caps->speaker_flags;
5260
5261         /* TODO: We only check the progressive mode; check the interlaced mode too. */
5262         if (drm_connector->latency_present[0]) {
5263                 audio_info->video_latency = drm_connector->video_latency[0];
5264                 audio_info->audio_latency = drm_connector->audio_latency[0];
5265         }
5266
5267         /* TODO: For DP, video and audio latency should be calculated from DPCD caps */
5268
5269 }
5270
5271 static void
5272 copy_crtc_timing_for_drm_display_mode(const struct drm_display_mode *src_mode,
5273                                       struct drm_display_mode *dst_mode)
5274 {
5275         dst_mode->crtc_hdisplay = src_mode->crtc_hdisplay;
5276         dst_mode->crtc_vdisplay = src_mode->crtc_vdisplay;
5277         dst_mode->crtc_clock = src_mode->crtc_clock;
5278         dst_mode->crtc_hblank_start = src_mode->crtc_hblank_start;
5279         dst_mode->crtc_hblank_end = src_mode->crtc_hblank_end;
5280         dst_mode->crtc_hsync_start =  src_mode->crtc_hsync_start;
5281         dst_mode->crtc_hsync_end = src_mode->crtc_hsync_end;
5282         dst_mode->crtc_htotal = src_mode->crtc_htotal;
5283         dst_mode->crtc_hskew = src_mode->crtc_hskew;
5284         dst_mode->crtc_vblank_start = src_mode->crtc_vblank_start;
5285         dst_mode->crtc_vblank_end = src_mode->crtc_vblank_end;
5286         dst_mode->crtc_vsync_start = src_mode->crtc_vsync_start;
5287         dst_mode->crtc_vsync_end = src_mode->crtc_vsync_end;
5288         dst_mode->crtc_vtotal = src_mode->crtc_vtotal;
5289 }
5290
5291 static void
5292 decide_crtc_timing_for_drm_display_mode(struct drm_display_mode *drm_mode,
5293                                         const struct drm_display_mode *native_mode,
5294                                         bool scale_enabled)
5295 {
5296         if (scale_enabled) {
5297                 copy_crtc_timing_for_drm_display_mode(native_mode, drm_mode);
5298         } else if (native_mode->clock == drm_mode->clock &&
5299                         native_mode->htotal == drm_mode->htotal &&
5300                         native_mode->vtotal == drm_mode->vtotal) {
5301                 copy_crtc_timing_for_drm_display_mode(native_mode, drm_mode);
5302         } else {
5303                 /* Neither scaling nor an amdgpu-inserted mode; nothing to patch. */
5304         }
5305 }
5306
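     /*
      * Create a virtual sink on the link so a stream can still be
      * constructed when no real sink has been detected.
      */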
5307 static struct dc_sink *
5308 create_fake_sink(struct amdgpu_dm_connector *aconnector)
5309 {
5310         struct dc_sink_init_data sink_init_data = { 0 };
5311         struct dc_sink *sink = NULL;
5312         sink_init_data.link = aconnector->dc_link;
5313         sink_init_data.sink_signal = aconnector->dc_link->connector_signal;
5314
5315         sink = dc_sink_create(&sink_init_data);
5316         if (!sink) {
5317                 DRM_ERROR("Failed to create sink!\n");
5318                 return NULL;
5319         }
5320         sink->sink_signal = SIGNAL_TYPE_VIRTUAL;
5321
5322         return sink;
5323 }
5324
5325 static void set_multisync_trigger_params(
5326                 struct dc_stream_state *stream)
5327 {
5328         struct dc_stream_state *master = NULL;
5329
5330         if (stream->triggered_crtc_reset.enabled) {
5331                 master = stream->triggered_crtc_reset.event_source;
5332                 stream->triggered_crtc_reset.event =
5333                         master->timing.flags.VSYNC_POSITIVE_POLARITY ?
5334                         CRTC_EVENT_VSYNC_RISING : CRTC_EVENT_VSYNC_FALLING;
5335                 stream->triggered_crtc_reset.delay = TRIGGER_DELAY_NEXT_PIXEL;
5336         }
5337 }
5338
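     /*
      * Elect the stream with the highest refresh rate as the multisync
      * master. Refresh is recovered from the timing as
      * pix_clk_100hz * 100 / (h_total * v_total); for 1080p60 that is
      * 1485000 * 100 / (2200 * 1125) = 60.
      */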
5339 static void set_master_stream(struct dc_stream_state *stream_set[],
5340                               int stream_count)
5341 {
5342         int j, highest_rfr = 0, master_stream = 0;
5343
5344         for (j = 0;  j < stream_count; j++) {
5345                 if (stream_set[j] && stream_set[j]->triggered_crtc_reset.enabled) {
5346                         int refresh_rate = 0;
5347
5348                         refresh_rate = (stream_set[j]->timing.pix_clk_100hz * 100) /
5349                                 (stream_set[j]->timing.h_total * stream_set[j]->timing.v_total);
5350                         if (refresh_rate > highest_rfr) {
5351                                 highest_rfr = refresh_rate;
5352                                 master_stream = j;
5353                         }
5354                 }
5355         }
5356         for (j = 0;  j < stream_count; j++) {
5357                 if (stream_set[j])
5358                         stream_set[j]->triggered_crtc_reset.event_source = stream_set[master_stream];
5359         }
5360 }
5361
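     /*
      * With two or more streams in the context, elect a master and program
      * each stream's CRTC-reset trigger so their frame starts line up.
      */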
5362 static void dm_enable_per_frame_crtc_master_sync(struct dc_state *context)
5363 {
5364         int i = 0;
5365         struct dc_stream_state *stream;
5366
5367         if (context->stream_count < 2)
5368                 return;
5369         for (i = 0; i < context->stream_count ; i++) {
5370                 if (!context->streams[i])
5371                         continue;
5372                 /*
5373                  * TODO: add a function to read AMD VSDB bits and set the
5374                  * crtc_sync_master.multi_sync_enabled flag.
5375                  * For now it is left false.
5376                  */
5377         }
5378
5379         set_master_stream(context->streams, context->stream_count);
5380
5381         for (i = 0; i < context->stream_count ; i++) {
5382                 stream = context->streams[i];
5383
5384                 if (!stream)
5385                         continue;
5386
5387                 set_multisync_trigger_params(stream);
5388         }
5389 }
5390
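     /*
      * Find the mode used as the FreeSync video base: the preferred mode's
      * resolution at its highest advertised refresh rate. The result is
      * cached in aconnector->freesync_vid_base so later lookups stay
      * stable even when the mode list is regenerated.
      */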
5391 static struct drm_display_mode *
5392 get_highest_refresh_rate_mode(struct amdgpu_dm_connector *aconnector,
5393                           bool use_probed_modes)
5394 {
5395         struct drm_display_mode *m, *m_pref = NULL;
5396         u16 current_refresh, highest_refresh;
5397         struct list_head *list_head = use_probed_modes ?
5398                                                     &aconnector->base.probed_modes :
5399                                                     &aconnector->base.modes;
5400
5401         if (aconnector->freesync_vid_base.clock != 0)
5402                 return &aconnector->freesync_vid_base;
5403
5404         /* Find the preferred mode */
5405         list_for_each_entry(m, list_head, head) {
5406                 if (m->type & DRM_MODE_TYPE_PREFERRED) {
5407                         m_pref = m;
5408                         break;
5409                 }
5410         }
5411
5412         if (!m_pref) {
5413                 /* Probably an EDID with no preferred mode. Fall back to the first entry */
5414                 m_pref = list_first_entry_or_null(
5415                         &aconnector->base.modes, struct drm_display_mode, head);
5416                 if (!m_pref) {
5417                         DRM_DEBUG_DRIVER("No preferred mode found in EDID\n");
5418                         return NULL;
5419                 }
5420         }
5421
5422         highest_refresh = drm_mode_vrefresh(m_pref);
5423
5424         /*
5425          * Find the mode with highest refresh rate with same resolution.
5426          * For some monitors, preferred mode is not the mode with highest
5427          * supported refresh rate.
5428          */
5429         list_for_each_entry(m, list_head, head) {
5430                 current_refresh  = drm_mode_vrefresh(m);
5431
5432                 if (m->hdisplay == m_pref->hdisplay &&
5433                     m->vdisplay == m_pref->vdisplay &&
5434                     highest_refresh < current_refresh) {
5435                         highest_refresh = current_refresh;
5436                         m_pref = m;
5437                 }
5438         }
5439
5440         aconnector->freesync_vid_base = *m_pref;
5441         return m_pref;
5442 }
5443
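/*
 * A mode qualifies as a freesync video mode when it only stretches the
 * vertical blanking of the highest-refresh base mode: same pixel clock and
 * horizontal timings, with vsync_start/vsync_end shifted by exactly the
 * vtotal difference.
 */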
5444 static bool is_freesync_video_mode(const struct drm_display_mode *mode,
5445                                    struct amdgpu_dm_connector *aconnector)
5446 {
5447         struct drm_display_mode *high_mode;
5448         int timing_diff;
5449
5450         high_mode = get_highest_refresh_rate_mode(aconnector, false);
5451         if (!high_mode || !mode)
5452                 return false;
5453
5454         timing_diff = high_mode->vtotal - mode->vtotal;
5455
5456         if (high_mode->clock == 0 || high_mode->clock != mode->clock ||
5457             high_mode->hdisplay != mode->hdisplay ||
5458             high_mode->vdisplay != mode->vdisplay ||
5459             high_mode->hsync_start != mode->hsync_start ||
5460             high_mode->hsync_end != mode->hsync_end ||
5461             high_mode->htotal != mode->htotal ||
5462             high_mode->hskew != mode->hskew ||
5463             high_mode->vscan != mode->vscan ||
5464             high_mode->vsync_start - mode->vsync_start != timing_diff ||
5465             high_mode->vsync_end - mode->vsync_end != timing_diff)
5466                 return false;
5467         else
5468                 return true;
5469 }
5470
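/*
 * Build a dc_stream_state for the given connector and mode. If the
 * connector has no dc_sink (e.g. forced on while disconnected), a fake
 * virtual sink is created so a stream can still be constructed. DSC is
 * enabled afterwards when the DP sink advertises support and the link
 * bandwidth allows it, or when forced through debugfs.
 */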
5471 static struct dc_stream_state *
5472 create_stream_for_sink(struct amdgpu_dm_connector *aconnector,
5473                        const struct drm_display_mode *drm_mode,
5474                        const struct dm_connector_state *dm_state,
5475                        const struct dc_stream_state *old_stream,
5476                        int requested_bpc)
5477 {
5478         struct drm_display_mode *preferred_mode = NULL;
5479         struct drm_connector *drm_connector;
5480         const struct drm_connector_state *con_state =
5481                 dm_state ? &dm_state->base : NULL;
5482         struct dc_stream_state *stream = NULL;
5483         struct drm_display_mode mode = *drm_mode;
5484         struct drm_display_mode saved_mode;
5485         struct drm_display_mode *freesync_mode = NULL;
5486         bool native_mode_found = false;
5487         bool recalculate_timing = dm_state ? (dm_state->scaling != RMX_OFF) : false;
5488         int mode_refresh;
5489         int preferred_refresh = 0;
5490 #if defined(CONFIG_DRM_AMD_DC_DCN)
5491         struct dsc_dec_dpcd_caps dsc_caps;
5492         uint32_t link_bandwidth_kbps;
5493 #endif
5494         struct dc_sink *sink = NULL;
5495
5496         memset(&saved_mode, 0, sizeof(saved_mode));
5497
5498         if (aconnector == NULL) {
5499                 DRM_ERROR("aconnector is NULL!\n");
5500                 return stream;
5501         }
5502
5503         drm_connector = &aconnector->base;
5504
5505         if (!aconnector->dc_sink) {
5506                 sink = create_fake_sink(aconnector);
5507                 if (!sink)
5508                         return stream;
5509         } else {
5510                 sink = aconnector->dc_sink;
5511                 dc_sink_retain(sink);
5512         }
5513
5514         stream = dc_create_stream_for_sink(sink);
5515
5516         if (stream == NULL) {
5517                 DRM_ERROR("Failed to create stream for sink!\n");
5518                 goto finish;
5519         }
5520
5521         stream->dm_stream_context = aconnector;
5522
5523         stream->timing.flags.LTE_340MCSC_SCRAMBLE =
5524                 drm_connector->display_info.hdmi.scdc.scrambling.low_rates;
5525
5526         list_for_each_entry(preferred_mode, &aconnector->base.modes, head) {
5527                 /* Search for preferred mode */
5528                 if (preferred_mode->type & DRM_MODE_TYPE_PREFERRED) {
5529                         native_mode_found = true;
5530                         break;
5531                 }
5532         }
5533         if (!native_mode_found)
5534                 preferred_mode = list_first_entry_or_null(
5535                                 &aconnector->base.modes,
5536                                 struct drm_display_mode,
5537                                 head);
5538
5539         mode_refresh = drm_mode_vrefresh(&mode);
5540
5541         if (preferred_mode == NULL) {
5542                 /*
5543                  * This may not be an error: the use case is when we have no
5544                  * usermode calls to reset and set mode upon hotplug. In this
5545                  * case, we call set mode ourselves to restore the previous mode,
5546                  * and the mode list may not be filled in yet.
5547                  */
5548                 DRM_DEBUG_DRIVER("No preferred mode found\n");
5549         } else {
5550                 recalculate_timing |= amdgpu_freesync_vid_mode &&
5551                                  is_freesync_video_mode(&mode, aconnector);
5552                 if (recalculate_timing) {
5553                         freesync_mode = get_highest_refresh_rate_mode(aconnector, false);
5554                         saved_mode = mode;
5555                         mode = *freesync_mode;
5556                 } else {
5557                         decide_crtc_timing_for_drm_display_mode(
5558                                 &mode, preferred_mode,
5559                                 dm_state ? (dm_state->scaling != RMX_OFF) : false);
5560                 }
5561
5562                 preferred_refresh = drm_mode_vrefresh(preferred_mode);
5563         }
5564
5565         if (recalculate_timing)
5566                 drm_mode_set_crtcinfo(&saved_mode, 0);
5567         else if (!dm_state)
5568                 drm_mode_set_crtcinfo(&mode, 0);
5569
5570         /*
5571          * If scaling is enabled and the refresh rate didn't change,
5572          * copy the VIC and polarities from the old timings.
5573          */
5574         if (!recalculate_timing || mode_refresh != preferred_refresh)
5575                 fill_stream_properties_from_drm_display_mode(
5576                         stream, &mode, &aconnector->base, con_state, NULL,
5577                         requested_bpc);
5578         else
5579                 fill_stream_properties_from_drm_display_mode(
5580                         stream, &mode, &aconnector->base, con_state, old_stream,
5581                         requested_bpc);
5582
5583         stream->timing.flags.DSC = 0;
5584
5585         if (aconnector->dc_link && sink->sink_signal == SIGNAL_TYPE_DISPLAY_PORT) {
5586 #if defined(CONFIG_DRM_AMD_DC_DCN)
5587                 dc_dsc_parse_dsc_dpcd(aconnector->dc_link->ctx->dc,
5588                                       aconnector->dc_link->dpcd_caps.dsc_caps.dsc_basic_caps.raw,
5589                                       aconnector->dc_link->dpcd_caps.dsc_caps.dsc_branch_decoder_caps.raw,
5590                                       &dsc_caps);
5591                 link_bandwidth_kbps = dc_link_bandwidth_kbps(aconnector->dc_link,
5592                                                              dc_link_get_link_cap(aconnector->dc_link));
5593
5594                 if (aconnector->dsc_settings.dsc_force_enable != DSC_CLK_FORCE_DISABLE && dsc_caps.is_dsc_supported) {
5595                         /* Set DSC policy according to dsc_clock_en */
5596                         dc_dsc_policy_set_enable_dsc_when_not_needed(
5597                                 aconnector->dsc_settings.dsc_force_enable == DSC_CLK_FORCE_ENABLE);
5598
5599                         if (dc_dsc_compute_config(aconnector->dc_link->ctx->dc->res_pool->dscs[0],
5600                                                   &dsc_caps,
5601                                                   aconnector->dc_link->ctx->dc->debug.dsc_min_slice_height_override,
5602                                                   0,
5603                                                   link_bandwidth_kbps,
5604                                                   &stream->timing,
5605                                                   &stream->timing.dsc_cfg))
5606                                 stream->timing.flags.DSC = 1;
5607                         /* Overwrite the stream flag if DSC is enabled through debugfs */
5608                         if (aconnector->dsc_settings.dsc_force_enable == DSC_CLK_FORCE_ENABLE)
5609                                 stream->timing.flags.DSC = 1;
5610
5611                         if (stream->timing.flags.DSC && aconnector->dsc_settings.dsc_num_slices_h)
5612                                 stream->timing.dsc_cfg.num_slices_h = aconnector->dsc_settings.dsc_num_slices_h;
5613
5614                         if (stream->timing.flags.DSC && aconnector->dsc_settings.dsc_num_slices_v)
5615                                 stream->timing.dsc_cfg.num_slices_v = aconnector->dsc_settings.dsc_num_slices_v;
5616
5617                         if (stream->timing.flags.DSC && aconnector->dsc_settings.dsc_bits_per_pixel)
5618                                 stream->timing.dsc_cfg.bits_per_pixel = aconnector->dsc_settings.dsc_bits_per_pixel;
5619                 }
5620 #endif
5621         }
5622
5623         update_stream_scaling_settings(&mode, dm_state, stream);
5624
5625         fill_audio_info(
5626                 &stream->audio_info,
5627                 drm_connector,
5628                 sink);
5629
5630         update_stream_signal(stream, sink);
5631
5632         if (stream->signal == SIGNAL_TYPE_HDMI_TYPE_A)
5633                 mod_build_hf_vsif_infopacket(stream, &stream->vsp_infopacket);
5634
5635         if (stream->link->psr_settings.psr_feature_enabled) {
5636                 /*
5637                  * Decide whether the stream supports VSC SDP colorimetry
5638                  * before building the VSC info packet.
5639                  */
5640                 stream->use_vsc_sdp_for_colorimetry = false;
5641                 if (aconnector->dc_sink->sink_signal == SIGNAL_TYPE_DISPLAY_PORT_MST) {
5642                         stream->use_vsc_sdp_for_colorimetry =
5643                                 aconnector->dc_sink->is_vsc_sdp_colorimetry_supported;
5644                 } else {
5645                         if (stream->link->dpcd_caps.dprx_feature.bits.VSC_SDP_COLORIMETRY_SUPPORTED)
5646                                 stream->use_vsc_sdp_for_colorimetry = true;
5647                 }
5648                 mod_build_vsc_infopacket(stream, &stream->vsc_infopacket);
5649         }
5650 finish:
5651         dc_sink_release(sink);
5652
5653         return stream;
5654 }
5655
5656 static void amdgpu_dm_crtc_destroy(struct drm_crtc *crtc)
5657 {
5658         drm_crtc_cleanup(crtc);
5659         kfree(crtc);
5660 }
5661
5662 static void dm_crtc_destroy_state(struct drm_crtc *crtc,
5663                                   struct drm_crtc_state *state)
5664 {
5665         struct dm_crtc_state *cur = to_dm_crtc_state(state);
5666
5667         /* TODO: Destroy dc_stream objects once the stream object is flattened */
5668         if (cur->stream)
5669                 dc_stream_release(cur->stream);
5670
5671
5672         __drm_atomic_helper_crtc_destroy_state(state);
5673
5674
5675         kfree(state);
5676 }
5677
5678 static void dm_crtc_reset_state(struct drm_crtc *crtc)
5679 {
5680         struct dm_crtc_state *state;
5681
5682         if (crtc->state)
5683                 dm_crtc_destroy_state(crtc, crtc->state);
5684
5685         state = kzalloc(sizeof(*state), GFP_KERNEL);
5686         if (WARN_ON(!state))
5687                 return;
5688
5689         __drm_atomic_helper_crtc_reset(crtc, &state->base);
5690 }
5691
5692 static struct drm_crtc_state *
5693 dm_crtc_duplicate_state(struct drm_crtc *crtc)
5694 {
5695         struct dm_crtc_state *state, *cur;
5696
5697         if (WARN_ON(!crtc->state))
5698                 return NULL;
5699 
5700         cur = to_dm_crtc_state(crtc->state);
5701
5702         state = kzalloc(sizeof(*state), GFP_KERNEL);
5703         if (!state)
5704                 return NULL;
5705
5706         __drm_atomic_helper_crtc_duplicate_state(crtc, &state->base);
5707
5708         if (cur->stream) {
5709                 state->stream = cur->stream;
5710                 dc_stream_retain(state->stream);
5711         }
5712
5713         state->active_planes = cur->active_planes;
5714         state->vrr_infopacket = cur->vrr_infopacket;
5715         state->abm_level = cur->abm_level;
5716         state->vrr_supported = cur->vrr_supported;
5717         state->freesync_config = cur->freesync_config;
5718         state->cm_has_degamma = cur->cm_has_degamma;
5719         state->cm_is_degamma_srgb = cur->cm_is_degamma_srgb;
5720         /* TODO: Duplicate dc_stream once the stream object is flattened */
5721
5722         return &state->base;
5723 }
5724
5725 #ifdef CONFIG_DRM_AMD_SECURE_DISPLAY
5726 static int amdgpu_dm_crtc_late_register(struct drm_crtc *crtc)
5727 {
5728         crtc_debugfs_init(crtc);
5729
5730         return 0;
5731 }
5732 #endif
5733
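/*
 * Enable or disable the per-OTG VUPDATE interrupt. dm_set_vblank() below
 * only requests it while VRR is active, since that is the only case where
 * vblank handling needs the extra update-position event.
 */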
5734 static inline int dm_set_vupdate_irq(struct drm_crtc *crtc, bool enable)
5735 {
5736         enum dc_irq_source irq_source;
5737         struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
5738         struct amdgpu_device *adev = drm_to_adev(crtc->dev);
5739         int rc;
5740
5741         irq_source = IRQ_TYPE_VUPDATE + acrtc->otg_inst;
5742
5743         rc = dc_interrupt_set(adev->dm.dc, irq_source, enable) ? 0 : -EBUSY;
5744
5745         DRM_DEBUG_VBL("crtc %d - vupdate irq %sabling: r=%d\n",
5746                       acrtc->crtc_id, enable ? "en" : "dis", rc);
5747         return rc;
5748 }
5749
5750 static inline int dm_set_vblank(struct drm_crtc *crtc, bool enable)
5751 {
5752         enum dc_irq_source irq_source;
5753         struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
5754         struct amdgpu_device *adev = drm_to_adev(crtc->dev);
5755         struct dm_crtc_state *acrtc_state = to_dm_crtc_state(crtc->state);
5756 #if defined(CONFIG_DRM_AMD_DC_DCN)
5757         struct amdgpu_display_manager *dm = &adev->dm;
5758         unsigned long flags;
5759 #endif
5760         int rc = 0;
5761
5762         if (enable) {
5763                 /* vblank irq on -> Only need vupdate irq in vrr mode */
5764                 if (amdgpu_dm_vrr_active(acrtc_state))
5765                         rc = dm_set_vupdate_irq(crtc, true);
5766         } else {
5767                 /* vblank irq off -> vupdate irq off */
5768                 rc = dm_set_vupdate_irq(crtc, false);
5769         }
5770
5771         if (rc)
5772                 return rc;
5773
5774         irq_source = IRQ_TYPE_VBLANK + acrtc->otg_inst;
5775
5776         if (!dc_interrupt_set(adev->dm.dc, irq_source, enable))
5777                 return -EBUSY;
5778
5779         if (amdgpu_in_reset(adev))
5780                 return 0;
5781
5782 #if defined(CONFIG_DRM_AMD_DC_DCN)
5783         spin_lock_irqsave(&dm->vblank_lock, flags);
5784         dm->vblank_workqueue->dm = dm;
5785         dm->vblank_workqueue->otg_inst = acrtc->otg_inst;
5786         dm->vblank_workqueue->enable = enable;
5787         spin_unlock_irqrestore(&dm->vblank_lock, flags);
5788         schedule_work(&dm->vblank_workqueue->mall_work);
5789 #endif
5790
5791         return 0;
5792 }
5793
5794 static int dm_enable_vblank(struct drm_crtc *crtc)
5795 {
5796         return dm_set_vblank(crtc, true);
5797 }
5798
5799 static void dm_disable_vblank(struct drm_crtc *crtc)
5800 {
5801         dm_set_vblank(crtc, false);
5802 }
5803
5804 /* Only the options currently available for the driver are implemented */
5805 static const struct drm_crtc_funcs amdgpu_dm_crtc_funcs = {
5806         .reset = dm_crtc_reset_state,
5807         .destroy = amdgpu_dm_crtc_destroy,
5808         .set_config = drm_atomic_helper_set_config,
5809         .page_flip = drm_atomic_helper_page_flip,
5810         .atomic_duplicate_state = dm_crtc_duplicate_state,
5811         .atomic_destroy_state = dm_crtc_destroy_state,
5812         .set_crc_source = amdgpu_dm_crtc_set_crc_source,
5813         .verify_crc_source = amdgpu_dm_crtc_verify_crc_source,
5814         .get_crc_sources = amdgpu_dm_crtc_get_crc_sources,
5815         .get_vblank_counter = amdgpu_get_vblank_counter_kms,
5816         .enable_vblank = dm_enable_vblank,
5817         .disable_vblank = dm_disable_vblank,
5818         .get_vblank_timestamp = drm_crtc_vblank_helper_get_vblank_timestamp,
5819 #if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
5820         .late_register = amdgpu_dm_crtc_late_register,
5821 #endif
5822 };
5823
5824 static enum drm_connector_status
5825 amdgpu_dm_connector_detect(struct drm_connector *connector, bool force)
5826 {
5827         bool connected;
5828         struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
5829
5830         /*
5831          * Notes:
5832          * 1. This interface is NOT called in context of HPD irq.
5833          * 2. This interface *is called* in context of user-mode ioctl. Which
5834          * makes it a bad place for *any* MST-related activity.
5835          */
5836
5837         if (aconnector->base.force == DRM_FORCE_UNSPECIFIED &&
5838             !aconnector->fake_enable)
5839                 connected = (aconnector->dc_sink != NULL);
5840         else
5841                 connected = (aconnector->base.force == DRM_FORCE_ON);
5842
5843         update_subconnector_property(aconnector);
5844
5845         return (connected ? connector_status_connected :
5846                         connector_status_disconnected);
5847 }
5848
5849 int amdgpu_dm_connector_atomic_set_property(struct drm_connector *connector,
5850                                             struct drm_connector_state *connector_state,
5851                                             struct drm_property *property,
5852                                             uint64_t val)
5853 {
5854         struct drm_device *dev = connector->dev;
5855         struct amdgpu_device *adev = drm_to_adev(dev);
5856         struct dm_connector_state *dm_old_state =
5857                 to_dm_connector_state(connector->state);
5858         struct dm_connector_state *dm_new_state =
5859                 to_dm_connector_state(connector_state);
5860
5861         int ret = -EINVAL;
5862
5863         if (property == dev->mode_config.scaling_mode_property) {
5864                 enum amdgpu_rmx_type rmx_type;
5865
5866                 switch (val) {
5867                 case DRM_MODE_SCALE_CENTER:
5868                         rmx_type = RMX_CENTER;
5869                         break;
5870                 case DRM_MODE_SCALE_ASPECT:
5871                         rmx_type = RMX_ASPECT;
5872                         break;
5873                 case DRM_MODE_SCALE_FULLSCREEN:
5874                         rmx_type = RMX_FULL;
5875                         break;
5876                 case DRM_MODE_SCALE_NONE:
5877                 default:
5878                         rmx_type = RMX_OFF;
5879                         break;
5880                 }
5881
5882                 if (dm_old_state->scaling == rmx_type)
5883                         return 0;
5884
5885                 dm_new_state->scaling = rmx_type;
5886                 ret = 0;
5887         } else if (property == adev->mode_info.underscan_hborder_property) {
5888                 dm_new_state->underscan_hborder = val;
5889                 ret = 0;
5890         } else if (property == adev->mode_info.underscan_vborder_property) {
5891                 dm_new_state->underscan_vborder = val;
5892                 ret = 0;
5893         } else if (property == adev->mode_info.underscan_property) {
5894                 dm_new_state->underscan_enable = val;
5895                 ret = 0;
5896         } else if (property == adev->mode_info.abm_level_property) {
5897                 dm_new_state->abm_level = val;
5898                 ret = 0;
5899         }
5900
5901         return ret;
5902 }
5903
5904 int amdgpu_dm_connector_atomic_get_property(struct drm_connector *connector,
5905                                             const struct drm_connector_state *state,
5906                                             struct drm_property *property,
5907                                             uint64_t *val)
5908 {
5909         struct drm_device *dev = connector->dev;
5910         struct amdgpu_device *adev = drm_to_adev(dev);
5911         struct dm_connector_state *dm_state =
5912                 to_dm_connector_state(state);
5913         int ret = -EINVAL;
5914
5915         if (property == dev->mode_config.scaling_mode_property) {
5916                 switch (dm_state->scaling) {
5917                 case RMX_CENTER:
5918                         *val = DRM_MODE_SCALE_CENTER;
5919                         break;
5920                 case RMX_ASPECT:
5921                         *val = DRM_MODE_SCALE_ASPECT;
5922                         break;
5923                 case RMX_FULL:
5924                         *val = DRM_MODE_SCALE_FULLSCREEN;
5925                         break;
5926                 case RMX_OFF:
5927                 default:
5928                         *val = DRM_MODE_SCALE_NONE;
5929                         break;
5930                 }
5931                 ret = 0;
5932         } else if (property == adev->mode_info.underscan_hborder_property) {
5933                 *val = dm_state->underscan_hborder;
5934                 ret = 0;
5935         } else if (property == adev->mode_info.underscan_vborder_property) {
5936                 *val = dm_state->underscan_vborder;
5937                 ret = 0;
5938         } else if (property == adev->mode_info.underscan_property) {
5939                 *val = dm_state->underscan_enable;
5940                 ret = 0;
5941         } else if (property == adev->mode_info.abm_level_property) {
5942                 *val = dm_state->abm_level;
5943                 ret = 0;
5944         }
5945
5946         return ret;
5947 }
5948
5949 static void amdgpu_dm_connector_unregister(struct drm_connector *connector)
5950 {
5951         struct amdgpu_dm_connector *amdgpu_dm_connector = to_amdgpu_dm_connector(connector);
5952
5953         drm_dp_aux_unregister(&amdgpu_dm_connector->dm_dp_aux.aux);
5954 }
5955
5956 static void amdgpu_dm_connector_destroy(struct drm_connector *connector)
5957 {
5958         struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
5959         const struct dc_link *link = aconnector->dc_link;
5960         struct amdgpu_device *adev = drm_to_adev(connector->dev);
5961         struct amdgpu_display_manager *dm = &adev->dm;
5962
5963         /*
5964          * Call only if mst_mgr was initialized before, since it's not done
5965          * for all connector types.
5966          */
5967         if (aconnector->mst_mgr.dev)
5968                 drm_dp_mst_topology_mgr_destroy(&aconnector->mst_mgr);
5969
5970 #if defined(CONFIG_BACKLIGHT_CLASS_DEVICE) ||\
5971         defined(CONFIG_BACKLIGHT_CLASS_DEVICE_MODULE)
5972
5973         if ((link->connector_signal & (SIGNAL_TYPE_EDP | SIGNAL_TYPE_LVDS)) &&
5974             link->type != dc_connection_none &&
5975             dm->backlight_dev) {
5976                 backlight_device_unregister(dm->backlight_dev);
5977                 dm->backlight_dev = NULL;
5978         }
5979 #endif
5980
5981         if (aconnector->dc_em_sink)
5982                 dc_sink_release(aconnector->dc_em_sink);
5983         aconnector->dc_em_sink = NULL;
5984         if (aconnector->dc_sink)
5985                 dc_sink_release(aconnector->dc_sink);
5986         aconnector->dc_sink = NULL;
5987
5988         drm_dp_cec_unregister_connector(&aconnector->dm_dp_aux.aux);
5989         drm_connector_unregister(connector);
5990         drm_connector_cleanup(connector);
5991         if (aconnector->i2c) {
5992                 i2c_del_adapter(&aconnector->i2c->base);
5993                 kfree(aconnector->i2c);
5994         }
5995         kfree(aconnector->dm_dp_aux.aux.name);
5996
5997         kfree(connector);
5998 }
5999
6000 void amdgpu_dm_connector_funcs_reset(struct drm_connector *connector)
6001 {
6002         struct dm_connector_state *state =
6003                 to_dm_connector_state(connector->state);
6004
6005         if (connector->state)
6006                 __drm_atomic_helper_connector_destroy_state(connector->state);
6007
6008         kfree(state);
6009
6010         state = kzalloc(sizeof(*state), GFP_KERNEL);
6011
6012         if (state) {
6013                 state->scaling = RMX_OFF;
6014                 state->underscan_enable = false;
6015                 state->underscan_hborder = 0;
6016                 state->underscan_vborder = 0;
6017                 state->base.max_requested_bpc = 8;
6018                 state->vcpi_slots = 0;
6019                 state->pbn = 0;
6020                 if (connector->connector_type == DRM_MODE_CONNECTOR_eDP)
6021                         state->abm_level = amdgpu_dm_abm_level;
6022
6023                 __drm_atomic_helper_connector_reset(connector, &state->base);
6024         }
6025 }
6026
6027 struct drm_connector_state *
6028 amdgpu_dm_connector_atomic_duplicate_state(struct drm_connector *connector)
6029 {
6030         struct dm_connector_state *state =
6031                 to_dm_connector_state(connector->state);
6032
6033         struct dm_connector_state *new_state =
6034                         kmemdup(state, sizeof(*state), GFP_KERNEL);
6035
6036         if (!new_state)
6037                 return NULL;
6038
6039         __drm_atomic_helper_connector_duplicate_state(connector, &new_state->base);
6040
6041         new_state->freesync_capable = state->freesync_capable;
6042         new_state->abm_level = state->abm_level;
6043         new_state->scaling = state->scaling;
6044         new_state->underscan_enable = state->underscan_enable;
6045         new_state->underscan_hborder = state->underscan_hborder;
6046         new_state->underscan_vborder = state->underscan_vborder;
6047         new_state->vcpi_slots = state->vcpi_slots;
6048         new_state->pbn = state->pbn;
6049         return &new_state->base;
6050 }
6051
6052 static int
6053 amdgpu_dm_connector_late_register(struct drm_connector *connector)
6054 {
6055         struct amdgpu_dm_connector *amdgpu_dm_connector =
6056                 to_amdgpu_dm_connector(connector);
6057         int r;
6058
6059         if ((connector->connector_type == DRM_MODE_CONNECTOR_DisplayPort) ||
6060             (connector->connector_type == DRM_MODE_CONNECTOR_eDP)) {
6061                 amdgpu_dm_connector->dm_dp_aux.aux.dev = connector->kdev;
6062                 r = drm_dp_aux_register(&amdgpu_dm_connector->dm_dp_aux.aux);
6063                 if (r)
6064                         return r;
6065         }
6066
6067 #if defined(CONFIG_DEBUG_FS)
6068         connector_debugfs_init(amdgpu_dm_connector);
6069 #endif
6070
6071         return 0;
6072 }
6073
6074 static const struct drm_connector_funcs amdgpu_dm_connector_funcs = {
6075         .reset = amdgpu_dm_connector_funcs_reset,
6076         .detect = amdgpu_dm_connector_detect,
6077         .fill_modes = drm_helper_probe_single_connector_modes,
6078         .destroy = amdgpu_dm_connector_destroy,
6079         .atomic_duplicate_state = amdgpu_dm_connector_atomic_duplicate_state,
6080         .atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
6081         .atomic_set_property = amdgpu_dm_connector_atomic_set_property,
6082         .atomic_get_property = amdgpu_dm_connector_atomic_get_property,
6083         .late_register = amdgpu_dm_connector_late_register,
6084         .early_unregister = amdgpu_dm_connector_unregister
6085 };
6086
6087 static int get_modes(struct drm_connector *connector)
6088 {
6089         return amdgpu_dm_connector_get_modes(connector);
6090 }
6091
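/*
 * Create an emulated (virtual) sink from the connector's EDID property
 * blob. This is used when the user forces a connector on: the EDID
 * typically comes from the override/firmware blob rather than from a
 * detected display.
 */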
6092 static void create_eml_sink(struct amdgpu_dm_connector *aconnector)
6093 {
6094         struct dc_sink_init_data init_params = {
6095                         .link = aconnector->dc_link,
6096                         .sink_signal = SIGNAL_TYPE_VIRTUAL
6097         };
6098         struct edid *edid;
6099
6100         if (!aconnector->base.edid_blob_ptr) {
6101                 DRM_ERROR("No EDID firmware found on connector: %s, forcing to OFF!\n",
6102                                 aconnector->base.name);
6103
6104                 aconnector->base.force = DRM_FORCE_OFF;
6105                 aconnector->base.override_edid = false;
6106                 return;
6107         }
6108
6109         edid = (struct edid *) aconnector->base.edid_blob_ptr->data;
6110
6111         aconnector->edid = edid;
6112
6113         aconnector->dc_em_sink = dc_link_add_remote_sink(
6114                 aconnector->dc_link,
6115                 (uint8_t *)edid,
6116                 (edid->extensions + 1) * EDID_LENGTH,
6117                 &init_params);
6118
6119         if (aconnector->base.force == DRM_FORCE_ON) {
6120                 aconnector->dc_sink = aconnector->dc_link->local_sink ?
6121                 aconnector->dc_link->local_sink :
6122                 aconnector->dc_em_sink;
6123                 dc_sink_retain(aconnector->dc_sink);
6124         }
6125 }
6126
6127 static void handle_edid_mgmt(struct amdgpu_dm_connector *aconnector)
6128 {
6129         struct dc_link *link = (struct dc_link *)aconnector->dc_link;
6130
6131         /*
6132          * In case of a headless boot with force on for a DP managed connector,
6133          * those settings have to be != 0 to get an initial modeset.
6134          */
6135         if (link->connector_signal == SIGNAL_TYPE_DISPLAY_PORT) {
6136                 link->verified_link_cap.lane_count = LANE_COUNT_FOUR;
6137                 link->verified_link_cap.link_rate = LINK_RATE_HIGH2;
6138         }
6139
6140
6141         aconnector->base.override_edid = true;
6142         create_eml_sink(aconnector);
6143 }
6144
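/*
 * Create a stream and have DC validate it, stepping the requested color
 * depth down (e.g. 10 -> 8 -> 6 bpc) until validation passes. If the
 * encoder still rejects the mode, retry once more with YCbCr420 forced,
 * which needs less bandwidth than RGB/YCbCr444 at the same timing.
 */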
6145 static struct dc_stream_state *
6146 create_validate_stream_for_sink(struct amdgpu_dm_connector *aconnector,
6147                                 const struct drm_display_mode *drm_mode,
6148                                 const struct dm_connector_state *dm_state,
6149                                 const struct dc_stream_state *old_stream)
6150 {
6151         struct drm_connector *connector = &aconnector->base;
6152         struct amdgpu_device *adev = drm_to_adev(connector->dev);
6153         struct dc_stream_state *stream;
6154         const struct drm_connector_state *drm_state = dm_state ? &dm_state->base : NULL;
6155         int requested_bpc = drm_state ? drm_state->max_requested_bpc : 8;
6156         enum dc_status dc_result = DC_OK;
6157
6158         do {
6159                 stream = create_stream_for_sink(aconnector, drm_mode,
6160                                                 dm_state, old_stream,
6161                                                 requested_bpc);
6162                 if (stream == NULL) {
6163                         DRM_ERROR("Failed to create stream for sink!\n");
6164                         break;
6165                 }
6166
6167                 dc_result = dc_validate_stream(adev->dm.dc, stream);
6168
6169                 if (dc_result != DC_OK) {
6170                         DRM_DEBUG_KMS("Mode %dx%d (clk %d) failed DC validation with error %d (%s)\n",
6171                                       drm_mode->hdisplay,
6172                                       drm_mode->vdisplay,
6173                                       drm_mode->clock,
6174                                       dc_result,
6175                                       dc_status_to_str(dc_result));
6176
6177                         dc_stream_release(stream);
6178                         stream = NULL;
6179                         requested_bpc -= 2; /* lower bpc to retry validation */
6180                 }
6181
6182         } while (stream == NULL && requested_bpc >= 6);
6183
6184         if (dc_result == DC_FAIL_ENC_VALIDATE && !aconnector->force_yuv420_output) {
6185                 DRM_DEBUG_KMS("Retry forcing YCbCr420 encoding\n");
6186
6187                 aconnector->force_yuv420_output = true;
6188                 stream = create_validate_stream_for_sink(aconnector, drm_mode,
6189                                                 dm_state, old_stream);
6190                 aconnector->force_yuv420_output = false;
6191         }
6192
6193         return stream;
6194 }
6195
6196 enum drm_mode_status amdgpu_dm_connector_mode_valid(struct drm_connector *connector,
6197                                    struct drm_display_mode *mode)
6198 {
6199         int result = MODE_ERROR;
6200         struct dc_sink *dc_sink;
6201         /* TODO: Unhardcode stream count */
6202         struct dc_stream_state *stream;
6203         struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
6204
6205         if ((mode->flags & DRM_MODE_FLAG_INTERLACE) ||
6206                         (mode->flags & DRM_MODE_FLAG_DBLSCAN))
6207                 return result;
6208
6209         /*
6210          * Only run this the first time mode_valid is called to initialize
6211          * EDID mgmt
6212          */
6213         if (aconnector->base.force != DRM_FORCE_UNSPECIFIED &&
6214                 !aconnector->dc_em_sink)
6215                 handle_edid_mgmt(aconnector);
6216
6217         dc_sink = to_amdgpu_dm_connector(connector)->dc_sink;
6218
6219         if (dc_sink == NULL && aconnector->base.force != DRM_FORCE_ON_DIGITAL &&
6220                                 aconnector->base.force != DRM_FORCE_ON) {
6221                 DRM_ERROR("dc_sink is NULL!\n");
6222                 goto fail;
6223         }
6224
6225         stream = create_validate_stream_for_sink(aconnector, mode, NULL, NULL);
6226         if (stream) {
6227                 dc_stream_release(stream);
6228                 result = MODE_OK;
6229         }
6230
6231 fail:
6232         /* TODO: error handling */
6233         return result;
6234 }
6235
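/*
 * Pack the connector's HDR output metadata into a DC info packet: a
 * 30-byte HDMI Dynamic Range and Mastering (DRM) infoframe (4-byte header,
 * type 0x87, plus 26 bytes of static metadata), re-wrapped with the SDP
 * header layout for DP/eDP sinks.
 */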
6236 static int fill_hdr_info_packet(const struct drm_connector_state *state,
6237                                 struct dc_info_packet *out)
6238 {
6239         struct hdmi_drm_infoframe frame;
6240         unsigned char buf[30]; /* 26 + 4 */
6241         ssize_t len;
6242         int ret, i;
6243
6244         memset(out, 0, sizeof(*out));
6245
6246         if (!state->hdr_output_metadata)
6247                 return 0;
6248
6249         ret = drm_hdmi_infoframe_set_hdr_metadata(&frame, state);
6250         if (ret)
6251                 return ret;
6252
6253         len = hdmi_drm_infoframe_pack_only(&frame, buf, sizeof(buf));
6254         if (len < 0)
6255                 return (int)len;
6256
6257         /* Static metadata is a fixed 26 bytes + 4 byte header. */
6258         if (len != 30)
6259                 return -EINVAL;
6260
6261         /* Prepare the infopacket for DC. */
6262         switch (state->connector->connector_type) {
6263         case DRM_MODE_CONNECTOR_HDMIA:
6264                 out->hb0 = 0x87; /* type */
6265                 out->hb1 = 0x01; /* version */
6266                 out->hb2 = 0x1A; /* length */
6267                 out->sb[0] = buf[3]; /* checksum */
6268                 i = 1;
6269                 break;
6270
6271         case DRM_MODE_CONNECTOR_DisplayPort:
6272         case DRM_MODE_CONNECTOR_eDP:
6273                 out->hb0 = 0x00; /* sdp id, zero */
6274                 out->hb1 = 0x87; /* type */
6275                 out->hb2 = 0x1D; /* payload len - 1 */
6276                 out->hb3 = (0x13 << 2); /* sdp version */
6277                 out->sb[0] = 0x01; /* version */
6278                 out->sb[1] = 0x1A; /* length */
6279                 i = 2;
6280                 break;
6281
6282         default:
6283                 return -EINVAL;
6284         }
6285
6286         memcpy(&out->sb[i], &buf[4], 26);
6287         out->valid = true;
6288
6289         print_hex_dump(KERN_DEBUG, "HDR SB:", DUMP_PREFIX_NONE, 16, 1, out->sb,
6290                        sizeof(out->sb), false);
6291
6292         return 0;
6293 }
6294
6295 static bool
6296 is_hdr_metadata_different(const struct drm_connector_state *old_state,
6297                           const struct drm_connector_state *new_state)
6298 {
6299         struct drm_property_blob *old_blob = old_state->hdr_output_metadata;
6300         struct drm_property_blob *new_blob = new_state->hdr_output_metadata;
6301
6302         if (old_blob != new_blob) {
6303                 if (old_blob && new_blob &&
6304                     old_blob->length == new_blob->length)
6305                         return memcmp(old_blob->data, new_blob->data,
6306                                       old_blob->length);
6307
6308                 return true;
6309         }
6310
6311         return false;
6312 }
6313
6314 static int
6315 amdgpu_dm_connector_atomic_check(struct drm_connector *conn,
6316                                  struct drm_atomic_state *state)
6317 {
6318         struct drm_connector_state *new_con_state =
6319                 drm_atomic_get_new_connector_state(state, conn);
6320         struct drm_connector_state *old_con_state =
6321                 drm_atomic_get_old_connector_state(state, conn);
6322         struct drm_crtc *crtc = new_con_state->crtc;
6323         struct drm_crtc_state *new_crtc_state;
6324         int ret;
6325
6326         trace_amdgpu_dm_connector_atomic_check(new_con_state);
6327
6328         if (!crtc)
6329                 return 0;
6330
6331         if (is_hdr_metadata_different(old_con_state, new_con_state)) {
6332                 struct dc_info_packet hdr_infopacket;
6333
6334                 ret = fill_hdr_info_packet(new_con_state, &hdr_infopacket);
6335                 if (ret)
6336                         return ret;
6337
6338                 new_crtc_state = drm_atomic_get_crtc_state(state, crtc);
6339                 if (IS_ERR(new_crtc_state))
6340                         return PTR_ERR(new_crtc_state);
6341
6342                 /*
6343                  * DC considers the stream backends changed if the
6344                  * static metadata changes. Forcing the modeset also
6345                  * gives a simple way for userspace to switch from
6346                  * 8bpc to 10bpc when setting the metadata to enter
6347                  * or exit HDR.
6348                  *
6349                  * Changing the static metadata after it's been
6350                  * set is permissible, however. So only force a
6351                  * modeset if we're entering or exiting HDR.
6352                  */
6353                 new_crtc_state->mode_changed =
6354                         !old_con_state->hdr_output_metadata ||
6355                         !new_con_state->hdr_output_metadata;
6356         }
6357
6358         return 0;
6359 }
6360
6361 static const struct drm_connector_helper_funcs
6362 amdgpu_dm_connector_helper_funcs = {
6363         /*
6364          * If a second, bigger display is hotplugged in FB console mode, its
6365          * bigger resolution modes are filtered out by drm_mode_validate_size()
6366          * and go missing after the user starts lightdm. So renew the modes list
6367          * in the get_modes callback instead of just returning the modes count.
6368          */
6369         .get_modes = get_modes,
6370         .mode_valid = amdgpu_dm_connector_mode_valid,
6371         .atomic_check = amdgpu_dm_connector_atomic_check,
6372 };
6373
6374 static void dm_crtc_helper_disable(struct drm_crtc *crtc)
6375 {
6376 }
6377
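/*
 * Count the non-cursor planes that will be enabled on the CRTC after this
 * commit. Planes that are not part of the atomic state are assumed to keep
 * their current (previously validated, enabled) configuration.
 */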
6378 static int count_crtc_active_planes(struct drm_crtc_state *new_crtc_state)
6379 {
6380         struct drm_atomic_state *state = new_crtc_state->state;
6381         struct drm_plane *plane;
6382         int num_active = 0;
6383
6384         drm_for_each_plane_mask(plane, state->dev, new_crtc_state->plane_mask) {
6385                 struct drm_plane_state *new_plane_state;
6386
6387                 /* Cursor planes are "fake". */
6388                 if (plane->type == DRM_PLANE_TYPE_CURSOR)
6389                         continue;
6390
6391                 new_plane_state = drm_atomic_get_new_plane_state(state, plane);
6392
6393                 if (!new_plane_state) {
6394                         /*
6395                          * The plane is enabled on the CRTC and hasn't changed
6396                          * state. This means that it previously passed
6397                          * validation and is therefore enabled.
6398                          */
6399                         num_active += 1;
6400                         continue;
6401                 }
6402
6403                 /* We need a framebuffer to be considered enabled. */
6404                 num_active += (new_plane_state->fb != NULL);
6405         }
6406
6407         return num_active;
6408 }
6409
6410 static void dm_update_crtc_active_planes(struct drm_crtc *crtc,
6411                                          struct drm_crtc_state *new_crtc_state)
6412 {
6413         struct dm_crtc_state *dm_new_crtc_state =
6414                 to_dm_crtc_state(new_crtc_state);
6415
6416         dm_new_crtc_state->active_planes = 0;
6417
6418         if (!dm_new_crtc_state->stream)
6419                 return;
6420
6421         dm_new_crtc_state->active_planes =
6422                 count_crtc_active_planes(new_crtc_state);
6423 }
6424
6425 static int dm_crtc_helper_atomic_check(struct drm_crtc *crtc,
6426                                        struct drm_atomic_state *state)
6427 {
6428         struct drm_crtc_state *crtc_state = drm_atomic_get_new_crtc_state(state,
6429                                                                           crtc);
6430         struct amdgpu_device *adev = drm_to_adev(crtc->dev);
6431         struct dc *dc = adev->dm.dc;
6432         struct dm_crtc_state *dm_crtc_state = to_dm_crtc_state(crtc_state);
6433         int ret = -EINVAL;
6434
6435         trace_amdgpu_dm_crtc_atomic_check(crtc_state);
6436
6437         dm_update_crtc_active_planes(crtc, crtc_state);
6438
6439         if (unlikely(!dm_crtc_state->stream &&
6440                      modeset_required(crtc_state, NULL, dm_crtc_state->stream))) {
6441                 WARN_ON(1);
6442                 return ret;
6443         }
6444
6445         /*
6446          * We require the primary plane to be enabled whenever the CRTC is, otherwise
6447          * drm_mode_cursor_universal may end up trying to enable the cursor plane while all other
6448          * planes are disabled, which is not supported by the hardware. And there is legacy
6449          * userspace which stops using the HW cursor altogether in response to the resulting EINVAL.
6450          */
6451         if (crtc_state->enable &&
6452             !(crtc_state->plane_mask & drm_plane_mask(crtc->primary))) {
6453                 DRM_DEBUG_ATOMIC("Can't enable a CRTC without enabling the primary plane\n");
6454                 return -EINVAL;
6455         }
6456
6457         /* In some use cases, like reset, no stream is attached */
6458         if (!dm_crtc_state->stream)
6459                 return 0;
6460
6461         if (dc_validate_stream(dc, dm_crtc_state->stream) == DC_OK)
6462                 return 0;
6463
6464         DRM_DEBUG_ATOMIC("Failed DC stream validation\n");
6465         return ret;
6466 }
6467
6468 static bool dm_crtc_helper_mode_fixup(struct drm_crtc *crtc,
6469                                       const struct drm_display_mode *mode,
6470                                       struct drm_display_mode *adjusted_mode)
6471 {
6472         return true;
6473 }
6474
6475 static const struct drm_crtc_helper_funcs amdgpu_dm_crtc_helper_funcs = {
6476         .disable = dm_crtc_helper_disable,
6477         .atomic_check = dm_crtc_helper_atomic_check,
6478         .mode_fixup = dm_crtc_helper_mode_fixup,
6479         .get_scanout_position = amdgpu_crtc_get_scanout_position,
6480 };
6481
6482 static void dm_encoder_helper_disable(struct drm_encoder *encoder)
6483 {
6484
6485 }
6486
6487 static int convert_dc_color_depth_into_bpc(enum dc_color_depth display_color_depth)
6488 {
6489         switch (display_color_depth) {
6490         case COLOR_DEPTH_666:
6491                 return 6;
6492         case COLOR_DEPTH_888:
6493                 return 8;
6494         case COLOR_DEPTH_101010:
6495                 return 10;
6496         case COLOR_DEPTH_121212:
6497                 return 12;
6498         case COLOR_DEPTH_141414:
6499                 return 14;
6500         case COLOR_DEPTH_161616:
6501                 return 16;
6502         default:
6503                 break;
6504         }
6505         return 0;
6506 }
6507
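/*
 * For MST connectors, translate the mode's bandwidth needs into PBN
 * (Payload Bandwidth Number) units from the pixel clock and effective bpp,
 * then atomically reserve the matching number of VCPI time slots on the
 * topology manager. A negative slot count is propagated as the error code.
 */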
6508 static int dm_encoder_helper_atomic_check(struct drm_encoder *encoder,
6509                                           struct drm_crtc_state *crtc_state,
6510                                           struct drm_connector_state *conn_state)
6511 {
6512         struct drm_atomic_state *state = crtc_state->state;
6513         struct drm_connector *connector = conn_state->connector;
6514         struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
6515         struct dm_connector_state *dm_new_connector_state = to_dm_connector_state(conn_state);
6516         const struct drm_display_mode *adjusted_mode = &crtc_state->adjusted_mode;
6517         struct drm_dp_mst_topology_mgr *mst_mgr;
6518         struct drm_dp_mst_port *mst_port;
6519         enum dc_color_depth color_depth;
6520         int clock, bpp = 0;
6521         bool is_y420 = false;
6522
6523         if (!aconnector->port || !aconnector->dc_sink)
6524                 return 0;
6525
6526         mst_port = aconnector->port;
6527         mst_mgr = &aconnector->mst_port->mst_mgr;
6528
6529         if (!crtc_state->connectors_changed && !crtc_state->mode_changed)
6530                 return 0;
6531
6532         if (!state->duplicated) {
6533                 int max_bpc = conn_state->max_requested_bpc;
6534                 is_y420 = drm_mode_is_420_also(&connector->display_info, adjusted_mode) &&
6535                                 aconnector->force_yuv420_output;
6536                 color_depth = convert_color_depth_from_display_info(connector,
6537                                                                     is_y420,
6538                                                                     max_bpc);
6539                 bpp = convert_dc_color_depth_into_bpc(color_depth) * 3;
6540                 clock = adjusted_mode->clock;
6541                 dm_new_connector_state->pbn = drm_dp_calc_pbn_mode(clock, bpp, false);
6542         }
6543         dm_new_connector_state->vcpi_slots = drm_dp_atomic_find_vcpi_slots(state,
6544                                                                            mst_mgr,
6545                                                                            mst_port,
6546                                                                            dm_new_connector_state->pbn,
6547                                                                            dm_mst_get_pbn_divider(aconnector->dc_link));
6548         if (dm_new_connector_state->vcpi_slots < 0) {
6549                 DRM_DEBUG_ATOMIC("failed finding vcpi slots: %d\n", (int)dm_new_connector_state->vcpi_slots);
6550                 return dm_new_connector_state->vcpi_slots;
6551         }
6552         return 0;
6553 }
6554
6555 const struct drm_encoder_helper_funcs amdgpu_dm_encoder_helper_funcs = {
6556         .disable = dm_encoder_helper_disable,
6557         .atomic_check = dm_encoder_helper_atomic_check
6558 };
6559
6560 #if defined(CONFIG_DRM_AMD_DC_DCN)
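/*
 * When DSC is enabled on an MST stream, the PBN must be recomputed from
 * the compressed bits-per-pixel instead of the value cached at
 * atomic_check time; streams without DSC keep their previously computed
 * PBN.
 */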
6561 static int dm_update_mst_vcpi_slots_for_dsc(struct drm_atomic_state *state,
6562                                             struct dc_state *dc_state)
6563 {
6564         struct dc_stream_state *stream = NULL;
6565         struct drm_connector *connector;
6566         struct drm_connector_state *new_con_state, *old_con_state;
6567         struct amdgpu_dm_connector *aconnector;
6568         struct dm_connector_state *dm_conn_state;
6569         int i, j, clock, bpp;
6570         int vcpi, pbn_div, pbn = 0;
6571
6572         for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) {
6573
6574                 aconnector = to_amdgpu_dm_connector(connector);
6575
6576                 if (!aconnector->port)
6577                         continue;
6578
6579                 if (!new_con_state || !new_con_state->crtc)
6580                         continue;
6581
6582                 dm_conn_state = to_dm_connector_state(new_con_state);
6583
6584                 for (j = 0; j < dc_state->stream_count; j++) {
6585                         stream = dc_state->streams[j];
6586                         if (!stream)
6587                                 continue;
6588
6589                         if ((struct amdgpu_dm_connector *)stream->dm_stream_context == aconnector)
6590                                 break;
6591
6592                         stream = NULL;
6593                 }
6594
6595                 if (!stream)
6596                         continue;
6597
6598                 if (stream->timing.flags.DSC != 1) {
6599                         drm_dp_mst_atomic_enable_dsc(state,
6600                                                      aconnector->port,
6601                                                      dm_conn_state->pbn,
6602                                                      0,
6603                                                      false);
6604                         continue;
6605                 }
6606
6607                 pbn_div = dm_mst_get_pbn_divider(stream->link);
6608                 bpp = stream->timing.dsc_cfg.bits_per_pixel;
6609                 clock = stream->timing.pix_clk_100hz / 10;
6610                 pbn = drm_dp_calc_pbn_mode(clock, bpp, true);
6611                 vcpi = drm_dp_mst_atomic_enable_dsc(state,
6612                                                     aconnector->port,
6613                                                     pbn, pbn_div,
6614                                                     true);
6615                 if (vcpi < 0)
6616                         return vcpi;
6617
6618                 dm_conn_state->pbn = pbn;
6619                 dm_conn_state->vcpi_slots = vcpi;
6620         }
6621         return 0;
6622 }
6623 #endif
6624
6625 static void dm_drm_plane_reset(struct drm_plane *plane)
6626 {
6627         struct dm_plane_state *amdgpu_state = NULL;
6628
6629         if (plane->state)
6630                 plane->funcs->atomic_destroy_state(plane, plane->state);
6631
6632         amdgpu_state = kzalloc(sizeof(*amdgpu_state), GFP_KERNEL);
6633         WARN_ON(amdgpu_state == NULL);
6634
6635         if (amdgpu_state)
6636                 __drm_atomic_helper_plane_reset(plane, &amdgpu_state->base);
6637 }
6638
6639 static struct drm_plane_state *
6640 dm_drm_plane_duplicate_state(struct drm_plane *plane)
6641 {
6642         struct dm_plane_state *dm_plane_state, *old_dm_plane_state;
6643
6644         old_dm_plane_state = to_dm_plane_state(plane->state);
6645         dm_plane_state = kzalloc(sizeof(*dm_plane_state), GFP_KERNEL);
6646         if (!dm_plane_state)
6647                 return NULL;
6648
6649         __drm_atomic_helper_plane_duplicate_state(plane, &dm_plane_state->base);
6650
6651         if (old_dm_plane_state->dc_state) {
6652                 dm_plane_state->dc_state = old_dm_plane_state->dc_state;
6653                 dc_plane_state_retain(dm_plane_state->dc_state);
6654         }
6655
6656         return &dm_plane_state->base;
6657 }
6658
6659 static void dm_drm_plane_destroy_state(struct drm_plane *plane,
6660                                 struct drm_plane_state *state)
6661 {
6662         struct dm_plane_state *dm_plane_state = to_dm_plane_state(state);
6663
6664         if (dm_plane_state->dc_state)
6665                 dc_plane_state_release(dm_plane_state->dc_state);
6666
6667         drm_atomic_helper_plane_destroy_state(plane, state);
6668 }
6669
6670 static const struct drm_plane_funcs dm_plane_funcs = {
6671         .update_plane   = drm_atomic_helper_update_plane,
6672         .disable_plane  = drm_atomic_helper_disable_plane,
6673         .destroy        = drm_primary_helper_destroy,
6674         .reset = dm_drm_plane_reset,
6675         .atomic_duplicate_state = dm_drm_plane_duplicate_state,
6676         .atomic_destroy_state = dm_drm_plane_destroy_state,
6677         .format_mod_supported = dm_plane_format_mod_supported,
6678 };
6679
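/*
 * Pin the framebuffer BO (the domains amdgpu_display_supported_domains()
 * allows for regular planes, VRAM only for the cursor) and bind it in GART
 * so its GPU address is known, then fill the DC plane buffer attributes
 * for newly created plane states. The reservation goes through the TTM
 * execbuf utils so eviction is handled consistently.
 */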
6680 static int dm_plane_helper_prepare_fb(struct drm_plane *plane,
6681                                       struct drm_plane_state *new_state)
6682 {
6683         struct amdgpu_framebuffer *afb;
6684         struct drm_gem_object *obj;
6685         struct amdgpu_device *adev;
6686         struct amdgpu_bo *rbo;
6687         struct dm_plane_state *dm_plane_state_new, *dm_plane_state_old;
6688         struct list_head list;
6689         struct ttm_validate_buffer tv;
6690         struct ww_acquire_ctx ticket;
6691         uint32_t domain;
6692         int r;
6693
6694         if (!new_state->fb) {
6695                 DRM_DEBUG_KMS("No FB bound\n");
6696                 return 0;
6697         }
6698
6699         afb = to_amdgpu_framebuffer(new_state->fb);
6700         obj = new_state->fb->obj[0];
6701         rbo = gem_to_amdgpu_bo(obj);
6702         adev = amdgpu_ttm_adev(rbo->tbo.bdev);
6703         INIT_LIST_HEAD(&list);
6704
6705         tv.bo = &rbo->tbo;
6706         tv.num_shared = 1;
6707         list_add(&tv.head, &list);
6708
6709         r = ttm_eu_reserve_buffers(&ticket, &list, false, NULL);
6710         if (r) {
6711                 dev_err(adev->dev, "fail to reserve bo (%d)\n", r);
6712                 return r;
6713         }
6714
6715         if (plane->type != DRM_PLANE_TYPE_CURSOR)
6716                 domain = amdgpu_display_supported_domains(adev, rbo->flags);
6717         else
6718                 domain = AMDGPU_GEM_DOMAIN_VRAM;
6719
6720         r = amdgpu_bo_pin(rbo, domain);
6721         if (unlikely(r != 0)) {
6722                 if (r != -ERESTARTSYS)
6723                         DRM_ERROR("Failed to pin framebuffer with error %d\n", r);
6724                 ttm_eu_backoff_reservation(&ticket, &list);
6725                 return r;
6726         }
6727
6728         r = amdgpu_ttm_alloc_gart(&rbo->tbo);
6729         if (unlikely(r != 0)) {
6730                 amdgpu_bo_unpin(rbo);
6731                 ttm_eu_backoff_reservation(&ticket, &list);
6732                 DRM_ERROR("%p bind failed\n", rbo);
6733                 return r;
6734         }
6735
6736         ttm_eu_backoff_reservation(&ticket, &list);
6737
6738         afb->address = amdgpu_bo_gpu_offset(rbo);
6739
6740         amdgpu_bo_ref(rbo);
6741
6742         /*
6743          * We don't do surface updates on planes that have been newly created,
6744          * but we also don't have the afb->address during atomic check.
6745          *
6746          * Fill in buffer attributes depending on the address here, but only on
6747          * newly created planes since they're not being used by DC yet and this
6748          * won't modify global state.
6749          */
6750         dm_plane_state_old = to_dm_plane_state(plane->state);
6751         dm_plane_state_new = to_dm_plane_state(new_state);
6752
6753         if (dm_plane_state_new->dc_state &&
6754             dm_plane_state_old->dc_state != dm_plane_state_new->dc_state) {
6755                 struct dc_plane_state *plane_state =
6756                         dm_plane_state_new->dc_state;
6757                 bool force_disable_dcc = !plane_state->dcc.enable;
6758
6759                 fill_plane_buffer_attributes(
6760                         adev, afb, plane_state->format, plane_state->rotation,
6761                         afb->tiling_flags,
6762                         &plane_state->tiling_info, &plane_state->plane_size,
6763                         &plane_state->dcc, &plane_state->address,
6764                         afb->tmz_surface, force_disable_dcc);
6765         }
6766
6767         return 0;
6768 }
6769
6770 static void dm_plane_helper_cleanup_fb(struct drm_plane *plane,
6771                                        struct drm_plane_state *old_state)
6772 {
6773         struct amdgpu_bo *rbo;
6774         int r;
6775
6776         if (!old_state->fb)
6777                 return;
6778
6779         rbo = gem_to_amdgpu_bo(old_state->fb->obj[0]);
6780         r = amdgpu_bo_reserve(rbo, false);
6781         if (unlikely(r)) {
6782                 DRM_ERROR("failed to reserve rbo before unpin\n");
6783                 return;
6784         }
6785
6786         amdgpu_bo_unpin(rbo);
6787         amdgpu_bo_unreserve(rbo);
6788         amdgpu_bo_unref(&rbo);
6789 }
6790
6791 static int dm_plane_helper_check_state(struct drm_plane_state *state,
6792                                        struct drm_crtc_state *new_crtc_state)
6793 {
6794         struct drm_framebuffer *fb = state->fb;
6795         int min_downscale, max_upscale;
6796         int min_scale = 0;
6797         int max_scale = INT_MAX;
6798
6799         /* Plane enabled? Validate viewport and get scaling factors from plane caps. */
6800         if (fb && state->crtc) {
6801                 /* Validate viewport to cover the case when only the position changes */
6802                 if (state->plane->type != DRM_PLANE_TYPE_CURSOR) {
6803                         int viewport_width = state->crtc_w;
6804                         int viewport_height = state->crtc_h;
6805
6806                         if (state->crtc_x < 0)
6807                                 viewport_width += state->crtc_x;
6808                         else if (state->crtc_x + state->crtc_w > new_crtc_state->mode.crtc_hdisplay)
6809                                 viewport_width = new_crtc_state->mode.crtc_hdisplay - state->crtc_x;
6810
6811                         if (state->crtc_y < 0)
6812                                 viewport_height += state->crtc_y;
6813                         else if (state->crtc_y + state->crtc_h > new_crtc_state->mode.crtc_vdisplay)
6814                                 viewport_height = new_crtc_state->mode.crtc_vdisplay - state->crtc_y;
6815
6816                         if (viewport_width < 0 || viewport_height < 0) {
6817                                 DRM_DEBUG_ATOMIC("Plane completely outside of screen\n");
6818                                 return -EINVAL;
6819                         } else if (viewport_width < MIN_VIEWPORT_SIZE*2) { /* x2 for width is because of pipe-split. */
6820                                 DRM_DEBUG_ATOMIC("Viewport width %d smaller than %d\n", viewport_width, MIN_VIEWPORT_SIZE*2);
6821                                 return -EINVAL;
6822                         } else if (viewport_height < MIN_VIEWPORT_SIZE) {
6823                                 DRM_DEBUG_ATOMIC("Viewport height %d smaller than %d\n", viewport_height, MIN_VIEWPORT_SIZE);
6824                                 return -EINVAL;
6825                         }
6826
6827                 }
6828
6829                 /* Get min/max allowed scaling factors from plane caps. */
6830                 get_min_max_dc_plane_scaling(state->crtc->dev, fb,
6831                                              &min_downscale, &max_upscale);
6832                 /*
6833                  * Convert to drm convention: 16.16 fixed point, instead of dc's
6834                  * 1.0 == 1000. Also drm scaling is src/dst instead of dc's
6835                  * dst/src, so min_scale = 1.0 / max_upscale, etc.
6836                  */
6837                 min_scale = (1000 << 16) / max_upscale;
6838                 max_scale = (1000 << 16) / min_downscale;
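                /*
                 * Illustrative numbers: a 16x upscale cap is reported by DC
                 * as max_upscale = 16000, so min_scale becomes
                 * (1000 << 16) / 16000 = 4096, i.e. 0.0625 in 16.16 fixed
                 * point, which is the DRM src/dst ratio of 1/16.
                 */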
6839         }
6840
6841         return drm_atomic_helper_check_plane_state(
6842                 state, new_crtc_state, min_scale, max_scale, true, true);
6843 }
6844
6845 static int dm_plane_atomic_check(struct drm_plane *plane,
6846                                  struct drm_atomic_state *state)
6847 {
6848         struct drm_plane_state *new_plane_state = drm_atomic_get_new_plane_state(state,
6849                                                                                  plane);
6850         struct amdgpu_device *adev = drm_to_adev(plane->dev);
6851         struct dc *dc = adev->dm.dc;
6852         struct dm_plane_state *dm_plane_state;
6853         struct dc_scaling_info scaling_info;
6854         struct drm_crtc_state *new_crtc_state;
6855         int ret;
6856
6857         trace_amdgpu_dm_plane_atomic_check(new_plane_state);
6858
6859         dm_plane_state = to_dm_plane_state(new_plane_state);
6860
6861         if (!dm_plane_state->dc_state)
6862                 return 0;
6863
6864         new_crtc_state =
6865                 drm_atomic_get_new_crtc_state(state,
6866                                               new_plane_state->crtc);
6867         if (!new_crtc_state)
6868                 return -EINVAL;
6869
6870         ret = dm_plane_helper_check_state(new_plane_state, new_crtc_state);
6871         if (ret)
6872                 return ret;
6873
6874         ret = fill_dc_scaling_info(new_plane_state, &scaling_info);
6875         if (ret)
6876                 return ret;
6877
6878         if (dc_validate_plane(dc, dm_plane_state->dc_state) == DC_OK)
6879                 return 0;
6880
6881         return -EINVAL;
6882 }
6883
6884 static int dm_plane_atomic_async_check(struct drm_plane *plane,
6885                                        struct drm_atomic_state *state)
6886 {
6887         /* Only support async updates on cursor planes. */
6888         if (plane->type != DRM_PLANE_TYPE_CURSOR)
6889                 return -EINVAL;
6890
6891         return 0;
6892 }
6893
6894 static void dm_plane_atomic_async_update(struct drm_plane *plane,
6895                                          struct drm_atomic_state *state)
6896 {
6897         struct drm_plane_state *new_state = drm_atomic_get_new_plane_state(state,
6898                                                                            plane);
6899         struct drm_plane_state *old_state =
6900                 drm_atomic_get_old_plane_state(state, plane);
6901
6902         trace_amdgpu_dm_atomic_update_cursor(new_state);
6903
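        /*
         * Async cursor updates bypass the normal atomic state swap, so move
         * the new fb and copy the position/size fields into the current
         * plane state by hand before programming the cursor.
         */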
6904         swap(plane->state->fb, new_state->fb);
6905
6906         plane->state->src_x = new_state->src_x;
6907         plane->state->src_y = new_state->src_y;
6908         plane->state->src_w = new_state->src_w;
6909         plane->state->src_h = new_state->src_h;
6910         plane->state->crtc_x = new_state->crtc_x;
6911         plane->state->crtc_y = new_state->crtc_y;
6912         plane->state->crtc_w = new_state->crtc_w;
6913         plane->state->crtc_h = new_state->crtc_h;
6914
6915         handle_cursor_update(plane, old_state);
6916 }
6917
6918 static const struct drm_plane_helper_funcs dm_plane_helper_funcs = {
6919         .prepare_fb = dm_plane_helper_prepare_fb,
6920         .cleanup_fb = dm_plane_helper_cleanup_fb,
6921         .atomic_check = dm_plane_atomic_check,
6922         .atomic_async_check = dm_plane_atomic_async_check,
6923         .atomic_async_update = dm_plane_atomic_async_update
6924 };
6925
6926 /*
 * TODO: these are currently initialized to rgb formats only.
 * For future use cases we should either initialize them dynamically based on
 * plane capabilities, or initialize this array to all formats, so the
 * internal drm check will succeed and DC can then implement the proper
 * format checks.
6931  */
6932 static const uint32_t rgb_formats[] = {
6933         DRM_FORMAT_XRGB8888,
6934         DRM_FORMAT_ARGB8888,
6935         DRM_FORMAT_RGBA8888,
6936         DRM_FORMAT_XRGB2101010,
6937         DRM_FORMAT_XBGR2101010,
6938         DRM_FORMAT_ARGB2101010,
6939         DRM_FORMAT_ABGR2101010,
6940         DRM_FORMAT_XBGR8888,
6941         DRM_FORMAT_ABGR8888,
6942         DRM_FORMAT_RGB565,
6943 };
6944
6945 static const uint32_t overlay_formats[] = {
6946         DRM_FORMAT_XRGB8888,
6947         DRM_FORMAT_ARGB8888,
6948         DRM_FORMAT_RGBA8888,
6949         DRM_FORMAT_XBGR8888,
6950         DRM_FORMAT_ABGR8888,
6951         DRM_FORMAT_RGB565
6952 };
6953
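/*
 * The hardware cursor is programmed as pre-multiplied alpha ARGB (see
 * handle_cursor_update()), so ARGB8888 is the only format exposed.
 */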
6954 static const u32 cursor_formats[] = {
6955         DRM_FORMAT_ARGB8888
6956 };
6957
6958 static int get_plane_formats(const struct drm_plane *plane,
6959                              const struct dc_plane_cap *plane_cap,
6960                              uint32_t *formats, int max_formats)
6961 {
6962         int i, num_formats = 0;
6963
6964         /*
6965          * TODO: Query support for each group of formats directly from
6966          * DC plane caps. This will require adding more formats to the
6967          * caps list.
6968          */
6969
6970         switch (plane->type) {
6971         case DRM_PLANE_TYPE_PRIMARY:
6972                 for (i = 0; i < ARRAY_SIZE(rgb_formats); ++i) {
6973                         if (num_formats >= max_formats)
6974                                 break;
6975
6976                         formats[num_formats++] = rgb_formats[i];
6977                 }
6978
6979                 if (plane_cap && plane_cap->pixel_format_support.nv12)
6980                         formats[num_formats++] = DRM_FORMAT_NV12;
6981                 if (plane_cap && plane_cap->pixel_format_support.p010)
6982                         formats[num_formats++] = DRM_FORMAT_P010;
6983                 if (plane_cap && plane_cap->pixel_format_support.fp16) {
6984                         formats[num_formats++] = DRM_FORMAT_XRGB16161616F;
6985                         formats[num_formats++] = DRM_FORMAT_ARGB16161616F;
6986                         formats[num_formats++] = DRM_FORMAT_XBGR16161616F;
6987                         formats[num_formats++] = DRM_FORMAT_ABGR16161616F;
6988                 }
6989                 break;
6990
6991         case DRM_PLANE_TYPE_OVERLAY:
6992                 for (i = 0; i < ARRAY_SIZE(overlay_formats); ++i) {
6993                         if (num_formats >= max_formats)
6994                                 break;
6995
6996                         formats[num_formats++] = overlay_formats[i];
6997                 }
6998                 break;
6999
7000         case DRM_PLANE_TYPE_CURSOR:
7001                 for (i = 0; i < ARRAY_SIZE(cursor_formats); ++i) {
7002                         if (num_formats >= max_formats)
7003                                 break;
7004
7005                         formats[num_formats++] = cursor_formats[i];
7006                 }
7007                 break;
7008         }
7009
7010         return num_formats;
7011 }
7012
7013 static int amdgpu_dm_plane_init(struct amdgpu_display_manager *dm,
7014                                 struct drm_plane *plane,
7015                                 unsigned long possible_crtcs,
7016                                 const struct dc_plane_cap *plane_cap)
7017 {
7018         uint32_t formats[32];
7019         int num_formats;
7020         int res = -EPERM;
7021         unsigned int supported_rotations;
7022         uint64_t *modifiers = NULL;
7023
7024         num_formats = get_plane_formats(plane, plane_cap, formats,
7025                                         ARRAY_SIZE(formats));
7026
7027         res = get_plane_modifiers(dm->adev, plane->type, &modifiers);
7028         if (res)
7029                 return res;
7030
7031         res = drm_universal_plane_init(adev_to_drm(dm->adev), plane, possible_crtcs,
7032                                        &dm_plane_funcs, formats, num_formats,
7033                                        modifiers, plane->type, NULL);
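        /*
         * drm_universal_plane_init() makes its own copy of the modifier
         * list, so the temporary array can be freed right away.
         */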
7034         kfree(modifiers);
7035         if (res)
7036                 return res;
7037
7038         if (plane->type == DRM_PLANE_TYPE_OVERLAY &&
7039             plane_cap && plane_cap->per_pixel_alpha) {
7040                 unsigned int blend_caps = BIT(DRM_MODE_BLEND_PIXEL_NONE) |
7041                                           BIT(DRM_MODE_BLEND_PREMULTI);
7042
7043                 drm_plane_create_alpha_property(plane);
7044                 drm_plane_create_blend_mode_property(plane, blend_caps);
7045         }
7046
7047         if (plane->type == DRM_PLANE_TYPE_PRIMARY &&
7048             plane_cap &&
7049             (plane_cap->pixel_format_support.nv12 ||
7050              plane_cap->pixel_format_support.p010)) {
7051                 /* This only affects YUV formats. */
7052                 drm_plane_create_color_properties(
7053                         plane,
7054                         BIT(DRM_COLOR_YCBCR_BT601) |
7055                         BIT(DRM_COLOR_YCBCR_BT709) |
7056                         BIT(DRM_COLOR_YCBCR_BT2020),
7057                         BIT(DRM_COLOR_YCBCR_LIMITED_RANGE) |
7058                         BIT(DRM_COLOR_YCBCR_FULL_RANGE),
7059                         DRM_COLOR_YCBCR_BT709, DRM_COLOR_YCBCR_LIMITED_RANGE);
7060         }
7061
7062         supported_rotations =
7063                 DRM_MODE_ROTATE_0 | DRM_MODE_ROTATE_90 |
7064                 DRM_MODE_ROTATE_180 | DRM_MODE_ROTATE_270;
7065
7066         if (dm->adev->asic_type >= CHIP_BONAIRE &&
7067             plane->type != DRM_PLANE_TYPE_CURSOR)
7068                 drm_plane_create_rotation_property(plane, DRM_MODE_ROTATE_0,
7069                                                    supported_rotations);
7070
7071         drm_plane_helper_add(plane, &dm_plane_helper_funcs);
7072
7073         /* Create (reset) the plane state */
7074         if (plane->funcs->reset)
7075                 plane->funcs->reset(plane);
7076
7077         return 0;
7078 }
7079
7080 static int amdgpu_dm_crtc_init(struct amdgpu_display_manager *dm,
7081                                struct drm_plane *plane,
7082                                uint32_t crtc_index)
7083 {
7084         struct amdgpu_crtc *acrtc = NULL;
7085         struct drm_plane *cursor_plane;
        int res = -ENOMEM;
7088
7089         cursor_plane = kzalloc(sizeof(*cursor_plane), GFP_KERNEL);
7090         if (!cursor_plane)
7091                 goto fail;
7092
7093         cursor_plane->type = DRM_PLANE_TYPE_CURSOR;
        res = amdgpu_dm_plane_init(dm, cursor_plane, 0, NULL);
        if (res)
                goto fail;

7096         acrtc = kzalloc(sizeof(struct amdgpu_crtc), GFP_KERNEL);
7097         if (!acrtc)
7098                 goto fail;
7099
7100         res = drm_crtc_init_with_planes(
7101                         dm->ddev,
7102                         &acrtc->base,
7103                         plane,
7104                         cursor_plane,
7105                         &amdgpu_dm_crtc_funcs, NULL);
7106
7107         if (res)
7108                 goto fail;
7109
7110         drm_crtc_helper_add(&acrtc->base, &amdgpu_dm_crtc_helper_funcs);
7111
7112         /* Create (reset) the plane state */
7113         if (acrtc->base.funcs->reset)
7114                 acrtc->base.funcs->reset(&acrtc->base);
7115
7116         acrtc->max_cursor_width = dm->adev->dm.dc->caps.max_cursor_size;
7117         acrtc->max_cursor_height = dm->adev->dm.dc->caps.max_cursor_size;
7118
7119         acrtc->crtc_id = crtc_index;
7120         acrtc->base.enabled = false;
7121         acrtc->otg_inst = -1;
7122
7123         dm->adev->mode_info.crtcs[crtc_index] = acrtc;
7124         drm_crtc_enable_color_mgmt(&acrtc->base, MAX_COLOR_LUT_ENTRIES,
7125                                    true, MAX_COLOR_LUT_ENTRIES);
7126         drm_mode_crtc_set_gamma_size(&acrtc->base, MAX_COLOR_LEGACY_LUT_ENTRIES);
7127
7128         return 0;
7129
7130 fail:
7131         kfree(acrtc);
7132         kfree(cursor_plane);
7133         return res;
7134 }
7135
7136
7137 static int to_drm_connector_type(enum signal_type st)
7138 {
7139         switch (st) {
7140         case SIGNAL_TYPE_HDMI_TYPE_A:
7141                 return DRM_MODE_CONNECTOR_HDMIA;
7142         case SIGNAL_TYPE_EDP:
7143                 return DRM_MODE_CONNECTOR_eDP;
7144         case SIGNAL_TYPE_LVDS:
7145                 return DRM_MODE_CONNECTOR_LVDS;
7146         case SIGNAL_TYPE_RGB:
7147                 return DRM_MODE_CONNECTOR_VGA;
7148         case SIGNAL_TYPE_DISPLAY_PORT:
7149         case SIGNAL_TYPE_DISPLAY_PORT_MST:
7150                 return DRM_MODE_CONNECTOR_DisplayPort;
7151         case SIGNAL_TYPE_DVI_DUAL_LINK:
7152         case SIGNAL_TYPE_DVI_SINGLE_LINK:
7153                 return DRM_MODE_CONNECTOR_DVID;
7154         case SIGNAL_TYPE_VIRTUAL:
7155                 return DRM_MODE_CONNECTOR_VIRTUAL;
7156
7157         default:
7158                 return DRM_MODE_CONNECTOR_Unknown;
7159         }
7160 }
7161
7162 static struct drm_encoder *amdgpu_dm_connector_to_encoder(struct drm_connector *connector)
7163 {
7164         struct drm_encoder *encoder;
7165
7166         /* There is only one encoder per connector */
7167         drm_connector_for_each_possible_encoder(connector, encoder)
7168                 return encoder;
7169
7170         return NULL;
7171 }
7172
7173 static void amdgpu_dm_get_native_mode(struct drm_connector *connector)
7174 {
7175         struct drm_encoder *encoder;
7176         struct amdgpu_encoder *amdgpu_encoder;
7177
7178         encoder = amdgpu_dm_connector_to_encoder(connector);
7179
7180         if (encoder == NULL)
7181                 return;
7182
7183         amdgpu_encoder = to_amdgpu_encoder(encoder);
7184
7185         amdgpu_encoder->native_mode.clock = 0;
7186
7187         if (!list_empty(&connector->probed_modes)) {
7188                 struct drm_display_mode *preferred_mode = NULL;
7189
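                /*
                 * probed_modes was sorted by the caller (see
                 * amdgpu_dm_connector_ddc_get_modes()), so a preferred mode,
                 * if present, is expected at the head of the list; only the
                 * first entry is examined before breaking.
                 */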
7190                 list_for_each_entry(preferred_mode,
7191                                     &connector->probed_modes,
7192                                     head) {
7193                         if (preferred_mode->type & DRM_MODE_TYPE_PREFERRED)
7194                                 amdgpu_encoder->native_mode = *preferred_mode;
7195
7196                         break;
7197                 }
7198
7199         }
7200 }
7201
7202 static struct drm_display_mode *
7203 amdgpu_dm_create_common_mode(struct drm_encoder *encoder,
7204                              char *name,
7205                              int hdisplay, int vdisplay)
7206 {
7207         struct drm_device *dev = encoder->dev;
7208         struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
7209         struct drm_display_mode *mode = NULL;
7210         struct drm_display_mode *native_mode = &amdgpu_encoder->native_mode;
7211
7212         mode = drm_mode_duplicate(dev, native_mode);
7213
7214         if (mode == NULL)
7215                 return NULL;
7216
7217         mode->hdisplay = hdisplay;
7218         mode->vdisplay = vdisplay;
7219         mode->type &= ~DRM_MODE_TYPE_PREFERRED;
7220         strscpy(mode->name, name, DRM_DISPLAY_MODE_LEN);
7221
7222         return mode;
7223
7224 }
7225
7226 static void amdgpu_dm_connector_add_common_modes(struct drm_encoder *encoder,
7227                                                  struct drm_connector *connector)
7228 {
7229         struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
7230         struct drm_display_mode *mode = NULL;
7231         struct drm_display_mode *native_mode = &amdgpu_encoder->native_mode;
7232         struct amdgpu_dm_connector *amdgpu_dm_connector =
7233                                 to_amdgpu_dm_connector(connector);
7234         int i;
7235         int n;
7236         struct mode_size {
7237                 char name[DRM_DISPLAY_MODE_LEN];
7238                 int w;
7239                 int h;
7240         } common_modes[] = {
7241                 {  "640x480",  640,  480},
7242                 {  "800x600",  800,  600},
7243                 { "1024x768", 1024,  768},
7244                 { "1280x720", 1280,  720},
7245                 { "1280x800", 1280,  800},
7246                 {"1280x1024", 1280, 1024},
7247                 { "1440x900", 1440,  900},
7248                 {"1680x1050", 1680, 1050},
7249                 {"1600x1200", 1600, 1200},
7250                 {"1920x1080", 1920, 1080},
7251                 {"1920x1200", 1920, 1200}
7252         };
7253
7254         n = ARRAY_SIZE(common_modes);
7255
7256         for (i = 0; i < n; i++) {
7257                 struct drm_display_mode *curmode = NULL;
7258                 bool mode_existed = false;
7259
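                /*
                 * Skip common modes larger than the native mode, and the
                 * native mode itself since it is already in the probed list.
                 */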
7260                 if (common_modes[i].w > native_mode->hdisplay ||
7261                     common_modes[i].h > native_mode->vdisplay ||
7262                    (common_modes[i].w == native_mode->hdisplay &&
7263                     common_modes[i].h == native_mode->vdisplay))
7264                         continue;
7265
7266                 list_for_each_entry(curmode, &connector->probed_modes, head) {
7267                         if (common_modes[i].w == curmode->hdisplay &&
7268                             common_modes[i].h == curmode->vdisplay) {
7269                                 mode_existed = true;
7270                                 break;
7271                         }
7272                 }
7273
7274                 if (mode_existed)
7275                         continue;
7276
7277                 mode = amdgpu_dm_create_common_mode(encoder,
7278                                 common_modes[i].name, common_modes[i].w,
7279                                 common_modes[i].h);
7280                 drm_mode_probed_add(connector, mode);
7281                 amdgpu_dm_connector->num_modes++;
7282         }
7283 }
7284
7285 static void amdgpu_dm_connector_ddc_get_modes(struct drm_connector *connector,
7286                                               struct edid *edid)
7287 {
7288         struct amdgpu_dm_connector *amdgpu_dm_connector =
7289                         to_amdgpu_dm_connector(connector);
7290
7291         if (edid) {
7292                 /* empty probed_modes */
7293                 INIT_LIST_HEAD(&connector->probed_modes);
7294                 amdgpu_dm_connector->num_modes =
7295                                 drm_add_edid_modes(connector, edid);
7296
                /* Sort the probed modes before calling
                 * amdgpu_dm_get_native_mode(), since an EDID can have more
                 * than one preferred mode. Modes later in the probed list may
                 * be preferred at a higher resolution; for example, 3840x2160
                 * in the base EDID preferred timing, with a 4096x2160
                 * preferred resolution in a DID extension block later.
                 */
7305                 drm_mode_sort(&connector->probed_modes);
7306                 amdgpu_dm_get_native_mode(connector);
7307
7308                 /* Freesync capabilities are reset by calling
7309                  * drm_add_edid_modes() and need to be
7310                  * restored here.
7311                  */
7312                 amdgpu_dm_update_freesync_caps(connector, edid);
7313         } else {
7314                 amdgpu_dm_connector->num_modes = 0;
7315         }
7316 }
7317
7318 static bool is_duplicate_mode(struct amdgpu_dm_connector *aconnector,
7319                               struct drm_display_mode *mode)
7320 {
7321         struct drm_display_mode *m;
7322
        list_for_each_entry(m, &aconnector->base.probed_modes, head) {
7324                 if (drm_mode_equal(m, mode))
7325                         return true;
7326         }
7327
7328         return false;
7329 }
7330
7331 static uint add_fs_modes(struct amdgpu_dm_connector *aconnector)
7332 {
7333         const struct drm_display_mode *m;
7334         struct drm_display_mode *new_mode;
7335         uint i;
7336         uint32_t new_modes_count = 0;
7337
7338         /* Standard FPS values
7339          *
7340          * 23.976   - TV/NTSC
7341          * 24       - Cinema
7342          * 25       - TV/PAL
7343          * 29.97    - TV/NTSC
7344          * 30       - TV/NTSC
7345          * 48       - Cinema HFR
7346          * 50       - TV/PAL
7347          * 60       - Commonly used
7348          * 48,72,96 - Multiples of 24
7349          */
7350         const uint32_t common_rates[] = { 23976, 24000, 25000, 29970, 30000,
7351                                          48000, 50000, 60000, 72000, 96000 };
7352
7353         /*
         * Find the mode with the highest refresh rate at the same resolution
         * as the preferred mode. Some monitors report a preferred mode with a
         * lower refresh rate than the highest rate they actually support.
7357          */
7358
7359         m = get_highest_refresh_rate_mode(aconnector, true);
7360         if (!m)
7361                 return 0;
7362
7363         for (i = 0; i < ARRAY_SIZE(common_rates); i++) {
7364                 uint64_t target_vtotal, target_vtotal_diff;
7365                 uint64_t num, den;
7366
7367                 if (drm_mode_vrefresh(m) * 1000 < common_rates[i])
7368                         continue;
7369
7370                 if (common_rates[i] < aconnector->min_vfreq * 1000 ||
7371                     common_rates[i] > aconnector->max_vfreq * 1000)
7372                         continue;
7373
7374                 num = (unsigned long long)m->clock * 1000 * 1000;
7375                 den = common_rates[i] * (unsigned long long)m->htotal;
7376                 target_vtotal = div_u64(num, den);
7377                 target_vtotal_diff = target_vtotal - m->vtotal;
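                /*
                 * Illustrative numbers: for a 1920x1080@60 CTA timing
                 * (clock 148500 kHz, htotal 2200, vtotal 1125), a 48 Hz
                 * variant needs vtotal = 148500e6 / (48000 * 2200) = 1406,
                 * i.e. 281 extra lines of front porch.
                 */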
7378
7379                 /* Check for illegal modes */
7380                 if (m->vsync_start + target_vtotal_diff < m->vdisplay ||
7381                     m->vsync_end + target_vtotal_diff < m->vsync_start ||
7382                     m->vtotal + target_vtotal_diff < m->vsync_end)
7383                         continue;
7384
7385                 new_mode = drm_mode_duplicate(aconnector->base.dev, m);
7386                 if (!new_mode)
7387                         goto out;
7388
7389                 new_mode->vtotal += (u16)target_vtotal_diff;
7390                 new_mode->vsync_start += (u16)target_vtotal_diff;
7391                 new_mode->vsync_end += (u16)target_vtotal_diff;
7392                 new_mode->type &= ~DRM_MODE_TYPE_PREFERRED;
7393                 new_mode->type |= DRM_MODE_TYPE_DRIVER;
7394
7395                 if (!is_duplicate_mode(aconnector, new_mode)) {
7396                         drm_mode_probed_add(&aconnector->base, new_mode);
7397                         new_modes_count += 1;
                } else {
                        drm_mode_destroy(aconnector->base.dev, new_mode);
                }
7400         }
7401  out:
7402         return new_modes_count;
7403 }
7404
7405 static void amdgpu_dm_connector_add_freesync_modes(struct drm_connector *connector,
7406                                                    struct edid *edid)
7407 {
7408         struct amdgpu_dm_connector *amdgpu_dm_connector =
7409                 to_amdgpu_dm_connector(connector);
7410
7411         if (!(amdgpu_freesync_vid_mode && edid))
7412                 return;
7413
7414         if (amdgpu_dm_connector->max_vfreq - amdgpu_dm_connector->min_vfreq > 10)
7415                 amdgpu_dm_connector->num_modes +=
7416                         add_fs_modes(amdgpu_dm_connector);
7417 }
7418
7419 static int amdgpu_dm_connector_get_modes(struct drm_connector *connector)
7420 {
7421         struct amdgpu_dm_connector *amdgpu_dm_connector =
7422                         to_amdgpu_dm_connector(connector);
7423         struct drm_encoder *encoder;
7424         struct edid *edid = amdgpu_dm_connector->edid;
7425
7426         encoder = amdgpu_dm_connector_to_encoder(connector);
7427
7428         if (!drm_edid_is_valid(edid)) {
7429                 amdgpu_dm_connector->num_modes =
7430                                 drm_add_modes_noedid(connector, 640, 480);
7431         } else {
7432                 amdgpu_dm_connector_ddc_get_modes(connector, edid);
7433                 amdgpu_dm_connector_add_common_modes(encoder, connector);
7434                 amdgpu_dm_connector_add_freesync_modes(connector, edid);
7435         }
7436         amdgpu_dm_fbc_init(connector);
7437
7438         return amdgpu_dm_connector->num_modes;
7439 }
7440
7441 void amdgpu_dm_connector_init_helper(struct amdgpu_display_manager *dm,
7442                                      struct amdgpu_dm_connector *aconnector,
7443                                      int connector_type,
7444                                      struct dc_link *link,
7445                                      int link_index)
7446 {
7447         struct amdgpu_device *adev = drm_to_adev(dm->ddev);
7448
7449         /*
7450          * Some of the properties below require access to state, like bpc.
7451          * Allocate some default initial connector state with our reset helper.
7452          */
7453         if (aconnector->base.funcs->reset)
7454                 aconnector->base.funcs->reset(&aconnector->base);
7455
7456         aconnector->connector_id = link_index;
7457         aconnector->dc_link = link;
7458         aconnector->base.interlace_allowed = false;
7459         aconnector->base.doublescan_allowed = false;
7460         aconnector->base.stereo_allowed = false;
7461         aconnector->base.dpms = DRM_MODE_DPMS_OFF;
7462         aconnector->hpd.hpd = AMDGPU_HPD_NONE; /* not used */
7463         aconnector->audio_inst = -1;
7464         mutex_init(&aconnector->hpd_lock);
7465
7466         /*
         * Configure HPD hot-plug support: connector->polled defaults to 0,
         * which means HPD hot plug is not supported.
7469          */
7470         switch (connector_type) {
7471         case DRM_MODE_CONNECTOR_HDMIA:
7472                 aconnector->base.polled = DRM_CONNECTOR_POLL_HPD;
7473                 aconnector->base.ycbcr_420_allowed =
                        link->link_enc->features.hdmi_ycbcr420_supported;
7475                 break;
7476         case DRM_MODE_CONNECTOR_DisplayPort:
7477                 aconnector->base.polled = DRM_CONNECTOR_POLL_HPD;
7478                 aconnector->base.ycbcr_420_allowed =
                        link->link_enc->features.dp_ycbcr420_supported;
7480                 break;
7481         case DRM_MODE_CONNECTOR_DVID:
7482                 aconnector->base.polled = DRM_CONNECTOR_POLL_HPD;
7483                 break;
7484         default:
7485                 break;
7486         }
7487
7488         drm_object_attach_property(&aconnector->base.base,
7489                                 dm->ddev->mode_config.scaling_mode_property,
7490                                 DRM_MODE_SCALE_NONE);
7491
7492         drm_object_attach_property(&aconnector->base.base,
7493                                 adev->mode_info.underscan_property,
7494                                 UNDERSCAN_OFF);
7495         drm_object_attach_property(&aconnector->base.base,
7496                                 adev->mode_info.underscan_hborder_property,
7497                                 0);
7498         drm_object_attach_property(&aconnector->base.base,
7499                                 adev->mode_info.underscan_vborder_property,
7500                                 0);
7501
7502         if (!aconnector->mst_port)
7503                 drm_connector_attach_max_bpc_property(&aconnector->base, 8, 16);
7504
7505         /* This defaults to the max in the range, but we want 8bpc for non-edp. */
7506         aconnector->base.state->max_bpc = (connector_type == DRM_MODE_CONNECTOR_eDP) ? 16 : 8;
7507         aconnector->base.state->max_requested_bpc = aconnector->base.state->max_bpc;
7508
7509         if (connector_type == DRM_MODE_CONNECTOR_eDP &&
7510             (dc_is_dmcu_initialized(adev->dm.dc) || adev->dm.dc->ctx->dmub_srv)) {
7511                 drm_object_attach_property(&aconnector->base.base,
7512                                 adev->mode_info.abm_level_property, 0);
7513         }
7514
7515         if (connector_type == DRM_MODE_CONNECTOR_HDMIA ||
7516             connector_type == DRM_MODE_CONNECTOR_DisplayPort ||
7517             connector_type == DRM_MODE_CONNECTOR_eDP) {
7518                 drm_object_attach_property(
7519                         &aconnector->base.base,
7520                         dm->ddev->mode_config.hdr_output_metadata_property, 0);
7521
7522                 if (!aconnector->mst_port)
7523                         drm_connector_attach_vrr_capable_property(&aconnector->base);
7524
7525 #ifdef CONFIG_DRM_AMD_DC_HDCP
7526                 if (adev->dm.hdcp_workqueue)
7527                         drm_connector_attach_content_protection_property(&aconnector->base, true);
7528 #endif
7529         }
7530 }
7531
7532 static int amdgpu_dm_i2c_xfer(struct i2c_adapter *i2c_adap,
7533                               struct i2c_msg *msgs, int num)
7534 {
7535         struct amdgpu_i2c_adapter *i2c = i2c_get_adapdata(i2c_adap);
7536         struct ddc_service *ddc_service = i2c->ddc_service;
7537         struct i2c_command cmd;
7538         int i;
7539         int result = -EIO;
7540
7541         cmd.payloads = kcalloc(num, sizeof(struct i2c_payload), GFP_KERNEL);
7542
7543         if (!cmd.payloads)
7544                 return result;
7545
7546         cmd.number_of_payloads = num;
7547         cmd.engine = I2C_COMMAND_ENGINE_DEFAULT;
7548         cmd.speed = 100;
7549
7550         for (i = 0; i < num; i++) {
7551                 cmd.payloads[i].write = !(msgs[i].flags & I2C_M_RD);
7552                 cmd.payloads[i].address = msgs[i].addr;
7553                 cmd.payloads[i].length = msgs[i].len;
7554                 cmd.payloads[i].data = msgs[i].buf;
7555         }
7556
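        /*
         * Hand the whole transaction to DC. On success, the i2c core expects
         * the number of messages processed as the return value.
         */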
7557         if (dc_submit_i2c(
7558                         ddc_service->ctx->dc,
7559                         ddc_service->ddc_pin->hw_info.ddc_channel,
7560                         &cmd))
7561                 result = num;
7562
7563         kfree(cmd.payloads);
7564         return result;
7565 }
7566
7567 static u32 amdgpu_dm_i2c_func(struct i2c_adapter *adap)
7568 {
7569         return I2C_FUNC_I2C | I2C_FUNC_SMBUS_EMUL;
7570 }
7571
7572 static const struct i2c_algorithm amdgpu_dm_i2c_algo = {
7573         .master_xfer = amdgpu_dm_i2c_xfer,
7574         .functionality = amdgpu_dm_i2c_func,
7575 };
7576
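/*
 * Allocate an i2c adapter that routes transfers through the link's DDC
 * service. Note that *res is never written here; callers detect failure
 * via the NULL return value.
 */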
7577 static struct amdgpu_i2c_adapter *
7578 create_i2c(struct ddc_service *ddc_service,
7579            int link_index,
7580            int *res)
7581 {
7582         struct amdgpu_device *adev = ddc_service->ctx->driver_context;
7583         struct amdgpu_i2c_adapter *i2c;
7584
7585         i2c = kzalloc(sizeof(struct amdgpu_i2c_adapter), GFP_KERNEL);
7586         if (!i2c)
7587                 return NULL;
7588         i2c->base.owner = THIS_MODULE;
7589         i2c->base.class = I2C_CLASS_DDC;
7590         i2c->base.dev.parent = &adev->pdev->dev;
7591         i2c->base.algo = &amdgpu_dm_i2c_algo;
7592         snprintf(i2c->base.name, sizeof(i2c->base.name), "AMDGPU DM i2c hw bus %d", link_index);
7593         i2c_set_adapdata(&i2c->base, i2c);
7594         i2c->ddc_service = ddc_service;
7595         i2c->ddc_service->ddc_pin->hw_info.ddc_channel = link_index;
7596
7597         return i2c;
7598 }
7599
7600
7601 /*
7602  * Note: this function assumes that dc_link_detect() was called for the
7603  * dc_link which will be represented by this aconnector.
7604  */
7605 static int amdgpu_dm_connector_init(struct amdgpu_display_manager *dm,
7606                                     struct amdgpu_dm_connector *aconnector,
7607                                     uint32_t link_index,
7608                                     struct amdgpu_encoder *aencoder)
7609 {
7610         int res = 0;
7611         int connector_type;
7612         struct dc *dc = dm->dc;
7613         struct dc_link *link = dc_get_link_at_index(dc, link_index);
7614         struct amdgpu_i2c_adapter *i2c;
7615
7616         link->priv = aconnector;
7617
7618         DRM_DEBUG_DRIVER("%s()\n", __func__);
7619
7620         i2c = create_i2c(link->ddc, link->link_index, &res);
7621         if (!i2c) {
7622                 DRM_ERROR("Failed to create i2c adapter data\n");
7623                 return -ENOMEM;
7624         }
7625
7626         aconnector->i2c = i2c;
7627         res = i2c_add_adapter(&i2c->base);
7628
7629         if (res) {
7630                 DRM_ERROR("Failed to register hw i2c %d\n", link->link_index);
7631                 goto out_free;
7632         }
7633
7634         connector_type = to_drm_connector_type(link->connector_signal);
7635
7636         res = drm_connector_init_with_ddc(
7637                         dm->ddev,
7638                         &aconnector->base,
7639                         &amdgpu_dm_connector_funcs,
7640                         connector_type,
7641                         &i2c->base);
7642
7643         if (res) {
7644                 DRM_ERROR("connector_init failed\n");
7645                 aconnector->connector_id = -1;
7646                 goto out_free;
7647         }
7648
7649         drm_connector_helper_add(
7650                         &aconnector->base,
7651                         &amdgpu_dm_connector_helper_funcs);
7652
7653         amdgpu_dm_connector_init_helper(
7654                 dm,
7655                 aconnector,
7656                 connector_type,
7657                 link,
7658                 link_index);
7659
7660         drm_connector_attach_encoder(
7661                 &aconnector->base, &aencoder->base);
7662
7663         if (connector_type == DRM_MODE_CONNECTOR_DisplayPort
7664                 || connector_type == DRM_MODE_CONNECTOR_eDP)
7665                 amdgpu_dm_initialize_dp_connector(dm, aconnector, link->link_index);
7666
7667 out_free:
7668         if (res) {
7669                 kfree(i2c);
7670                 aconnector->i2c = NULL;
7671         }
7672         return res;
7673 }
7674
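/* Build the encoder's possible_crtcs mask: one bit per exposed CRTC, capped
 * at six CRTCs.
 */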
7675 int amdgpu_dm_get_encoder_crtc_mask(struct amdgpu_device *adev)
7676 {
7677         switch (adev->mode_info.num_crtc) {
7678         case 1:
7679                 return 0x1;
7680         case 2:
7681                 return 0x3;
7682         case 3:
7683                 return 0x7;
7684         case 4:
7685                 return 0xf;
7686         case 5:
7687                 return 0x1f;
7688         case 6:
7689         default:
7690                 return 0x3f;
7691         }
7692 }
7693
7694 static int amdgpu_dm_encoder_init(struct drm_device *dev,
7695                                   struct amdgpu_encoder *aencoder,
7696                                   uint32_t link_index)
7697 {
7698         struct amdgpu_device *adev = drm_to_adev(dev);
7699
7700         int res = drm_encoder_init(dev,
7701                                    &aencoder->base,
7702                                    &amdgpu_dm_encoder_funcs,
7703                                    DRM_MODE_ENCODER_TMDS,
7704                                    NULL);
7705
7706         aencoder->base.possible_crtcs = amdgpu_dm_get_encoder_crtc_mask(adev);
7707
7708         if (!res)
7709                 aencoder->encoder_id = link_index;
7710         else
7711                 aencoder->encoder_id = -1;
7712
7713         drm_encoder_helper_add(&aencoder->base, &amdgpu_dm_encoder_helper_funcs);
7714
7715         return res;
7716 }
7717
7718 static void manage_dm_interrupts(struct amdgpu_device *adev,
7719                                  struct amdgpu_crtc *acrtc,
7720                                  bool enable)
7721 {
7722         /*
7723          * We have no guarantee that the frontend index maps to the same
7724          * backend index - some even map to more than one.
7725          *
7726          * TODO: Use a different interrupt or check DC itself for the mapping.
7727          */
7728         int irq_type =
7729                 amdgpu_display_crtc_idx_to_irq_type(
7730                         adev,
7731                         acrtc->crtc_id);
7732
7733         if (enable) {
7734                 drm_crtc_vblank_on(&acrtc->base);
7735                 amdgpu_irq_get(
7736                         adev,
7737                         &adev->pageflip_irq,
7738                         irq_type);
7739 #if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
7740                 amdgpu_irq_get(
7741                         adev,
7742                         &adev->vline0_irq,
7743                         irq_type);
7744 #endif
7745         } else {
7746 #if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
7747                 amdgpu_irq_put(
7748                         adev,
7749                         &adev->vline0_irq,
7750                         irq_type);
7751 #endif
7752                 amdgpu_irq_put(
7753                         adev,
7754                         &adev->pageflip_irq,
7755                         irq_type);
7756                 drm_crtc_vblank_off(&acrtc->base);
7757         }
7758 }
7759
7760 static void dm_update_pflip_irq_state(struct amdgpu_device *adev,
7761                                       struct amdgpu_crtc *acrtc)
7762 {
7763         int irq_type =
7764                 amdgpu_display_crtc_idx_to_irq_type(adev, acrtc->crtc_id);
7765
        /*
         * Read the current state for the IRQ and force-reapply the setting
         * to hardware.
         */
7770         amdgpu_irq_update(adev, &adev->pageflip_irq, irq_type);
7771 }
7772
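/*
 * Return true when the scaling mode or the underscan borders changed in a
 * way that requires the stream to be reprogrammed.
 */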
7773 static bool
7774 is_scaling_state_different(const struct dm_connector_state *dm_state,
7775                            const struct dm_connector_state *old_dm_state)
7776 {
7777         if (dm_state->scaling != old_dm_state->scaling)
7778                 return true;
7779         if (!dm_state->underscan_enable && old_dm_state->underscan_enable) {
7780                 if (old_dm_state->underscan_hborder != 0 && old_dm_state->underscan_vborder != 0)
7781                         return true;
        } else if (dm_state->underscan_enable && !old_dm_state->underscan_enable) {
7783                 if (dm_state->underscan_hborder != 0 && dm_state->underscan_vborder != 0)
7784                         return true;
7785         } else if (dm_state->underscan_hborder != old_dm_state->underscan_hborder ||
7786                    dm_state->underscan_vborder != old_dm_state->underscan_vborder)
7787                 return true;
7788         return false;
7789 }
7790
7791 #ifdef CONFIG_DRM_AMD_DC_HDCP
7792 static bool is_content_protection_different(struct drm_connector_state *state,
7793                                             const struct drm_connector_state *old_state,
7794                                             const struct drm_connector *connector, struct hdcp_workqueue *hdcp_w)
7795 {
7796         struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
7797         struct dm_connector_state *dm_con_state = to_dm_connector_state(connector->state);
7798
7799         /* Handle: Type0/1 change */
7800         if (old_state->hdcp_content_type != state->hdcp_content_type &&
7801             state->content_protection != DRM_MODE_CONTENT_PROTECTION_UNDESIRED) {
7802                 state->content_protection = DRM_MODE_CONTENT_PROTECTION_DESIRED;
7803                 return true;
7804         }
7805
        /* CP is being re-enabled; ignore this.
7807          *
7808          * Handles:     ENABLED -> DESIRED
7809          */
7810         if (old_state->content_protection == DRM_MODE_CONTENT_PROTECTION_ENABLED &&
7811             state->content_protection == DRM_MODE_CONTENT_PROTECTION_DESIRED) {
7812                 state->content_protection = DRM_MODE_CONTENT_PROTECTION_ENABLED;
7813                 return false;
7814         }
7815
        /* S3 resume case: the old state will always be 0 (UNDESIRED) and the
         * restored state will be ENABLED.
7817          *
7818          * Handles:     UNDESIRED -> ENABLED
7819          */
7820         if (old_state->content_protection == DRM_MODE_CONTENT_PROTECTION_UNDESIRED &&
7821             state->content_protection == DRM_MODE_CONTENT_PROTECTION_ENABLED)
7822                 state->content_protection = DRM_MODE_CONTENT_PROTECTION_DESIRED;
7823
        /* Check that something is connected and enabled; otherwise we would
         * start HDCP with nothing connected/enabled (hot-plug, headless S3,
         * DPMS).
7826          *
7827          * Handles:     DESIRED -> DESIRED (Special case)
7828          */
7829         if (dm_con_state->update_hdcp && state->content_protection == DRM_MODE_CONTENT_PROTECTION_DESIRED &&
7830             connector->dpms == DRM_MODE_DPMS_ON && aconnector->dc_sink != NULL) {
7831                 dm_con_state->update_hdcp = false;
7832                 return true;
7833         }
7834
7835         /*
7836          * Handles:     UNDESIRED -> UNDESIRED
7837          *              DESIRED -> DESIRED
7838          *              ENABLED -> ENABLED
7839          */
7840         if (old_state->content_protection == state->content_protection)
7841                 return false;
7842
7843         /*
7844          * Handles:     UNDESIRED -> DESIRED
7845          *              DESIRED -> UNDESIRED
7846          *              ENABLED -> UNDESIRED
7847          */
7848         if (state->content_protection != DRM_MODE_CONTENT_PROTECTION_ENABLED)
7849                 return true;
7850
7851         /*
7852          * Handles:     DESIRED -> ENABLED
7853          */
7854         return false;
7855 }
7856
7857 #endif
7858 static void remove_stream(struct amdgpu_device *adev,
7859                           struct amdgpu_crtc *acrtc,
7860                           struct dc_stream_state *stream)
7861 {
        /* This is the update mode case: the stream is being removed, so just
         * clear the CRTC's association with it.
         */
7863
7864         acrtc->otg_inst = -1;
7865         acrtc->enabled = false;
7866 }
7867
7868 static int get_cursor_position(struct drm_plane *plane, struct drm_crtc *crtc,
7869                                struct dc_cursor_position *position)
7870 {
7871         struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
7872         int x, y;
7873         int xorigin = 0, yorigin = 0;
7874
7875         if (!crtc || !plane->state->fb)
7876                 return 0;
7877
7878         if ((plane->state->crtc_w > amdgpu_crtc->max_cursor_width) ||
7879             (plane->state->crtc_h > amdgpu_crtc->max_cursor_height)) {
7880                 DRM_ERROR("%s: bad cursor width or height %d x %d\n",
7881                           __func__,
7882                           plane->state->crtc_w,
7883                           plane->state->crtc_h);
7884                 return -EINVAL;
7885         }
7886
7887         x = plane->state->crtc_x;
7888         y = plane->state->crtc_y;
7889
7890         if (x <= -amdgpu_crtc->max_cursor_width ||
7891             y <= -amdgpu_crtc->max_cursor_height)
7892                 return 0;
7893
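        /*
         * For a cursor partially off the top or left edge, keep the
         * programmed position at 0 and offset into the cursor bitmap via the
         * hotspot instead.
         */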
7894         if (x < 0) {
7895                 xorigin = min(-x, amdgpu_crtc->max_cursor_width - 1);
7896                 x = 0;
7897         }
7898         if (y < 0) {
7899                 yorigin = min(-y, amdgpu_crtc->max_cursor_height - 1);
7900                 y = 0;
7901         }
7902         position->enable = true;
7903         position->translate_by_source = true;
7904         position->x = x;
7905         position->y = y;
7906         position->x_hotspot = xorigin;
7907         position->y_hotspot = yorigin;
7908
7909         return 0;
7910 }
7911
7912 static void handle_cursor_update(struct drm_plane *plane,
7913                                  struct drm_plane_state *old_plane_state)
7914 {
7915         struct amdgpu_device *adev = drm_to_adev(plane->dev);
7916         struct amdgpu_framebuffer *afb = to_amdgpu_framebuffer(plane->state->fb);
7917         struct drm_crtc *crtc = afb ? plane->state->crtc : old_plane_state->crtc;
7918         struct dm_crtc_state *crtc_state = crtc ? to_dm_crtc_state(crtc->state) : NULL;
7919         struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
7920         uint64_t address = afb ? afb->address : 0;
7921         struct dc_cursor_position position = {0};
7922         struct dc_cursor_attributes attributes;
7923         int ret;
7924
7925         if (!plane->state->fb && !old_plane_state->fb)
7926                 return;
7927
        DC_LOG_CURSOR("%s: crtc_id=%d with size %dx%d\n",
7929                       __func__,
7930                       amdgpu_crtc->crtc_id,
7931                       plane->state->crtc_w,
7932                       plane->state->crtc_h);
7933
7934         ret = get_cursor_position(plane, crtc, &position);
7935         if (ret)
7936                 return;
7937
7938         if (!position.enable) {
7939                 /* turn off cursor */
7940                 if (crtc_state && crtc_state->stream) {
7941                         mutex_lock(&adev->dm.dc_lock);
7942                         dc_stream_set_cursor_position(crtc_state->stream,
7943                                                       &position);
7944                         mutex_unlock(&adev->dm.dc_lock);
7945                 }
7946                 return;
7947         }
7948
7949         amdgpu_crtc->cursor_width = plane->state->crtc_w;
7950         amdgpu_crtc->cursor_height = plane->state->crtc_h;
7951
7952         memset(&attributes, 0, sizeof(attributes));
7953         attributes.address.high_part = upper_32_bits(address);
7954         attributes.address.low_part  = lower_32_bits(address);
7955         attributes.width             = plane->state->crtc_w;
7956         attributes.height            = plane->state->crtc_h;
7957         attributes.color_format      = CURSOR_MODE_COLOR_PRE_MULTIPLIED_ALPHA;
7958         attributes.rotation_angle    = 0;
7959         attributes.attribute_flags.value = 0;
7960
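        /* DC takes the cursor pitch in pixels, not bytes. */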
7961         attributes.pitch = afb->base.pitches[0] / afb->base.format->cpp[0];
7962
7963         if (crtc_state->stream) {
7964                 mutex_lock(&adev->dm.dc_lock);
7965                 if (!dc_stream_set_cursor_attributes(crtc_state->stream,
7966                                                          &attributes))
7967                         DRM_ERROR("DC failed to set cursor attributes\n");
7968
7969                 if (!dc_stream_set_cursor_position(crtc_state->stream,
7970                                                    &position))
7971                         DRM_ERROR("DC failed to set cursor position\n");
7972                 mutex_unlock(&adev->dm.dc_lock);
7973         }
7974 }
7975
7976 static void prepare_flip_isr(struct amdgpu_crtc *acrtc)
7977 {
7979         assert_spin_locked(&acrtc->base.dev->event_lock);
7980         WARN_ON(acrtc->event);
7981
7982         acrtc->event = acrtc->base.state->event;
7983
7984         /* Set the flip status */
7985         acrtc->pflip_status = AMDGPU_FLIP_SUBMITTED;
7986
7987         /* Mark this event as consumed */
7988         acrtc->base.state->event = NULL;
7989
7990         DC_LOG_PFLIP("crtc:%d, pflip_stat:AMDGPU_FLIP_SUBMITTED\n",
7991                      acrtc->crtc_id);
7992 }
7993
7994 static void update_freesync_state_on_stream(
7995         struct amdgpu_display_manager *dm,
7996         struct dm_crtc_state *new_crtc_state,
7997         struct dc_stream_state *new_stream,
7998         struct dc_plane_state *surface,
7999         u32 flip_timestamp_in_us)
8000 {
8001         struct mod_vrr_params vrr_params;
8002         struct dc_info_packet vrr_infopacket = {0};
8003         struct amdgpu_device *adev = dm->adev;
8004         struct amdgpu_crtc *acrtc = to_amdgpu_crtc(new_crtc_state->base.crtc);
8005         unsigned long flags;
8006         bool pack_sdp_v1_3 = false;
8007
8008         if (!new_stream)
8009                 return;
8010
8011         /*
8012          * TODO: Determine why min/max totals and vrefresh can be 0 here.
8013          * For now it's sufficient to just guard against these conditions.
8014          */
8015
8016         if (!new_stream->timing.h_total || !new_stream->timing.v_total)
8017                 return;
8018
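        /*
         * dm_irq_params is shared with the vblank/pflip interrupt handlers,
         * so read and update it under the event lock.
         */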
8019         spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);
8020         vrr_params = acrtc->dm_irq_params.vrr_params;
8021
8022         if (surface) {
8023                 mod_freesync_handle_preflip(
8024                         dm->freesync_module,
8025                         surface,
8026                         new_stream,
8027                         flip_timestamp_in_us,
8028                         &vrr_params);
8029
8030                 if (adev->family < AMDGPU_FAMILY_AI &&
8031                     amdgpu_dm_vrr_active(new_crtc_state)) {
8032                         mod_freesync_handle_v_update(dm->freesync_module,
8033                                                      new_stream, &vrr_params);
8034
8035                         /* Need to call this before the frame ends. */
8036                         dc_stream_adjust_vmin_vmax(dm->dc,
8037                                                    new_crtc_state->stream,
8038                                                    &vrr_params.adjust);
8039                 }
8040         }
8041
8042         mod_freesync_build_vrr_infopacket(
8043                 dm->freesync_module,
8044                 new_stream,
8045                 &vrr_params,
8046                 PACKET_TYPE_VRR,
8047                 TRANSFER_FUNC_UNKNOWN,
8048                 &vrr_infopacket,
8049                 pack_sdp_v1_3);
8050
8051         new_crtc_state->freesync_timing_changed |=
8052                 (memcmp(&acrtc->dm_irq_params.vrr_params.adjust,
8053                         &vrr_params.adjust,
8054                         sizeof(vrr_params.adjust)) != 0);
8055
8056         new_crtc_state->freesync_vrr_info_changed |=
8057                 (memcmp(&new_crtc_state->vrr_infopacket,
8058                         &vrr_infopacket,
8059                         sizeof(vrr_infopacket)) != 0);
8060
8061         acrtc->dm_irq_params.vrr_params = vrr_params;
8062         new_crtc_state->vrr_infopacket = vrr_infopacket;
8063
8064         new_stream->adjust = acrtc->dm_irq_params.vrr_params.adjust;
8065         new_stream->vrr_infopacket = vrr_infopacket;
8066
8067         if (new_crtc_state->freesync_vrr_info_changed)
                DRM_DEBUG_KMS("VRR packet update: crtc=%u enabled=%d state=%d\n",
8069                               new_crtc_state->base.crtc->base.id,
8070                               (int)new_crtc_state->base.vrr_enabled,
8071                               (int)vrr_params.state);
8072
8073         spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
8074 }
8075
8076 static void update_stream_irq_parameters(
8077         struct amdgpu_display_manager *dm,
8078         struct dm_crtc_state *new_crtc_state)
8079 {
8080         struct dc_stream_state *new_stream = new_crtc_state->stream;
8081         struct mod_vrr_params vrr_params;
8082         struct mod_freesync_config config = new_crtc_state->freesync_config;
8083         struct amdgpu_device *adev = dm->adev;
8084         struct amdgpu_crtc *acrtc = to_amdgpu_crtc(new_crtc_state->base.crtc);
8085         unsigned long flags;
8086
8087         if (!new_stream)
8088                 return;
8089
8090         /*
8091          * TODO: Determine why min/max totals and vrefresh can be 0 here.
8092          * For now it's sufficient to just guard against these conditions.
8093          */
8094         if (!new_stream->timing.h_total || !new_stream->timing.v_total)
8095                 return;
8096
8097         spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);
8098         vrr_params = acrtc->dm_irq_params.vrr_params;
8099
8100         if (new_crtc_state->vrr_supported &&
8101             config.min_refresh_in_uhz &&
8102             config.max_refresh_in_uhz) {
                /*
                 * If a freesync-compatible mode was set, config.state will
                 * already have been set in atomic check.
                 */
8107                 if (config.state == VRR_STATE_ACTIVE_FIXED && config.fixed_refresh_in_uhz &&
8108                     (!drm_atomic_crtc_needs_modeset(&new_crtc_state->base) ||
8109                      new_crtc_state->freesync_config.state == VRR_STATE_ACTIVE_FIXED)) {
8110                         vrr_params.max_refresh_in_uhz = config.max_refresh_in_uhz;
8111                         vrr_params.min_refresh_in_uhz = config.min_refresh_in_uhz;
8112                         vrr_params.fixed_refresh_in_uhz = config.fixed_refresh_in_uhz;
8113                         vrr_params.state = VRR_STATE_ACTIVE_FIXED;
8114                 } else {
8115                         config.state = new_crtc_state->base.vrr_enabled ?
8116                                                      VRR_STATE_ACTIVE_VARIABLE :
8117                                                      VRR_STATE_INACTIVE;
8118                 }
8119         } else {
8120                 config.state = VRR_STATE_UNSUPPORTED;
8121         }
8122
8123         mod_freesync_build_vrr_params(dm->freesync_module,
8124                                       new_stream,
8125                                       &config, &vrr_params);
8126
8127         new_crtc_state->freesync_timing_changed |=
8128                 (memcmp(&acrtc->dm_irq_params.vrr_params.adjust,
8129                         &vrr_params.adjust, sizeof(vrr_params.adjust)) != 0);
8130
8131         new_crtc_state->freesync_config = config;
8132         /* Copy state for access from DM IRQ handler */
8133         acrtc->dm_irq_params.freesync_config = config;
8134         acrtc->dm_irq_params.active_planes = new_crtc_state->active_planes;
8135         acrtc->dm_irq_params.vrr_params = vrr_params;
8136         spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
8137 }
8138
8139 static void amdgpu_dm_handle_vrr_transition(struct dm_crtc_state *old_state,
8140                                             struct dm_crtc_state *new_state)
8141 {
8142         bool old_vrr_active = amdgpu_dm_vrr_active(old_state);
8143         bool new_vrr_active = amdgpu_dm_vrr_active(new_state);
8144
8145         if (!old_vrr_active && new_vrr_active) {
8146                 /* Transition VRR inactive -> active:
8147                  * While VRR is active, we must not disable vblank irq, as a
8148                  * reenable after disable would compute bogus vblank/pflip
8149                  * timestamps if it happens inside the display front porch.
8150                  *
8151                  * We also need vupdate irq for the actual core vblank handling
8152                  * at end of vblank.
8153                  */
8154                 dm_set_vupdate_irq(new_state->base.crtc, true);
8155                 drm_crtc_vblank_get(new_state->base.crtc);
8156                 DRM_DEBUG_DRIVER("%s: crtc=%u VRR off->on: Get vblank ref\n",
8157                                  __func__, new_state->base.crtc->base.id);
8158         } else if (old_vrr_active && !new_vrr_active) {
8159                 /* Transition VRR active -> inactive:
8160                  * Allow vblank irq disable again for fixed refresh rate.
8161                  */
8162                 dm_set_vupdate_irq(new_state->base.crtc, false);
8163                 drm_crtc_vblank_put(new_state->base.crtc);
8164                 DRM_DEBUG_DRIVER("%s: crtc=%u VRR on->off: Drop vblank ref\n",
8165                                  __func__, new_state->base.crtc->base.id);
8166         }
8167 }
8168
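     /*
      * Commit the cursor plane update for every cursor plane in @state. Called
      * either before all planes are disabled or after they have been
      * programmed, see amdgpu_dm_commit_planes().
      */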
8169 static void amdgpu_dm_commit_cursors(struct drm_atomic_state *state)
8170 {
8171         struct drm_plane *plane;
8172         struct drm_plane_state *old_plane_state, *new_plane_state;
8173         int i;
8174
8175         /*
8176          * TODO: Make this per-stream so we don't issue redundant updates for
8177          * commits with multiple streams.
8178          */
8179         for_each_oldnew_plane_in_state(state, plane, old_plane_state,
8180                                        new_plane_state, i)
8181                 if (plane->type == DRM_PLANE_TYPE_CURSOR)
8182                         handle_cursor_update(plane, old_plane_state);
8183 }
8184
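     /*
      * Program all plane updates for @pcrtc: build a dc update bundle from the
      * atomic state, throttle page flips against the target vblank, and commit
      * the result to DC, handling PSR and cursor ordering along the way.
      */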
8185 static void amdgpu_dm_commit_planes(struct drm_atomic_state *state,
8186                                     struct dc_state *dc_state,
8187                                     struct drm_device *dev,
8188                                     struct amdgpu_display_manager *dm,
8189                                     struct drm_crtc *pcrtc,
8190                                     bool wait_for_vblank)
8191 {
8192         uint32_t i;
8193         uint64_t timestamp_ns;
8194         struct drm_plane *plane;
8195         struct drm_plane_state *old_plane_state, *new_plane_state;
8196         struct amdgpu_crtc *acrtc_attach = to_amdgpu_crtc(pcrtc);
8197         struct drm_crtc_state *new_pcrtc_state =
8198                         drm_atomic_get_new_crtc_state(state, pcrtc);
8199         struct dm_crtc_state *acrtc_state = to_dm_crtc_state(new_pcrtc_state);
8200         struct dm_crtc_state *dm_old_crtc_state =
8201                         to_dm_crtc_state(drm_atomic_get_old_crtc_state(state, pcrtc));
8202         int planes_count = 0, vpos, hpos;
8203         long r;
8204         unsigned long flags;
8205         struct amdgpu_bo *abo;
8206         uint32_t target_vblank, last_flip_vblank;
8207         bool vrr_active = amdgpu_dm_vrr_active(acrtc_state);
8208         bool pflip_present = false;
8209         struct {
8210                 struct dc_surface_update surface_updates[MAX_SURFACES];
8211                 struct dc_plane_info plane_infos[MAX_SURFACES];
8212                 struct dc_scaling_info scaling_infos[MAX_SURFACES];
8213                 struct dc_flip_addrs flip_addrs[MAX_SURFACES];
8214                 struct dc_stream_update stream_update;
8215         } *bundle;
8216
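             /*
              * The bundle carries MAX_SURFACES worth of surface updates and is
              * likely too large for the kernel stack, so allocate it from the
              * heap instead.
              */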
8217         bundle = kzalloc(sizeof(*bundle), GFP_KERNEL);
8218
8219         if (!bundle) {
8220                 dm_error("Failed to allocate update bundle\n");
8221                 goto cleanup;
8222         }
8223
8224         /*
8225          * Disable the cursor first if we're disabling all the planes.
8226          * It'll remain on the screen after the planes are re-enabled
8227          * if we don't.
8228          */
8229         if (acrtc_state->active_planes == 0)
8230                 amdgpu_dm_commit_cursors(state);
8231
8232         /* update planes when needed */
8233         for_each_oldnew_plane_in_state(state, plane, old_plane_state, new_plane_state, i) {
8234                 struct drm_crtc *crtc = new_plane_state->crtc;
8235                 struct drm_crtc_state *new_crtc_state;
8236                 struct drm_framebuffer *fb = new_plane_state->fb;
8237                 struct amdgpu_framebuffer *afb = (struct amdgpu_framebuffer *)fb;
8238                 bool plane_needs_flip;
8239                 struct dc_plane_state *dc_plane;
8240                 struct dm_plane_state *dm_new_plane_state = to_dm_plane_state(new_plane_state);
8241
8242                 /* Cursor plane is handled after stream updates */
8243                 if (plane->type == DRM_PLANE_TYPE_CURSOR)
8244                         continue;
8245
8246                 if (!fb || !crtc || pcrtc != crtc)
8247                         continue;
8248
8249                 new_crtc_state = drm_atomic_get_new_crtc_state(state, crtc);
8250                 if (!new_crtc_state->active)
8251                         continue;
8252
8253                 dc_plane = dm_new_plane_state->dc_state;
8254
8255                 bundle->surface_updates[planes_count].surface = dc_plane;
8256                 if (new_pcrtc_state->color_mgmt_changed) {
8257                         bundle->surface_updates[planes_count].gamma = dc_plane->gamma_correction;
8258                         bundle->surface_updates[planes_count].in_transfer_func = dc_plane->in_transfer_func;
8259                         bundle->surface_updates[planes_count].gamut_remap_matrix = &dc_plane->gamut_remap_matrix;
8260                 }
8261
8262                 fill_dc_scaling_info(new_plane_state,
8263                                      &bundle->scaling_infos[planes_count]);
8264
8265                 bundle->surface_updates[planes_count].scaling_info =
8266                         &bundle->scaling_infos[planes_count];
8267
8268                 plane_needs_flip = old_plane_state->fb && new_plane_state->fb;
8269
8270                 pflip_present = pflip_present || plane_needs_flip;
8271
8272                 if (!plane_needs_flip) {
8273                         planes_count += 1;
8274                         continue;
8275                 }
8276
8277                 abo = gem_to_amdgpu_bo(fb->obj[0]);
8278
8279                 /*
8280                  * Wait for all fences on this FB. Do a limited wait to avoid
8281                  * deadlock during GPU reset, when this fence would not signal
8282                  * while we hold the reservation lock for the BO.
8283                  */
8284                 r = dma_resv_wait_timeout_rcu(abo->tbo.base.resv, true,
8285                                                         false,
8286                                                         msecs_to_jiffies(5000));
8287                 if (unlikely(r <= 0))
8288                         DRM_ERROR("Waiting for fences timed out!");
8289
8290                 fill_dc_plane_info_and_addr(
8291                         dm->adev, new_plane_state,
8292                         afb->tiling_flags,
8293                         &bundle->plane_infos[planes_count],
8294                         &bundle->flip_addrs[planes_count].address,
8295                         afb->tmz_surface, false);
8296
8297                 DRM_DEBUG_ATOMIC("plane: id=%d dcc_en=%d\n",
8298                                  new_plane_state->plane->index,
8299                                  bundle->plane_infos[planes_count].dcc.enable);
8300
8301                 bundle->surface_updates[planes_count].plane_info =
8302                         &bundle->plane_infos[planes_count];
8303
8304                 /*
8305                  * Only allow immediate flips for fast updates that don't
8306                  * change FB pitch, DCC state, rotation or mirroring.
8307                  */
8308                 bundle->flip_addrs[planes_count].flip_immediate =
8309                         crtc->state->async_flip &&
8310                         acrtc_state->update_type == UPDATE_TYPE_FAST;
8311
8312                 timestamp_ns = ktime_get_ns();
8313                 bundle->flip_addrs[planes_count].flip_timestamp_in_us = div_u64(timestamp_ns, 1000);
8314                 bundle->surface_updates[planes_count].flip_addr = &bundle->flip_addrs[planes_count];
8315                 bundle->surface_updates[planes_count].surface = dc_plane;
8316
8317                 if (!bundle->surface_updates[planes_count].surface) {
8318                         DRM_ERROR("No surface for CRTC: id=%d\n",
8319                                         acrtc_attach->crtc_id);
8320                         continue;
8321                 }
8322
8323                 if (plane == pcrtc->primary)
8324                         update_freesync_state_on_stream(
8325                                 dm,
8326                                 acrtc_state,
8327                                 acrtc_state->stream,
8328                                 dc_plane,
8329                                 bundle->flip_addrs[planes_count].flip_timestamp_in_us);
8330
8331                 DRM_DEBUG_ATOMIC("%s Flipping to hi: 0x%x, low: 0x%x\n",
8332                                  __func__,
8333                                  bundle->flip_addrs[planes_count].address.grph.addr.high_part,
8334                                  bundle->flip_addrs[planes_count].address.grph.addr.low_part);
8335
8336                 planes_count += 1;
8337
8338         }
8339
8340         if (pflip_present) {
8341                 if (!vrr_active) {
8342                         /* Use old throttling in non-vrr fixed refresh rate mode
8343                          * to keep flip scheduling based on target vblank counts
8344                          * working in a backwards compatible way, e.g., for
8345                          * clients using the GLX_OML_sync_control extension or
8346                          * DRI3/Present extension with defined target_msc.
8347                          */
8348                         last_flip_vblank = amdgpu_get_vblank_counter_kms(pcrtc);
8349                 } else {
8351                         /* For variable refresh rate mode only:
8352                          * Get vblank of last completed flip to avoid > 1 vrr
8353                          * flips per video frame by use of throttling, but allow
8354                          * flip programming anywhere in the possibly large
8355                          * variable vrr vblank interval for fine-grained flip
8356                          * timing control and more opportunity to avoid stutter
8357                          * on late submission of flips.
8358                          */
8359                         spin_lock_irqsave(&pcrtc->dev->event_lock, flags);
8360                         last_flip_vblank = acrtc_attach->dm_irq_params.last_flip_vblank;
8361                         spin_unlock_irqrestore(&pcrtc->dev->event_lock, flags);
8362                 }
8363
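                     /*
                      * wait_for_vblank is used as a 0/1 increment here: flips
                      * are throttled to at most one vblank after the last
                      * completed flip.
                      */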
8364                 target_vblank = last_flip_vblank + wait_for_vblank;
8365
8366                 /*
8367                  * Wait until we're out of the vertical blank period before the one
8368                  * targeted by the flip
8369                  */
8370                 while ((acrtc_attach->enabled &&
8371                         (amdgpu_display_get_crtc_scanoutpos(dm->ddev, acrtc_attach->crtc_id,
8372                                                             0, &vpos, &hpos, NULL,
8373                                                             NULL, &pcrtc->hwmode)
8374                          & (DRM_SCANOUTPOS_VALID | DRM_SCANOUTPOS_IN_VBLANK)) ==
8375                         (DRM_SCANOUTPOS_VALID | DRM_SCANOUTPOS_IN_VBLANK) &&
8376                         (int)(target_vblank -
8377                           amdgpu_get_vblank_counter_kms(pcrtc)) > 0)) {
8378                         usleep_range(1000, 1100);
8379                 }
8380
8381                 /**
8382                  * Prepare the flip event for the pageflip interrupt to handle.
8383                  *
8384                  * This only works in the case where we've already turned on the
8385                  * appropriate hardware blocks (eg. HUBP) so in the transition case
8386                  * from 0 -> n planes we have to skip a hardware generated event
8387                  * and rely on sending it from software.
8388                  */
8389                 if (acrtc_attach->base.state->event &&
8390                     acrtc_state->active_planes > 0) {
8391                         drm_crtc_vblank_get(pcrtc);
8392
8393                         spin_lock_irqsave(&pcrtc->dev->event_lock, flags);
8394
8395                         WARN_ON(acrtc_attach->pflip_status != AMDGPU_FLIP_NONE);
8396                         prepare_flip_isr(acrtc_attach);
8397
8398                         spin_unlock_irqrestore(&pcrtc->dev->event_lock, flags);
8399                 }
8400
8401                 if (acrtc_state->stream) {
8402                         if (acrtc_state->freesync_vrr_info_changed)
8403                                 bundle->stream_update.vrr_infopacket =
8404                                         &acrtc_state->stream->vrr_infopacket;
8405                 }
8406         }
8407
8408         /* Update the planes if changed or disable if we don't have any. */
8409         if ((planes_count || acrtc_state->active_planes == 0) &&
8410                 acrtc_state->stream) {
8411                 bundle->stream_update.stream = acrtc_state->stream;
8412                 if (new_pcrtc_state->mode_changed) {
8413                         bundle->stream_update.src = acrtc_state->stream->src;
8414                         bundle->stream_update.dst = acrtc_state->stream->dst;
8415                 }
8416
8417                 if (new_pcrtc_state->color_mgmt_changed) {
8418                         /*
8419                          * TODO: This isn't fully correct since we've actually
8420                          * already modified the stream in place.
8421                          */
8422                         bundle->stream_update.gamut_remap =
8423                                 &acrtc_state->stream->gamut_remap_matrix;
8424                         bundle->stream_update.output_csc_transform =
8425                                 &acrtc_state->stream->csc_color_matrix;
8426                         bundle->stream_update.out_transfer_func =
8427                                 acrtc_state->stream->out_transfer_func;
8428                 }
8429
8430                 acrtc_state->stream->abm_level = acrtc_state->abm_level;
8431                 if (acrtc_state->abm_level != dm_old_crtc_state->abm_level)
8432                         bundle->stream_update.abm_level = &acrtc_state->abm_level;
8433
8434                 /*
8435                  * If FreeSync state on the stream has changed then we need to
8436                  * re-adjust the min/max bounds now that DC doesn't handle this
8437                  * as part of commit.
8438                  */
8439                 if (is_dc_timing_adjust_needed(dm_old_crtc_state, acrtc_state)) {
8440                         spin_lock_irqsave(&pcrtc->dev->event_lock, flags);
8441                         dc_stream_adjust_vmin_vmax(
8442                                 dm->dc, acrtc_state->stream,
8443                                 &acrtc_attach->dm_irq_params.vrr_params.adjust);
8444                         spin_unlock_irqrestore(&pcrtc->dev->event_lock, flags);
8445                 }
8446                 mutex_lock(&dm->dc_lock);
8447                 if ((acrtc_state->update_type > UPDATE_TYPE_FAST) &&
8448                                 acrtc_state->stream->link->psr_settings.psr_allow_active)
8449                         amdgpu_dm_psr_disable(acrtc_state->stream);
8450
8451                 dc_commit_updates_for_stream(dm->dc,
8452                                                      bundle->surface_updates,
8453                                                      planes_count,
8454                                                      acrtc_state->stream,
8455                                                      &bundle->stream_update,
8456                                                      dc_state);
8457
8458                 /**
8459                  * Enable or disable the interrupts on the backend.
8460                  *
8461                  * Most pipes are put into power gating when unused.
8462                  *
8463                  * When power gating is enabled on a pipe we lose the
8464                  * interrupt enablement state when power gating is disabled.
8465                  *
8466                  * So we need to update the IRQ control state in hardware
8467                  * whenever the pipe turns on (since it could be previously
8468                  * power gated) or off (since some pipes can't be power gated
8469                  * on some ASICs).
8470                  */
8471                 if (dm_old_crtc_state->active_planes != acrtc_state->active_planes)
8472                         dm_update_pflip_irq_state(drm_to_adev(dev),
8473                                                   acrtc_attach);
8474
8475                 if ((acrtc_state->update_type > UPDATE_TYPE_FAST) &&
8476                                 acrtc_state->stream->link->psr_settings.psr_version != DC_PSR_VERSION_UNSUPPORTED &&
8477                                 !acrtc_state->stream->link->psr_settings.psr_feature_enabled)
8478                         amdgpu_dm_link_setup_psr(acrtc_state->stream);
8479                 else if ((acrtc_state->update_type == UPDATE_TYPE_FAST) &&
8480                                 acrtc_state->stream->link->psr_settings.psr_feature_enabled &&
8481                                 !acrtc_state->stream->link->psr_settings.psr_allow_active) {
8482                         amdgpu_dm_psr_enable(acrtc_state->stream);
8483                 }
8484
8485                 mutex_unlock(&dm->dc_lock);
8486         }
8487
8488         /*
8489          * Update cursor state *after* programming all the planes.
8490          * This avoids redundant programming when all planes are being
8491          * disabled - that case already committed the cursors up front.
8492          */
8493         if (acrtc_state->active_planes)
8494                 amdgpu_dm_commit_cursors(state);
8495
8496 cleanup:
8497         kfree(bundle);
8498 }
8499
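     /*
      * Walk the connectors in the atomic state and notify the audio component
      * of ELD changes: removals first (audio_inst reset to -1), then additions
      * for each modeset stream that reports an audio instance.
      */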
8500 static void amdgpu_dm_commit_audio(struct drm_device *dev,
8501                                    struct drm_atomic_state *state)
8502 {
8503         struct amdgpu_device *adev = drm_to_adev(dev);
8504         struct amdgpu_dm_connector *aconnector;
8505         struct drm_connector *connector;
8506         struct drm_connector_state *old_con_state, *new_con_state;
8507         struct drm_crtc_state *new_crtc_state;
8508         struct dm_crtc_state *new_dm_crtc_state;
8509         const struct dc_stream_status *status;
8510         int i, inst;
8511
8512         /* Notify audio device removals. */
8513         for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) {
8514                 if (old_con_state->crtc != new_con_state->crtc) {
8515                         /* CRTC changes require notification. */
8516                         goto notify;
8517                 }
8518
8519                 if (!new_con_state->crtc)
8520                         continue;
8521
8522                 new_crtc_state = drm_atomic_get_new_crtc_state(
8523                         state, new_con_state->crtc);
8524
8525                 if (!new_crtc_state)
8526                         continue;
8527
8528                 if (!drm_atomic_crtc_needs_modeset(new_crtc_state))
8529                         continue;
8530
8531         notify:
8532                 aconnector = to_amdgpu_dm_connector(connector);
8533
8534                 mutex_lock(&adev->dm.audio_lock);
8535                 inst = aconnector->audio_inst;
8536                 aconnector->audio_inst = -1;
8537                 mutex_unlock(&adev->dm.audio_lock);
8538
8539                 amdgpu_dm_audio_eld_notify(adev, inst);
8540         }
8541
8542         /* Notify audio device additions. */
8543         for_each_new_connector_in_state(state, connector, new_con_state, i) {
8544                 if (!new_con_state->crtc)
8545                         continue;
8546
8547                 new_crtc_state = drm_atomic_get_new_crtc_state(
8548                         state, new_con_state->crtc);
8549
8550                 if (!new_crtc_state)
8551                         continue;
8552
8553                 if (!drm_atomic_crtc_needs_modeset(new_crtc_state))
8554                         continue;
8555
8556                 new_dm_crtc_state = to_dm_crtc_state(new_crtc_state);
8557                 if (!new_dm_crtc_state->stream)
8558                         continue;
8559
8560                 status = dc_stream_get_status(new_dm_crtc_state->stream);
8561                 if (!status)
8562                         continue;
8563
8564                 aconnector = to_amdgpu_dm_connector(connector);
8565
8566                 mutex_lock(&adev->dm.audio_lock);
8567                 inst = status->audio_inst;
8568                 aconnector->audio_inst = inst;
8569                 mutex_unlock(&adev->dm.audio_lock);
8570
8571                 amdgpu_dm_audio_eld_notify(adev, inst);
8572         }
8573 }
8574
8575 /**
8576  * amdgpu_dm_crtc_copy_transient_flags - copy mirrored flags from DRM to DC
8577  * @crtc_state: the DRM CRTC state
8578  * @stream_state: the DC stream state.
8579  *
8580  * Copy the mirrored transient state flags from DRM to DC. It is used to bring
8581  * a dc_stream_state's flags in sync with a drm_crtc_state's flags.
8582  */
8583 static void amdgpu_dm_crtc_copy_transient_flags(struct drm_crtc_state *crtc_state,
8584                                                 struct dc_stream_state *stream_state)
8585 {
8586         stream_state->mode_changed = drm_atomic_crtc_needs_modeset(crtc_state);
8587 }
8588
8589 /**
8590  * amdgpu_dm_atomic_commit_tail() - AMDgpu DM's commit tail implementation.
8591  * @state: The atomic state to commit
8592  *
8593  * This will tell DC to commit the constructed DC state from atomic_check,
8594  * programming the hardware. Any failure here implies a hardware problem, since
8595  * atomic check should have filtered anything non-kosher.
8596  */
8597 static void amdgpu_dm_atomic_commit_tail(struct drm_atomic_state *state)
8598 {
8599         struct drm_device *dev = state->dev;
8600         struct amdgpu_device *adev = drm_to_adev(dev);
8601         struct amdgpu_display_manager *dm = &adev->dm;
8602         struct dm_atomic_state *dm_state;
8603         struct dc_state *dc_state = NULL, *dc_state_temp = NULL;
8604         uint32_t i, j;
8605         struct drm_crtc *crtc;
8606         struct drm_crtc_state *old_crtc_state, *new_crtc_state;
8607         unsigned long flags;
8608         bool wait_for_vblank = true;
8609         struct drm_connector *connector;
8610         struct drm_connector_state *old_con_state, *new_con_state;
8611         struct dm_crtc_state *dm_old_crtc_state, *dm_new_crtc_state;
8612         int crtc_disable_count = 0;
8613         bool mode_set_reset_required = false;
8614
8615         trace_amdgpu_dm_atomic_commit_tail_begin(state);
8616
8617         drm_atomic_helper_update_legacy_modeset_state(dev, state);
8618
8619         dm_state = dm_atomic_get_new_state(state);
8620         if (dm_state && dm_state->context) {
8621                 dc_state = dm_state->context;
8622         } else {
8623                 /* No state changes, retain current state. */
8624                 dc_state_temp = dc_create_state(dm->dc);
8625                 ASSERT(dc_state_temp);
8626                 dc_state = dc_state_temp;
8627                 dc_resource_state_copy_construct_current(dm->dc, dc_state);
8628         }
8629
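             /*
              * Disable interrupts and drop the stream reference for every CRTC
              * that is being turned off or fully modeset, before any hardware
              * programming, so the IRQ handlers cannot run against a stream
              * that is about to go away.
              */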
8630         for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state,
8631                                       new_crtc_state, i) {
8632                 struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
8633
8634                 dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
8635
8636                 if (old_crtc_state->active &&
8637                     (!new_crtc_state->active ||
8638                      drm_atomic_crtc_needs_modeset(new_crtc_state))) {
8639                         manage_dm_interrupts(adev, acrtc, false);
8640                         dc_stream_release(dm_old_crtc_state->stream);
8641                 }
8642         }
8643
8644         drm_atomic_helper_calc_timestamping_constants(state);
8645
8646         /* update changed items */
8647         for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
8648                 struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
8649
8650                 dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
8651                 dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
8652
8653                 DRM_DEBUG_ATOMIC(
8654                         "amdgpu_crtc id:%d crtc_state_flags: enable:%d, active:%d, "
8655                         "planes_changed:%d, mode_changed:%d, active_changed:%d, "
8656                         "connectors_changed:%d\n",
8657                         acrtc->crtc_id,
8658                         new_crtc_state->enable,
8659                         new_crtc_state->active,
8660                         new_crtc_state->planes_changed,
8661                         new_crtc_state->mode_changed,
8662                         new_crtc_state->active_changed,
8663                         new_crtc_state->connectors_changed);
8664
8665                 /* Disable cursor if disabling crtc */
8666                 if (old_crtc_state->active && !new_crtc_state->active) {
8667                         struct dc_cursor_position position;
8668
8669                         memset(&position, 0, sizeof(position));
8670                         mutex_lock(&dm->dc_lock);
8671                         dc_stream_set_cursor_position(dm_old_crtc_state->stream, &position);
8672                         mutex_unlock(&dm->dc_lock);
8673                 }
8674
8675                 /* Copy all transient state flags into dc state */
8676                 if (dm_new_crtc_state->stream) {
8677                         amdgpu_dm_crtc_copy_transient_flags(&dm_new_crtc_state->base,
8678                                                             dm_new_crtc_state->stream);
8679                 }
8680
8681                 /* handles headless hotplug case, updating new_state and
8682                  * aconnector as needed
8683                  */
8684
8685                 if (modeset_required(new_crtc_state, dm_new_crtc_state->stream, dm_old_crtc_state->stream)) {
8686
8687                         DRM_DEBUG_ATOMIC("Atomic commit: SET crtc id %d: [%p]\n", acrtc->crtc_id, acrtc);
8688
8689                         if (!dm_new_crtc_state->stream) {
8690                                 /*
8691                                  * This can happen if delivery of userspace
8692                                  * notifications raced with a disconnect:
8693                                  * userspace tries to set a mode on a display
8694                                  * that is in fact already disconnected, so the
8695                                  * aconnector's dc_sink is NULL. We expect a
8696                                  * mode reset to follow shortly.
8697                                  *
8698                                  * This can also happen when an unplug occurs
8699                                  * during the resume sequence.
8700                                  *
8701                                  * In both cases we want to pretend we still
8702                                  * have a sink, to keep the pipe running so that
8703                                  * hw state stays consistent with the sw state.
8704                                  */
8705                                 DRM_DEBUG_DRIVER("%s: Failed to create new stream for crtc %d\n",
8706                                                 __func__, acrtc->base.base.id);
8707                                 continue;
8708                         }
8709
8710                         if (dm_old_crtc_state->stream)
8711                                 remove_stream(adev, acrtc, dm_old_crtc_state->stream);
8712
8713                         pm_runtime_get_noresume(dev->dev);
8714
8715                         acrtc->enabled = true;
8716                         acrtc->hw_mode = new_crtc_state->mode;
8717                         crtc->hwmode = new_crtc_state->mode;
8718                         mode_set_reset_required = true;
8719                 } else if (modereset_required(new_crtc_state)) {
8720                         DRM_DEBUG_ATOMIC("Atomic commit: RESET. crtc id %d:[%p]\n", acrtc->crtc_id, acrtc);
8721                         /* i.e. reset mode */
8722                         if (dm_old_crtc_state->stream)
8723                                 remove_stream(adev, acrtc, dm_old_crtc_state->stream);
8724
8725                         mode_set_reset_required = true;
8726                 }
8727         } /* for_each_crtc_in_state() */
8728
8729         if (dc_state) {
8730                 /* if there is a mode set or reset, disable eDP PSR */
8731                 if (mode_set_reset_required)
8732                         amdgpu_dm_psr_disable_all(dm);
8733
8734                 dm_enable_per_frame_crtc_master_sync(dc_state);
8735                 mutex_lock(&dm->dc_lock);
8736                 WARN_ON(!dc_commit_state(dm->dc, dc_state));
8737 #if defined(CONFIG_DRM_AMD_DC_DCN)
8738                 /* Allow idle optimization when vblank count is 0 for display off */
8739                 if (dm->active_vblank_irq_count == 0)
8740                         dc_allow_idle_optimizations(dm->dc, true);
8741 #endif
8742                 mutex_unlock(&dm->dc_lock);
8743         }
8744
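             /*
              * Record the OTG instance that now backs each active stream on
              * its amdgpu_crtc, so later vblank and pageflip handling
              * addresses the correct output timing generator.
              */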
8745         for_each_new_crtc_in_state(state, crtc, new_crtc_state, i) {
8746                 struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
8747
8748                 dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
8749
8750                 if (dm_new_crtc_state->stream != NULL) {
8751                         const struct dc_stream_status *status =
8752                                         dc_stream_get_status(dm_new_crtc_state->stream);
8753
8754                         if (!status)
8755                                 status = dc_stream_get_status_from_state(dc_state,
8756                                                                          dm_new_crtc_state->stream);
8757                         if (!status)
8758                                 DC_ERR("got no status for stream %p on acrtc%p\n", dm_new_crtc_state->stream, acrtc);
8759                         else
8760                                 acrtc->otg_inst = status->primary_otg_inst;
8761                 }
8762         }
8763 #ifdef CONFIG_DRM_AMD_DC_HDCP
8764         for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) {
8765                 struct dm_connector_state *dm_new_con_state = to_dm_connector_state(new_con_state);
8766                 struct amdgpu_crtc *acrtc = to_amdgpu_crtc(dm_new_con_state->base.crtc);
8767                 struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
8768
8769                 new_crtc_state = NULL;
8770
8771                 if (acrtc)
8772                         new_crtc_state = drm_atomic_get_new_crtc_state(state, &acrtc->base);
8773
8774                 dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
8775
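                     /*
                      * If the stream backing this connector is going away
                      * while content protection is still ENABLED, tear down
                      * HDCP on the link and fall back to DESIRED so that
                      * protection can be re-negotiated on a later commit.
                      */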
8776                 if (dm_new_crtc_state && dm_new_crtc_state->stream == NULL &&
8777                     connector->state->content_protection == DRM_MODE_CONTENT_PROTECTION_ENABLED) {
8778                         hdcp_reset_display(adev->dm.hdcp_workqueue, aconnector->dc_link->link_index);
8779                         new_con_state->content_protection = DRM_MODE_CONTENT_PROTECTION_DESIRED;
8780                         dm_new_con_state->update_hdcp = true;
8781                         continue;
8782                 }
8783
8784                 if (is_content_protection_different(new_con_state, old_con_state, connector, adev->dm.hdcp_workqueue))
8785                         hdcp_update_display(
8786                                 adev->dm.hdcp_workqueue, aconnector->dc_link->link_index, aconnector,
8787                                 new_con_state->hdcp_content_type,
8788                                 new_con_state->content_protection == DRM_MODE_CONTENT_PROTECTION_DESIRED);
8789         }
8790 #endif
8791
8792         /* Handle connector state changes */
8793         for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) {
8794                 struct dm_connector_state *dm_new_con_state = to_dm_connector_state(new_con_state);
8795                 struct dm_connector_state *dm_old_con_state = to_dm_connector_state(old_con_state);
8796                 struct amdgpu_crtc *acrtc = to_amdgpu_crtc(dm_new_con_state->base.crtc);
8797                 struct dc_surface_update dummy_updates[MAX_SURFACES];
8798                 struct dc_stream_update stream_update;
8799                 struct dc_info_packet hdr_packet;
8800                 struct dc_stream_status *status = NULL;
8801                 bool abm_changed, hdr_changed, scaling_changed;
8802
8803                 memset(&dummy_updates, 0, sizeof(dummy_updates));
8804                 memset(&stream_update, 0, sizeof(stream_update));
8805
8806                 if (acrtc) {
8807                         new_crtc_state = drm_atomic_get_new_crtc_state(state, &acrtc->base);
8808                         old_crtc_state = drm_atomic_get_old_crtc_state(state, &acrtc->base);
8809                 }
8810
8811                 /* Skip any modesets/resets */
8812                 if (!acrtc || drm_atomic_crtc_needs_modeset(new_crtc_state))
8813                         continue;
8814
8815                 dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
8816                 dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
8817
8818                 scaling_changed = is_scaling_state_different(dm_new_con_state,
8819                                                              dm_old_con_state);
8820
8821                 abm_changed = dm_new_crtc_state->abm_level !=
8822                               dm_old_crtc_state->abm_level;
8823
8824                 hdr_changed =
8825                         is_hdr_metadata_different(old_con_state, new_con_state);
8826
8827                 if (!scaling_changed && !abm_changed && !hdr_changed)
8828                         continue;
8829
8830                 stream_update.stream = dm_new_crtc_state->stream;
8831                 if (scaling_changed) {
8832                         update_stream_scaling_settings(&dm_new_con_state->base.crtc->mode,
8833                                         dm_new_con_state, dm_new_crtc_state->stream);
8834
8835                         stream_update.src = dm_new_crtc_state->stream->src;
8836                         stream_update.dst = dm_new_crtc_state->stream->dst;
8837                 }
8838
8839                 if (abm_changed) {
8840                         dm_new_crtc_state->stream->abm_level = dm_new_crtc_state->abm_level;
8841
8842                         stream_update.abm_level = &dm_new_crtc_state->abm_level;
8843                 }
8844
8845                 if (hdr_changed) {
8846                         fill_hdr_info_packet(new_con_state, &hdr_packet);
8847                         stream_update.hdr_static_metadata = &hdr_packet;
8848                 }
8849
8850                 status = dc_stream_get_status(dm_new_crtc_state->stream);
8851                 if (WARN_ON(!status || !status->plane_count))
8852                         continue;
8853
8854                 /*
8855                  * TODO: DC refuses to perform stream updates without a dc_surface_update.
8856                  * Here we create an empty update on each plane.
8857                  * To fix this, DC should permit updating only stream properties.
8858                  */
8859                 for (j = 0; j < status->plane_count; j++)
8860                         dummy_updates[j].surface = status->plane_states[0];
8861
8862
8863                 mutex_lock(&dm->dc_lock);
8864                 dc_commit_updates_for_stream(dm->dc,
8865                                                      dummy_updates,
8866                                                      status->plane_count,
8867                                                      dm_new_crtc_state->stream,
8868                                                      &stream_update,
8869                                                      dc_state);
8870                 mutex_unlock(&dm->dc_lock);
8871         }
8872
8873         /* Count number of newly disabled CRTCs for dropping PM refs later. */
8874         for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state,
8875                                       new_crtc_state, i) {
8876                 if (old_crtc_state->active && !new_crtc_state->active)
8877                         crtc_disable_count++;
8878
8879                 dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
8880                 dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
8881
8882                 /* For freesync config update on crtc state and params for irq */
8883                 update_stream_irq_parameters(dm, dm_new_crtc_state);
8884
8885                 /* Handle vrr on->off / off->on transitions */
8886                 amdgpu_dm_handle_vrr_transition(dm_old_crtc_state,
8887                                                 dm_new_crtc_state);
8888         }
8889
8890         /**
8891          * Enable interrupts for CRTCs that are newly enabled or went through
8892          * a modeset. This is intentionally deferred until after the front end
8893          * state has been modified, so that the OTG is on and the IRQ
8894          * handlers do not access stale or invalid state.
8895          */
8896         for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
8897                 struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
8898 #ifdef CONFIG_DEBUG_FS
8899                 bool configure_crc = false;
8900                 enum amdgpu_dm_pipe_crc_source cur_crc_src;
8901 #endif
8902                 dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
8903
8904                 if (new_crtc_state->active &&
8905                     (!old_crtc_state->active ||
8906                      drm_atomic_crtc_needs_modeset(new_crtc_state))) {
8907                         dc_stream_retain(dm_new_crtc_state->stream);
8908                         acrtc->dm_irq_params.stream = dm_new_crtc_state->stream;
8909                         manage_dm_interrupts(adev, acrtc, true);
8910
8911 #ifdef CONFIG_DEBUG_FS
8912                         /**
8913                          * Frontend may have changed so reapply the CRC capture
8914                          * settings for the stream.
8915                          */
8916                         dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
8917                         spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);
8918                         cur_crc_src = acrtc->dm_irq_params.crc_src;
8919                         spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
8920
8921                         if (amdgpu_dm_is_valid_crc_source(cur_crc_src)) {
8922                                 configure_crc = true;
8923 #if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
8924                                 if (amdgpu_dm_crc_window_is_activated(crtc))
8925                                         configure_crc = false;
8926 #endif
8927                         }
8928
8929                         if (configure_crc)
8930                                 amdgpu_dm_crtc_configure_crc_source(
8931                                         crtc, dm_new_crtc_state, cur_crc_src);
8932 #endif
8933                 }
8934         }
8935
8936         for_each_new_crtc_in_state(state, crtc, new_crtc_state, j)
8937                 if (new_crtc_state->async_flip)
8938                         wait_for_vblank = false;
8939
8940         /* update planes when needed per crtc */
8941         for_each_new_crtc_in_state(state, crtc, new_crtc_state, j) {
8942                 dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
8943
8944                 if (dm_new_crtc_state->stream)
8945                         amdgpu_dm_commit_planes(state, dc_state, dev,
8946                                                 dm, crtc, wait_for_vblank);
8947         }
8948
8949         /* Update audio instances for each connector. */
8950         amdgpu_dm_commit_audio(dev, state);
8951
8952         /*
8953          * Send a vblank event for every CRTC event not handled by the flip
8954          * path, and mark it consumed for drm_atomic_helper_commit_hw_done().
8955          */
8956         spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);
8957         for_each_new_crtc_in_state(state, crtc, new_crtc_state, i) {
8958
8959                 if (new_crtc_state->event)
8960                         drm_send_event_locked(dev, &new_crtc_state->event->base);
8961
8962                 new_crtc_state->event = NULL;
8963         }
8964         spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
8965
8966         /* Signal HW programming completion */
8967         drm_atomic_helper_commit_hw_done(state);
8968
8969         if (wait_for_vblank)
8970                 drm_atomic_helper_wait_for_flip_done(dev, state);
8971
8972         drm_atomic_helper_cleanup_planes(dev, state);
8973
8974         /* Release the stolen VGA memory back to VRAM */
8975         if (!adev->mman.keep_stolen_vga_memory)
8976                 amdgpu_bo_free_kernel(&adev->mman.stolen_vga_memory, NULL, NULL);
8977         amdgpu_bo_free_kernel(&adev->mman.stolen_extended_memory, NULL, NULL);
8978
8979         /*
8980          * Finally, drop a runtime PM reference for each newly disabled CRTC,
8981          * so we can put the GPU into runtime suspend if we're not driving any
8982          * displays anymore
8983          */
8984         for (i = 0; i < crtc_disable_count; i++)
8985                 pm_runtime_put_autosuspend(dev->dev);
8986         pm_runtime_mark_last_busy(dev->dev);
8987
8988         if (dc_state_temp)
8989                 dc_release_state(dc_state_temp);
8990 }
8991
8992
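     /*
      * Build and commit a minimal atomic state covering the connector, its
      * current CRTC and that CRTC's primary plane, with mode_changed forced to
      * true so the previous display configuration gets fully re-programmed.
      */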
8993 static int dm_force_atomic_commit(struct drm_connector *connector)
8994 {
8995         int ret = 0;
8996         struct drm_device *ddev = connector->dev;
8997         struct drm_atomic_state *state = drm_atomic_state_alloc(ddev);
8998         struct amdgpu_crtc *disconnected_acrtc = to_amdgpu_crtc(connector->encoder->crtc);
8999         struct drm_plane *plane = disconnected_acrtc->base.primary;
9000         struct drm_connector_state *conn_state;
9001         struct drm_crtc_state *crtc_state;
9002         struct drm_plane_state *plane_state;
9003
9004         if (!state)
9005                 return -ENOMEM;
9006
9007         state->acquire_ctx = ddev->mode_config.acquire_ctx;
9008
9009         /* Construct an atomic state to restore the previous display setting */
9010
9011         /* Attach connectors to drm_atomic_state */
9014         conn_state = drm_atomic_get_connector_state(state, connector);
9015
9016         ret = PTR_ERR_OR_ZERO(conn_state);
9017         if (ret)
9018                 goto out;
9019
9020         /* Attach crtc to drm_atomic_state */
9021         crtc_state = drm_atomic_get_crtc_state(state, &disconnected_acrtc->base);
9022
9023         ret = PTR_ERR_OR_ZERO(crtc_state);
9024         if (ret)
9025                 goto out;
9026
9027         /* force a restore */
9028         crtc_state->mode_changed = true;
9029
9030         /* Attach plane to drm_atomic_state */
9031         plane_state = drm_atomic_get_plane_state(state, plane);
9032
9033         ret = PTR_ERR_OR_ZERO(plane_state);
9034         if (ret)
9035                 goto out;
9036
9037         /* Call commit internally with the state we just constructed */
9038         ret = drm_atomic_commit(state);
9039
9040 out:
9041         drm_atomic_state_put(state);
9042         if (ret)
9043                 DRM_ERROR("Restoring old state failed with %i\n", ret);
9044
9045         return ret;
9046 }
9047
9048 /*
9049  * This function handles all cases when a set mode does not come upon hotplug.
9050  * This includes when a display is unplugged then plugged back into the
9051  * same port, and when running without usermode desktop manager support.
9052  */
9053 void dm_restore_drm_connector_state(struct drm_device *dev,
9054                                     struct drm_connector *connector)
9055 {
9056         struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
9057         struct amdgpu_crtc *disconnected_acrtc;
9058         struct dm_crtc_state *acrtc_state;
9059
9060         if (!aconnector->dc_sink || !connector->state || !connector->encoder)
9061                 return;
9062
9063         disconnected_acrtc = to_amdgpu_crtc(connector->encoder->crtc);
9064         if (!disconnected_acrtc)
9065                 return;
9066
9067         acrtc_state = to_dm_crtc_state(disconnected_acrtc->base.state);
9068         if (!acrtc_state->stream)
9069                 return;
9070
9071         /*
9072          * If the previous sink has not been released and is different from
9073          * the current one, we deduce that we cannot rely on a usermode call
9074          * to turn on the display, so we do it here.
9075          */
9076         if (acrtc_state->stream->sink != aconnector->dc_sink)
9077                 dm_force_atomic_commit(&aconnector->base);
9078 }
9079
9080 /*
9081  * Grabs all modesetting locks to serialize against any blocking commits, and
9082  * waits for completion of all non-blocking commits.
9083  */
9084 static int do_aquire_global_lock(struct drm_device *dev,
9085                                  struct drm_atomic_state *state)
9086 {
9087         struct drm_crtc *crtc;
9088         struct drm_crtc_commit *commit;
9089         long ret;
9090
9091         /*
9092          * Adding all modeset locks to acquire_ctx will
9093          * ensure that when the framework releases it, the
9094          * extra locks we are grabbing here will also get released.
9095          */
9096         ret = drm_modeset_lock_all_ctx(dev, state->acquire_ctx);
9097         if (ret)
9098                 return ret;
9099
9100         list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
9101                 spin_lock(&crtc->commit_lock);
9102                 commit = list_first_entry_or_null(&crtc->commit_list,
9103                                 struct drm_crtc_commit, commit_entry);
9104                 if (commit)
9105                         drm_crtc_commit_get(commit);
9106                 spin_unlock(&crtc->commit_lock);
9107
9108                 if (!commit)
9109                         continue;
9110
9111                 /*
9112                  * Make sure all pending HW programming completed and
9113                  * page flips done
9114                  */
9115                 ret = wait_for_completion_interruptible_timeout(&commit->hw_done, 10*HZ);
9116
9117                 if (ret > 0)
9118                         ret = wait_for_completion_interruptible_timeout(
9119                                         &commit->flip_done, 10*HZ);
9120
9121                 if (ret == 0)
9122                         DRM_ERROR("[CRTC:%d:%s] hw_done or flip_done "
9123                                   "timed out\n", crtc->base.id, crtc->name);
9124
9125                 drm_crtc_commit_put(commit);
9126         }
9127
9128         return ret < 0 ? ret : 0;
9129 }
9130
9131 static void get_freesync_config_for_crtc(
9132         struct dm_crtc_state *new_crtc_state,
9133         struct dm_connector_state *new_con_state)
9134 {
9135         struct mod_freesync_config config = {0};
9136         struct amdgpu_dm_connector *aconnector =
9137                         to_amdgpu_dm_connector(new_con_state->base.connector);
9138         struct drm_display_mode *mode = &new_crtc_state->base.mode;
9139         int vrefresh = drm_mode_vrefresh(mode);
9140         bool fs_vid_mode = false;
9141
9142         new_crtc_state->vrr_supported = new_con_state->freesync_capable &&
9143                                         vrefresh >= aconnector->min_vfreq &&
9144                                         vrefresh <= aconnector->max_vfreq;
9145
9146         if (new_crtc_state->vrr_supported) {
9147                 new_crtc_state->stream->ignore_msa_timing_param = true;
9148                 fs_vid_mode = new_crtc_state->freesync_config.state == VRR_STATE_ACTIVE_FIXED;
9149
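                     /* aconnector->{min,max}_vfreq are in Hz; mod_freesync wants uHz. */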
9150                 config.min_refresh_in_uhz = aconnector->min_vfreq * 1000000;
9151                 config.max_refresh_in_uhz = aconnector->max_vfreq * 1000000;
9152                 config.vsif_supported = true;
9153                 config.btr = true;
9154
9155                 if (fs_vid_mode) {
9156                         config.state = VRR_STATE_ACTIVE_FIXED;
9157                         config.fixed_refresh_in_uhz = new_crtc_state->freesync_config.fixed_refresh_in_uhz;
9158                         goto out;
9159                 } else if (new_crtc_state->base.vrr_enabled) {
9160                         config.state = VRR_STATE_ACTIVE_VARIABLE;
9161                 } else {
9162                         config.state = VRR_STATE_INACTIVE;
9163                 }
9164         }
9165 out:
9166         new_crtc_state->freesync_config = config;
9167 }
9168
9169 static void reset_freesync_config_for_crtc(
9170         struct dm_crtc_state *new_crtc_state)
9171 {
9172         new_crtc_state->vrr_supported = false;
9173
9174         memset(&new_crtc_state->vrr_infopacket, 0,
9175                sizeof(new_crtc_state->vrr_infopacket));
9176 }
9177
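     /*
      * The timing is considered unchanged for freesync purposes when only the
      * vertical blanking differs: same pixel clock and horizontal timings, but
      * a different vtotal/vsync position with an identical vsync width. Such a
      * switch can be achieved by adjusting vmin/vmax alone, without a modeset.
      */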
9178 static bool
9179 is_timing_unchanged_for_freesync(struct drm_crtc_state *old_crtc_state,
9180                                  struct drm_crtc_state *new_crtc_state)
9181 {
9182         struct drm_display_mode old_mode, new_mode;
9183
9184         if (!old_crtc_state || !new_crtc_state)
9185                 return false;
9186
9187         old_mode = old_crtc_state->mode;
9188         new_mode = new_crtc_state->mode;
9189
9190         if (old_mode.clock       == new_mode.clock &&
9191             old_mode.hdisplay    == new_mode.hdisplay &&
9192             old_mode.vdisplay    == new_mode.vdisplay &&
9193             old_mode.htotal      == new_mode.htotal &&
9194             old_mode.vtotal      != new_mode.vtotal &&
9195             old_mode.hsync_start == new_mode.hsync_start &&
9196             old_mode.vsync_start != new_mode.vsync_start &&
9197             old_mode.hsync_end   == new_mode.hsync_end &&
9198             old_mode.vsync_end   != new_mode.vsync_end &&
9199             old_mode.hskew       == new_mode.hskew &&
9200             old_mode.vscan       == new_mode.vscan &&
9201             (old_mode.vsync_end - old_mode.vsync_start) ==
9202             (new_mode.vsync_end - new_mode.vsync_start))
9203                 return true;
9204
9205         return false;
9206 }
9207
9208 static void set_freesync_fixed_config(struct dm_crtc_state *dm_new_crtc_state)
     {
9209         uint64_t num, den, res;
9210         struct drm_crtc_state *new_crtc_state = &dm_new_crtc_state->base;
9211
9212         dm_new_crtc_state->freesync_config.state = VRR_STATE_ACTIVE_FIXED;
9213
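             /*
              * Nominal refresh rate in uHz: mode.clock is in kHz, so scale by
              * 1000 * 1000000 and divide by the pixel count per frame
              * (htotal * vtotal).
              */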
9214         num = (unsigned long long)new_crtc_state->mode.clock * 1000 * 1000000;
9215         den = (unsigned long long)new_crtc_state->mode.htotal *
9216               (unsigned long long)new_crtc_state->mode.vtotal;
9217
9218         res = div_u64(num, den);
9219         dm_new_crtc_state->freesync_config.fixed_refresh_in_uhz = res;
9220 }
9221
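     /*
      * Create and validate the new dc_stream_state for @crtc (or tear the old
      * one down when disabling), and decide whether a full modeset and global
      * lock-and-validation are really required.
      */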
9222 static int dm_update_crtc_state(struct amdgpu_display_manager *dm,
9223                                 struct drm_atomic_state *state,
9224                                 struct drm_crtc *crtc,
9225                                 struct drm_crtc_state *old_crtc_state,
9226                                 struct drm_crtc_state *new_crtc_state,
9227                                 bool enable,
9228                                 bool *lock_and_validation_needed)
9229 {
9230         struct dm_atomic_state *dm_state = NULL;
9231         struct dm_crtc_state *dm_old_crtc_state, *dm_new_crtc_state;
9232         struct dc_stream_state *new_stream;
9233         int ret = 0;
9234
9235         /*
9236          * TODO Move this code into dm_crtc_atomic_check once we get rid of dc_validation_set
9237          * update changed items
9238          */
9239         struct amdgpu_crtc *acrtc = NULL;
9240         struct amdgpu_dm_connector *aconnector = NULL;
9241         struct drm_connector_state *drm_new_conn_state = NULL, *drm_old_conn_state = NULL;
9242         struct dm_connector_state *dm_new_conn_state = NULL, *dm_old_conn_state = NULL;
9243
9244         new_stream = NULL;
9245
9246         dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
9247         dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
9248         acrtc = to_amdgpu_crtc(crtc);
9249         aconnector = amdgpu_dm_find_first_crtc_matching_connector(state, crtc);
9250
9251         /* TODO This hack should go away */
9252         if (aconnector && enable) {
9253                 /* Make sure fake sink is created in plug-in scenario */
9254                 drm_new_conn_state = drm_atomic_get_new_connector_state(state,
9255                                                             &aconnector->base);
9256                 drm_old_conn_state = drm_atomic_get_old_connector_state(state,
9257                                                             &aconnector->base);
9258
9259                 if (IS_ERR(drm_new_conn_state)) {
9260                         ret = PTR_ERR_OR_ZERO(drm_new_conn_state);
9261                         goto fail;
9262                 }
9263
9264                 dm_new_conn_state = to_dm_connector_state(drm_new_conn_state);
9265                 dm_old_conn_state = to_dm_connector_state(drm_old_conn_state);
9266
9267                 if (!drm_atomic_crtc_needs_modeset(new_crtc_state))
9268                         goto skip_modeset;
9269
9270                 new_stream = create_validate_stream_for_sink(aconnector,
9271                                                              &new_crtc_state->mode,
9272                                                              dm_new_conn_state,
9273                                                              dm_old_crtc_state->stream);
9274
9275                 /*
9276                  * We can have no stream on ACTION_SET if a display was
9277                  * disconnected during S3; in that case it is not an error:
9278                  * the OS will be updated after detection and will do the
9279                  * right thing on the next atomic commit.
9280                  */
9281
9282                 if (!new_stream) {
9283                         DRM_DEBUG_DRIVER("%s: Failed to create new stream for crtc %d\n",
9284                                         __func__, acrtc->base.base.id);
9285                         ret = -ENOMEM;
9286                         goto fail;
9287                 }
9288
9289                 /*
9290                  * TODO: Check VSDB bits to decide whether this should
9291                  * be enabled or not.
9292                  */
9293                 new_stream->triggered_crtc_reset.enabled =
9294                         dm->force_timing_sync;
9295
9296                 dm_new_crtc_state->abm_level = dm_new_conn_state->abm_level;
9297
9298                 ret = fill_hdr_info_packet(drm_new_conn_state,
9299                                            &new_stream->hdr_static_metadata);
9300                 if (ret)
9301                         goto fail;
9302
9303                 /*
9304                  * If we already removed the old stream from the context
9305                  * (and set the new stream to NULL) then we can't reuse
9306                  * the old stream even if the stream and scaling are unchanged.
9307                  * We would hit the BUG_ON below and get a black screen.
9308                  *
9309                  * TODO: Refactor this function to allow this check to work
9310                  * in all conditions.
9311                  */
9312                 if (amdgpu_freesync_vid_mode &&
9313                     dm_new_crtc_state->stream &&
9314                     is_timing_unchanged_for_freesync(new_crtc_state, old_crtc_state))
9315                         goto skip_modeset;
9316
9317                 if (dm_new_crtc_state->stream &&
9318                     dc_is_stream_unchanged(new_stream, dm_old_crtc_state->stream) &&
9319                     dc_is_stream_scaling_unchanged(new_stream, dm_old_crtc_state->stream)) {
9320                         new_crtc_state->mode_changed = false;
9321                         DRM_DEBUG_DRIVER("Mode change not required, setting mode_changed to %d\n",
9322                                          new_crtc_state->mode_changed);
9323                 }
9324         }
9325
9326         /* mode_changed flag may get updated above, need to check again */
9327         if (!drm_atomic_crtc_needs_modeset(new_crtc_state))
9328                 goto skip_modeset;
9329
9330         DRM_DEBUG_ATOMIC(
9331                 "amdgpu_crtc id:%d crtc_state_flags: enable:%d, active:%d, "
9332                 "planes_changed:%d, mode_changed:%d, active_changed:%d, "
9333                 "connectors_changed:%d\n",
9334                 acrtc->crtc_id,
9335                 new_crtc_state->enable,
9336                 new_crtc_state->active,
9337                 new_crtc_state->planes_changed,
9338                 new_crtc_state->mode_changed,
9339                 new_crtc_state->active_changed,
9340                 new_crtc_state->connectors_changed);
9341
9342         /* Remove stream for any changed/disabled CRTC */
9343         if (!enable) {
9344
9345                 if (!dm_old_crtc_state->stream)
9346                         goto skip_modeset;
9347
9348                 if (amdgpu_freesync_vid_mode && dm_new_crtc_state->stream &&
9349                     is_timing_unchanged_for_freesync(new_crtc_state,
9350                                                      old_crtc_state)) {
9351                         new_crtc_state->mode_changed = false;
9352                         DRM_DEBUG_DRIVER(
9353                                 "Mode change not required for front porch change, "
9354                                 "setting mode_changed to %d\n",
9355                                 new_crtc_state->mode_changed);
9356
9357                         set_freesync_fixed_config(dm_new_crtc_state);
9358
9359                         goto skip_modeset;
9360                 } else if (amdgpu_freesync_vid_mode && aconnector &&
9361                            is_freesync_video_mode(&new_crtc_state->mode,
9362                                                   aconnector)) {
9363                         set_freesync_fixed_config(dm_new_crtc_state);
9364                 }
9365
9366                 ret = dm_atomic_get_state(state, &dm_state);
9367                 if (ret)
9368                         goto fail;
9369
9370                 DRM_DEBUG_DRIVER("Disabling DRM crtc: %d\n",
9371                                 crtc->base.id);
9372
9373                 /* i.e. reset mode */
9374                 if (dc_remove_stream_from_ctx(
9375                                 dm->dc,
9376                                 dm_state->context,
9377                                 dm_old_crtc_state->stream) != DC_OK) {
9378                         ret = -EINVAL;
9379                         goto fail;
9380                 }
9381
9382                 dc_stream_release(dm_old_crtc_state->stream);
9383                 dm_new_crtc_state->stream = NULL;
9384
9385                 reset_freesync_config_for_crtc(dm_new_crtc_state);
9386
9387                 *lock_and_validation_needed = true;
9388
9389         } else { /* Add stream for any updated/enabled CRTC */
9390                 /*
9391                  * Quick fix to prevent a NULL pointer dereference on new_stream
9392                  * when newly added MST connectors are not found in the existing
9393                  * crtc_state in chained mode. TODO: dig out the root cause.
9394                  */
9395                 if (!aconnector || (!aconnector->dc_sink && aconnector->mst_port))
9396                         goto skip_modeset;
9397
9398                 if (modereset_required(new_crtc_state))
9399                         goto skip_modeset;
9400
9401                 if (modeset_required(new_crtc_state, new_stream,
9402                                      dm_old_crtc_state->stream)) {
9403
9404                         WARN_ON(dm_new_crtc_state->stream);
9405
9406                         ret = dm_atomic_get_state(state, &dm_state);
9407                         if (ret)
9408                                 goto fail;
9409
9410                         dm_new_crtc_state->stream = new_stream;
9411
9412                         dc_stream_retain(new_stream);
9413
9414                         DRM_DEBUG_ATOMIC("Enabling DRM crtc: %d\n",
9415                                          crtc->base.id);
9416
9417                         if (dc_add_stream_to_ctx(
9418                                         dm->dc,
9419                                         dm_state->context,
9420                                         dm_new_crtc_state->stream) != DC_OK) {
9421                                 ret = -EINVAL;
9422                                 goto fail;
9423                         }
9424
9425                         *lock_and_validation_needed = true;
9426                 }
9427         }
9428
9429 skip_modeset:
9430         /* Release extra reference */
9431         if (new_stream)
9432                 dc_stream_release(new_stream);
9433
9434         /*
9435          * We want to do dc stream updates that do not require a
9436          * full modeset below.
9437          */
9438         if (!(enable && aconnector && new_crtc_state->active))
9439                 return 0;
9440         /*
9441          * Given the above conditions, the dc stream state cannot be NULL:
9442          * 1. we are in the process of enabling the CRTC (its stream was
9443          *    just added to the dc context, or is already on it),
9444          * 2. it has a valid connector attached, and
9445          * 3. it is currently active and enabled,
9446          * => the dc stream state currently exists.
9447          */
9448         BUG_ON(dm_new_crtc_state->stream == NULL);
9449
9450         /* Scaling or underscan settings */
9451         if (is_scaling_state_different(dm_old_conn_state, dm_new_conn_state))
9452                 update_stream_scaling_settings(
9453                         &new_crtc_state->mode, dm_new_conn_state, dm_new_crtc_state->stream);
9454
9455         /* ABM settings */
9456         dm_new_crtc_state->abm_level = dm_new_conn_state->abm_level;
9457
9458         /*
9459          * Color management settings. We also update color properties
9460          * when a modeset is needed, to ensure it gets reprogrammed.
9461          */
9462         if (dm_new_crtc_state->base.color_mgmt_changed ||
9463             drm_atomic_crtc_needs_modeset(new_crtc_state)) {
9464                 ret = amdgpu_dm_update_crtc_color_mgmt(dm_new_crtc_state);
9465                 if (ret)
9466                         goto fail;
9467         }
9468
9469         /* Update Freesync settings. */
9470         get_freesync_config_for_crtc(dm_new_crtc_state,
9471                                      dm_new_conn_state);
9472
9473         return ret;
9474
9475 fail:
9476         if (new_stream)
9477                 dc_stream_release(new_stream);
9478         return ret;
9479 }
9480
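     /*
      * Decide whether a plane change requires all planes on the stream to be
      * removed and recreated. Deliberately conservative: any modeset, z-order,
      * scaling, rotation, blending, colorspace, pixel format, tiling or DCC
      * change resets the planes so DC can revalidate pipe allocation and
      * bandwidth.
      */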
9481 static bool should_reset_plane(struct drm_atomic_state *state,
9482                                struct drm_plane *plane,
9483                                struct drm_plane_state *old_plane_state,
9484                                struct drm_plane_state *new_plane_state)
9485 {
9486         struct drm_plane *other;
9487         struct drm_plane_state *old_other_state, *new_other_state;
9488         struct drm_crtc_state *new_crtc_state;
9489         int i;
9490
9491         /*
9492          * TODO: Remove this hack once the checks below are sufficient
9493          * to determine when we need to reset all the planes on
9494          * the stream.
9495          */
9496         if (state->allow_modeset)
9497                 return true;
9498
9499         /* Exit early if we know that we're adding or removing the plane. */
9500         if (old_plane_state->crtc != new_plane_state->crtc)
9501                 return true;
9502
9503         /* old crtc == new_crtc == NULL, plane not in context. */
9504         if (!new_plane_state->crtc)
9505                 return false;
9506
9507         new_crtc_state =
9508                 drm_atomic_get_new_crtc_state(state, new_plane_state->crtc);
9509
9510         if (!new_crtc_state)
9511                 return true;
9512
9513         /* CRTC Degamma changes currently require us to recreate planes. */
9514         if (new_crtc_state->color_mgmt_changed)
9515                 return true;
9516
9517         if (drm_atomic_crtc_needs_modeset(new_crtc_state))
9518                 return true;
9519
9520         /*
9521          * If there are any new primary or overlay planes being added or
9522          * removed then the z-order can potentially change. To ensure
9523          * correct z-order and pipe acquisition the current DC architecture
9524          * requires us to remove and recreate all existing planes.
9525          *
9526          * TODO: Come up with a more elegant solution for this.
9527          */
9528         for_each_oldnew_plane_in_state(state, other, old_other_state, new_other_state, i) {
9529                 struct amdgpu_framebuffer *old_afb, *new_afb;
9530                 if (other->type == DRM_PLANE_TYPE_CURSOR)
9531                         continue;
9532
9533                 if (old_other_state->crtc != new_plane_state->crtc &&
9534                     new_other_state->crtc != new_plane_state->crtc)
9535                         continue;
9536
9537                 if (old_other_state->crtc != new_other_state->crtc)
9538                         return true;
9539
9540                 /* Src/dst size and scaling updates. */
9541                 if (old_other_state->src_w != new_other_state->src_w ||
9542                     old_other_state->src_h != new_other_state->src_h ||
9543                     old_other_state->crtc_w != new_other_state->crtc_w ||
9544                     old_other_state->crtc_h != new_other_state->crtc_h)
9545                         return true;
9546
9547                 /* Rotation / mirroring updates. */
9548                 if (old_other_state->rotation != new_other_state->rotation)
9549                         return true;
9550
9551                 /* Blending updates. */
9552                 if (old_other_state->pixel_blend_mode !=
9553                     new_other_state->pixel_blend_mode)
9554                         return true;
9555
9556                 /* Alpha updates. */
9557                 if (old_other_state->alpha != new_other_state->alpha)
9558                         return true;
9559
9560                 /* Colorspace changes. */
9561                 if (old_other_state->color_range != new_other_state->color_range ||
9562                     old_other_state->color_encoding != new_other_state->color_encoding)
9563                         return true;
9564
9565                 /* Framebuffer checks come last. */
9566                 if (!old_other_state->fb || !new_other_state->fb)
9567                         continue;
9568
9569                 /* Pixel format changes can require bandwidth updates. */
9570                 if (old_other_state->fb->format != new_other_state->fb->format)
9571                         return true;
9572
9573                 old_afb = (struct amdgpu_framebuffer *)old_other_state->fb;
9574                 new_afb = (struct amdgpu_framebuffer *)new_other_state->fb;
9575
9576                 /* Tiling and DCC changes also require bandwidth updates. */
9577                 if (old_afb->tiling_flags != new_afb->tiling_flags ||
9578                     old_afb->base.modifier != new_afb->base.modifier)
9579                         return true;
9580         }
9581
9582         return false;
9583 }
9584
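     /*
      * Validate a framebuffer about to be bound to the hardware cursor plane:
      * it must fit within the maximum cursor size, be unscaled and uncropped,
      * use a pitch the cursor hardware supports and, when no explicit modifier
      * is attached, be linear.
      */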
9585 static int dm_check_cursor_fb(struct amdgpu_crtc *new_acrtc,
9586                               struct drm_plane_state *new_plane_state,
9587                               struct drm_framebuffer *fb)
9588 {
9589         struct amdgpu_device *adev = drm_to_adev(new_acrtc->base.dev);
9590         struct amdgpu_framebuffer *afb = to_amdgpu_framebuffer(fb);
9591         unsigned int pitch;
9592         bool linear;
9593
9594         if (fb->width > new_acrtc->max_cursor_width ||
9595             fb->height > new_acrtc->max_cursor_height) {
9596                 DRM_DEBUG_ATOMIC("Bad cursor FB size %dx%d\n",
9597                                  fb->width,
9598                                  fb->height);
9599                 return -EINVAL;
9600         }
9601         if (new_plane_state->src_w != fb->width << 16 ||
9602             new_plane_state->src_h != fb->height << 16) {
9603                 DRM_DEBUG_ATOMIC("Cropping not supported for cursor plane\n");
9604                 return -EINVAL;
9605         }
9606
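             /*
              * Worked example (values assumed for illustration): a 64x64
              * ARGB8888 cursor has pitches[0] = 256 bytes and cpp[0] = 4,
              * giving a pitch of 64 pixels, which matches fb->width and is
              * one of the supported pitches below.
              */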
9607         /* Pitch in pixels */
9608         pitch = fb->pitches[0] / fb->format->cpp[0];
9609
9610         if (fb->width != pitch) {
9611                 DRM_DEBUG_ATOMIC("Cursor FB width %d doesn't match pitch %d\n",
9612                                  fb->width, pitch);
9613                 return -EINVAL;
9614         }
9615
9616         switch (pitch) {
9617         case 64:
9618         case 128:
9619         case 256:
9620                 /* FB pitch is supported by cursor plane */
9621                 break;
9622         default:
9623                 DRM_DEBUG_ATOMIC("Bad cursor FB pitch %d px\n", pitch);
9624                 return -EINVAL;
9625         }
9626
9627         /* Core DRM takes care of checking FB modifiers, so we only need to
9628          * check tiling flags when the FB doesn't have a modifier. */
9629         if (!(fb->flags & DRM_MODE_FB_MODIFIERS)) {
9630                 if (adev->family < AMDGPU_FAMILY_AI) {
9631                         linear = AMDGPU_TILING_GET(afb->tiling_flags, ARRAY_MODE) != DC_ARRAY_2D_TILED_THIN1 &&
9632                                  AMDGPU_TILING_GET(afb->tiling_flags, ARRAY_MODE) != DC_ARRAY_1D_TILED_THIN1 &&
9633                                  AMDGPU_TILING_GET(afb->tiling_flags, MICRO_TILE_MODE) == 0;
9634                 } else {
9635                         linear = AMDGPU_TILING_GET(afb->tiling_flags, SWIZZLE_MODE) == 0;
9636                 }
9637                 if (!linear) {
9638                         DRM_DEBUG_ATOMIC("Cursor FB not linear\n");
9639                         return -EINVAL;
9640                 }
9641         }
9642
9643         return 0;
9644 }
9645
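     /*
      * Plane counterpart of dm_update_crtc_state(): with enable == false it
      * detaches changed/removed planes from the DC context, with enable ==
      * true it creates and attaches a dc_plane_state for new or reset planes.
      * Cursor planes are only sanity-checked here; they are never added to
      * the DC context.
      */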
9646 static int dm_update_plane_state(struct dc *dc,
9647                                  struct drm_atomic_state *state,
9648                                  struct drm_plane *plane,
9649                                  struct drm_plane_state *old_plane_state,
9650                                  struct drm_plane_state *new_plane_state,
9651                                  bool enable,
9652                                  bool *lock_and_validation_needed)
9653 {
9654
9655         struct dm_atomic_state *dm_state = NULL;
9656         struct drm_crtc *new_plane_crtc, *old_plane_crtc;
9657         struct drm_crtc_state *old_crtc_state, *new_crtc_state;
9658         struct dm_crtc_state *dm_new_crtc_state, *dm_old_crtc_state;
9659         struct dm_plane_state *dm_new_plane_state, *dm_old_plane_state;
9660         struct amdgpu_crtc *new_acrtc;
9661         bool needs_reset;
9662         int ret = 0;
9663
9664
9665         new_plane_crtc = new_plane_state->crtc;
9666         old_plane_crtc = old_plane_state->crtc;
9667         dm_new_plane_state = to_dm_plane_state(new_plane_state);
9668         dm_old_plane_state = to_dm_plane_state(old_plane_state);
9669
9670         if (plane->type == DRM_PLANE_TYPE_CURSOR) {
9671                 if (!enable || !new_plane_crtc ||
9672                         drm_atomic_plane_disabling(plane->state, new_plane_state))
9673                         return 0;
9674
9675                 new_acrtc = to_amdgpu_crtc(new_plane_crtc);
9676
9677                 if (new_plane_state->src_x != 0 || new_plane_state->src_y != 0) {
9678                         DRM_DEBUG_ATOMIC("Cropping not supported for cursor plane\n");
9679                         return -EINVAL;
9680                 }
9681
9682                 if (new_plane_state->fb) {
9683                         ret = dm_check_cursor_fb(new_acrtc, new_plane_state,
9684                                                  new_plane_state->fb);
9685                         if (ret)
9686                                 return ret;
9687                 }
9688
9689                 return 0;
9690         }
9691
9692         needs_reset = should_reset_plane(state, plane, old_plane_state,
9693                                          new_plane_state);
9694
9695         /* Remove any changed/removed planes */
9696         if (!enable) {
9697                 if (!needs_reset)
9698                         return 0;
9699
9700                 if (!old_plane_crtc)
9701                         return 0;
9702
9703                 old_crtc_state = drm_atomic_get_old_crtc_state(
9704                                 state, old_plane_crtc);
9705                 dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
9706
9707                 if (!dm_old_crtc_state->stream)
9708                         return 0;
9709
9710                 DRM_DEBUG_ATOMIC("Disabling DRM plane: %d on DRM crtc %d\n",
9711                                 plane->base.id, old_plane_crtc->base.id);
9712
9713                 ret = dm_atomic_get_state(state, &dm_state);
9714                 if (ret)
9715                         return ret;
9716
9717                 if (!dc_remove_plane_from_context(
9718                                 dc,
9719                                 dm_old_crtc_state->stream,
9720                                 dm_old_plane_state->dc_state,
9721                                 dm_state->context)) {
9722
9723                         return -EINVAL;
9724                 }
9725
9726
9727                 dc_plane_state_release(dm_old_plane_state->dc_state);
9728                 dm_new_plane_state->dc_state = NULL;
9729
9730                 *lock_and_validation_needed = true;
9731
9732         } else { /* Add new planes */
9733                 struct dc_plane_state *dc_new_plane_state;
9734
9735                 if (drm_atomic_plane_disabling(plane->state, new_plane_state))
9736                         return 0;
9737
9738                 if (!new_plane_crtc)
9739                         return 0;
9740
9741                 new_crtc_state = drm_atomic_get_new_crtc_state(state, new_plane_crtc);
9742                 dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
9743
9744                 if (!dm_new_crtc_state->stream)
9745                         return 0;
9746
9747                 if (!needs_reset)
9748                         return 0;
9749
9750                 ret = dm_plane_helper_check_state(new_plane_state, new_crtc_state);
9751                 if (ret)
9752                         return ret;
9753
9754                 WARN_ON(dm_new_plane_state->dc_state);
9755
9756                 dc_new_plane_state = dc_create_plane_state(dc);
9757                 if (!dc_new_plane_state)
9758                         return -ENOMEM;
9759
9760                 DRM_DEBUG_ATOMIC("Enabling DRM plane: %d on DRM crtc %d\n",
9761                                  plane->base.id, new_plane_crtc->base.id);
9762
9763                 ret = fill_dc_plane_attributes(
9764                         drm_to_adev(new_plane_crtc->dev),
9765                         dc_new_plane_state,
9766                         new_plane_state,
9767                         new_crtc_state);
9768                 if (ret) {
9769                         dc_plane_state_release(dc_new_plane_state);
9770                         return ret;
9771                 }
9772
9773                 ret = dm_atomic_get_state(state, &dm_state);
9774                 if (ret) {
9775                         dc_plane_state_release(dc_new_plane_state);
9776                         return ret;
9777                 }
9778
9779                 /*
9780                  * Any atomic check errors that occur after this will
9781                  * not need a release. The plane state will be attached
9782                  * to the stream, and therefore part of the atomic
9783                  * state. It'll be released when the atomic state is
9784                  * cleaned.
9785                  */
9786                 if (!dc_add_plane_to_context(
9787                                 dc,
9788                                 dm_new_crtc_state->stream,
9789                                 dc_new_plane_state,
9790                                 dm_state->context)) {
9791
9792                         dc_plane_state_release(dc_new_plane_state);
9793                         return -EINVAL;
9794                 }
9795
9796                 dm_new_plane_state->dc_state = dc_new_plane_state;
9797
9798                 /* Tell DC to do a full surface update every time there
9799                  * is a plane change. Inefficient, but works for now.
9800                  */
9801                 dm_new_plane_state->dc_state->update_flags.bits.full_update = 1;
9802
9803                 *lock_and_validation_needed = true;
9804         }
9805
9806
9807         return ret;
9808 }
9809
9810 static int dm_check_crtc_cursor(struct drm_atomic_state *state,
9811                                 struct drm_crtc *crtc,
9812                                 struct drm_crtc_state *new_crtc_state)
9813 {
9814         struct drm_plane_state *new_cursor_state, *new_primary_state;
9815         int cursor_scale_w, cursor_scale_h, primary_scale_w, primary_scale_h;
9816
9817         /* On DCE and DCN there is no dedicated hardware cursor plane: we get
9818          * a cursor per pipe, and it inherits the scaling and positioning of
9819          * the underlying pipe. Check that the cursor plane's scaling matches
9820          * the primary plane's. */
9821
9822         new_cursor_state = drm_atomic_get_new_plane_state(state, crtc->cursor);
9823         new_primary_state = drm_atomic_get_new_plane_state(state, crtc->primary);
9824         if (!new_cursor_state || !new_primary_state ||
9825             !new_cursor_state->fb || !new_primary_state->fb) {
9826                 return 0;
9827         }
9828
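             /*
              * Plane src_* coordinates are 16.16 fixed point, so shift out the
              * fractional part; the * 1000 expresses each scale in thousandths,
              * e.g. crtc_w = 128 over src_w = 64 gives a scale of 2000 (2x).
              */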
9829         cursor_scale_w = new_cursor_state->crtc_w * 1000 /
9830                          (new_cursor_state->src_w >> 16);
9831         cursor_scale_h = new_cursor_state->crtc_h * 1000 /
9832                          (new_cursor_state->src_h >> 16);
9833
9834         primary_scale_w = new_primary_state->crtc_w * 1000 /
9835                          (new_primary_state->src_w >> 16);
9836         primary_scale_h = new_primary_state->crtc_h * 1000 /
9837                          (new_primary_state->src_h >> 16);
9838
9839         if (cursor_scale_w != primary_scale_w ||
9840             cursor_scale_h != primary_scale_h) {
9841                 DRM_DEBUG_ATOMIC("Cursor plane scaling doesn't match primary plane\n");
9842                 return -EINVAL;
9843         }
9844
9845         return 0;
9846 }
9847
9848 #if defined(CONFIG_DRM_AMD_DC_DCN)
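     /*
      * A modeset on an MST-attached CRTC can change the DSC bandwidth budget
      * for every stream sharing the same MST topology, so pull all affected
      * CRTCs into the atomic state for revalidation.
      */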
9849 static int add_affected_mst_dsc_crtcs(struct drm_atomic_state *state, struct drm_crtc *crtc)
9850 {
9851         struct drm_connector *connector;
9852         struct drm_connector_state *conn_state;
9853         struct amdgpu_dm_connector *aconnector = NULL;
9854         int i;
9855         for_each_new_connector_in_state(state, connector, conn_state, i) {
9856                 if (conn_state->crtc != crtc)
9857                         continue;
9858
9859                 aconnector = to_amdgpu_dm_connector(connector);
9860                 if (!aconnector->port || !aconnector->mst_port)
9861                         aconnector = NULL;
9862                 else
9863                         break;
9864         }
9865
9866         if (!aconnector)
9867                 return 0;
9868
9869         return drm_dp_mst_add_affected_dsc_crtcs(state, &aconnector->mst_port->mst_mgr);
9870 }
9871 #endif
9872
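     /*
      * Each pipe composes its own copy of the hardware cursor (see
      * dm_check_crtc_cursor()), so an overlay that only partially covers the
      * primary plane would leave two pipes scanning out the cursor and
      * duplicate it on screen. Require the overlay to fully cover the
      * primary plane instead.
      */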
9873 static int validate_overlay(struct drm_atomic_state *state)
9874 {
9875         int i;
9876         struct drm_plane *plane;
9877         struct drm_plane_state *old_plane_state, *new_plane_state;
9878         struct drm_plane_state *primary_state, *overlay_state = NULL;
9879
9880         /* Check if primary plane is contained inside overlay */
9881         for_each_oldnew_plane_in_state_reverse(state, plane, old_plane_state, new_plane_state, i) {
9882                 if (plane->type == DRM_PLANE_TYPE_OVERLAY) {
9883                         if (drm_atomic_plane_disabling(plane->state, new_plane_state))
9884                                 return 0;
9885
9886                         overlay_state = new_plane_state;
9887                         continue;
9888                 }
9889         }
9890
9891         /* check if we're making changes to the overlay plane */
9892         if (!overlay_state)
9893                 return 0;
9894
9895         /* check if overlay plane is enabled */
9896         if (!overlay_state->crtc)
9897                 return 0;
9898
9899         /* find the primary plane for the CRTC that the overlay is enabled on */
9900         primary_state = drm_atomic_get_plane_state(state, overlay_state->crtc->primary);
9901         if (IS_ERR(primary_state))
9902                 return PTR_ERR(primary_state);
9903
9904         /* check if primary plane is enabled */
9905         if (!primary_state->crtc)
9906                 return 0;
9907
9908         /* Perform the bounds check to ensure the overlay plane covers the primary */
9909         if (primary_state->crtc_x < overlay_state->crtc_x ||
9910             primary_state->crtc_y < overlay_state->crtc_y ||
9911             primary_state->crtc_x + primary_state->crtc_w > overlay_state->crtc_x + overlay_state->crtc_w ||
9912             primary_state->crtc_y + primary_state->crtc_h > overlay_state->crtc_y + overlay_state->crtc_h) {
9913                 DRM_DEBUG_ATOMIC("Overlay plane is enabled with hardware cursor but does not fully cover primary plane\n");
9914                 return -EINVAL;
9915         }
9916
9917         return 0;
9918 }
9919
9920 /**
9921  * amdgpu_dm_atomic_check() - Atomic check implementation for AMDgpu DM.
9922  * @dev: The DRM device
9923  * @state: The atomic state to commit
9924  *
9925  * Validate that the given atomic state is programmable by DC into hardware.
9926  * This involves constructing a &struct dc_state reflecting the new hardware
9927  * state we wish to commit, then querying DC to see if it is programmable. It's
9928  * important not to modify the existing DC state. Otherwise, atomic_check
9929  * may unexpectedly commit hardware changes.
9930  *
9931  * When validating the DC state, it's important that the right locks are
9932  * acquired. In the full-update case, which removes/adds/updates streams on
9933  * one CRTC while flipping on another, acquiring the global lock guarantees
9934  * that any such full-update commit will wait for completion of any
9935  * outstanding flips using DRM's synchronization events.
9936  *
9937  * Note that DM adds the affected connectors for all CRTCs in state, when that
9938  * might not seem necessary. This is because DC stream creation requires the
9939  * DC sink, which is tied to the DRM connector state. Cleaning this up should
9940  * be possible but non-trivial - a possible TODO item.
9941  *
9942  * Return: 0 on success, or a negative error code if validation failed.
9943  */
9944 static int amdgpu_dm_atomic_check(struct drm_device *dev,
9945                                   struct drm_atomic_state *state)
9946 {
9947         struct amdgpu_device *adev = drm_to_adev(dev);
9948         struct dm_atomic_state *dm_state = NULL;
9949         struct dc *dc = adev->dm.dc;
9950         struct drm_connector *connector;
9951         struct drm_connector_state *old_con_state, *new_con_state;
9952         struct drm_crtc *crtc;
9953         struct drm_crtc_state *old_crtc_state, *new_crtc_state;
9954         struct drm_plane *plane;
9955         struct drm_plane_state *old_plane_state, *new_plane_state;
9956         enum dc_status status;
9957         int ret, i;
9958         bool lock_and_validation_needed = false;
9959         struct dm_crtc_state *dm_old_crtc_state;
9960
9961         trace_amdgpu_dm_atomic_check_begin(state);
9962
9963         ret = drm_atomic_helper_check_modeset(dev, state);
9964         if (ret)
9965                 goto fail;
9966
9967         /* Check connector changes */
9968         for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) {
9969                 struct dm_connector_state *dm_old_con_state = to_dm_connector_state(old_con_state);
9970                 struct dm_connector_state *dm_new_con_state = to_dm_connector_state(new_con_state);
9971
9972                 /* Skip connectors that are disabled or part of modeset already. */
9973                 if (!old_con_state->crtc && !new_con_state->crtc)
9974                         continue;
9975
9976                 if (!new_con_state->crtc)
9977                         continue;
9978
9979                 new_crtc_state = drm_atomic_get_crtc_state(state, new_con_state->crtc);
9980                 if (IS_ERR(new_crtc_state)) {
9981                         ret = PTR_ERR(new_crtc_state);
9982                         goto fail;
9983                 }
9984
9985                 if (dm_old_con_state->abm_level !=
9986                     dm_new_con_state->abm_level)
9987                         new_crtc_state->connectors_changed = true;
9988         }
9989
9990 #if defined(CONFIG_DRM_AMD_DC_DCN)
9991         if (dc_resource_is_dsc_encoding_supported(dc)) {
9992                 for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
9993                         if (drm_atomic_crtc_needs_modeset(new_crtc_state)) {
9994                                 ret = add_affected_mst_dsc_crtcs(state, crtc);
9995                                 if (ret)
9996                                         goto fail;
9997                         }
9998                 }
9999         }
10000 #endif
10001         for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
10002                 dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
10003
10004                 if (!drm_atomic_crtc_needs_modeset(new_crtc_state) &&
10005                     !new_crtc_state->color_mgmt_changed &&
10006                     old_crtc_state->vrr_enabled == new_crtc_state->vrr_enabled &&
10007                         dm_old_crtc_state->dsc_force_changed == false)
10008                         continue;
10009
10010                 if (!new_crtc_state->enable)
10011                         continue;
10012
10013                 ret = drm_atomic_add_affected_connectors(state, crtc);
10014                 if (ret)
10015                         goto fail;
10016
10017                 ret = drm_atomic_add_affected_planes(state, crtc);
10018                 if (ret)
10019                         goto fail;
10020
10021                 if (dm_old_crtc_state->dsc_force_changed)
10022                         new_crtc_state->mode_changed = true;
10023         }
10024
10025         /*
10026          * Add all primary and overlay planes on the CRTC to the state
10027          * whenever a plane is enabled to maintain correct z-ordering
10028          * and to enable fast surface updates.
10029          */
10030         drm_for_each_crtc(crtc, dev) {
10031                 bool modified = false;
10032
10033                 for_each_oldnew_plane_in_state(state, plane, old_plane_state, new_plane_state, i) {
10034                         if (plane->type == DRM_PLANE_TYPE_CURSOR)
10035                                 continue;
10036
10037                         if (new_plane_state->crtc == crtc ||
10038                             old_plane_state->crtc == crtc) {
10039                                 modified = true;
10040                                 break;
10041                         }
10042                 }
10043
10044                 if (!modified)
10045                         continue;
10046
10047                 drm_for_each_plane_mask(plane, state->dev, crtc->state->plane_mask) {
10048                         if (plane->type == DRM_PLANE_TYPE_CURSOR)
10049                                 continue;
10050
10051                         new_plane_state =
10052                                 drm_atomic_get_plane_state(state, plane);
10053
10054                         if (IS_ERR(new_plane_state)) {
10055                                 ret = PTR_ERR(new_plane_state);
10056                                 goto fail;
10057                         }
10058                 }
10059         }
10060
10061         /* Remove existing planes if they are modified */
10062         for_each_oldnew_plane_in_state_reverse(state, plane, old_plane_state, new_plane_state, i) {
10063                 ret = dm_update_plane_state(dc, state, plane,
10064                                             old_plane_state,
10065                                             new_plane_state,
10066                                             false,
10067                                             &lock_and_validation_needed);
10068                 if (ret)
10069                         goto fail;
10070         }
10071
10072         /* Disable all crtcs which require disable */
10073         for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
10074                 ret = dm_update_crtc_state(&adev->dm, state, crtc,
10075                                            old_crtc_state,
10076                                            new_crtc_state,
10077                                            false,
10078                                            &lock_and_validation_needed);
10079                 if (ret)
10080                         goto fail;
10081         }
10082
10083         /* Enable all crtcs which require enable */
10084         for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
10085                 ret = dm_update_crtc_state(&adev->dm, state, crtc,
10086                                            old_crtc_state,
10087                                            new_crtc_state,
10088                                            true,
10089                                            &lock_and_validation_needed);
10090                 if (ret)
10091                         goto fail;
10092         }
10093
10094         ret = validate_overlay(state);
10095         if (ret)
10096                 goto fail;
10097
10098         /* Add new/modified planes */
10099         for_each_oldnew_plane_in_state_reverse(state, plane, old_plane_state, new_plane_state, i) {
10100                 ret = dm_update_plane_state(dc, state, plane,
10101                                             old_plane_state,
10102                                             new_plane_state,
10103                                             true,
10104                                             &lock_and_validation_needed);
10105                 if (ret)
10106                         goto fail;
10107         }
10108
10109         /* Run this here since we want to validate the streams we created */
10110         ret = drm_atomic_helper_check_planes(dev, state);
10111         if (ret)
10112                 goto fail;
10113
10114         /* Check cursor planes scaling */
10115         for_each_new_crtc_in_state(state, crtc, new_crtc_state, i) {
10116                 ret = dm_check_crtc_cursor(state, crtc, new_crtc_state);
10117                 if (ret)
10118                         goto fail;
10119         }
10120
10121         if (state->legacy_cursor_update) {
10122                 /*
10123                  * This is a fast cursor update coming from the plane update
10124                  * helper, check if it can be done asynchronously for better
10125                  * performance.
10126                  */
10127                 state->async_update =
10128                         !drm_atomic_helper_async_check(dev, state);
10129
10130                 /*
10131                  * Skip the remaining global validation if this is an async
10132                  * update. Cursor updates can be done without affecting
10133                  * state or bandwidth calcs and this avoids the performance
10134                  * penalty of locking the private state object and
10135                  * allocating a new dc_state.
10136                  */
10137                 if (state->async_update)
10138                         return 0;
10139         }
10140
10141         /* Check scaling and underscan changes */
10142         /* TODO: Scaling-changes validation was removed due to the inability
10143          * to commit a new stream into the context w/o causing a full reset.
10144          * Need to decide how to handle this.
10145          */
10146         for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) {
10147                 struct dm_connector_state *dm_old_con_state = to_dm_connector_state(old_con_state);
10148                 struct dm_connector_state *dm_new_con_state = to_dm_connector_state(new_con_state);
10149                 struct amdgpu_crtc *acrtc = to_amdgpu_crtc(dm_new_con_state->base.crtc);
10150
10151                 /* Skip any modesets/resets */
10152                 if (!acrtc || drm_atomic_crtc_needs_modeset(
10153                                 drm_atomic_get_new_crtc_state(state, &acrtc->base)))
10154                         continue;
10155
10156                 /* Skip anything that is not a scaling or underscan change */
10157                 if (!is_scaling_state_different(dm_new_con_state, dm_old_con_state))
10158                         continue;
10159
10160                 lock_and_validation_needed = true;
10161         }
10162
10163         /*
10164          * Streams and planes are reset when there are changes that affect
10165          * bandwidth. Anything that affects bandwidth needs to go through
10166          * DC global validation to ensure that the configuration can be applied
10167          * to hardware.
10168          *
10169          * We have to currently stall out here in atomic_check for outstanding
10170          * commits to finish in this case because our IRQ handlers reference
10171          * DRM state directly - we can end up disabling interrupts too early
10172          * if we don't.
10173          *
10174          * TODO: Remove this stall and drop DM state private objects.
10175          */
10176         if (lock_and_validation_needed) {
10177                 ret = dm_atomic_get_state(state, &dm_state);
10178                 if (ret)
10179                         goto fail;
10180
10181                 ret = do_aquire_global_lock(dev, state);
10182                 if (ret)
10183                         goto fail;
10184
10185 #if defined(CONFIG_DRM_AMD_DC_DCN)
10186                 if (!compute_mst_dsc_configs_for_state(state, dm_state->context)) {
                              ret = -EINVAL;
                              goto fail;
                      }
10188
10189                 ret = dm_update_mst_vcpi_slots_for_dsc(state, dm_state->context);
10190                 if (ret)
10191                         goto fail;
10192 #endif
10193
10194                 /*
10195                  * Perform validation of MST topology in the state:
10196                  * We need to perform MST atomic check before calling
10197                  * dc_validate_global_state(), or there is a chance
10198                  * to get stuck in an infinite loop and hang eventually.
10199                  */
10200                 ret = drm_dp_mst_atomic_check(state);
10201                 if (ret)
10202                         goto fail;
10203                 status = dc_validate_global_state(dc, dm_state->context, false);
10204                 if (status != DC_OK) {
10205                         DC_LOG_WARNING("DC global validation failure: %s (%d)",
10206                                        dc_status_to_str(status), status);
10207                         ret = -EINVAL;
10208                         goto fail;
10209                 }
10210         } else {
10211                 /*
10212                  * The commit is a fast update. Fast updates shouldn't change
10213                  * the DC context, affect global validation, and can have their
10214                  * commit work done in parallel with other commits not touching
10215                  * the same resource. If we have a new DC context as part of
10216                  * the DM atomic state from validation we need to free it and
10217                  * retain the existing one instead.
10218                  *
10219                  * Furthermore, since the DM atomic state only contains the DC
10220                  * context and can safely be annulled, we can free the state
10221                  * and clear the associated private object now to free
10222                  * some memory and avoid a possible use-after-free later.
10223                  */
10224
10225                 for (i = 0; i < state->num_private_objs; i++) {
10226                         struct drm_private_obj *obj = state->private_objs[i].ptr;
10227
10228                         if (obj->funcs == adev->dm.atomic_obj.funcs) {
10229                                 int j = state->num_private_objs - 1;
10230
10231                                 dm_atomic_destroy_state(obj,
10232                                                 state->private_objs[i].state);
10233
10234                                 /* If i is not at the end of the array then the
10235                                  * last element needs to be moved to where i was
10236                                  * before the array can safely be truncated.
10237                                  */
10238                                 if (i != j)
10239                                         state->private_objs[i] =
10240                                                 state->private_objs[j];
10241
10242                                 state->private_objs[j].ptr = NULL;
10243                                 state->private_objs[j].state = NULL;
10244                                 state->private_objs[j].old_state = NULL;
10245                                 state->private_objs[j].new_state = NULL;
10246
10247                                 state->num_private_objs = j;
10248                                 break;
10249                         }
10250                 }
10251         }
10252
10253         /* Store the overall update type for use later in atomic check. */
10254         for_each_new_crtc_in_state(state, crtc, new_crtc_state, i) {
10255                 struct dm_crtc_state *dm_new_crtc_state =
10256                         to_dm_crtc_state(new_crtc_state);
10257
10258                 dm_new_crtc_state->update_type = lock_and_validation_needed ?
10259                                                          UPDATE_TYPE_FULL :
10260                                                          UPDATE_TYPE_FAST;
10261         }
10262
10263         /* Must be success */
10264         WARN_ON(ret);
10265
10266         trace_amdgpu_dm_atomic_check_finish(state, ret);
10267
10268         return ret;
10269
10270 fail:
10271         if (ret == -EDEADLK)
10272                 DRM_DEBUG_DRIVER("Atomic check stopped to avoid deadlock.\n");
10273         else if (ret == -EINTR || ret == -EAGAIN || ret == -ERESTARTSYS)
10274                 DRM_DEBUG_DRIVER("Atomic check stopped due to signal.\n");
10275         else
10276                 DRM_DEBUG_DRIVER("Atomic check failed with err: %d\n", ret);
10277
10278         trace_amdgpu_dm_atomic_check_finish(state, ret);
10279
10280         return ret;
10281 }
10282
10283 static bool is_dp_capable_without_timing_msa(struct dc *dc,
10284                                              struct amdgpu_dm_connector *amdgpu_dm_connector)
10285 {
10286         uint8_t dpcd_data;
10287         bool capable = false;
10288
10289         if (amdgpu_dm_connector->dc_link &&
10290                 dm_helpers_dp_read_dpcd(
10291                                 NULL,
10292                                 amdgpu_dm_connector->dc_link,
10293                                 DP_DOWN_STREAM_PORT_COUNT,
10294                                 &dpcd_data,
10295                                 sizeof(dpcd_data))) {
10296                 capable = (dpcd_data & DP_MSA_TIMING_PAR_IGNORED) ? true : false;
10297         }
10298
10299         return capable;
10300 }
10301
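      /*
       * Stream a CEA extension block to the DC EDID parser (DMCU firmware) in
       * 8-byte chunks; every chunk is acknowledged, and after the final chunk
       * the firmware reports whether an AMD VSDB with FreeSync refresh-rate
       * ranges was found.
       */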
10302 static bool parse_edid_cea(struct amdgpu_dm_connector *aconnector,
10303                 uint8_t *edid_ext, int len,
10304                 struct amdgpu_hdmi_vsdb_info *vsdb_info)
10305 {
10306         int i;
10307         struct amdgpu_device *adev = drm_to_adev(aconnector->base.dev);
10308         struct dc *dc = adev->dm.dc;
10309
10310         /* send extension block to DMCU for parsing */
10311         for (i = 0; i < len; i += 8) {
10312                 bool res;
10313                 int offset;
10314
10315                 /* send 8 bytes at a time */
10316                 if (!dc_edid_parser_send_cea(dc, i, len, &edid_ext[i], 8))
10317                         return false;
10318
10319                 if (i + 8 == len) {
10320                         /* EDID block sent completely; expect result */
10321                         int version, min_rate, max_rate;
10322
10323                         res = dc_edid_parser_recv_amd_vsdb(dc, &version, &min_rate, &max_rate);
10324                         if (res) {
10325                                 /* amd vsdb found */
10326                                 vsdb_info->freesync_supported = 1;
10327                                 vsdb_info->amd_vsdb_version = version;
10328                                 vsdb_info->min_refresh_rate_hz = min_rate;
10329                                 vsdb_info->max_refresh_rate_hz = max_rate;
10330                                 return true;
10331                         }
10332                         /* not amd vsdb */
10333                         return false;
10334                 }
10335
10336                 /* check for ack */
10337                 res = dc_edid_parser_recv_cea_ack(dc, &offset);
10338                 if (!res)
10339                         return false;
10340         }
10341
10342         return false;
10343 }
10344
10345 static int parse_hdmi_amd_vsdb(struct amdgpu_dm_connector *aconnector,
10346                 struct edid *edid, struct amdgpu_hdmi_vsdb_info *vsdb_info)
10347 {
10348         uint8_t *edid_ext = NULL;
10349         int i;
10350         bool valid_vsdb_found = false;
10351
10352         /*----- drm_find_cea_extension() -----*/
10353         /* No EDID or EDID extensions */
10354         if (edid == NULL || edid->extensions == 0)
10355                 return -ENODEV;
10356
10357         /* Find CEA extension */
10358         for (i = 0; i < edid->extensions; i++) {
10359                 edid_ext = (uint8_t *)edid + EDID_LENGTH * (i + 1);
10360                 if (edid_ext[0] == CEA_EXT)
10361                         break;
10362         }
10363
10364         if (i == edid->extensions)
10365                 return -ENODEV;
10366
10367         /*----- cea_db_offsets() -----*/
10368         if (edid_ext[0] != CEA_EXT)
10369                 return -ENODEV;
10370
10371         valid_vsdb_found = parse_edid_cea(aconnector, edid_ext, EDID_LENGTH, vsdb_info);
10372
10373         return valid_vsdb_found ? i : -ENODEV;
10374 }
10375
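      /**
       * amdgpu_dm_update_freesync_caps - Update FreeSync capabilities from EDID
       * @connector: DRM connector to update
       * @edid: EDID to parse, or NULL if the display was disconnected
       *
       * Derives the supported refresh-rate range from the EDID monitor range
       * descriptor (DP/eDP) or from the HDMI AMD VSDB, and updates the
       * connector's vrr_capable property accordingly. A range wider than
       * 10 Hz is required for the sink to be considered FreeSync-capable.
       */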
10376 void amdgpu_dm_update_freesync_caps(struct drm_connector *connector,
10377                                         struct edid *edid)
10378 {
10379         int i = 0;
10380         struct detailed_timing *timing;
10381         struct detailed_non_pixel *data;
10382         struct detailed_data_monitor_range *range;
10383         struct amdgpu_dm_connector *amdgpu_dm_connector =
10384                         to_amdgpu_dm_connector(connector);
10385         struct dm_connector_state *dm_con_state = NULL;
10386
10387         struct drm_device *dev = connector->dev;
10388         struct amdgpu_device *adev = drm_to_adev(dev);
10389         bool freesync_capable = false;
10390         struct amdgpu_hdmi_vsdb_info vsdb_info = {0};
10391
10392         if (!connector->state) {
10393                 DRM_ERROR("%s - Connector has no state\n", __func__);
10394                 goto update;
10395         }
10396
10397         if (!edid) {
10398                 dm_con_state = to_dm_connector_state(connector->state);
10399
10400                 amdgpu_dm_connector->min_vfreq = 0;
10401                 amdgpu_dm_connector->max_vfreq = 0;
10402                 amdgpu_dm_connector->pixel_clock_mhz = 0;
10403
10404                 goto update;
10405         }
10406
10407         dm_con_state = to_dm_connector_state(connector->state);
10408
10409         if (!amdgpu_dm_connector->dc_sink) {
10410                 DRM_ERROR("dc_sink NULL, could not add free_sync module.\n");
10411                 goto update;
10412         }
10413         if (!adev->dm.freesync_module)
10414                 goto update;
10415
10416
10417         if (amdgpu_dm_connector->dc_sink->sink_signal == SIGNAL_TYPE_DISPLAY_PORT
10418                 || amdgpu_dm_connector->dc_sink->sink_signal == SIGNAL_TYPE_EDP) {
10419                 bool edid_check_required = false;
10420
10421                 if (edid) {
10422                         edid_check_required = is_dp_capable_without_timing_msa(
10423                                                 adev->dm.dc,
10424                                                 amdgpu_dm_connector);
10425                 }
10426
10427                 if (edid_check_required && (edid->version > 1 ||
10428                     (edid->version == 1 && edid->revision > 1))) {
10429                         for (i = 0; i < 4; i++) {
10430
10431                                 timing  = &edid->detailed_timings[i];
10432                                 data    = &timing->data.other_data;
10433                                 range   = &data->data.range;
10434                                 /*
10435                                  * Check if monitor has continuous frequency mode
10436                                  */
10437                                 if (data->type != EDID_DETAIL_MONITOR_RANGE)
10438                                         continue;
10439                                 /*
10440                                  * Check for flag range limits only. If flag == 1 then
10441                                  * no additional timing information provided.
10442                                  * Default GTF, GTF Secondary curve and CVT are not
10443                                  * supported
10444                                  */
10445                                 if (range->flags != 1)
10446                                         continue;
10447
10448                                 amdgpu_dm_connector->min_vfreq = range->min_vfreq;
10449                                 amdgpu_dm_connector->max_vfreq = range->max_vfreq;
10450                                 amdgpu_dm_connector->pixel_clock_mhz =
10451                                         range->pixel_clock_mhz * 10;
10452
10453                                 connector->display_info.monitor_range.min_vfreq = range->min_vfreq;
10454                                 connector->display_info.monitor_range.max_vfreq = range->max_vfreq;
10455
10456                                 break;
10457                         }
10458
10459                         if (amdgpu_dm_connector->max_vfreq -
10460                             amdgpu_dm_connector->min_vfreq > 10) {
10461
10462                                 freesync_capable = true;
10463                         }
10464                 }
10465         } else if (edid && amdgpu_dm_connector->dc_sink->sink_signal == SIGNAL_TYPE_HDMI_TYPE_A) {
10466                 i = parse_hdmi_amd_vsdb(amdgpu_dm_connector, edid, &vsdb_info);
10467                 if (i >= 0 && vsdb_info.freesync_supported) {
10468                         timing  = &edid->detailed_timings[i];
10469                         data    = &timing->data.other_data;
10470
10471                         amdgpu_dm_connector->min_vfreq = vsdb_info.min_refresh_rate_hz;
10472                         amdgpu_dm_connector->max_vfreq = vsdb_info.max_refresh_rate_hz;
10473                         if (amdgpu_dm_connector->max_vfreq - amdgpu_dm_connector->min_vfreq > 10)
10474                                 freesync_capable = true;
10475
10476                         connector->display_info.monitor_range.min_vfreq = vsdb_info.min_refresh_rate_hz;
10477                         connector->display_info.monitor_range.max_vfreq = vsdb_info.max_refresh_rate_hz;
10478                 }
10479         }
10480
10481 update:
10482         if (dm_con_state)
10483                 dm_con_state->freesync_capable = freesync_capable;
10484
10485         if (connector->vrr_capable_property)
10486                 drm_connector_set_vrr_capable_property(connector,
10487                                                        freesync_capable);
10488 }
10489
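      /*
       * Read the sink's PSR capability from the DP_PSR_SUPPORT DPCD register
       * and record whether PSR version 1 can be enabled on this eDP link.
       */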
10490 static void amdgpu_dm_set_psr_caps(struct dc_link *link)
10491 {
10492         uint8_t dpcd_data[EDP_PSR_RECEIVER_CAP_SIZE];
10493
10494         if (!(link->connector_signal & SIGNAL_TYPE_EDP))
10495                 return;
10496         if (link->type == dc_connection_none)
10497                 return;
10498         if (dm_helpers_dp_read_dpcd(NULL, link, DP_PSR_SUPPORT,
10499                                         dpcd_data, sizeof(dpcd_data))) {
10500                 link->dpcd_caps.psr_caps.psr_version = dpcd_data[0];
10501
10502                 if (dpcd_data[0] == 0) {
10503                         link->psr_settings.psr_version = DC_PSR_VERSION_UNSUPPORTED;
10504                         link->psr_settings.psr_feature_enabled = false;
10505                 } else {
10506                         link->psr_settings.psr_version = DC_PSR_VERSION_1;
10507                         link->psr_settings.psr_feature_enabled = true;
10508                 }
10509
10510                 DRM_INFO("PSR support: %d\n", link->psr_settings.psr_feature_enabled);
10511         }
10512 }
10513
10514 /*
10515  * amdgpu_dm_link_setup_psr() - configure psr link
10516  * @stream: stream state
10517  *
10518  * Return: true on success
10519  */
10520 static bool amdgpu_dm_link_setup_psr(struct dc_stream_state *stream)
10521 {
10522         struct dc_link *link = NULL;
10523         struct psr_config psr_config = {0};
10524         struct psr_context psr_context = {0};
10525         bool ret = false;
10526
10527         if (!stream)
10528                 return false;
10529
10530         link = stream->link;
10531
10532         psr_config.psr_version = link->dpcd_caps.psr_caps.psr_version;
10533
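              /* When the sink reports any PSR version, program DM's default PSR1
               * configuration: retrain the link on PSR exit, no frame-capture
               * indication, fixed RFB setup time and SDP transmit deadline, and
               * SMU optimizations off.
               */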
10534         if (psr_config.psr_version > 0) {
10535                 psr_config.psr_exit_link_training_required = 0x1;
10536                 psr_config.psr_frame_capture_indication_req = 0;
10537                 psr_config.psr_rfb_setup_time = 0x37;
10538                 psr_config.psr_sdp_transmit_line_num_deadline = 0x20;
10539                 psr_config.allow_smu_optimizations = 0x0;
10540
10541                 ret = dc_link_setup_psr(link, stream, &psr_config, &psr_context);
10542
10543         }
10544         DRM_DEBUG_DRIVER("PSR link: %d\n", link->psr_settings.psr_feature_enabled);
10545
10546         return ret;
10547 }
10548
10549 /*
10550  * amdgpu_dm_psr_enable() - enable psr f/w
10551  * @stream: stream state
10552  *
10553  * Return: true on success
10554  */
10555 bool amdgpu_dm_psr_enable(struct dc_stream_state *stream)
10556 {
10557         struct dc_link *link = stream->link;
10558         unsigned int vsync_rate_hz = 0;
10559         struct dc_static_screen_params params = {0};
10560         /* Calculate the number of static frames before generating an
10561          * interrupt to enter PSR; initialize to a failsafe of 2 static
10562          * frames.
10563          */
10564         unsigned int num_frames_static = 2;
10565
10566         DRM_DEBUG_DRIVER("Enabling psr...\n");
10567
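              /* Nominal refresh rate = pixel clock / (h_total * v_total), e.g.
               * 148500000 Hz / (2200 * 1125) = 60 Hz for CEA 1080p60 timing.
               */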
10568         vsync_rate_hz = div64_u64(div64_u64((
10569                         stream->timing.pix_clk_100hz * 100),
10570                         stream->timing.v_total),
10571                         stream->timing.h_total);
10572
10573         /* Round up: choose the number of frames so that at least 30 ms of
10574          * static screen time has passed.
10575          */
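              /* e.g. at 60 Hz: frame_time = 1000000 / 60 = 16666 us, so
               * num_frames_static = 30000 / 16666 + 1 = 2.
               */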
10577         if (vsync_rate_hz != 0) {
10578                 unsigned int frame_time_microsec = 1000000 / vsync_rate_hz;
10579                 num_frames_static = (30000 / frame_time_microsec) + 1;
10580         }
10581
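              /* Cursor, overlay and surface updates all count as screen
               * activity and restart the static-frame counter.
               */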
10582         params.triggers.cursor_update = true;
10583         params.triggers.overlay_update = true;
10584         params.triggers.surface_update = true;
10585         params.num_frames = num_frames_static;
10586
10587         dc_stream_set_static_screen_params(link->ctx->dc,
10588                                            &stream, 1,
10589                                            &params);
10590
10591         return dc_link_set_psr_allow_active(link, true, false, false);
10592 }
10593
10594 /*
10595  * amdgpu_dm_psr_disable() - disable psr f/w
10596  * @stream:  stream state
10597  *
10598  * Return: true on success
10599  */
10600 static bool amdgpu_dm_psr_disable(struct dc_stream_state *stream)
10601 {
10603         DRM_DEBUG_DRIVER("Disabling psr...\n");
10604
10605         return dc_link_set_psr_allow_active(stream->link, false, true, false);
10606 }
10607
10608 /*
10609  * amdgpu_dm_psr_disable_all() - disable psr f/w if enabled on any stream
10610  * @dm: display manager state
10611  *
10612  * Return: true on success
10613  */
10614 static bool amdgpu_dm_psr_disable_all(struct amdgpu_display_manager *dm)
10615 {
10616         DRM_DEBUG_DRIVER("Disabling psr if psr is enabled on any stream\n");
10617         return dc_set_psr_allow_active(dm->dc, false);
10618 }
10619
10620 void amdgpu_dm_trigger_timing_sync(struct drm_device *dev)
10621 {
10622         struct amdgpu_device *adev = drm_to_adev(dev);
10623         struct dc *dc = adev->dm.dc;
10624         int i;
10625
10626         mutex_lock(&adev->dm.dc_lock);
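              /* Propagate the force_timing_sync setting to every committed
               * stream, then retrigger CRTC timing synchronization.
               */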
10627         if (dc->current_state) {
10628                 for (i = 0; i < dc->current_state->stream_count; ++i)
10629                         dc->current_state->streams[i]
10630                                 ->triggered_crtc_reset.enabled =
10631                                 adev->dm.force_timing_sync;
10632
10633                 dm_enable_per_frame_crtc_master_sync(dc->current_state);
10634                 dc_trigger_sync(dc, dc->current_state);
10635         }
10636         mutex_unlock(&adev->dm.dc_lock);
10637 }
10638
10639 void dm_write_reg_func(const struct dc_context *ctx, uint32_t address,
10640                        uint32_t value, const char *func_name)
10641 {
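              /* DM_CHECK_ADDR_0 builds trap accesses to register offset 0,
               * which usually indicate an uninitialized register variable.
               */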
10642 #ifdef DM_CHECK_ADDR_0
10643         if (address == 0) {
10644                 DC_ERR("invalid register write; address = 0\n");
10645                 return;
10646         }
10647 #endif
10648         cgs_write_register(ctx->cgs_device, address, value);
10649         trace_amdgpu_dc_wreg(&ctx->perf_trace->write_count, address, value);
10650 }
10651
10652 uint32_t dm_read_reg_func(const struct dc_context *ctx, uint32_t address,
10653                           const char *func_name)
10654 {
10655         uint32_t value;
10656 #ifdef DM_CHECK_ADDR_0
10657         if (address == 0) {
10658                 DC_ERR("invalid register read; address = 0\n");
10659                 return 0;
10660         }
10661 #endif
10662
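              /* Register reads are not allowed while DMUB offload is gathering
               * a command sequence (unless it is burst-writing); flag the
               * misuse and return a dummy value.
               */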
10663         if (ctx->dmub_srv &&
10664             ctx->dmub_srv->reg_helper_offload.gather_in_progress &&
10665             !ctx->dmub_srv->reg_helper_offload.should_burst_write) {
10666                 ASSERT(false);
10667                 return 0;
10668         }
10669
10670         value = cgs_read_register(ctx->cgs_device, address);
10671
10672         trace_amdgpu_dc_rreg(&ctx->perf_trace->read_count, address, value);
10673
10674         return value;
10675 }