/*
 * Copyright 2015 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: AMD
 *
 */

/* The caprices of the preprocessor require that this be declared right here */
#define CREATE_TRACE_POINTS

#include "dm_services_types.h"
#include "dc.h"
#include "dc/inc/core_types.h"
#include "dal_asic_id.h"
#include "dmub/dmub_srv.h"
#include "dc/inc/hw/dmcu.h"
#include "dc/inc/hw/abm.h"
#include "dc/dc_dmub_srv.h"
#include "dc/dc_edid_parser.h"
#include "dc/dc_stat.h"
#include "amdgpu_dm_trace.h"

#include "vid.h"
#include "amdgpu.h"
#include "amdgpu_display.h"
#include "amdgpu_ucode.h"
#include "atom.h"
#include "amdgpu_dm.h"
#ifdef CONFIG_DRM_AMD_DC_HDCP
#include "amdgpu_dm_hdcp.h"
#include <drm/drm_hdcp.h>
#endif
#include "amdgpu_pm.h"

#include "amd_shared.h"
#include "amdgpu_dm_irq.h"
#include "dm_helpers.h"
#include "amdgpu_dm_mst_types.h"
#if defined(CONFIG_DEBUG_FS)
#include "amdgpu_dm_debugfs.h"
#endif

#include "ivsrcid/ivsrcid_vislands30.h"

#include "i2caux_interface.h"
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/types.h>
#include <linux/pm_runtime.h>
#include <linux/pci.h>
#include <linux/firmware.h>
#include <linux/component.h>

#include <drm/drm_atomic.h>
#include <drm/drm_atomic_uapi.h>
#include <drm/drm_atomic_helper.h>
#include <drm/drm_dp_mst_helper.h>
#include <drm/drm_fb_helper.h>
#include <drm/drm_fourcc.h>
#include <drm/drm_edid.h>
#include <drm/drm_vblank.h>
#include <drm/drm_audio_component.h>

#if defined(CONFIG_DRM_AMD_DC_DCN)
#include "ivsrcid/dcn/irqsrcs_dcn_1_0.h"

#include "dcn/dcn_1_0_offset.h"
#include "dcn/dcn_1_0_sh_mask.h"
#include "soc15_hw_ip.h"
#include "vega10_ip_offset.h"

#include "soc15_common.h"
#endif

#include "modules/inc/mod_freesync.h"
#include "modules/power/power_helpers.h"
#include "modules/inc/mod_info_packet.h"

#define FIRMWARE_RENOIR_DMUB "amdgpu/renoir_dmcub.bin"
MODULE_FIRMWARE(FIRMWARE_RENOIR_DMUB);
#define FIRMWARE_SIENNA_CICHLID_DMUB "amdgpu/sienna_cichlid_dmcub.bin"
MODULE_FIRMWARE(FIRMWARE_SIENNA_CICHLID_DMUB);
#define FIRMWARE_NAVY_FLOUNDER_DMUB "amdgpu/navy_flounder_dmcub.bin"
MODULE_FIRMWARE(FIRMWARE_NAVY_FLOUNDER_DMUB);
#define FIRMWARE_GREEN_SARDINE_DMUB "amdgpu/green_sardine_dmcub.bin"
MODULE_FIRMWARE(FIRMWARE_GREEN_SARDINE_DMUB);
#define FIRMWARE_VANGOGH_DMUB "amdgpu/vangogh_dmcub.bin"
MODULE_FIRMWARE(FIRMWARE_VANGOGH_DMUB);
#define FIRMWARE_DIMGREY_CAVEFISH_DMUB "amdgpu/dimgrey_cavefish_dmcub.bin"
MODULE_FIRMWARE(FIRMWARE_DIMGREY_CAVEFISH_DMUB);

#define FIRMWARE_RAVEN_DMCU		"amdgpu/raven_dmcu.bin"
MODULE_FIRMWARE(FIRMWARE_RAVEN_DMCU);

#define FIRMWARE_NAVI12_DMCU		"amdgpu/navi12_dmcu.bin"
MODULE_FIRMWARE(FIRMWARE_NAVI12_DMCU);

/* Number of bytes in PSP header for firmware. */
#define PSP_HEADER_BYTES 0x100

/* Number of bytes in PSP footer for firmware. */
#define PSP_FOOTER_BYTES 0x100

/**
 * DOC: overview
 *
 * The AMDgpu display manager, **amdgpu_dm** (or even simpler,
 * **dm**) sits between DRM and DC. It acts as a liaison, converting DRM
 * requests into DC requests, and DC responses into DRM responses.
 *
 * The root control structure is &struct amdgpu_display_manager.
 */

/* basic init/fini API */
static int amdgpu_dm_init(struct amdgpu_device *adev);
static void amdgpu_dm_fini(struct amdgpu_device *adev);
static bool is_freesync_video_mode(const struct drm_display_mode *mode, struct amdgpu_dm_connector *aconnector);

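/*
 * Map the DP dongle type reported in the link's DPCD caps to the DRM
 * subconnector enum exposed to userspace.
 */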
static enum drm_mode_subconnector get_subconnector_type(struct dc_link *link)
{
	switch (link->dpcd_caps.dongle_type) {
	case DISPLAY_DONGLE_NONE:
		return DRM_MODE_SUBCONNECTOR_Native;
	case DISPLAY_DONGLE_DP_VGA_CONVERTER:
		return DRM_MODE_SUBCONNECTOR_VGA;
	case DISPLAY_DONGLE_DP_DVI_CONVERTER:
	case DISPLAY_DONGLE_DP_DVI_DONGLE:
		return DRM_MODE_SUBCONNECTOR_DVID;
	case DISPLAY_DONGLE_DP_HDMI_CONVERTER:
	case DISPLAY_DONGLE_DP_HDMI_DONGLE:
		return DRM_MODE_SUBCONNECTOR_HDMIA;
	case DISPLAY_DONGLE_DP_HDMI_MISMATCHED_DONGLE:
	default:
		return DRM_MODE_SUBCONNECTOR_Unknown;
	}
}

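/*
 * Keep the DRM dp_subconnector property of a DisplayPort connector in sync
 * with the dongle type of the currently attached sink (Unknown when no sink
 * is present).
 */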
static void update_subconnector_property(struct amdgpu_dm_connector *aconnector)
{
	struct dc_link *link = aconnector->dc_link;
	struct drm_connector *connector = &aconnector->base;
	enum drm_mode_subconnector subconnector = DRM_MODE_SUBCONNECTOR_Unknown;

	if (connector->connector_type != DRM_MODE_CONNECTOR_DisplayPort)
		return;

	if (aconnector->dc_sink)
		subconnector = get_subconnector_type(link);

	drm_object_property_set_value(&connector->base,
			connector->dev->mode_config.dp_subconnector_property,
			subconnector);
}

/*
 * Initializes drm_device display related structures, based on the information
 * provided by DAL. The drm structures are: drm_crtc, drm_connector,
 * drm_encoder and drm_mode_config.
 *
 * Returns 0 on success
 */
static int amdgpu_dm_initialize_drm_device(struct amdgpu_device *adev);
/* removes and deallocates the drm structures, created by the above function */
static void amdgpu_dm_destroy_drm_device(struct amdgpu_display_manager *dm);

static int amdgpu_dm_plane_init(struct amdgpu_display_manager *dm,
				struct drm_plane *plane,
				unsigned long possible_crtcs,
				const struct dc_plane_cap *plane_cap);
static int amdgpu_dm_crtc_init(struct amdgpu_display_manager *dm,
			       struct drm_plane *plane,
			       uint32_t link_index);
static int amdgpu_dm_connector_init(struct amdgpu_display_manager *dm,
				    struct amdgpu_dm_connector *amdgpu_dm_connector,
				    uint32_t link_index,
				    struct amdgpu_encoder *amdgpu_encoder);
static int amdgpu_dm_encoder_init(struct drm_device *dev,
				  struct amdgpu_encoder *aencoder,
				  uint32_t link_index);

static int amdgpu_dm_connector_get_modes(struct drm_connector *connector);

static void amdgpu_dm_atomic_commit_tail(struct drm_atomic_state *state);

static int amdgpu_dm_atomic_check(struct drm_device *dev,
				  struct drm_atomic_state *state);

static void handle_cursor_update(struct drm_plane *plane,
				 struct drm_plane_state *old_plane_state);

static void amdgpu_dm_set_psr_caps(struct dc_link *link);
static bool amdgpu_dm_psr_enable(struct dc_stream_state *stream);
static bool amdgpu_dm_link_setup_psr(struct dc_stream_state *stream);
static bool amdgpu_dm_psr_disable(struct dc_stream_state *stream);
static bool amdgpu_dm_psr_disable_all(struct amdgpu_display_manager *dm);

static const struct drm_format_info *
amd_get_format_info(const struct drm_mode_fb_cmd2 *cmd);

static bool
is_timing_unchanged_for_freesync(struct drm_crtc_state *old_crtc_state,
				 struct drm_crtc_state *new_crtc_state);
/**
 * dm_vblank_get_counter() - Get the count of vertical blanks for a CRTC
 * @adev: [in] desired amdgpu device
 * @crtc: [in] index of the CRTC to get the counter from
 *
 * Return: Counter for vertical blanks
 */
static u32 dm_vblank_get_counter(struct amdgpu_device *adev, int crtc)
{
	if (crtc >= adev->mode_info.num_crtc)
		return 0;
	else {
		struct amdgpu_crtc *acrtc = adev->mode_info.crtcs[crtc];

		if (acrtc->dm_irq_params.stream == NULL) {
			DRM_ERROR("dc_stream_state is NULL for crtc '%d'!\n",
				  crtc);
			return 0;
		}

		return dc_stream_get_vblank_counter(acrtc->dm_irq_params.stream);
	}
}

static int dm_crtc_get_scanoutpos(struct amdgpu_device *adev, int crtc,
				  u32 *vbl, u32 *position)
{
	uint32_t v_blank_start, v_blank_end, h_position, v_position;

	if ((crtc < 0) || (crtc >= adev->mode_info.num_crtc))
		return -EINVAL;
	else {
		struct amdgpu_crtc *acrtc = adev->mode_info.crtcs[crtc];

		if (acrtc->dm_irq_params.stream == NULL) {
			DRM_ERROR("dc_stream_state is NULL for crtc '%d'!\n",
				  crtc);
			return 0;
		}

		/*
		 * TODO rework base driver to use values directly.
		 * for now parse it back into reg-format
		 */
		dc_stream_get_scanoutpos(acrtc->dm_irq_params.stream,
					 &v_blank_start,
					 &v_blank_end,
					 &h_position,
					 &v_position);

		*position = v_position | (h_position << 16);
		*vbl = v_blank_start | (v_blank_end << 16);
	}

	return 0;
}

static bool dm_is_idle(void *handle)
{
	/* XXX todo */
	return true;
}

static int dm_wait_for_idle(void *handle)
{
	/* XXX todo */
	return 0;
}

static bool dm_check_soft_reset(void *handle)
{
	return false;
}

static int dm_soft_reset(void *handle)
{
	/* XXX todo */
	return 0;
}

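/*
 * Look up the amdgpu_crtc driving the given OTG (output timing generator)
 * instance. Warns and falls back to the first CRTC for an invalid instance,
 * and returns NULL if no CRTC matches.
 */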
static struct amdgpu_crtc *
get_crtc_by_otg_inst(struct amdgpu_device *adev,
		     int otg_inst)
{
	struct drm_device *dev = adev_to_drm(adev);
	struct drm_crtc *crtc;
	struct amdgpu_crtc *amdgpu_crtc;

	if (otg_inst == -1) {
		WARN_ON(1);
		return adev->mode_info.crtcs[0];
	}

	list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
		amdgpu_crtc = to_amdgpu_crtc(crtc);

		if (amdgpu_crtc->otg_inst == otg_inst)
			return amdgpu_crtc;
	}

	return NULL;
}

static inline bool amdgpu_dm_vrr_active_irq(struct amdgpu_crtc *acrtc)
{
	return acrtc->dm_irq_params.freesync_config.state ==
		       VRR_STATE_ACTIVE_VARIABLE ||
	       acrtc->dm_irq_params.freesync_config.state ==
		       VRR_STATE_ACTIVE_FIXED;
}

static inline bool amdgpu_dm_vrr_active(struct dm_crtc_state *dm_state)
{
	return dm_state->freesync_config.state == VRR_STATE_ACTIVE_VARIABLE ||
	       dm_state->freesync_config.state == VRR_STATE_ACTIVE_FIXED;
}

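/*
 * A DC vmin/vmax timing adjustment is needed when fixed-rate VRR is active
 * in the new state, or when VRR is being switched on or off between the two
 * states.
 */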
static inline bool is_dc_timing_adjust_needed(struct dm_crtc_state *old_state,
					      struct dm_crtc_state *new_state)
{
	if (new_state->freesync_config.state == VRR_STATE_ACTIVE_FIXED)
		return true;
	else if (amdgpu_dm_vrr_active(old_state) != amdgpu_dm_vrr_active(new_state))
		return true;
	else
		return false;
}

/**
 * dm_pflip_high_irq() - Handle pageflip interrupt
 * @interrupt_params: interrupt parameters, used to look up the CRTC whose
 *                    pageflip completed
 *
 * Handles the pageflip interrupt by notifying all interested parties
 * that the pageflip has been completed.
 */
static void dm_pflip_high_irq(void *interrupt_params)
{
	struct amdgpu_crtc *amdgpu_crtc;
	struct common_irq_params *irq_params = interrupt_params;
	struct amdgpu_device *adev = irq_params->adev;
	unsigned long flags;
	struct drm_pending_vblank_event *e;
	uint32_t vpos, hpos, v_blank_start, v_blank_end;
	bool vrr_active;

	amdgpu_crtc = get_crtc_by_otg_inst(adev, irq_params->irq_src - IRQ_TYPE_PFLIP);

	/* IRQ could occur when in initial stage */
	/* TODO work and BO cleanup */
	if (amdgpu_crtc == NULL) {
		DC_LOG_PFLIP("CRTC is null, returning.\n");
		return;
	}

	spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);

	if (amdgpu_crtc->pflip_status != AMDGPU_FLIP_SUBMITTED) {
		DC_LOG_PFLIP("amdgpu_crtc->pflip_status = %d != AMDGPU_FLIP_SUBMITTED(%d) on crtc:%d[%p]\n",
			     amdgpu_crtc->pflip_status,
			     AMDGPU_FLIP_SUBMITTED,
			     amdgpu_crtc->crtc_id,
			     amdgpu_crtc);
		spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
		return;
	}

	/* page flip completed. */
	e = amdgpu_crtc->event;
	amdgpu_crtc->event = NULL;

	if (!e)
		WARN_ON(1);

	vrr_active = amdgpu_dm_vrr_active_irq(amdgpu_crtc);

	/* Fixed refresh rate, or VRR scanout position outside front-porch? */
	if (!vrr_active ||
	    !dc_stream_get_scanoutpos(amdgpu_crtc->dm_irq_params.stream, &v_blank_start,
				      &v_blank_end, &hpos, &vpos) ||
	    (vpos < v_blank_start)) {
		/* Update to correct count and vblank timestamp if racing with
		 * vblank irq. This also updates to the correct vblank timestamp
		 * even in VRR mode, as scanout is past the front-porch atm.
		 */
		drm_crtc_accurate_vblank_count(&amdgpu_crtc->base);

		/* Wake up userspace by sending the pageflip event with proper
		 * count and timestamp of vblank of flip completion.
		 */
		if (e) {
			drm_crtc_send_vblank_event(&amdgpu_crtc->base, e);

			/* Event sent, so done with vblank for this flip */
			drm_crtc_vblank_put(&amdgpu_crtc->base);
		}
	} else if (e) {
		/* VRR active and inside front-porch: vblank count and
		 * timestamp for pageflip event will only be up to date after
		 * drm_crtc_handle_vblank() has been executed from late vblank
		 * irq handler after start of back-porch (vline 0). We queue the
		 * pageflip event for send-out by drm_crtc_handle_vblank() with
		 * updated timestamp and count, once it runs after us.
		 *
		 * We need to open-code this instead of using the helper
		 * drm_crtc_arm_vblank_event(), as that helper would
		 * call drm_crtc_accurate_vblank_count(), which we must
		 * not call in VRR mode while we are in front-porch!
		 */

		/* sequence will be replaced by real count during send-out. */
		e->sequence = drm_crtc_vblank_count(&amdgpu_crtc->base);
		e->pipe = amdgpu_crtc->crtc_id;

		list_add_tail(&e->base.link, &adev_to_drm(adev)->vblank_event_list);
		e = NULL;
	}

	/* Keep track of vblank of this flip for flip throttling. We use the
	 * cooked hw counter, as that one incremented at start of this vblank
	 * of pageflip completion, so last_flip_vblank is the forbidden count
	 * for queueing new pageflips if vsync + VRR is enabled.
	 */
	amdgpu_crtc->dm_irq_params.last_flip_vblank =
		amdgpu_get_vblank_counter_kms(&amdgpu_crtc->base);

	amdgpu_crtc->pflip_status = AMDGPU_FLIP_NONE;
	spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);

	DC_LOG_PFLIP("crtc:%d[%p], pflip_stat:AMDGPU_FLIP_NONE, vrr[%d]-fp %d\n",
		     amdgpu_crtc->crtc_id, amdgpu_crtc,
		     vrr_active, (int) !e);
}

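/**
 * dm_vupdate_high_irq() - Handles VUPDATE interrupt
 * @interrupt_params: interrupt parameters
 *
 * Tracks the measured refresh rate for tracing, and in VRR mode performs
 * the core vblank handling after the end of the front-porch, including
 * BTR processing for pre-DCE12 ASICs.
 */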
static void dm_vupdate_high_irq(void *interrupt_params)
{
	struct common_irq_params *irq_params = interrupt_params;
	struct amdgpu_device *adev = irq_params->adev;
	struct amdgpu_crtc *acrtc;
	struct drm_device *drm_dev;
	struct drm_vblank_crtc *vblank;
	ktime_t frame_duration_ns, previous_timestamp;
	unsigned long flags;
	int vrr_active;

	acrtc = get_crtc_by_otg_inst(adev, irq_params->irq_src - IRQ_TYPE_VUPDATE);

	if (acrtc) {
		vrr_active = amdgpu_dm_vrr_active_irq(acrtc);
		drm_dev = acrtc->base.dev;
		vblank = &drm_dev->vblank[acrtc->base.index];
		previous_timestamp = atomic64_read(&irq_params->previous_timestamp);
		frame_duration_ns = vblank->time - previous_timestamp;

		if (frame_duration_ns > 0) {
			trace_amdgpu_refresh_rate_track(acrtc->base.index,
						frame_duration_ns,
						ktime_divns(NSEC_PER_SEC, frame_duration_ns));
			atomic64_set(&irq_params->previous_timestamp, vblank->time);
		}

		DC_LOG_VBLANK("crtc:%d, vupdate-vrr:%d\n",
			      acrtc->crtc_id,
			      vrr_active);

		/* Core vblank handling is done here after the end of the
		 * front-porch in vrr mode, as vblank timestamping only gives
		 * valid results after the front-porch. This will also deliver
		 * page-flip completion events that have been queued to us
		 * if a pageflip happened inside front-porch.
		 */
		if (vrr_active) {
			drm_crtc_handle_vblank(&acrtc->base);

			/* BTR processing for pre-DCE12 ASICs */
			if (acrtc->dm_irq_params.stream &&
			    adev->family < AMDGPU_FAMILY_AI) {
				spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);
				mod_freesync_handle_v_update(
				    adev->dm.freesync_module,
				    acrtc->dm_irq_params.stream,
				    &acrtc->dm_irq_params.vrr_params);

				dc_stream_adjust_vmin_vmax(
				    adev->dm.dc,
				    acrtc->dm_irq_params.stream,
				    &acrtc->dm_irq_params.vrr_params.adjust);
				spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
			}
		}
	}
}

/**
 * dm_crtc_high_irq() - Handles CRTC interrupt
 * @interrupt_params: used for determining the CRTC instance
 *
 * Handles the CRTC/VSYNC interrupt by notifying DRM's VBLANK
 * event handler.
 */
static void dm_crtc_high_irq(void *interrupt_params)
{
	struct common_irq_params *irq_params = interrupt_params;
	struct amdgpu_device *adev = irq_params->adev;
	struct amdgpu_crtc *acrtc;
	unsigned long flags;
	int vrr_active;

	acrtc = get_crtc_by_otg_inst(adev, irq_params->irq_src - IRQ_TYPE_VBLANK);
	if (!acrtc)
		return;

	vrr_active = amdgpu_dm_vrr_active_irq(acrtc);

	DC_LOG_VBLANK("crtc:%d, vupdate-vrr:%d, planes:%d\n", acrtc->crtc_id,
		      vrr_active, acrtc->dm_irq_params.active_planes);

	/*
	 * Core vblank handling at start of front-porch is only possible
	 * in non-vrr mode, as only then does vblank timestamping give
	 * valid results while done in front-porch. Otherwise defer it
	 * to dm_vupdate_high_irq after end of front-porch.
	 */
	if (!vrr_active)
		drm_crtc_handle_vblank(&acrtc->base);

	/*
	 * Following stuff must happen at start of vblank, for crc
	 * computation and below-the-range btr support in vrr mode.
	 */
	amdgpu_dm_crtc_handle_crc_irq(&acrtc->base);

	/* BTR updates need to happen before VUPDATE on Vega and above. */
	if (adev->family < AMDGPU_FAMILY_AI)
		return;

	spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);

	if (acrtc->dm_irq_params.stream &&
	    acrtc->dm_irq_params.vrr_params.supported &&
	    acrtc->dm_irq_params.freesync_config.state ==
		    VRR_STATE_ACTIVE_VARIABLE) {
		mod_freesync_handle_v_update(adev->dm.freesync_module,
					     acrtc->dm_irq_params.stream,
					     &acrtc->dm_irq_params.vrr_params);

		dc_stream_adjust_vmin_vmax(adev->dm.dc, acrtc->dm_irq_params.stream,
					   &acrtc->dm_irq_params.vrr_params.adjust);
	}

	/*
	 * If there aren't any active_planes then DCH HUBP may be clock-gated.
	 * In that case, pageflip completion interrupts won't fire and pageflip
	 * completion events won't get delivered. Prevent this by sending
	 * pending pageflip events from here if a flip is still pending.
	 *
	 * If any planes are enabled, use dm_pflip_high_irq() instead, to
	 * avoid race conditions between flip programming and completion,
	 * which could cause too early flip completion events.
	 */
	if (adev->family >= AMDGPU_FAMILY_RV &&
	    acrtc->pflip_status == AMDGPU_FLIP_SUBMITTED &&
	    acrtc->dm_irq_params.active_planes == 0) {
		if (acrtc->event) {
			drm_crtc_send_vblank_event(&acrtc->base, acrtc->event);
			acrtc->event = NULL;
			drm_crtc_vblank_put(&acrtc->base);
		}
		acrtc->pflip_status = AMDGPU_FLIP_NONE;
	}

	spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
}

#if defined(CONFIG_DRM_AMD_DC_DCN)
/**
 * dm_dcn_vertical_interrupt0_high_irq() - Handles OTG Vertical interrupt0 for
 * DCN generation ASICs
 * @interrupt_params: interrupt parameters
 *
 * Used to set the CRC window/read out the CRC value at the vertical line 0 position
 */
#if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
static void dm_dcn_vertical_interrupt0_high_irq(void *interrupt_params)
{
	struct common_irq_params *irq_params = interrupt_params;
	struct amdgpu_device *adev = irq_params->adev;
	struct amdgpu_crtc *acrtc;

	acrtc = get_crtc_by_otg_inst(adev, irq_params->irq_src - IRQ_TYPE_VLINE0);

	if (!acrtc)
		return;

	amdgpu_dm_crtc_handle_crc_window_irq(&acrtc->base);
}
#endif

/**
 * dm_dmub_outbox1_low_irq() - Handles Outbox interrupt
 * @interrupt_params: used for determining the Outbox instance
 *
 * Handles the Outbox interrupt by processing pending DMUB notifications
 * and draining the DMUB trace buffer.
 */
#define DMUB_TRACE_MAX_READ 64
static void dm_dmub_outbox1_low_irq(void *interrupt_params)
{
	struct dmub_notification notify;
	struct common_irq_params *irq_params = interrupt_params;
	struct amdgpu_device *adev = irq_params->adev;
	struct amdgpu_display_manager *dm = &adev->dm;
	struct dmcub_trace_buf_entry entry = { 0 };
	uint32_t count = 0;

	if (dc_enable_dmub_notifications(adev->dm.dc)) {
		if (irq_params->irq_src == DC_IRQ_SOURCE_DMCUB_OUTBOX) {
			do {
				dc_stat_get_dmub_notification(adev->dm.dc, &notify);
			} while (notify.pending_notification);

			if (adev->dm.dmub_notify)
				memcpy(adev->dm.dmub_notify, &notify, sizeof(struct dmub_notification));
			if (notify.type == DMUB_NOTIFICATION_AUX_REPLY)
				complete(&adev->dm.dmub_aux_transfer_done);
			/* TODO: HPD Implementation */

		} else {
			DRM_ERROR("DM: Failed to receive correct outbox IRQ!");
		}
	}

	do {
		if (dc_dmub_srv_get_dmub_outbox0_msg(dm->dc, &entry)) {
			trace_amdgpu_dmub_trace_high_irq(entry.trace_code, entry.tick_count,
							 entry.param0, entry.param1);

			DRM_DEBUG_DRIVER("trace_code:%u, tick_count:%u, param0:%u, param1:%u\n",
					 entry.trace_code, entry.tick_count, entry.param0, entry.param1);
		} else
			break;

		count++;

	} while (count <= DMUB_TRACE_MAX_READ);

	ASSERT(count <= DMUB_TRACE_MAX_READ);
}
#endif

static int dm_set_clockgating_state(void *handle,
				    enum amd_clockgating_state state)
{
	return 0;
}

static int dm_set_powergating_state(void *handle,
				    enum amd_powergating_state state)
{
	return 0;
}

/* Prototypes of private functions */
static int dm_early_init(void *handle);

/* Allocate memory for FBC compressed data */
static void amdgpu_dm_fbc_init(struct drm_connector *connector)
{
	struct drm_device *dev = connector->dev;
	struct amdgpu_device *adev = drm_to_adev(dev);
	struct dm_compressor_info *compressor = &adev->dm.compressor;
	struct amdgpu_dm_connector *aconn = to_amdgpu_dm_connector(connector);
	struct drm_display_mode *mode;
	unsigned long max_size = 0;

	if (adev->dm.dc->fbc_compressor == NULL)
		return;

	if (aconn->dc_link->connector_signal != SIGNAL_TYPE_EDP)
		return;

	if (compressor->bo_ptr)
		return;

	list_for_each_entry(mode, &connector->modes, head) {
		if (max_size < mode->htotal * mode->vtotal)
			max_size = mode->htotal * mode->vtotal;
	}

	if (max_size) {
		int r = amdgpu_bo_create_kernel(adev, max_size * 4, PAGE_SIZE,
						AMDGPU_GEM_DOMAIN_GTT, &compressor->bo_ptr,
						&compressor->gpu_addr, &compressor->cpu_addr);

		if (r)
			DRM_ERROR("DM: Failed to initialize FBC\n");
		else {
			adev->dm.dc->ctx->fbc_gpu_addr = compressor->gpu_addr;
			DRM_INFO("DM: FBC alloc %lu\n", max_size * 4);
		}
	}
}

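/*
 * DRM audio component glue: look up the connector bound to the given audio
 * pin (port) and copy out its ELD. Invoked through the audio component
 * framework, typically by the HDA codec side.
 */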
static int amdgpu_dm_audio_component_get_eld(struct device *kdev, int port,
					     int pipe, bool *enabled,
					     unsigned char *buf, int max_bytes)
{
	struct drm_device *dev = dev_get_drvdata(kdev);
	struct amdgpu_device *adev = drm_to_adev(dev);
	struct drm_connector *connector;
	struct drm_connector_list_iter conn_iter;
	struct amdgpu_dm_connector *aconnector;
	int ret = 0;

	*enabled = false;

	mutex_lock(&adev->dm.audio_lock);

	drm_connector_list_iter_begin(dev, &conn_iter);
	drm_for_each_connector_iter(connector, &conn_iter) {
		aconnector = to_amdgpu_dm_connector(connector);
		if (aconnector->audio_inst != port)
			continue;

		*enabled = true;
		ret = drm_eld_size(connector->eld);
		memcpy(buf, connector->eld, min(max_bytes, ret));

		break;
	}
	drm_connector_list_iter_end(&conn_iter);

	mutex_unlock(&adev->dm.audio_lock);

	DRM_DEBUG_KMS("Get ELD : idx=%d ret=%d en=%d\n", port, ret, *enabled);

	return ret;
}

static const struct drm_audio_component_ops amdgpu_dm_audio_component_ops = {
	.get_eld = amdgpu_dm_audio_component_get_eld,
};

static int amdgpu_dm_audio_component_bind(struct device *kdev,
					  struct device *hda_kdev, void *data)
{
	struct drm_device *dev = dev_get_drvdata(kdev);
	struct amdgpu_device *adev = drm_to_adev(dev);
	struct drm_audio_component *acomp = data;

	acomp->ops = &amdgpu_dm_audio_component_ops;
	acomp->dev = kdev;
	adev->dm.audio_component = acomp;

	return 0;
}

static void amdgpu_dm_audio_component_unbind(struct device *kdev,
					     struct device *hda_kdev, void *data)
{
	struct drm_device *dev = dev_get_drvdata(kdev);
	struct amdgpu_device *adev = drm_to_adev(dev);
	struct drm_audio_component *acomp = data;

	acomp->ops = NULL;
	acomp->dev = NULL;
	adev->dm.audio_component = NULL;
}

static const struct component_ops amdgpu_dm_audio_component_bind_ops = {
	.bind	= amdgpu_dm_audio_component_bind,
	.unbind = amdgpu_dm_audio_component_unbind,
};

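/*
 * Register the audio component and initialize one audio pin per audio
 * endpoint exposed by the DC resource pool.
 */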
static int amdgpu_dm_audio_init(struct amdgpu_device *adev)
{
	int i, ret;

	if (!amdgpu_audio)
		return 0;

	adev->mode_info.audio.enabled = true;

	adev->mode_info.audio.num_pins = adev->dm.dc->res_pool->audio_count;

	for (i = 0; i < adev->mode_info.audio.num_pins; i++) {
		adev->mode_info.audio.pin[i].channels = -1;
		adev->mode_info.audio.pin[i].rate = -1;
		adev->mode_info.audio.pin[i].bits_per_sample = -1;
		adev->mode_info.audio.pin[i].status_bits = 0;
		adev->mode_info.audio.pin[i].category_code = 0;
		adev->mode_info.audio.pin[i].connected = false;
		adev->mode_info.audio.pin[i].id =
			adev->dm.dc->res_pool->audios[i]->inst;
		adev->mode_info.audio.pin[i].offset = 0;
	}

	ret = component_add(adev->dev, &amdgpu_dm_audio_component_bind_ops);
	if (ret < 0)
		return ret;

	adev->dm.audio_registered = true;

	return 0;
}

static void amdgpu_dm_audio_fini(struct amdgpu_device *adev)
{
	if (!amdgpu_audio)
		return;

	if (!adev->mode_info.audio.enabled)
		return;

	if (adev->dm.audio_registered) {
		component_del(adev->dev, &amdgpu_dm_audio_component_bind_ops);
		adev->dm.audio_registered = false;
	}

	/* TODO: Disable audio? */

	adev->mode_info.audio.enabled = false;
}

static void amdgpu_dm_audio_eld_notify(struct amdgpu_device *adev, int pin)
{
	struct drm_audio_component *acomp = adev->dm.audio_component;

	if (acomp && acomp->audio_ops && acomp->audio_ops->pin_eld_notify) {
		DRM_DEBUG_KMS("Notify ELD: %d\n", pin);

		acomp->audio_ops->pin_eld_notify(acomp->audio_ops->audio_ptr,
						 pin, -1);
	}
}

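/*
 * Bring up the DMUB (display microcontroller) hardware: copy the firmware
 * and VBIOS into the framebuffer memory windows, program the hardware
 * parameters, wait for the firmware to auto-load, and create the DC DMUB
 * server wrapper. Returns 0 when DMUB is unsupported on the ASIC.
 */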
static int dm_dmub_hw_init(struct amdgpu_device *adev)
{
	const struct dmcub_firmware_header_v1_0 *hdr;
	struct dmub_srv *dmub_srv = adev->dm.dmub_srv;
	struct dmub_srv_fb_info *fb_info = adev->dm.dmub_fb_info;
	const struct firmware *dmub_fw = adev->dm.dmub_fw;
	struct dmcu *dmcu = adev->dm.dc->res_pool->dmcu;
	struct abm *abm = adev->dm.dc->res_pool->abm;
	struct dmub_srv_hw_params hw_params;
	enum dmub_status status;
	const unsigned char *fw_inst_const, *fw_bss_data;
	uint32_t i, fw_inst_const_size, fw_bss_data_size;
	bool has_hw_support;

	if (!dmub_srv)
		/* DMUB isn't supported on the ASIC. */
		return 0;

	if (!fb_info) {
		DRM_ERROR("No framebuffer info for DMUB service.\n");
		return -EINVAL;
	}

	if (!dmub_fw) {
		/* Firmware required for DMUB support. */
		DRM_ERROR("No firmware provided for DMUB.\n");
		return -EINVAL;
	}

	status = dmub_srv_has_hw_support(dmub_srv, &has_hw_support);
	if (status != DMUB_STATUS_OK) {
		DRM_ERROR("Error checking HW support for DMUB: %d\n", status);
		return -EINVAL;
	}

	if (!has_hw_support) {
		DRM_INFO("DMUB unsupported on ASIC\n");
		return 0;
	}

	hdr = (const struct dmcub_firmware_header_v1_0 *)dmub_fw->data;

	fw_inst_const = dmub_fw->data +
			le32_to_cpu(hdr->header.ucode_array_offset_bytes) +
			PSP_HEADER_BYTES;

	fw_bss_data = dmub_fw->data +
		      le32_to_cpu(hdr->header.ucode_array_offset_bytes) +
		      le32_to_cpu(hdr->inst_const_bytes);

	/* Copy firmware and bios info into FB memory. */
	fw_inst_const_size = le32_to_cpu(hdr->inst_const_bytes) -
			     PSP_HEADER_BYTES - PSP_FOOTER_BYTES;

	fw_bss_data_size = le32_to_cpu(hdr->bss_data_bytes);

	/* if adev->firmware.load_type == AMDGPU_FW_LOAD_PSP,
	 * amdgpu_ucode_init_single_fw will load dmub firmware
	 * fw_inst_const part to cw0; otherwise, the firmware back door load
	 * will be done by dm_dmub_hw_init
	 */
	if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP) {
		memcpy(fb_info->fb[DMUB_WINDOW_0_INST_CONST].cpu_addr, fw_inst_const,
		       fw_inst_const_size);
	}

	if (fw_bss_data_size)
		memcpy(fb_info->fb[DMUB_WINDOW_2_BSS_DATA].cpu_addr,
		       fw_bss_data, fw_bss_data_size);

	/* Copy firmware bios info into FB memory. */
	memcpy(fb_info->fb[DMUB_WINDOW_3_VBIOS].cpu_addr, adev->bios,
	       adev->bios_size);

	/* Reset regions that need to be reset. */
	memset(fb_info->fb[DMUB_WINDOW_4_MAILBOX].cpu_addr, 0,
	       fb_info->fb[DMUB_WINDOW_4_MAILBOX].size);

	memset(fb_info->fb[DMUB_WINDOW_5_TRACEBUFF].cpu_addr, 0,
	       fb_info->fb[DMUB_WINDOW_5_TRACEBUFF].size);

	memset(fb_info->fb[DMUB_WINDOW_6_FW_STATE].cpu_addr, 0,
	       fb_info->fb[DMUB_WINDOW_6_FW_STATE].size);

	/* Initialize hardware. */
	memset(&hw_params, 0, sizeof(hw_params));
	hw_params.fb_base = adev->gmc.fb_start;
	hw_params.fb_offset = adev->gmc.aper_base;

	/* backdoor load firmware and trigger dmub running */
	if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP)
		hw_params.load_inst_const = true;

	if (dmcu)
		hw_params.psp_version = dmcu->psp_version;

	for (i = 0; i < fb_info->num_fb; ++i)
		hw_params.fb[i] = &fb_info->fb[i];

	status = dmub_srv_hw_init(dmub_srv, &hw_params);
	if (status != DMUB_STATUS_OK) {
		DRM_ERROR("Error initializing DMUB HW: %d\n", status);
		return -EINVAL;
	}

	/* Wait for firmware load to finish. */
	status = dmub_srv_wait_for_auto_load(dmub_srv, 100000);
	if (status != DMUB_STATUS_OK)
		DRM_WARN("Wait for DMUB auto-load failed: %d\n", status);

	/* Init DMCU and ABM if available. */
	if (dmcu && abm) {
		dmcu->funcs->dmcu_init(dmcu);
		abm->dmcu_is_running = dmcu->funcs->is_dmcu_initialized(dmcu);
	}

	adev->dm.dc->ctx->dmub_srv = dc_dmub_srv_create(adev->dm.dc, dmub_srv);
	if (!adev->dm.dc->ctx->dmub_srv) {
		DRM_ERROR("Couldn't allocate DC DMUB server!\n");
		return -ENOMEM;
	}

	DRM_INFO("DMUB hardware initialized: version=0x%08X\n",
		 adev->dm.dmcub_fw_version);

	return 0;
}

#if defined(CONFIG_DRM_AMD_DC_DCN)
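/*
 * Translate the GMC view of system memory (framebuffer and AGP apertures,
 * GART page table location) into the DC physical address space config
 * consumed by dc_setup_system_context(), converting byte addresses into
 * the granularities the hardware expects.
 */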
static void mmhub_read_system_context(struct amdgpu_device *adev, struct dc_phy_addr_space_config *pa_config)
{
	uint64_t pt_base;
	uint32_t logical_addr_low;
	uint32_t logical_addr_high;
	uint32_t agp_base, agp_bot, agp_top;
	PHYSICAL_ADDRESS_LOC page_table_start, page_table_end, page_table_base;

	logical_addr_low  = min(adev->gmc.fb_start, adev->gmc.agp_start) >> 18;
	pt_base = amdgpu_gmc_pd_addr(adev->gart.bo);

	if (adev->apu_flags & AMD_APU_IS_RAVEN2)
		/*
		 * Raven2 has a HW issue that it is unable to use the vram which
		 * is out of MC_VM_SYSTEM_APERTURE_HIGH_ADDR. So here is the
		 * workaround that increases the system aperture high address
		 * (add 1) to get rid of the VM fault and hardware hang.
		 */
		logical_addr_high = max((adev->gmc.fb_end >> 18) + 0x1, adev->gmc.agp_end >> 18);
	else
		logical_addr_high = max(adev->gmc.fb_end, adev->gmc.agp_end) >> 18;

	agp_base = 0;
	agp_bot = adev->gmc.agp_start >> 24;
	agp_top = adev->gmc.agp_end >> 24;

	page_table_start.high_part = (u32)(adev->gmc.gart_start >> 44) & 0xF;
	page_table_start.low_part = (u32)(adev->gmc.gart_start >> 12);
	page_table_end.high_part = (u32)(adev->gmc.gart_end >> 44) & 0xF;
	page_table_end.low_part = (u32)(adev->gmc.gart_end >> 12);
	page_table_base.high_part = upper_32_bits(pt_base) & 0xF;
	page_table_base.low_part = lower_32_bits(pt_base);

	pa_config->system_aperture.start_addr = (uint64_t)logical_addr_low << 18;
	pa_config->system_aperture.end_addr = (uint64_t)logical_addr_high << 18;

	pa_config->system_aperture.agp_base = (uint64_t)agp_base << 24;
	pa_config->system_aperture.agp_bot = (uint64_t)agp_bot << 24;
	pa_config->system_aperture.agp_top = (uint64_t)agp_top << 24;

	pa_config->system_aperture.fb_base = adev->gmc.fb_start;
	pa_config->system_aperture.fb_offset = adev->gmc.aper_base;
	pa_config->system_aperture.fb_top = adev->gmc.fb_end;

	pa_config->gart_config.page_table_start_addr = page_table_start.quad_part << 12;
	pa_config->gart_config.page_table_end_addr = page_table_end.quad_part << 12;
	pa_config->gart_config.page_table_base_addr = page_table_base.quad_part;

	pa_config->is_hvm_enabled = 0;
}
#endif
#if defined(CONFIG_DRM_AMD_DC_DCN)
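/*
 * Deferred vblank work: count how many CRTCs have vblank interrupts enabled
 * and only allow DC idle optimizations (MALL stutter) when none do.
 */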
static void event_mall_stutter(struct work_struct *work)
{
	struct vblank_workqueue *vblank_work = container_of(work, struct vblank_workqueue, mall_work);
	struct amdgpu_display_manager *dm = vblank_work->dm;

	mutex_lock(&dm->dc_lock);

	if (vblank_work->enable)
		dm->active_vblank_irq_count++;
	else if (dm->active_vblank_irq_count)
		dm->active_vblank_irq_count--;

	dc_allow_idle_optimizations(dm->dc, dm->active_vblank_irq_count == 0);

	DRM_DEBUG_KMS("Allow idle optimizations (MALL): %d\n", dm->active_vblank_irq_count == 0);

	mutex_unlock(&dm->dc_lock);
}

static struct vblank_workqueue *vblank_create_workqueue(struct amdgpu_device *adev, struct dc *dc)
{
	int max_caps = dc->caps.max_links;
	struct vblank_workqueue *vblank_work;
	int i = 0;

	vblank_work = kcalloc(max_caps, sizeof(*vblank_work), GFP_KERNEL);
	if (ZERO_OR_NULL_PTR(vblank_work)) {
		kfree(vblank_work);
		return NULL;
	}

	for (i = 0; i < max_caps; i++)
		INIT_WORK(&vblank_work[i].mall_work, event_mall_stutter);

	return vblank_work;
}
#endif

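/*
 * Core DM bring-up: initialize the IRQ machinery, create the DC instance
 * from the collected ASIC data, bring up DMUB and the optional freesync,
 * vblank, HDCP and secure-display workqueues, then register the DRM
 * display structures.
 */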
static int amdgpu_dm_init(struct amdgpu_device *adev)
{
	struct dc_init_data init_data;
#ifdef CONFIG_DRM_AMD_DC_HDCP
	struct dc_callback_init init_params;
#endif
	int r;

	adev->dm.ddev = adev_to_drm(adev);
	adev->dm.adev = adev;

	/* Zero all the fields */
	memset(&init_data, 0, sizeof(init_data));
#ifdef CONFIG_DRM_AMD_DC_HDCP
	memset(&init_params, 0, sizeof(init_params));
#endif

	mutex_init(&adev->dm.dc_lock);
	mutex_init(&adev->dm.audio_lock);
#if defined(CONFIG_DRM_AMD_DC_DCN)
	spin_lock_init(&adev->dm.vblank_lock);
#endif

	if (amdgpu_dm_irq_init(adev)) {
		DRM_ERROR("amdgpu: failed to initialize DM IRQ support.\n");
		goto error;
	}

	init_data.asic_id.chip_family = adev->family;

	init_data.asic_id.pci_revision_id = adev->pdev->revision;
	init_data.asic_id.hw_internal_rev = adev->external_rev_id;

	init_data.asic_id.vram_width = adev->gmc.vram_width;
	/* TODO: initialize init_data.asic_id.vram_type here!!!! */
	init_data.asic_id.atombios_base_address =
		adev->mode_info.atom_context->bios;

	init_data.driver = adev;

	adev->dm.cgs_device = amdgpu_cgs_create_device(adev);

	if (!adev->dm.cgs_device) {
		DRM_ERROR("amdgpu: failed to create cgs device.\n");
		goto error;
	}

	init_data.cgs_device = adev->dm.cgs_device;

	init_data.dce_environment = DCE_ENV_PRODUCTION_DRV;

	switch (adev->asic_type) {
	case CHIP_CARRIZO:
	case CHIP_STONEY:
	case CHIP_RAVEN:
	case CHIP_RENOIR:
		init_data.flags.gpu_vm_support = true;
		if (ASICREV_IS_GREEN_SARDINE(adev->external_rev_id))
			init_data.flags.disable_dmcu = true;
		break;
#if defined(CONFIG_DRM_AMD_DC_DCN)
	case CHIP_VANGOGH:
		init_data.flags.gpu_vm_support = true;
		break;
#endif
	default:
		break;
	}

	if (amdgpu_dc_feature_mask & DC_FBC_MASK)
		init_data.flags.fbc_support = true;

	if (amdgpu_dc_feature_mask & DC_MULTI_MON_PP_MCLK_SWITCH_MASK)
		init_data.flags.multi_mon_pp_mclk_switch = true;

	if (amdgpu_dc_feature_mask & DC_DISABLE_FRACTIONAL_PWM_MASK)
		init_data.flags.disable_fractional_pwm = true;

	init_data.flags.power_down_display_on_boot = true;

	INIT_LIST_HEAD(&adev->dm.da_list);
	/* Display Core create. */
	adev->dm.dc = dc_create(&init_data);

	if (adev->dm.dc) {
		DRM_INFO("Display Core initialized with v%s!\n", DC_VER);
	} else {
		DRM_INFO("Display Core failed to initialize with v%s!\n", DC_VER);
		goto error;
	}

	if (amdgpu_dc_debug_mask & DC_DISABLE_PIPE_SPLIT) {
		adev->dm.dc->debug.force_single_disp_pipe_split = false;
		adev->dm.dc->debug.pipe_split_policy = MPC_SPLIT_AVOID;
	}

	if (adev->asic_type != CHIP_CARRIZO && adev->asic_type != CHIP_STONEY)
		adev->dm.dc->debug.disable_stutter = amdgpu_pp_feature_mask & PP_STUTTER_MODE ? false : true;

	if (amdgpu_dc_debug_mask & DC_DISABLE_STUTTER)
		adev->dm.dc->debug.disable_stutter = true;

	if (amdgpu_dc_debug_mask & DC_DISABLE_DSC)
		adev->dm.dc->debug.disable_dsc = true;

	if (amdgpu_dc_debug_mask & DC_DISABLE_CLOCK_GATING)
		adev->dm.dc->debug.disable_clock_gate = true;

	r = dm_dmub_hw_init(adev);
	if (r) {
		DRM_ERROR("DMUB interface failed to initialize: status=%d\n", r);
		goto error;
	}

	dc_hardware_init(adev->dm.dc);

#if defined(CONFIG_DRM_AMD_DC_DCN)
	if (adev->apu_flags) {
		struct dc_phy_addr_space_config pa_config;

		mmhub_read_system_context(adev, &pa_config);

		/* Call the DC init_memory func */
		dc_setup_system_context(adev->dm.dc, &pa_config);
	}
#endif

	adev->dm.freesync_module = mod_freesync_create(adev->dm.dc);
	if (!adev->dm.freesync_module) {
		DRM_ERROR(
		"amdgpu: failed to initialize freesync_module.\n");
	} else
		DRM_DEBUG_DRIVER("amdgpu: freesync_module init done %p.\n",
				adev->dm.freesync_module);

	amdgpu_dm_init_color_mod();

#if defined(CONFIG_DRM_AMD_DC_DCN)
	if (adev->dm.dc->caps.max_links > 0) {
		adev->dm.vblank_workqueue = vblank_create_workqueue(adev, adev->dm.dc);

		if (!adev->dm.vblank_workqueue)
			DRM_ERROR("amdgpu: failed to initialize vblank_workqueue.\n");
		else
			DRM_DEBUG_DRIVER("amdgpu: vblank_workqueue init done %p.\n", adev->dm.vblank_workqueue);
	}
#endif

#ifdef CONFIG_DRM_AMD_DC_HDCP
	if (adev->dm.dc->caps.max_links > 0 && adev->asic_type >= CHIP_RAVEN) {
		adev->dm.hdcp_workqueue = hdcp_create_workqueue(adev, &init_params.cp_psp, adev->dm.dc);

		if (!adev->dm.hdcp_workqueue)
			DRM_ERROR("amdgpu: failed to initialize hdcp_workqueue.\n");
		else
			DRM_DEBUG_DRIVER("amdgpu: hdcp_workqueue init done %p.\n", adev->dm.hdcp_workqueue);

		dc_init_callbacks(adev->dm.dc, &init_params);
	}
#endif
#if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
	adev->dm.crc_rd_wrk = amdgpu_dm_crtc_secure_display_create_work();
#endif
	if (dc_enable_dmub_notifications(adev->dm.dc)) {
		init_completion(&adev->dm.dmub_aux_transfer_done);
		adev->dm.dmub_notify = kzalloc(sizeof(struct dmub_notification), GFP_KERNEL);
		if (!adev->dm.dmub_notify) {
			DRM_INFO("amdgpu: failed to allocate adev->dm.dmub_notify");
			goto error;
		}
		amdgpu_dm_outbox_init(adev);
	}

	if (amdgpu_dm_initialize_drm_device(adev)) {
		DRM_ERROR(
		"amdgpu: failed to initialize sw for display support.\n");
		goto error;
	}

	/* create fake encoders for MST */
	dm_dp_create_fake_mst_encoders(adev);

	/* TODO: Add_display_info? */

	/* TODO use dynamic cursor width */
	adev_to_drm(adev)->mode_config.cursor_width = adev->dm.dc->caps.max_cursor_size;
	adev_to_drm(adev)->mode_config.cursor_height = adev->dm.dc->caps.max_cursor_size;

	if (drm_vblank_init(adev_to_drm(adev), adev->dm.display_indexes_num)) {
		DRM_ERROR(
		"amdgpu: failed to initialize sw for display support.\n");
		goto error;
	}

	DRM_DEBUG_DRIVER("KMS initialized.\n");

	return 0;
error:
	amdgpu_dm_fini(adev);

	return -EINVAL;
}

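/* Tear down everything amdgpu_dm_init() created, in roughly reverse order. */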
static void amdgpu_dm_fini(struct amdgpu_device *adev)
{
	int i;

	for (i = 0; i < adev->dm.display_indexes_num; i++) {
		drm_encoder_cleanup(&adev->dm.mst_encoders[i].base);
	}

	amdgpu_dm_audio_fini(adev);

	amdgpu_dm_destroy_drm_device(&adev->dm);

#if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
	if (adev->dm.crc_rd_wrk) {
		flush_work(&adev->dm.crc_rd_wrk->notify_ta_work);
		kfree(adev->dm.crc_rd_wrk);
		adev->dm.crc_rd_wrk = NULL;
	}
#endif
#ifdef CONFIG_DRM_AMD_DC_HDCP
	if (adev->dm.hdcp_workqueue) {
		hdcp_destroy(&adev->dev->kobj, adev->dm.hdcp_workqueue);
		adev->dm.hdcp_workqueue = NULL;
	}

	if (adev->dm.dc)
		dc_deinit_callbacks(adev->dm.dc);
#endif

#if defined(CONFIG_DRM_AMD_DC_DCN)
	if (adev->dm.vblank_workqueue) {
		adev->dm.vblank_workqueue->dm = NULL;
		kfree(adev->dm.vblank_workqueue);
		adev->dm.vblank_workqueue = NULL;
	}
#endif

	if (adev->dm.dc->ctx->dmub_srv) {
		dc_dmub_srv_destroy(&adev->dm.dc->ctx->dmub_srv);
		adev->dm.dc->ctx->dmub_srv = NULL;
	}

	if (dc_enable_dmub_notifications(adev->dm.dc)) {
		kfree(adev->dm.dmub_notify);
		adev->dm.dmub_notify = NULL;
	}

	if (adev->dm.dmub_bo)
		amdgpu_bo_free_kernel(&adev->dm.dmub_bo,
				      &adev->dm.dmub_bo_gpu_addr,
				      &adev->dm.dmub_bo_cpu_addr);

	/* DC Destroy TODO: Replace destroy DAL */
	if (adev->dm.dc)
		dc_destroy(&adev->dm.dc);
	/*
	 * TODO: pageflip, vblank interrupt
	 *
	 * amdgpu_dm_irq_fini(adev);
	 */

	if (adev->dm.cgs_device) {
		amdgpu_cgs_destroy_device(adev->dm.cgs_device);
		adev->dm.cgs_device = NULL;
	}
	if (adev->dm.freesync_module) {
		mod_freesync_destroy(adev->dm.freesync_module);
		adev->dm.freesync_module = NULL;
	}

	mutex_destroy(&adev->dm.audio_lock);
	mutex_destroy(&adev->dm.dc_lock);
}

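/*
 * Request and validate DMCU firmware for the ASICs that need it (Raven and
 * Navi12), and register its ERAM and interrupt-vector sections for PSP
 * loading. Missing DMCU firmware is not treated as an error.
 */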
1368 static int load_dmcu_fw(struct amdgpu_device *adev)
1369 {
1370         const char *fw_name_dmcu = NULL;
1371         int r;
1372         const struct dmcu_firmware_header_v1_0 *hdr;
1373
1374         switch(adev->asic_type) {
1375 #if defined(CONFIG_DRM_AMD_DC_SI)
1376         case CHIP_TAHITI:
1377         case CHIP_PITCAIRN:
1378         case CHIP_VERDE:
1379         case CHIP_OLAND:
1380 #endif
1381         case CHIP_BONAIRE:
1382         case CHIP_HAWAII:
1383         case CHIP_KAVERI:
1384         case CHIP_KABINI:
1385         case CHIP_MULLINS:
1386         case CHIP_TONGA:
1387         case CHIP_FIJI:
1388         case CHIP_CARRIZO:
1389         case CHIP_STONEY:
1390         case CHIP_POLARIS11:
1391         case CHIP_POLARIS10:
1392         case CHIP_POLARIS12:
1393         case CHIP_VEGAM:
1394         case CHIP_VEGA10:
1395         case CHIP_VEGA12:
1396         case CHIP_VEGA20:
1397         case CHIP_NAVI10:
1398         case CHIP_NAVI14:
1399         case CHIP_RENOIR:
1400         case CHIP_SIENNA_CICHLID:
1401         case CHIP_NAVY_FLOUNDER:
1402         case CHIP_DIMGREY_CAVEFISH:
1403         case CHIP_VANGOGH:
1404                 return 0;
1405         case CHIP_NAVI12:
1406                 fw_name_dmcu = FIRMWARE_NAVI12_DMCU;
1407                 break;
1408         case CHIP_RAVEN:
                if (ASICREV_IS_PICASSO(adev->external_rev_id) ||
                    ASICREV_IS_RAVEN2(adev->external_rev_id))
                        fw_name_dmcu = FIRMWARE_RAVEN_DMCU;
                else
                        return 0;
                break;
1416         default:
1417                 DRM_ERROR("Unsupported ASIC type: 0x%X\n", adev->asic_type);
1418                 return -EINVAL;
1419         }
1420
1421         if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP) {
1422                 DRM_DEBUG_KMS("dm: DMCU firmware not supported on direct or SMU loading\n");
1423                 return 0;
1424         }
1425
1426         r = request_firmware_direct(&adev->dm.fw_dmcu, fw_name_dmcu, adev->dev);
1427         if (r == -ENOENT) {
1428                 /* DMCU firmware is not necessary, so don't raise a fuss if it's missing */
1429                 DRM_DEBUG_KMS("dm: DMCU firmware not found\n");
1430                 adev->dm.fw_dmcu = NULL;
1431                 return 0;
1432         }
1433         if (r) {
1434                 dev_err(adev->dev, "amdgpu_dm: Can't load firmware \"%s\"\n",
1435                         fw_name_dmcu);
1436                 return r;
1437         }
1438
1439         r = amdgpu_ucode_validate(adev->dm.fw_dmcu);
1440         if (r) {
1441                 dev_err(adev->dev, "amdgpu_dm: Can't validate firmware \"%s\"\n",
1442                         fw_name_dmcu);
1443                 release_firmware(adev->dm.fw_dmcu);
1444                 adev->dm.fw_dmcu = NULL;
1445                 return r;
1446         }
1447
1448         hdr = (const struct dmcu_firmware_header_v1_0 *)adev->dm.fw_dmcu->data;
1449         adev->firmware.ucode[AMDGPU_UCODE_ID_DMCU_ERAM].ucode_id = AMDGPU_UCODE_ID_DMCU_ERAM;
1450         adev->firmware.ucode[AMDGPU_UCODE_ID_DMCU_ERAM].fw = adev->dm.fw_dmcu;
1451         adev->firmware.fw_size +=
1452                 ALIGN(le32_to_cpu(hdr->header.ucode_size_bytes) - le32_to_cpu(hdr->intv_size_bytes), PAGE_SIZE);
1453
1454         adev->firmware.ucode[AMDGPU_UCODE_ID_DMCU_INTV].ucode_id = AMDGPU_UCODE_ID_DMCU_INTV;
1455         adev->firmware.ucode[AMDGPU_UCODE_ID_DMCU_INTV].fw = adev->dm.fw_dmcu;
1456         adev->firmware.fw_size +=
1457                 ALIGN(le32_to_cpu(hdr->intv_size_bytes), PAGE_SIZE);
1458
1459         adev->dm.dmcu_fw_version = le32_to_cpu(hdr->header.ucode_version);
1460
1461         DRM_DEBUG_KMS("PSP loading DMCU firmware\n");
1462
1463         return 0;
1464 }
1465
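/*
 * Register access thunks handed to the DMUB service below; they route DMUB
 * register reads and writes through DC's dm_read_reg()/dm_write_reg()
 * helpers.
 */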
1466 static uint32_t amdgpu_dm_dmub_reg_read(void *ctx, uint32_t address)
1467 {
1468         struct amdgpu_device *adev = ctx;
1469
1470         return dm_read_reg(adev->dm.dc->ctx, address);
1471 }
1472
1473 static void amdgpu_dm_dmub_reg_write(void *ctx, uint32_t address,
1474                                      uint32_t value)
1475 {
1476         struct amdgpu_device *adev = ctx;
1477
1478         return dm_write_reg(adev->dm.dc->ctx, address, value);
1479 }
1480
1481 static int dm_dmub_sw_init(struct amdgpu_device *adev)
1482 {
1483         struct dmub_srv_create_params create_params;
1484         struct dmub_srv_region_params region_params;
1485         struct dmub_srv_region_info region_info;
1486         struct dmub_srv_fb_params fb_params;
1487         struct dmub_srv_fb_info *fb_info;
1488         struct dmub_srv *dmub_srv;
1489         const struct dmcub_firmware_header_v1_0 *hdr;
1490         const char *fw_name_dmub;
1491         enum dmub_asic dmub_asic;
1492         enum dmub_status status;
1493         int r;
1494
1495         switch (adev->asic_type) {
1496         case CHIP_RENOIR:
1497                 dmub_asic = DMUB_ASIC_DCN21;
1498                 fw_name_dmub = FIRMWARE_RENOIR_DMUB;
1499                 if (ASICREV_IS_GREEN_SARDINE(adev->external_rev_id))
1500                         fw_name_dmub = FIRMWARE_GREEN_SARDINE_DMUB;
1501                 break;
1502         case CHIP_SIENNA_CICHLID:
1503                 dmub_asic = DMUB_ASIC_DCN30;
1504                 fw_name_dmub = FIRMWARE_SIENNA_CICHLID_DMUB;
1505                 break;
1506         case CHIP_NAVY_FLOUNDER:
1507                 dmub_asic = DMUB_ASIC_DCN30;
1508                 fw_name_dmub = FIRMWARE_NAVY_FLOUNDER_DMUB;
1509                 break;
1510         case CHIP_VANGOGH:
1511                 dmub_asic = DMUB_ASIC_DCN301;
1512                 fw_name_dmub = FIRMWARE_VANGOGH_DMUB;
1513                 break;
1514         case CHIP_DIMGREY_CAVEFISH:
1515                 dmub_asic = DMUB_ASIC_DCN302;
1516                 fw_name_dmub = FIRMWARE_DIMGREY_CAVEFISH_DMUB;
1517                 break;
1518
1519         default:
1520                 /* ASIC doesn't support DMUB. */
1521                 return 0;
1522         }
1523
1524         r = request_firmware_direct(&adev->dm.dmub_fw, fw_name_dmub, adev->dev);
1525         if (r) {
1526                 DRM_ERROR("DMUB firmware loading failed: %d\n", r);
1527                 return 0;
1528         }
1529
1530         r = amdgpu_ucode_validate(adev->dm.dmub_fw);
1531         if (r) {
1532                 DRM_ERROR("Couldn't validate DMUB firmware: %d\n", r);
1533                 return 0;
1534         }
1535
        hdr = (const struct dmcub_firmware_header_v1_0 *)adev->dm.dmub_fw->data;

        /* Read the firmware version up front so the PSP path below logs it. */
        adev->dm.dmcub_fw_version = le32_to_cpu(hdr->header.ucode_version);

        if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) {
                adev->firmware.ucode[AMDGPU_UCODE_ID_DMCUB].ucode_id =
                        AMDGPU_UCODE_ID_DMCUB;
                adev->firmware.ucode[AMDGPU_UCODE_ID_DMCUB].fw =
                        adev->dm.dmub_fw;
                adev->firmware.fw_size +=
                        ALIGN(le32_to_cpu(hdr->inst_const_bytes), PAGE_SIZE);

                DRM_INFO("Loading DMUB firmware via PSP: version=0x%08X\n",
                         adev->dm.dmcub_fw_version);
        }
1551
1552         adev->dm.dmub_srv = kzalloc(sizeof(*adev->dm.dmub_srv), GFP_KERNEL);
1553         dmub_srv = adev->dm.dmub_srv;
1554
1555         if (!dmub_srv) {
1556                 DRM_ERROR("Failed to allocate DMUB service!\n");
1557                 return -ENOMEM;
1558         }
1559
1560         memset(&create_params, 0, sizeof(create_params));
1561         create_params.user_ctx = adev;
1562         create_params.funcs.reg_read = amdgpu_dm_dmub_reg_read;
1563         create_params.funcs.reg_write = amdgpu_dm_dmub_reg_write;
1564         create_params.asic = dmub_asic;
1565
1566         /* Create the DMUB service. */
1567         status = dmub_srv_create(dmub_srv, &create_params);
1568         if (status != DMUB_STATUS_OK) {
1569                 DRM_ERROR("Error creating DMUB service: %d\n", status);
1570                 return -EINVAL;
1571         }
1572
1573         /* Calculate the size of all the regions for the DMUB service. */
1574         memset(&region_params, 0, sizeof(region_params));
1575
1576         region_params.inst_const_size = le32_to_cpu(hdr->inst_const_bytes) -
1577                                         PSP_HEADER_BYTES - PSP_FOOTER_BYTES;
1578         region_params.bss_data_size = le32_to_cpu(hdr->bss_data_bytes);
1579         region_params.vbios_size = adev->bios_size;
1580         region_params.fw_bss_data = region_params.bss_data_size ?
1581                 adev->dm.dmub_fw->data +
1582                 le32_to_cpu(hdr->header.ucode_array_offset_bytes) +
1583                 le32_to_cpu(hdr->inst_const_bytes) : NULL;
1584         region_params.fw_inst_const =
1585                 adev->dm.dmub_fw->data +
1586                 le32_to_cpu(hdr->header.ucode_array_offset_bytes) +
1587                 PSP_HEADER_BYTES;
1588
1589         status = dmub_srv_calc_region_info(dmub_srv, &region_params,
1590                                            &region_info);
1591
1592         if (status != DMUB_STATUS_OK) {
1593                 DRM_ERROR("Error calculating DMUB region info: %d\n", status);
1594                 return -EINVAL;
1595         }
1596
1597         /*
1598          * Allocate a framebuffer based on the total size of all the regions.
1599          * TODO: Move this into GART.
1600          */
1601         r = amdgpu_bo_create_kernel(adev, region_info.fb_size, PAGE_SIZE,
1602                                     AMDGPU_GEM_DOMAIN_VRAM, &adev->dm.dmub_bo,
1603                                     &adev->dm.dmub_bo_gpu_addr,
1604                                     &adev->dm.dmub_bo_cpu_addr);
1605         if (r)
1606                 return r;
1607
1608         /* Rebase the regions on the framebuffer address. */
1609         memset(&fb_params, 0, sizeof(fb_params));
1610         fb_params.cpu_addr = adev->dm.dmub_bo_cpu_addr;
1611         fb_params.gpu_addr = adev->dm.dmub_bo_gpu_addr;
1612         fb_params.region_info = &region_info;
1613
1614         adev->dm.dmub_fb_info =
1615                 kzalloc(sizeof(*adev->dm.dmub_fb_info), GFP_KERNEL);
1616         fb_info = adev->dm.dmub_fb_info;
1617
1618         if (!fb_info) {
1619                 DRM_ERROR(
1620                         "Failed to allocate framebuffer info for DMUB service!\n");
1621                 return -ENOMEM;
1622         }
1623
1624         status = dmub_srv_calc_fb_info(dmub_srv, &fb_params, fb_info);
1625         if (status != DMUB_STATUS_OK) {
1626                 DRM_ERROR("Error calculating DMUB FB info: %d\n", status);
1627                 return -EINVAL;
1628         }
1629
1630         return 0;
1631 }
1632
1633 static int dm_sw_init(void *handle)
1634 {
1635         struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1636         int r;
1637
1638         r = dm_dmub_sw_init(adev);
1639         if (r)
1640                 return r;
1641
1642         return load_dmcu_fw(adev);
1643 }
1644
1645 static int dm_sw_fini(void *handle)
1646 {
1647         struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1648
1649         kfree(adev->dm.dmub_fb_info);
1650         adev->dm.dmub_fb_info = NULL;
1651
1652         if (adev->dm.dmub_srv) {
1653                 dmub_srv_destroy(adev->dm.dmub_srv);
1654                 adev->dm.dmub_srv = NULL;
1655         }
1656
1657         release_firmware(adev->dm.dmub_fw);
1658         adev->dm.dmub_fw = NULL;
1659
1660         release_firmware(adev->dm.fw_dmcu);
1661         adev->dm.fw_dmcu = NULL;
1662
1663         return 0;
1664 }
1665
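/*
 * (Re)start MST topology management on every connector whose DC link was
 * detected as an MST branch device.
 */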
1666 static int detect_mst_link_for_all_connectors(struct drm_device *dev)
1667 {
1668         struct amdgpu_dm_connector *aconnector;
1669         struct drm_connector *connector;
1670         struct drm_connector_list_iter iter;
1671         int ret = 0;
1672
1673         drm_connector_list_iter_begin(dev, &iter);
1674         drm_for_each_connector_iter(connector, &iter) {
1675                 aconnector = to_amdgpu_dm_connector(connector);
1676                 if (aconnector->dc_link->type == dc_connection_mst_branch &&
1677                     aconnector->mst_mgr.aux) {
1678                         DRM_DEBUG_DRIVER("DM_MST: starting TM on aconnector: %p [id: %d]\n",
1679                                          aconnector,
1680                                          aconnector->base.base.id);
1681
1682                         ret = drm_dp_mst_topology_mgr_set_mst(&aconnector->mst_mgr, true);
1683                         if (ret < 0) {
1684                                 DRM_ERROR("DM_MST: Failed to start MST\n");
1685                                 aconnector->dc_link->type =
1686                                         dc_connection_single;
1687                                 break;
1688                         }
1689                 }
1690         }
1691         drm_connector_list_iter_end(&iter);
1692
1693         return ret;
1694 }
1695
1696 static int dm_late_init(void *handle)
1697 {
1698         struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1699
1700         struct dmcu_iram_parameters params;
1701         unsigned int linear_lut[16];
1702         int i;
1703         struct dmcu *dmcu = NULL;
1704         bool ret = true;
1705
1706         dmcu = adev->dm.dc->res_pool->dmcu;
1707
1708         for (i = 0; i < 16; i++)
1709                 linear_lut[i] = 0xFFFF * i / 15;
1710
1711         params.set = 0;
1712         params.backlight_ramping_start = 0xCCCC;
1713         params.backlight_ramping_reduction = 0xCCCCCCCC;
1714         params.backlight_lut_array_size = 16;
1715         params.backlight_lut_array = linear_lut;
1716
        /*
         * Minimum backlight level after ABM reduction. Don't allow it to fall
         * below 1%: 0xFFFF * 0.01 = 0x28F.
         */
1720         params.min_abm_backlight = 0x28F;
1721
        /*
         * When ABM is implemented on DMCUB, the DMCU object will be NULL.
         * ABM 2.4 and up are implemented on DMCUB.
         */
1726         if (dmcu)
1727                 ret = dmcu_load_iram(dmcu, params);
1728         else if (adev->dm.dc->ctx->dmub_srv)
1729                 ret = dmub_init_abm_config(adev->dm.dc->res_pool, params);
1730
1731         if (!ret)
1732                 return -EINVAL;
1733
1734         return detect_mst_link_for_all_connectors(adev_to_drm(adev));
1735 }
1736
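/*
 * Suspend or resume the MST topology managers of all MST root connectors.
 * If a topology fails to resume, MST is disabled on that connector and a
 * hotplug event is generated so userspace can reprobe it.
 */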
1737 static void s3_handle_mst(struct drm_device *dev, bool suspend)
1738 {
1739         struct amdgpu_dm_connector *aconnector;
1740         struct drm_connector *connector;
1741         struct drm_connector_list_iter iter;
1742         struct drm_dp_mst_topology_mgr *mgr;
1743         int ret;
1744         bool need_hotplug = false;
1745
1746         drm_connector_list_iter_begin(dev, &iter);
1747         drm_for_each_connector_iter(connector, &iter) {
1748                 aconnector = to_amdgpu_dm_connector(connector);
1749                 if (aconnector->dc_link->type != dc_connection_mst_branch ||
1750                     aconnector->mst_port)
1751                         continue;
1752
1753                 mgr = &aconnector->mst_mgr;
1754
1755                 if (suspend) {
1756                         drm_dp_mst_topology_mgr_suspend(mgr);
1757                 } else {
1758                         ret = drm_dp_mst_topology_mgr_resume(mgr, true);
1759                         if (ret < 0) {
1760                                 drm_dp_mst_topology_mgr_set_mst(mgr, false);
1761                                 need_hotplug = true;
1762                         }
1763                 }
1764         }
1765         drm_connector_list_iter_end(&iter);
1766
1767         if (need_hotplug)
1768                 drm_kms_helper_hotplug_event(dev);
1769 }
1770
1771 static int amdgpu_dm_smu_write_watermarks_table(struct amdgpu_device *adev)
1772 {
1773         struct smu_context *smu = &adev->smu;
1774         int ret = 0;
1775
1776         if (!is_support_sw_smu(adev))
1777                 return 0;
1778
        /*
         * This interface is for dGPU Navi1x. The Linux dc-pplib interface
         * depends on the Windows driver's dc implementation.
         * For Navi1x, the clock settings of the dcn watermarks are fixed and
         * should be passed to smu during boot up and on resume from s3.
         * Boot up: dc calculates the dcn watermark clock settings within
         * dc_create, dcn20_resource_construct, and then calls the pplib
         * functions below to pass the settings to smu:
         * smu_set_watermarks_for_clock_ranges
         * smu_set_watermarks_table
         * navi10_set_watermarks_table
         * smu_write_watermarks_table
         *
         * For Renoir, the clock settings of the dcn watermarks are also fixed
         * values. dc has implemented a different flow for the Windows driver:
         * dc_hardware_init / dc_set_power_state
         * dcn10_init_hw
         * notify_wm_ranges
         * set_wm_ranges
         * -- Linux
         * smu_set_watermarks_for_clock_ranges
         * renoir_set_watermarks_table
         * smu_write_watermarks_table
         *
         * For Linux,
         * dc_hardware_init -> amdgpu_dm_init
         * dc_set_power_state --> dm_resume
         *
         * Therefore, this function applies to navi10/12/14 but not to Renoir.
         */
        switch (adev->asic_type) {
1810         case CHIP_NAVI10:
1811         case CHIP_NAVI14:
1812         case CHIP_NAVI12:
1813                 break;
1814         default:
1815                 return 0;
1816         }
1817
1818         ret = smu_write_watermarks_table(smu);
1819         if (ret) {
1820                 DRM_ERROR("Failed to update WMTABLE!\n");
1821                 return ret;
1822         }
1823
1824         return 0;
1825 }
1826
1827 /**
1828  * dm_hw_init() - Initialize DC device
1829  * @handle: The base driver device containing the amdgpu_dm device.
1830  *
1831  * Initialize the &struct amdgpu_display_manager device. This involves calling
1832  * the initializers of each DM component, then populating the struct with them.
1833  *
1834  * Although the function implies hardware initialization, both hardware and
1835  * software are initialized here. Splitting them out to their relevant init
1836  * hooks is a future TODO item.
1837  *
1838  * Some notable things that are initialized here:
1839  *
1840  * - Display Core, both software and hardware
1841  * - DC modules that we need (freesync and color management)
1842  * - DRM software states
1843  * - Interrupt sources and handlers
1844  * - Vblank support
1845  * - Debug FS entries, if enabled
1846  */
1847 static int dm_hw_init(void *handle)
1848 {
1849         struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1850         /* Create DAL display manager */
1851         amdgpu_dm_init(adev);
1852         amdgpu_dm_hpd_init(adev);
1853
1854         return 0;
1855 }
1856
1857 /**
1858  * dm_hw_fini() - Teardown DC device
1859  * @handle: The base driver device containing the amdgpu_dm device.
1860  *
1861  * Teardown components within &struct amdgpu_display_manager that require
1862  * cleanup. This involves cleaning up the DRM device, DC, and any modules that
1863  * were loaded. Also flush IRQ workqueues and disable them.
1864  */
1865 static int dm_hw_fini(void *handle)
1866 {
1867         struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1868
1869         amdgpu_dm_hpd_fini(adev);
1870
1871         amdgpu_dm_irq_fini(adev);
1872         amdgpu_dm_fini(adev);
1873         return 0;
1874 }
1875
1876
1877 static int dm_enable_vblank(struct drm_crtc *crtc);
1878 static void dm_disable_vblank(struct drm_crtc *crtc);
1879
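/*
 * Enable or disable the pageflip and vblank interrupts of every CRTC that
 * still has planes in the given DC state; used to quiesce interrupts before
 * a GPU reset and to restore them afterwards.
 */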
1880 static void dm_gpureset_toggle_interrupts(struct amdgpu_device *adev,
1881                                  struct dc_state *state, bool enable)
1882 {
1883         enum dc_irq_source irq_source;
1884         struct amdgpu_crtc *acrtc;
1885         int rc = -EBUSY;
1886         int i = 0;
1887
1888         for (i = 0; i < state->stream_count; i++) {
1889                 acrtc = get_crtc_by_otg_inst(
1890                                 adev, state->stream_status[i].primary_otg_inst);
1891
1892                 if (acrtc && state->stream_status[i].plane_count != 0) {
1893                         irq_source = IRQ_TYPE_PFLIP + acrtc->otg_inst;
1894                         rc = dc_interrupt_set(adev->dm.dc, irq_source, enable) ? 0 : -EBUSY;
                        DRM_DEBUG_VBL("crtc %d - pflip irq %sabling: r=%d\n",
                                      acrtc->crtc_id, enable ? "en" : "dis", rc);
1897                         if (rc)
1898                                 DRM_WARN("Failed to %s pflip interrupts\n",
1899                                          enable ? "enable" : "disable");
1900
1901                         if (enable) {
1902                                 rc = dm_enable_vblank(&acrtc->base);
1903                                 if (rc)
1904                                         DRM_WARN("Failed to enable vblank interrupts\n");
1905                         } else {
1906                                 dm_disable_vblank(&acrtc->base);
1907                         }
1908
1909                 }
1910         }
}
1913
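/*
 * Commit a copy of the current DC state with every stream (and its planes)
 * removed, bringing the display hardware to an idle state before a GPU
 * reset.
 */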
1914 static enum dc_status amdgpu_dm_commit_zero_streams(struct dc *dc)
1915 {
1916         struct dc_state *context = NULL;
1917         enum dc_status res = DC_ERROR_UNEXPECTED;
1918         int i;
1919         struct dc_stream_state *del_streams[MAX_PIPES];
1920         int del_streams_count = 0;
1921
1922         memset(del_streams, 0, sizeof(del_streams));
1923
1924         context = dc_create_state(dc);
1925         if (context == NULL)
1926                 goto context_alloc_fail;
1927
1928         dc_resource_state_copy_construct_current(dc, context);
1929
1930         /* First remove from context all streams */
1931         for (i = 0; i < context->stream_count; i++) {
1932                 struct dc_stream_state *stream = context->streams[i];
1933
1934                 del_streams[del_streams_count++] = stream;
1935         }
1936
1937         /* Remove all planes for removed streams and then remove the streams */
1938         for (i = 0; i < del_streams_count; i++) {
1939                 if (!dc_rem_all_planes_for_stream(dc, del_streams[i], context)) {
1940                         res = DC_FAIL_DETACH_SURFACES;
1941                         goto fail;
1942                 }
1943
1944                 res = dc_remove_stream_from_ctx(dc, context, del_streams[i]);
1945                 if (res != DC_OK)
1946                         goto fail;
1947         }
1948
        res = dc_validate_global_state(dc, context, false);
1951
1952         if (res != DC_OK) {
                DRM_ERROR("%s: resource validation failed, dc_status: %d\n", __func__, res);
1954                 goto fail;
1955         }
1956
1957         res = dc_commit_state(dc, context);
1958
1959 fail:
1960         dc_release_state(context);
1961
1962 context_alloc_fail:
1963         return res;
1964 }
1965
1966 static int dm_suspend(void *handle)
1967 {
1968         struct amdgpu_device *adev = handle;
1969         struct amdgpu_display_manager *dm = &adev->dm;
1970         int ret = 0;
1971
1972         if (amdgpu_in_reset(adev)) {
1973                 mutex_lock(&dm->dc_lock);
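                /* Intentionally held across the reset; dm_resume() releases it. */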
1974
1975 #if defined(CONFIG_DRM_AMD_DC_DCN)
1976                 dc_allow_idle_optimizations(adev->dm.dc, false);
1977 #endif
1978
1979                 dm->cached_dc_state = dc_copy_state(dm->dc->current_state);
1980
1981                 dm_gpureset_toggle_interrupts(adev, dm->cached_dc_state, false);
1982
1983                 amdgpu_dm_commit_zero_streams(dm->dc);
1984
1985                 amdgpu_dm_irq_suspend(adev);
1986
1987                 return ret;
1988         }
1989
1990 #ifdef CONFIG_DRM_AMD_SECURE_DISPLAY
1991         amdgpu_dm_crtc_secure_display_suspend(adev);
1992 #endif
1993         WARN_ON(adev->dm.cached_state);
1994         adev->dm.cached_state = drm_atomic_helper_suspend(adev_to_drm(adev));
1995
1996         s3_handle_mst(adev_to_drm(adev), true);
1997
1998         amdgpu_dm_irq_suspend(adev);
1999
        dc_set_power_state(dm->dc, DC_ACPI_CM_POWER_STATE_D3);
2002
2003         return 0;
2004 }
2005
2006 static struct amdgpu_dm_connector *
2007 amdgpu_dm_find_first_crtc_matching_connector(struct drm_atomic_state *state,
2008                                              struct drm_crtc *crtc)
2009 {
2010         uint32_t i;
2011         struct drm_connector_state *new_con_state;
2012         struct drm_connector *connector;
2013         struct drm_crtc *crtc_from_state;
2014
2015         for_each_new_connector_in_state(state, connector, new_con_state, i) {
2016                 crtc_from_state = new_con_state->crtc;
2017
2018                 if (crtc_from_state == crtc)
2019                         return to_amdgpu_dm_connector(connector);
2020         }
2021
2022         return NULL;
2023 }
2024
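/*
 * Fake a successful link detection for a connector that is forced on but has
 * no physically detected sink: create an emulated sink matching the
 * connector's signal type and try to read a local EDID into it.
 */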
2025 static void emulated_link_detect(struct dc_link *link)
2026 {
2027         struct dc_sink_init_data sink_init_data = { 0 };
2028         struct display_sink_capability sink_caps = { 0 };
2029         enum dc_edid_status edid_status;
2030         struct dc_context *dc_ctx = link->ctx;
2031         struct dc_sink *sink = NULL;
2032         struct dc_sink *prev_sink = NULL;
2033
2034         link->type = dc_connection_none;
2035         prev_sink = link->local_sink;
2036
2037         if (prev_sink)
2038                 dc_sink_release(prev_sink);
2039
2040         switch (link->connector_signal) {
2041         case SIGNAL_TYPE_HDMI_TYPE_A: {
2042                 sink_caps.transaction_type = DDC_TRANSACTION_TYPE_I2C;
2043                 sink_caps.signal = SIGNAL_TYPE_HDMI_TYPE_A;
2044                 break;
2045         }
2046
2047         case SIGNAL_TYPE_DVI_SINGLE_LINK: {
2048                 sink_caps.transaction_type = DDC_TRANSACTION_TYPE_I2C;
2049                 sink_caps.signal = SIGNAL_TYPE_DVI_SINGLE_LINK;
2050                 break;
2051         }
2052
2053         case SIGNAL_TYPE_DVI_DUAL_LINK: {
2054                 sink_caps.transaction_type = DDC_TRANSACTION_TYPE_I2C;
2055                 sink_caps.signal = SIGNAL_TYPE_DVI_DUAL_LINK;
2056                 break;
2057         }
2058
2059         case SIGNAL_TYPE_LVDS: {
2060                 sink_caps.transaction_type = DDC_TRANSACTION_TYPE_I2C;
2061                 sink_caps.signal = SIGNAL_TYPE_LVDS;
2062                 break;
2063         }
2064
2065         case SIGNAL_TYPE_EDP: {
2066                 sink_caps.transaction_type =
2067                         DDC_TRANSACTION_TYPE_I2C_OVER_AUX;
2068                 sink_caps.signal = SIGNAL_TYPE_EDP;
2069                 break;
2070         }
2071
2072         case SIGNAL_TYPE_DISPLAY_PORT: {
2073                 sink_caps.transaction_type =
2074                         DDC_TRANSACTION_TYPE_I2C_OVER_AUX;
2075                 sink_caps.signal = SIGNAL_TYPE_VIRTUAL;
2076                 break;
2077         }
2078
2079         default:
2080                 DC_ERROR("Invalid connector type! signal:%d\n",
2081                         link->connector_signal);
2082                 return;
2083         }
2084
2085         sink_init_data.link = link;
2086         sink_init_data.sink_signal = sink_caps.signal;
2087
2088         sink = dc_sink_create(&sink_init_data);
2089         if (!sink) {
2090                 DC_ERROR("Failed to create sink!\n");
2091                 return;
2092         }
2093
2094         /* dc_sink_create returns a new reference */
2095         link->local_sink = sink;
2096
2097         edid_status = dm_helpers_read_local_edid(
2098                         link->ctx,
2099                         link,
2100                         sink);
2101
2102         if (edid_status != EDID_OK)
2103                 DC_ERROR("Failed to read EDID");
2104
2105 }
2106
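/*
 * Replay the cached DC state after a GPU reset: force a full update on every
 * plane of every cached stream and commit the updates to the hardware.
 */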
2107 static void dm_gpureset_commit_state(struct dc_state *dc_state,
2108                                      struct amdgpu_display_manager *dm)
2109 {
2110         struct {
2111                 struct dc_surface_update surface_updates[MAX_SURFACES];
2112                 struct dc_plane_info plane_infos[MAX_SURFACES];
2113                 struct dc_scaling_info scaling_infos[MAX_SURFACES];
2114                 struct dc_flip_addrs flip_addrs[MAX_SURFACES];
2115                 struct dc_stream_update stream_update;
        } *bundle;
2117         int k, m;
2118
2119         bundle = kzalloc(sizeof(*bundle), GFP_KERNEL);
2120
2121         if (!bundle) {
2122                 dm_error("Failed to allocate update bundle\n");
2123                 goto cleanup;
2124         }
2125
2126         for (k = 0; k < dc_state->stream_count; k++) {
2127                 bundle->stream_update.stream = dc_state->streams[k];
2128
                /* stream_status[] is parallel to streams[]; index it per stream */
                for (m = 0; m < dc_state->stream_status[k].plane_count; m++) {
                        bundle->surface_updates[m].surface =
                                dc_state->stream_status[k].plane_states[m];
                        bundle->surface_updates[m].surface->force_full_update =
                                true;
                }
                dc_commit_updates_for_stream(
                        dm->dc, bundle->surface_updates,
                        dc_state->stream_status[k].plane_count,
                        dc_state->streams[k], &bundle->stream_update, dc_state);
2139         }
2140
2141 cleanup:
2142         kfree(bundle);
}
2146
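/*
 * Push a dpms_off update for the stream currently driven by @link, turning
 * the output off without a full modeset.
 */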
2147 static void dm_set_dpms_off(struct dc_link *link)
2148 {
2149         struct dc_stream_state *stream_state;
2150         struct amdgpu_dm_connector *aconnector = link->priv;
2151         struct amdgpu_device *adev = drm_to_adev(aconnector->base.dev);
2152         struct dc_stream_update stream_update;
2153         bool dpms_off = true;
2154
2155         memset(&stream_update, 0, sizeof(stream_update));
2156         stream_update.dpms_off = &dpms_off;
2157
2158         mutex_lock(&adev->dm.dc_lock);
2159         stream_state = dc_stream_find_from_link(link);
2160
2161         if (stream_state == NULL) {
2162                 DRM_DEBUG_DRIVER("Error finding stream state associated with link!\n");
2163                 mutex_unlock(&adev->dm.dc_lock);
2164                 return;
2165         }
2166
2167         stream_update.stream = stream_state;
2168         dc_commit_updates_for_stream(stream_state->ctx->dc, NULL, 0,
2169                                      stream_state, &stream_update,
2170                                      stream_state->ctx->dc->current_state);
2171         mutex_unlock(&adev->dm.dc_lock);
2172 }
2173
2174 static int dm_resume(void *handle)
2175 {
2176         struct amdgpu_device *adev = handle;
2177         struct drm_device *ddev = adev_to_drm(adev);
2178         struct amdgpu_display_manager *dm = &adev->dm;
2179         struct amdgpu_dm_connector *aconnector;
2180         struct drm_connector *connector;
2181         struct drm_connector_list_iter iter;
2182         struct drm_crtc *crtc;
2183         struct drm_crtc_state *new_crtc_state;
2184         struct dm_crtc_state *dm_new_crtc_state;
2185         struct drm_plane *plane;
2186         struct drm_plane_state *new_plane_state;
2187         struct dm_plane_state *dm_new_plane_state;
2188         struct dm_atomic_state *dm_state = to_dm_atomic_state(dm->atomic_obj.state);
2189         enum dc_connection_type new_connection_type = dc_connection_none;
2190         struct dc_state *dc_state;
2191         int i, r, j;
2192
2193         if (amdgpu_in_reset(adev)) {
2194                 dc_state = dm->cached_dc_state;
2195
2196                 r = dm_dmub_hw_init(adev);
2197                 if (r)
2198                         DRM_ERROR("DMUB interface failed to initialize: status=%d\n", r);
2199
2200                 dc_set_power_state(dm->dc, DC_ACPI_CM_POWER_STATE_D0);
2201                 dc_resume(dm->dc);
2202
2203                 amdgpu_dm_irq_resume_early(adev);
2204
2205                 for (i = 0; i < dc_state->stream_count; i++) {
2206                         dc_state->streams[i]->mode_changed = true;
                        for (j = 0; j < dc_state->stream_status[i].plane_count; j++) {
                                dc_state->stream_status[i].plane_states[j]->update_flags.raw
                                        = 0xffffffff;
                        }
2211                 }
2212
2213                 WARN_ON(!dc_commit_state(dm->dc, dc_state));
2214
2215                 dm_gpureset_commit_state(dm->cached_dc_state, dm);
2216
2217                 dm_gpureset_toggle_interrupts(adev, dm->cached_dc_state, true);
2218
2219                 dc_release_state(dm->cached_dc_state);
2220                 dm->cached_dc_state = NULL;
2221
2222                 amdgpu_dm_irq_resume_late(adev);
2223
2224                 mutex_unlock(&dm->dc_lock);
2225
2226                 return 0;
2227         }
2228         /* Recreate dc_state - DC invalidates it when setting power state to S3. */
2229         dc_release_state(dm_state->context);
2230         dm_state->context = dc_create_state(dm->dc);
2231         /* TODO: Remove dc_state->dccg, use dc->dccg directly. */
2232         dc_resource_state_construct(dm->dc, dm_state->context);
2233
2234         /* Before powering on DC we need to re-initialize DMUB. */
2235         r = dm_dmub_hw_init(adev);
2236         if (r)
2237                 DRM_ERROR("DMUB interface failed to initialize: status=%d\n", r);
2238
2239         /* power on hardware */
2240         dc_set_power_state(dm->dc, DC_ACPI_CM_POWER_STATE_D0);
2241
2242         /* program HPD filter */
2243         dc_resume(dm->dc);
2244
2245         /*
2246          * early enable HPD Rx IRQ, should be done before set mode as short
2247          * pulse interrupts are used for MST
2248          */
2249         amdgpu_dm_irq_resume_early(adev);
2250
        /* On resume we need to rewrite the MSTM control bits to enable MST */
2252         s3_handle_mst(ddev, false);
2253
        /* Do detection */
2255         drm_connector_list_iter_begin(ddev, &iter);
2256         drm_for_each_connector_iter(connector, &iter) {
2257                 aconnector = to_amdgpu_dm_connector(connector);
2258
                /*
                 * This is the case when traversing through already created
                 * MST connectors; they should be skipped.
                 */
2263                 if (aconnector->mst_port)
2264                         continue;
2265
2266                 mutex_lock(&aconnector->hpd_lock);
2267                 if (!dc_link_detect_sink(aconnector->dc_link, &new_connection_type))
2268                         DRM_ERROR("KMS: Failed to detect connector\n");
2269
2270                 if (aconnector->base.force && new_connection_type == dc_connection_none)
2271                         emulated_link_detect(aconnector->dc_link);
2272                 else
2273                         dc_link_detect(aconnector->dc_link, DETECT_REASON_HPD);
2274
2275                 if (aconnector->fake_enable && aconnector->dc_link->local_sink)
2276                         aconnector->fake_enable = false;
2277
2278                 if (aconnector->dc_sink)
2279                         dc_sink_release(aconnector->dc_sink);
2280                 aconnector->dc_sink = NULL;
2281                 amdgpu_dm_update_connector_after_detect(aconnector);
2282                 mutex_unlock(&aconnector->hpd_lock);
2283         }
2284         drm_connector_list_iter_end(&iter);
2285
2286         /* Force mode set in atomic commit */
2287         for_each_new_crtc_in_state(dm->cached_state, crtc, new_crtc_state, i)
2288                 new_crtc_state->active_changed = true;
2289
2290         /*
2291          * atomic_check is expected to create the dc states. We need to release
2292          * them here, since they were duplicated as part of the suspend
2293          * procedure.
2294          */
2295         for_each_new_crtc_in_state(dm->cached_state, crtc, new_crtc_state, i) {
2296                 dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
2297                 if (dm_new_crtc_state->stream) {
2298                         WARN_ON(kref_read(&dm_new_crtc_state->stream->refcount) > 1);
2299                         dc_stream_release(dm_new_crtc_state->stream);
2300                         dm_new_crtc_state->stream = NULL;
2301                 }
2302         }
2303
2304         for_each_new_plane_in_state(dm->cached_state, plane, new_plane_state, i) {
2305                 dm_new_plane_state = to_dm_plane_state(new_plane_state);
2306                 if (dm_new_plane_state->dc_state) {
2307                         WARN_ON(kref_read(&dm_new_plane_state->dc_state->refcount) > 1);
2308                         dc_plane_state_release(dm_new_plane_state->dc_state);
2309                         dm_new_plane_state->dc_state = NULL;
2310                 }
2311         }
2312
2313         drm_atomic_helper_resume(ddev, dm->cached_state);
2314
2315         dm->cached_state = NULL;
2316
2317 #ifdef CONFIG_DRM_AMD_SECURE_DISPLAY
2318         amdgpu_dm_crtc_secure_display_resume(adev);
2319 #endif
2320
2321         amdgpu_dm_irq_resume_late(adev);
2322
2323         amdgpu_dm_smu_write_watermarks_table(adev);
2324
2325         return 0;
2326 }
2327
2328 /**
2329  * DOC: DM Lifecycle
2330  *
2331  * DM (and consequently DC) is registered in the amdgpu base driver as a IP
2332  * block. When CONFIG_DRM_AMD_DC is enabled, the DM device IP block is added to
2333  * the base driver's device list to be initialized and torn down accordingly.
2334  *
2335  * The functions to do so are provided as hooks in &struct amd_ip_funcs.
2336  */
2337
2338 static const struct amd_ip_funcs amdgpu_dm_funcs = {
2339         .name = "dm",
2340         .early_init = dm_early_init,
2341         .late_init = dm_late_init,
2342         .sw_init = dm_sw_init,
2343         .sw_fini = dm_sw_fini,
2344         .hw_init = dm_hw_init,
2345         .hw_fini = dm_hw_fini,
2346         .suspend = dm_suspend,
2347         .resume = dm_resume,
2348         .is_idle = dm_is_idle,
2349         .wait_for_idle = dm_wait_for_idle,
2350         .check_soft_reset = dm_check_soft_reset,
2351         .soft_reset = dm_soft_reset,
2352         .set_clockgating_state = dm_set_clockgating_state,
2353         .set_powergating_state = dm_set_powergating_state,
2354 };
2355
2356 const struct amdgpu_ip_block_version dm_ip_block =
2357 {
2358         .type = AMD_IP_BLOCK_TYPE_DCE,
2359         .major = 1,
2360         .minor = 0,
2361         .rev = 0,
2362         .funcs = &amdgpu_dm_funcs,
2363 };
2364
2365
2366 /**
2367  * DOC: atomic
2368  *
2369  * *WIP*
2370  */
2371
2372 static const struct drm_mode_config_funcs amdgpu_dm_mode_funcs = {
2373         .fb_create = amdgpu_display_user_framebuffer_create,
2374         .get_format_info = amd_get_format_info,
2375         .output_poll_changed = drm_fb_helper_output_poll_changed,
2376         .atomic_check = amdgpu_dm_atomic_check,
2377         .atomic_commit = drm_atomic_helper_commit,
2378 };
2379
2380 static struct drm_mode_config_helper_funcs amdgpu_dm_mode_config_helperfuncs = {
2381         .atomic_commit_tail = amdgpu_dm_atomic_commit_tail
2382 };
2383
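/*
 * Refresh the eDP backlight capabilities for this connector: whether AUX
 * backlight control is available, and the min/max input signal derived from
 * the sink's reported max_cll/min_cll.
 */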
2384 static void update_connector_ext_caps(struct amdgpu_dm_connector *aconnector)
2385 {
2386         u32 max_cll, min_cll, max, min, q, r;
2387         struct amdgpu_dm_backlight_caps *caps;
2388         struct amdgpu_display_manager *dm;
2389         struct drm_connector *conn_base;
2390         struct amdgpu_device *adev;
2391         struct dc_link *link = NULL;
2392         static const u8 pre_computed_values[] = {
2393                 50, 51, 52, 53, 55, 56, 57, 58, 59, 61, 62, 63, 65, 66, 68, 69,
2394                 71, 72, 74, 75, 77, 79, 81, 82, 84, 86, 88, 90, 92, 94, 96, 98};
2395
2396         if (!aconnector || !aconnector->dc_link)
2397                 return;
2398
2399         link = aconnector->dc_link;
2400         if (link->connector_signal != SIGNAL_TYPE_EDP)
2401                 return;
2402
2403         conn_base = &aconnector->base;
2404         adev = drm_to_adev(conn_base->dev);
2405         dm = &adev->dm;
2406         caps = &dm->backlight_caps;
2407         caps->ext_caps = &aconnector->dc_link->dpcd_sink_ext_caps;
2408         caps->aux_support = false;
2409         max_cll = conn_base->hdr_sink_metadata.hdmi_type1.max_cll;
2410         min_cll = conn_base->hdr_sink_metadata.hdmi_type1.min_cll;
2411
2412         if (caps->ext_caps->bits.oled == 1 ||
2413             caps->ext_caps->bits.sdr_aux_backlight_control == 1 ||
2414             caps->ext_caps->bits.hdr_aux_backlight_control == 1)
2415                 caps->aux_support = true;
2416
2417         if (amdgpu_backlight == 0)
2418                 caps->aux_support = false;
2419         else if (amdgpu_backlight == 1)
2420                 caps->aux_support = true;
2421
        /* From the specification (CTA-861-G), for calculating the maximum
         * luminance we need to use:
         *      Luminance = 50*2**(CV/32)
         * where CV is a one-byte value.
         * Calculating this expression would require floating-point precision;
         * to avoid that complexity, we take advantage of the fact that CV is
         * divided by a constant. From Euclid's division algorithm, we know
         * that CV can be written as CV = 32*q + r. Substituting CV in the
         * Luminance expression gives 50*(2**q)*(2**(r/32)), so we only need
         * to pre-compute the values of 50*2**(r/32) for r in 0..31. For
         * pre-computing them we used the following Ruby line:
         *      (0...32).each {|cv| puts (50*2**(cv/32.0)).round}
         * The results of the above expression can be verified against
         * pre_computed_values.
         */
2437         q = max_cll >> 5;
2438         r = max_cll % 32;
2439         max = (1 << q) * pre_computed_values[r];
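        /*
         * Worked example (hypothetical max_cll = 100): q = 100 >> 5 = 3 and
         * r = 100 % 32 = 4, so max = (1 << 3) * pre_computed_values[4] =
         * 8 * 55 = 440, approximating 50 * 2**(100/32) ~= 436.
         */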
2440
        /* min luminance: maxLum * (CV/255)^2 / 100 */
        min = DIV_ROUND_CLOSEST(max * min_cll * min_cll, 255 * 255 * 100);
2444
2445         caps->aux_max_input_signal = max;
2446         caps->aux_min_input_signal = min;
2447 }
2448
2449 void amdgpu_dm_update_connector_after_detect(
2450                 struct amdgpu_dm_connector *aconnector)
2451 {
2452         struct drm_connector *connector = &aconnector->base;
2453         struct drm_device *dev = connector->dev;
2454         struct dc_sink *sink;
2455
2456         /* MST handled by drm_mst framework */
        if (aconnector->mst_mgr.mst_state)
2458                 return;
2459
2460         sink = aconnector->dc_link->local_sink;
2461         if (sink)
2462                 dc_sink_retain(sink);
2463
        /*
         * Edid mgmt connector gets first update only in mode_valid hook and then
         * the connector sink is set to either fake or physical sink depending on
         * link status. Skip if this was already done during boot.
         */
2469         if (aconnector->base.force != DRM_FORCE_UNSPECIFIED
2470                         && aconnector->dc_em_sink) {
2471
                /*
                 * For headless S3 resume, use the emulated sink (dc_em_sink) to
                 * fake a stream, because on resume connector->sink is set to NULL.
                 */
2476                 mutex_lock(&dev->mode_config.mutex);
2477
2478                 if (sink) {
2479                         if (aconnector->dc_sink) {
2480                                 amdgpu_dm_update_freesync_caps(connector, NULL);
                                /*
                                 * The retain and release below bump up the
                                 * sink's refcount because the link no longer
                                 * points to it after disconnect; otherwise,
                                 * the next CRTC-to-connector reshuffle by the
                                 * UMD would trigger an unwanted dc_sink
                                 * release.
                                 */
2487                                 dc_sink_release(aconnector->dc_sink);
2488                         }
2489                         aconnector->dc_sink = sink;
2490                         dc_sink_retain(aconnector->dc_sink);
2491                         amdgpu_dm_update_freesync_caps(connector,
2492                                         aconnector->edid);
2493                 } else {
2494                         amdgpu_dm_update_freesync_caps(connector, NULL);
2495                         if (!aconnector->dc_sink) {
2496                                 aconnector->dc_sink = aconnector->dc_em_sink;
2497                                 dc_sink_retain(aconnector->dc_sink);
2498                         }
2499                 }
2500
2501                 mutex_unlock(&dev->mode_config.mutex);
2502
2503                 if (sink)
2504                         dc_sink_release(sink);
2505                 return;
2506         }
2507
2508         /*
2509          * TODO: temporary guard to look for proper fix
2510          * if this sink is MST sink, we should not do anything
2511          */
2512         if (sink && sink->sink_signal == SIGNAL_TYPE_DISPLAY_PORT_MST) {
2513                 dc_sink_release(sink);
2514                 return;
2515         }
2516
2517         if (aconnector->dc_sink == sink) {
2518                 /*
2519                  * We got a DP short pulse (Link Loss, DP CTS, etc...).
2520                  * Do nothing!!
2521                  */
2522                 DRM_DEBUG_DRIVER("DCHPD: connector_id=%d: dc_sink didn't change.\n",
2523                                 aconnector->connector_id);
2524                 if (sink)
2525                         dc_sink_release(sink);
2526                 return;
2527         }
2528
2529         DRM_DEBUG_DRIVER("DCHPD: connector_id=%d: Old sink=%p New sink=%p\n",
2530                 aconnector->connector_id, aconnector->dc_sink, sink);
2531
2532         mutex_lock(&dev->mode_config.mutex);
2533
2534         /*
2535          * 1. Update status of the drm connector
2536          * 2. Send an event and let userspace tell us what to do
2537          */
2538         if (sink) {
2539                 /*
2540                  * TODO: check if we still need the S3 mode update workaround.
2541                  * If yes, put it here.
2542                  */
2543                 if (aconnector->dc_sink) {
2544                         amdgpu_dm_update_freesync_caps(connector, NULL);
2545                         dc_sink_release(aconnector->dc_sink);
2546                 }
2547
2548                 aconnector->dc_sink = sink;
2549                 dc_sink_retain(aconnector->dc_sink);
2550                 if (sink->dc_edid.length == 0) {
2551                         aconnector->edid = NULL;
2552                         if (aconnector->dc_link->aux_mode) {
2553                                 drm_dp_cec_unset_edid(
2554                                         &aconnector->dm_dp_aux.aux);
2555                         }
2556                 } else {
2557                         aconnector->edid =
2558                                 (struct edid *)sink->dc_edid.raw_edid;
2559
2560                         drm_connector_update_edid_property(connector,
2561                                                            aconnector->edid);
2562                         if (aconnector->dc_link->aux_mode)
2563                                 drm_dp_cec_set_edid(&aconnector->dm_dp_aux.aux,
2564                                                     aconnector->edid);
2565                 }
2566
2567                 amdgpu_dm_update_freesync_caps(connector, aconnector->edid);
2568                 update_connector_ext_caps(aconnector);
2569         } else {
2570                 drm_dp_cec_unset_edid(&aconnector->dm_dp_aux.aux);
2571                 amdgpu_dm_update_freesync_caps(connector, NULL);
2572                 drm_connector_update_edid_property(connector, NULL);
2573                 aconnector->num_modes = 0;
2574                 dc_sink_release(aconnector->dc_sink);
2575                 aconnector->dc_sink = NULL;
2576                 aconnector->edid = NULL;
2577 #ifdef CONFIG_DRM_AMD_DC_HDCP
2578                 /* Set CP to DESIRED if it was ENABLED, so we can re-enable it again on hotplug */
2579                 if (connector->state->content_protection == DRM_MODE_CONTENT_PROTECTION_ENABLED)
2580                         connector->state->content_protection = DRM_MODE_CONTENT_PROTECTION_DESIRED;
2581 #endif
2582         }
2583
2584         mutex_unlock(&dev->mode_config.mutex);
2585
2586         update_subconnector_property(aconnector);
2587
2588         if (sink)
2589                 dc_sink_release(sink);
2590 }
2591
2592 static void handle_hpd_irq(void *param)
2593 {
2594         struct amdgpu_dm_connector *aconnector = (struct amdgpu_dm_connector *)param;
2595         struct drm_connector *connector = &aconnector->base;
2596         struct drm_device *dev = connector->dev;
2597         enum dc_connection_type new_connection_type = dc_connection_none;
2598         struct amdgpu_device *adev = drm_to_adev(dev);
2599 #ifdef CONFIG_DRM_AMD_DC_HDCP
2600         struct dm_connector_state *dm_con_state = to_dm_connector_state(connector->state);
2601 #endif
2602
2603         if (adev->dm.disable_hpd_irq)
2604                 return;
2605
        /*
         * In case of failure or MST there is no need to update the connector
         * status or notify the OS, since (in the MST case) MST does this in
         * its own context.
         */
2610         mutex_lock(&aconnector->hpd_lock);
2611
2612 #ifdef CONFIG_DRM_AMD_DC_HDCP
2613         if (adev->dm.hdcp_workqueue) {
2614                 hdcp_reset_display(adev->dm.hdcp_workqueue, aconnector->dc_link->link_index);
2615                 dm_con_state->update_hdcp = true;
2616         }
2617 #endif
2618         if (aconnector->fake_enable)
2619                 aconnector->fake_enable = false;
2620
2621         if (!dc_link_detect_sink(aconnector->dc_link, &new_connection_type))
2622                 DRM_ERROR("KMS: Failed to detect connector\n");
2623
2624         if (aconnector->base.force && new_connection_type == dc_connection_none) {
2625                 emulated_link_detect(aconnector->dc_link);
2626
                drm_modeset_lock_all(dev);
2629                 dm_restore_drm_connector_state(dev, connector);
2630                 drm_modeset_unlock_all(dev);
2631
2632                 if (aconnector->base.force == DRM_FORCE_UNSPECIFIED)
2633                         drm_kms_helper_hotplug_event(dev);
2634
2635         } else if (dc_link_detect(aconnector->dc_link, DETECT_REASON_HPD)) {
2636                 if (new_connection_type == dc_connection_none &&
2637                     aconnector->dc_link->type == dc_connection_none)
2638                         dm_set_dpms_off(aconnector->dc_link);
2639
2640                 amdgpu_dm_update_connector_after_detect(aconnector);
2641
2642                 drm_modeset_lock_all(dev);
2643                 dm_restore_drm_connector_state(dev, connector);
2644                 drm_modeset_unlock_all(dev);
2645
2646                 if (aconnector->base.force == DRM_FORCE_UNSPECIFIED)
2647                         drm_kms_helper_hotplug_event(dev);
2648         }
2649         mutex_unlock(&aconnector->hpd_lock);
2650
2651 }
2652
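/*
 * Drain the DP sink's ESI (Event Status Indicator) registers: read the IRQ
 * status, let the MST manager handle the event, ACK it back to the sink, and
 * repeat until no new IRQ is pending or max_process_count is reached.
 */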
2653 static void dm_handle_hpd_rx_irq(struct amdgpu_dm_connector *aconnector)
2654 {
2655         uint8_t esi[DP_PSR_ERROR_STATUS - DP_SINK_COUNT_ESI] = { 0 };
2656         uint8_t dret;
2657         bool new_irq_handled = false;
2658         int dpcd_addr;
2659         int dpcd_bytes_to_read;
2660
2661         const int max_process_count = 30;
2662         int process_count = 0;
2663
2664         const struct dc_link_status *link_status = dc_link_get_status(aconnector->dc_link);
2665
2666         if (link_status->dpcd_caps->dpcd_rev.raw < 0x12) {
2667                 dpcd_bytes_to_read = DP_LANE0_1_STATUS - DP_SINK_COUNT;
2668                 /* DPCD 0x200 - 0x201 for downstream IRQ */
2669                 dpcd_addr = DP_SINK_COUNT;
2670         } else {
2671                 dpcd_bytes_to_read = DP_PSR_ERROR_STATUS - DP_SINK_COUNT_ESI;
2672                 /* DPCD 0x2002 - 0x2005 for downstream IRQ */
2673                 dpcd_addr = DP_SINK_COUNT_ESI;
2674         }
2675
2676         dret = drm_dp_dpcd_read(
2677                 &aconnector->dm_dp_aux.aux,
2678                 dpcd_addr,
2679                 esi,
2680                 dpcd_bytes_to_read);
2681
2682         while (dret == dpcd_bytes_to_read &&
2683                 process_count < max_process_count) {
2684                 uint8_t retry;
2685                 dret = 0;
2686
2687                 process_count++;
2688
2689                 DRM_DEBUG_DRIVER("ESI %02x %02x %02x\n", esi[0], esi[1], esi[2]);
2690                 /* handle HPD short pulse irq */
2691                 if (aconnector->mst_mgr.mst_state)
2692                         drm_dp_mst_hpd_irq(
2693                                 &aconnector->mst_mgr,
2694                                 esi,
2695                                 &new_irq_handled);
2696
2697                 if (new_irq_handled) {
                        /* ACK at DPCD to notify the downstream device */
2699                         const int ack_dpcd_bytes_to_write =
2700                                 dpcd_bytes_to_read - 1;
2701
2702                         for (retry = 0; retry < 3; retry++) {
2703                                 uint8_t wret;
2704
2705                                 wret = drm_dp_dpcd_write(
2706                                         &aconnector->dm_dp_aux.aux,
2707                                         dpcd_addr + 1,
2708                                         &esi[1],
2709                                         ack_dpcd_bytes_to_write);
2710                                 if (wret == ack_dpcd_bytes_to_write)
2711                                         break;
2712                         }
2713
2714                         /* check if there is new irq to be handled */
2715                         dret = drm_dp_dpcd_read(
2716                                 &aconnector->dm_dp_aux.aux,
2717                                 dpcd_addr,
2718                                 esi,
2719                                 dpcd_bytes_to_read);
2720
2721                         new_irq_handled = false;
2722                 } else {
2723                         break;
2724                 }
2725         }
2726
2727         if (process_count == max_process_count)
2728                 DRM_DEBUG_DRIVER("Loop exceeded max iterations\n");
2729 }
2730
2731 static void handle_hpd_rx_irq(void *param)
2732 {
2733         struct amdgpu_dm_connector *aconnector = (struct amdgpu_dm_connector *)param;
2734         struct drm_connector *connector = &aconnector->base;
2735         struct drm_device *dev = connector->dev;
2736         struct dc_link *dc_link = aconnector->dc_link;
2737         bool is_mst_root_connector = aconnector->mst_mgr.mst_state;
2738         bool result = false;
2739         enum dc_connection_type new_connection_type = dc_connection_none;
2740         struct amdgpu_device *adev = drm_to_adev(dev);
2741         union hpd_irq_data hpd_irq_data;
2742
2743         memset(&hpd_irq_data, 0, sizeof(hpd_irq_data));
2744
2745         if (adev->dm.disable_hpd_irq)
2746                 return;
2747
        /*
         * TODO: Temporarily take a mutex so the HPD interrupt does not hit a
         * GPIO conflict; once an i2c helper is implemented, this mutex should
         * be retired.
         */
2754         mutex_lock(&aconnector->hpd_lock);
2755
2756         read_hpd_rx_irq_data(dc_link, &hpd_irq_data);
2757
2758         if ((dc_link->cur_link_settings.lane_count != LANE_COUNT_UNKNOWN) ||
2759                 (dc_link->type == dc_connection_mst_branch)) {
2760                 if (hpd_irq_data.bytes.device_service_irq.bits.UP_REQ_MSG_RDY) {
2761                         result = true;
2762                         dm_handle_hpd_rx_irq(aconnector);
2763                         goto out;
2764                 } else if (hpd_irq_data.bytes.device_service_irq.bits.DOWN_REP_MSG_RDY) {
2765                         result = false;
2766                         dm_handle_hpd_rx_irq(aconnector);
2767                         goto out;
2768                 }
2769         }
2770
2771         if (!amdgpu_in_reset(adev)) {
2772                 mutex_lock(&adev->dm.dc_lock);
#ifdef CONFIG_DRM_AMD_DC_HDCP
                result = dc_link_handle_hpd_rx_irq(dc_link, &hpd_irq_data, NULL);
#else
                result = dc_link_handle_hpd_rx_irq(dc_link, NULL, NULL);
#endif
2778                 mutex_unlock(&adev->dm.dc_lock);
2779         }
2780
2781 out:
2782         if (result && !is_mst_root_connector) {
2783                 /* Downstream Port status changed. */
2784                 if (!dc_link_detect_sink(dc_link, &new_connection_type))
2785                         DRM_ERROR("KMS: Failed to detect connector\n");
2786
2787                 if (aconnector->base.force && new_connection_type == dc_connection_none) {
2788                         emulated_link_detect(dc_link);
2789
2790                         if (aconnector->fake_enable)
2791                                 aconnector->fake_enable = false;
2792
2793                         amdgpu_dm_update_connector_after_detect(aconnector);
2794
                        drm_modeset_lock_all(dev);
2797                         dm_restore_drm_connector_state(dev, connector);
2798                         drm_modeset_unlock_all(dev);
2799
2800                         drm_kms_helper_hotplug_event(dev);
2801                 } else if (dc_link_detect(dc_link, DETECT_REASON_HPDRX)) {
2802
2803                         if (aconnector->fake_enable)
2804                                 aconnector->fake_enable = false;
2805
2806                         amdgpu_dm_update_connector_after_detect(aconnector);
2807
2808
2809                         drm_modeset_lock_all(dev);
2810                         dm_restore_drm_connector_state(dev, connector);
2811                         drm_modeset_unlock_all(dev);
2812
2813                         drm_kms_helper_hotplug_event(dev);
2814                 }
2815         }
2816 #ifdef CONFIG_DRM_AMD_DC_HDCP
2817         if (hpd_irq_data.bytes.device_service_irq.bits.CP_IRQ) {
2818                 if (adev->dm.hdcp_workqueue)
2819                         hdcp_handle_cpirq(adev->dm.hdcp_workqueue, aconnector->base.index);
2820         }
2821 #endif
2822
2823         if (dc_link->type != dc_connection_mst_branch)
2824                 drm_dp_cec_irq(&aconnector->dm_dp_aux.aux);
2825
2826         mutex_unlock(&aconnector->hpd_lock);
2827 }
2828
2829 static void register_hpd_handlers(struct amdgpu_device *adev)
2830 {
2831         struct drm_device *dev = adev_to_drm(adev);
2832         struct drm_connector *connector;
2833         struct amdgpu_dm_connector *aconnector;
2834         const struct dc_link *dc_link;
2835         struct dc_interrupt_params int_params = {0};
2836
2837         int_params.requested_polarity = INTERRUPT_POLARITY_DEFAULT;
2838         int_params.current_polarity = INTERRUPT_POLARITY_DEFAULT;
2839
2840         list_for_each_entry(connector,
2841                         &dev->mode_config.connector_list, head) {
2842
2843                 aconnector = to_amdgpu_dm_connector(connector);
2844                 dc_link = aconnector->dc_link;
2845
2846                 if (DC_IRQ_SOURCE_INVALID != dc_link->irq_source_hpd) {
2847                         int_params.int_context = INTERRUPT_LOW_IRQ_CONTEXT;
2848                         int_params.irq_source = dc_link->irq_source_hpd;
2849
2850                         amdgpu_dm_irq_register_interrupt(adev, &int_params,
2851                                         handle_hpd_irq,
2852                                         (void *) aconnector);
2853                 }
2854
2855                 if (DC_IRQ_SOURCE_INVALID != dc_link->irq_source_hpd_rx) {
2856
2857                         /* Also register for DP short pulse (hpd_rx). */
2858                         int_params.int_context = INTERRUPT_LOW_IRQ_CONTEXT;
2859                         int_params.irq_source = dc_link->irq_source_hpd_rx;
2860
2861                         amdgpu_dm_irq_register_interrupt(adev, &int_params,
2862                                         handle_hpd_rx_irq,
2863                                         (void *) aconnector);
2864                 }
2865         }
2866 }
2867
2868 #if defined(CONFIG_DRM_AMD_DC_SI)
2869 /* Register IRQ sources and initialize IRQ callbacks */
2870 static int dce60_register_irq_handlers(struct amdgpu_device *adev)
2871 {
2872         struct dc *dc = adev->dm.dc;
2873         struct common_irq_params *c_irq_params;
2874         struct dc_interrupt_params int_params = {0};
2875         int r;
2876         int i;
2877         unsigned client_id = AMDGPU_IRQ_CLIENTID_LEGACY;
2878
2879         int_params.requested_polarity = INTERRUPT_POLARITY_DEFAULT;
2880         int_params.current_polarity = INTERRUPT_POLARITY_DEFAULT;
2881
2882         /*
2883          * Actions of amdgpu_irq_add_id():
2884          * 1. Register a set() function with base driver.
2885          *    Base driver will call set() function to enable/disable an
2886          *    interrupt in DC hardware.
2887          * 2. Register amdgpu_dm_irq_handler().
2888          *    Base driver will call amdgpu_dm_irq_handler() for ALL interrupts
2889          *    coming from DC hardware.
2890          *    amdgpu_dm_irq_handler() will re-direct the interrupt to DC
2891          *    for acknowledging and handling. */
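/*
 * Illustrative sketch (not compiled): the two-step pattern described
 * above, condensed. src_id, my_handler and my_args are placeholders;
 * the helpers are the same ones used in the registration loops below.
 *
 *	r = amdgpu_irq_add_id(adev, client_id, src_id, &adev->crtc_irq);
 *	if (r)
 *		return r;
 *
 *	int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
 *	int_params.irq_source = dc_interrupt_to_irq_source(dc, src_id, 0);
 *	amdgpu_dm_irq_register_interrupt(adev, &int_params,
 *					 my_handler, my_args);
 */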
2892
2893         /* Use VBLANK interrupt */
2894         for (i = 0; i < adev->mode_info.num_crtc; i++) {
2895                 r = amdgpu_irq_add_id(adev, client_id, i + 1, &adev->crtc_irq);
2896                 if (r) {
2897                         DRM_ERROR("Failed to add crtc irq id!\n");
2898                         return r;
2899                 }
2900
2901                 int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
2902                 int_params.irq_source =
2903                         dc_interrupt_to_irq_source(dc, i + 1, 0);
2904
2905                 c_irq_params = &adev->dm.vblank_params[int_params.irq_source - DC_IRQ_SOURCE_VBLANK1];
2906
2907                 c_irq_params->adev = adev;
2908                 c_irq_params->irq_src = int_params.irq_source;
2909
2910                 amdgpu_dm_irq_register_interrupt(adev, &int_params,
2911                                 dm_crtc_high_irq, c_irq_params);
2912         }
2913
2914         /* Use GRPH_PFLIP interrupt */
2915         for (i = VISLANDS30_IV_SRCID_D1_GRPH_PFLIP;
2916                         i <= VISLANDS30_IV_SRCID_D6_GRPH_PFLIP; i += 2) {
2917                 r = amdgpu_irq_add_id(adev, client_id, i, &adev->pageflip_irq);
2918                 if (r) {
2919                         DRM_ERROR("Failed to add page flip irq id!\n");
2920                         return r;
2921                 }
2922
2923                 int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
2924                 int_params.irq_source =
2925                         dc_interrupt_to_irq_source(dc, i, 0);
2926
2927                 c_irq_params = &adev->dm.pflip_params[int_params.irq_source - DC_IRQ_SOURCE_PFLIP_FIRST];
2928
2929                 c_irq_params->adev = adev;
2930                 c_irq_params->irq_src = int_params.irq_source;
2931
2932                 amdgpu_dm_irq_register_interrupt(adev, &int_params,
2933                                 dm_pflip_high_irq, c_irq_params);
2934
2935         }
2936
2937         /* HPD */
2938         r = amdgpu_irq_add_id(adev, client_id,
2939                         VISLANDS30_IV_SRCID_HOTPLUG_DETECT_A, &adev->hpd_irq);
2940         if (r) {
2941                 DRM_ERROR("Failed to add hpd irq id!\n");
2942                 return r;
2943         }
2944
2945         register_hpd_handlers(adev);
2946
2947         return 0;
2948 }
2949 #endif
2950
2951 /* Register IRQ sources and initialize IRQ callbacks */
2952 static int dce110_register_irq_handlers(struct amdgpu_device *adev)
2953 {
2954         struct dc *dc = adev->dm.dc;
2955         struct common_irq_params *c_irq_params;
2956         struct dc_interrupt_params int_params = {0};
2957         int r;
2958         int i;
2959         unsigned client_id = AMDGPU_IRQ_CLIENTID_LEGACY;
2960
2961         if (adev->asic_type >= CHIP_VEGA10)
2962                 client_id = SOC15_IH_CLIENTID_DCE;
2963
2964         int_params.requested_polarity = INTERRUPT_POLARITY_DEFAULT;
2965         int_params.current_polarity = INTERRUPT_POLARITY_DEFAULT;
2966
2967         /*
2968          * Actions of amdgpu_irq_add_id():
2969          * 1. Register a set() function with base driver.
2970          *    Base driver will call set() function to enable/disable an
2971          *    interrupt in DC hardware.
2972          * 2. Register amdgpu_dm_irq_handler().
2973          *    Base driver will call amdgpu_dm_irq_handler() for ALL interrupts
2974          *    coming from DC hardware.
2975          *    amdgpu_dm_irq_handler() will re-direct the interrupt to DC
2976          *    for acknowledging and handling. */
2977
2978         /* Use VBLANK interrupt */
2979         for (i = VISLANDS30_IV_SRCID_D1_VERTICAL_INTERRUPT0; i <= VISLANDS30_IV_SRCID_D6_VERTICAL_INTERRUPT0; i++) {
2980                 r = amdgpu_irq_add_id(adev, client_id, i, &adev->crtc_irq);
2981                 if (r) {
2982                         DRM_ERROR("Failed to add crtc irq id!\n");
2983                         return r;
2984                 }
2985
2986                 int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
2987                 int_params.irq_source =
2988                         dc_interrupt_to_irq_source(dc, i, 0);
2989
2990                 c_irq_params = &adev->dm.vblank_params[int_params.irq_source - DC_IRQ_SOURCE_VBLANK1];
2991
2992                 c_irq_params->adev = adev;
2993                 c_irq_params->irq_src = int_params.irq_source;
2994
2995                 amdgpu_dm_irq_register_interrupt(adev, &int_params,
2996                                 dm_crtc_high_irq, c_irq_params);
2997         }
2998
2999         /* Use VUPDATE interrupt */
3000         for (i = VISLANDS30_IV_SRCID_D1_V_UPDATE_INT; i <= VISLANDS30_IV_SRCID_D6_V_UPDATE_INT; i += 2) {
3001                 r = amdgpu_irq_add_id(adev, client_id, i, &adev->vupdate_irq);
3002                 if (r) {
3003                         DRM_ERROR("Failed to add vupdate irq id!\n");
3004                         return r;
3005                 }
3006
3007                 int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
3008                 int_params.irq_source =
3009                         dc_interrupt_to_irq_source(dc, i, 0);
3010
3011                 c_irq_params = &adev->dm.vupdate_params[int_params.irq_source - DC_IRQ_SOURCE_VUPDATE1];
3012
3013                 c_irq_params->adev = adev;
3014                 c_irq_params->irq_src = int_params.irq_source;
3015
3016                 amdgpu_dm_irq_register_interrupt(adev, &int_params,
3017                                 dm_vupdate_high_irq, c_irq_params);
3018         }
3019
3020         /* Use GRPH_PFLIP interrupt */
3021         for (i = VISLANDS30_IV_SRCID_D1_GRPH_PFLIP;
3022                         i <= VISLANDS30_IV_SRCID_D6_GRPH_PFLIP; i += 2) {
3023                 r = amdgpu_irq_add_id(adev, client_id, i, &adev->pageflip_irq);
3024                 if (r) {
3025                         DRM_ERROR("Failed to add page flip irq id!\n");
3026                         return r;
3027                 }
3028
3029                 int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
3030                 int_params.irq_source =
3031                         dc_interrupt_to_irq_source(dc, i, 0);
3032
3033                 c_irq_params = &adev->dm.pflip_params[int_params.irq_source - DC_IRQ_SOURCE_PFLIP_FIRST];
3034
3035                 c_irq_params->adev = adev;
3036                 c_irq_params->irq_src = int_params.irq_source;
3037
3038                 amdgpu_dm_irq_register_interrupt(adev, &int_params,
3039                                 dm_pflip_high_irq, c_irq_params);
3040
3041         }
3042
3043         /* HPD */
3044         r = amdgpu_irq_add_id(adev, client_id,
3045                         VISLANDS30_IV_SRCID_HOTPLUG_DETECT_A, &adev->hpd_irq);
3046         if (r) {
3047                 DRM_ERROR("Failed to add hpd irq id!\n");
3048                 return r;
3049         }
3050
3051         register_hpd_handlers(adev);
3052
3053         return 0;
3054 }
3055
3056 #if defined(CONFIG_DRM_AMD_DC_DCN)
3057 /* Register IRQ sources and initialize IRQ callbacks */
3058 static int dcn10_register_irq_handlers(struct amdgpu_device *adev)
3059 {
3060         struct dc *dc = adev->dm.dc;
3061         struct common_irq_params *c_irq_params;
3062         struct dc_interrupt_params int_params = {0};
3063         int r;
3064         int i;
3065 #if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
3066         static const unsigned int vrtl_int_srcid[] = {
3067                 DCN_1_0__SRCID__OTG1_VERTICAL_INTERRUPT0_CONTROL,
3068                 DCN_1_0__SRCID__OTG2_VERTICAL_INTERRUPT0_CONTROL,
3069                 DCN_1_0__SRCID__OTG3_VERTICAL_INTERRUPT0_CONTROL,
3070                 DCN_1_0__SRCID__OTG4_VERTICAL_INTERRUPT0_CONTROL,
3071                 DCN_1_0__SRCID__OTG5_VERTICAL_INTERRUPT0_CONTROL,
3072                 DCN_1_0__SRCID__OTG6_VERTICAL_INTERRUPT0_CONTROL
3073         };
3074 #endif
3075
3076         int_params.requested_polarity = INTERRUPT_POLARITY_DEFAULT;
3077         int_params.current_polarity = INTERRUPT_POLARITY_DEFAULT;
3078
3079         /*
3080          * Actions of amdgpu_irq_add_id():
3081          * 1. Register a set() function with base driver.
3082          *    Base driver will call set() function to enable/disable an
3083          *    interrupt in DC hardware.
3084          * 2. Register amdgpu_dm_irq_handler().
3085          *    Base driver will call amdgpu_dm_irq_handler() for ALL interrupts
3086          *    coming from DC hardware.
3087          *    amdgpu_dm_irq_handler() will re-direct the interrupt to DC
3088          *    for acknowledging and handling.
3089          */
3090
3091         /* Use VSTARTUP interrupt */
3092         for (i = DCN_1_0__SRCID__DC_D1_OTG_VSTARTUP;
3093                         i <= DCN_1_0__SRCID__DC_D1_OTG_VSTARTUP + adev->mode_info.num_crtc - 1;
3094                         i++) {
3095                 r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_DCE, i, &adev->crtc_irq);
3096
3097                 if (r) {
3098                         DRM_ERROR("Failed to add crtc irq id!\n");
3099                         return r;
3100                 }
3101
3102                 int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
3103                 int_params.irq_source =
3104                         dc_interrupt_to_irq_source(dc, i, 0);
3105
3106                 c_irq_params = &adev->dm.vblank_params[int_params.irq_source - DC_IRQ_SOURCE_VBLANK1];
3107
3108                 c_irq_params->adev = adev;
3109                 c_irq_params->irq_src = int_params.irq_source;
3110
3111                 amdgpu_dm_irq_register_interrupt(
3112                         adev, &int_params, dm_crtc_high_irq, c_irq_params);
3113         }
3114
3115         /* Use otg vertical line interrupt */
3116 #if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
3117         for (i = 0; i <= adev->mode_info.num_crtc - 1; i++) {
3118                 r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_DCE,
3119                                 vrtl_int_srcid[i], &adev->vline0_irq);
3120
3121                 if (r) {
3122                         DRM_ERROR("Failed to add vline0 irq id!\n");
3123                         return r;
3124                 }
3125
3126                 int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
3127                 int_params.irq_source =
3128                         dc_interrupt_to_irq_source(dc, vrtl_int_srcid[i], 0);
3129
3130                 if (int_params.irq_source == DC_IRQ_SOURCE_INVALID) {
3131                         DRM_ERROR("Failed to register vline0 irq %d!\n", vrtl_int_srcid[i]);
3132                         break;
3133                 }
3134
3135                 c_irq_params = &adev->dm.vline0_params[int_params.irq_source
3136                                         - DC_IRQ_SOURCE_DC1_VLINE0];
3137
3138                 c_irq_params->adev = adev;
3139                 c_irq_params->irq_src = int_params.irq_source;
3140
3141                 amdgpu_dm_irq_register_interrupt(adev, &int_params,
3142                                 dm_dcn_vertical_interrupt0_high_irq, c_irq_params);
3143         }
3144 #endif
3145
3146         /* Use VUPDATE_NO_LOCK interrupt on DCN, which seems to correspond to
3147          * the regular VUPDATE interrupt on DCE. We want DC_IRQ_SOURCE_VUPDATEx
3148          * to trigger at end of each vblank, regardless of state of the lock,
3149          * matching DCE behaviour.
3150          */
3151         for (i = DCN_1_0__SRCID__OTG0_IHC_V_UPDATE_NO_LOCK_INTERRUPT;
3152              i <= DCN_1_0__SRCID__OTG0_IHC_V_UPDATE_NO_LOCK_INTERRUPT + adev->mode_info.num_crtc - 1;
3153              i++) {
3154                 r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_DCE, i, &adev->vupdate_irq);
3155
3156                 if (r) {
3157                         DRM_ERROR("Failed to add vupdate irq id!\n");
3158                         return r;
3159                 }
3160
3161                 int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
3162                 int_params.irq_source =
3163                         dc_interrupt_to_irq_source(dc, i, 0);
3164
3165                 c_irq_params = &adev->dm.vupdate_params[int_params.irq_source - DC_IRQ_SOURCE_VUPDATE1];
3166
3167                 c_irq_params->adev = adev;
3168                 c_irq_params->irq_src = int_params.irq_source;
3169
3170                 amdgpu_dm_irq_register_interrupt(adev, &int_params,
3171                                 dm_vupdate_high_irq, c_irq_params);
3172         }
3173
3174         /* Use GRPH_PFLIP interrupt */
3175         for (i = DCN_1_0__SRCID__HUBP0_FLIP_INTERRUPT;
3176                         i <= DCN_1_0__SRCID__HUBP0_FLIP_INTERRUPT + adev->mode_info.num_crtc - 1;
3177                         i++) {
3178                 r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_DCE, i, &adev->pageflip_irq);
3179                 if (r) {
3180                         DRM_ERROR("Failed to add page flip irq id!\n");
3181                         return r;
3182                 }
3183
3184                 int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
3185                 int_params.irq_source =
3186                         dc_interrupt_to_irq_source(dc, i, 0);
3187
3188                 c_irq_params = &adev->dm.pflip_params[int_params.irq_source - DC_IRQ_SOURCE_PFLIP_FIRST];
3189
3190                 c_irq_params->adev = adev;
3191                 c_irq_params->irq_src = int_params.irq_source;
3192
3193                 amdgpu_dm_irq_register_interrupt(adev, &int_params,
3194                                 dm_pflip_high_irq, c_irq_params);
3195
3196         }
3197
3198         /* HPD */
3199         r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_DCE, DCN_1_0__SRCID__DC_HPD1_INT,
3200                         &adev->hpd_irq);
3201         if (r) {
3202                 DRM_ERROR("Failed to add hpd irq id!\n");
3203                 return r;
3204         }
3205
3206         register_hpd_handlers(adev);
3207
3208         return 0;
3209 }
3210 /* Register Outbox IRQ sources and initialize IRQ callbacks */
3211 static int register_outbox_irq_handlers(struct amdgpu_device *adev)
3212 {
3213         struct dc *dc = adev->dm.dc;
3214         struct common_irq_params *c_irq_params;
3215         struct dc_interrupt_params int_params = {0};
3216         int r, i;
3217
3218         int_params.requested_polarity = INTERRUPT_POLARITY_DEFAULT;
3219         int_params.current_polarity = INTERRUPT_POLARITY_DEFAULT;
3220
3221         r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_DCE, DCN_1_0__SRCID__DMCUB_OUTBOX_LOW_PRIORITY_READY_INT,
3222                         &adev->dmub_outbox_irq);
3223         if (r) {
3224                 DRM_ERROR("Failed to add outbox irq id!\n");
3225                 return r;
3226         }
3227
3228         if (dc->ctx->dmub_srv) {
3229                 i = DCN_1_0__SRCID__DMCUB_OUTBOX_LOW_PRIORITY_READY_INT;
3230                 int_params.int_context = INTERRUPT_LOW_IRQ_CONTEXT;
3231                 int_params.irq_source =
3232                 dc_interrupt_to_irq_source(dc, i, 0);
3233
3234                 c_irq_params = &adev->dm.dmub_outbox_params[0];
3235
3236                 c_irq_params->adev = adev;
3237                 c_irq_params->irq_src = int_params.irq_source;
3238
3239                 amdgpu_dm_irq_register_interrupt(adev, &int_params,
3240                                 dm_dmub_outbox1_low_irq, c_irq_params);
3241         }
3242
3243         return 0;
3244 }
3245 #endif
3246
3247 /*
3248  * Acquires the lock for the atomic state object and returns
3249  * the new atomic state.
3250  *
3251  * This should only be called during atomic check.
3252  */
3253 static int dm_atomic_get_state(struct drm_atomic_state *state,
3254                                struct dm_atomic_state **dm_state)
3255 {
3256         struct drm_device *dev = state->dev;
3257         struct amdgpu_device *adev = drm_to_adev(dev);
3258         struct amdgpu_display_manager *dm = &adev->dm;
3259         struct drm_private_state *priv_state;
3260
3261         if (*dm_state)
3262                 return 0;
3263
3264         priv_state = drm_atomic_get_private_obj_state(state, &dm->atomic_obj);
3265         if (IS_ERR(priv_state))
3266                 return PTR_ERR(priv_state);
3267
3268         *dm_state = to_dm_atomic_state(priv_state);
3269
3270         return 0;
3271 }
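/*
 * Illustrative sketch (not compiled): typical use from an atomic-check
 * path. The caller keeps *dm_state NULL so the private-object state
 * (and its lock) is only acquired on first use.
 *
 *	struct dm_atomic_state *dm_state = NULL;
 *	int ret = dm_atomic_get_state(state, &dm_state);
 *
 *	if (ret)
 *		return ret;
 *	// dm_state->context may now be modified for this commit
 */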
3272
3273 static struct dm_atomic_state *
3274 dm_atomic_get_new_state(struct drm_atomic_state *state)
3275 {
3276         struct drm_device *dev = state->dev;
3277         struct amdgpu_device *adev = drm_to_adev(dev);
3278         struct amdgpu_display_manager *dm = &adev->dm;
3279         struct drm_private_obj *obj;
3280         struct drm_private_state *new_obj_state;
3281         int i;
3282
3283         for_each_new_private_obj_in_state(state, obj, new_obj_state, i) {
3284                 if (obj->funcs == dm->atomic_obj.funcs)
3285                         return to_dm_atomic_state(new_obj_state);
3286         }
3287
3288         return NULL;
3289 }
3290
3291 static struct drm_private_state *
3292 dm_atomic_duplicate_state(struct drm_private_obj *obj)
3293 {
3294         struct dm_atomic_state *old_state, *new_state;
3295
3296         new_state = kzalloc(sizeof(*new_state), GFP_KERNEL);
3297         if (!new_state)
3298                 return NULL;
3299
3300         __drm_atomic_helper_private_obj_duplicate_state(obj, &new_state->base);
3301
3302         old_state = to_dm_atomic_state(obj->state);
3303
3304         if (old_state && old_state->context)
3305                 new_state->context = dc_copy_state(old_state->context);
3306
3307         if (!new_state->context) {
3308                 kfree(new_state);
3309                 return NULL;
3310         }
3311
3312         return &new_state->base;
3313 }
3314
3315 static void dm_atomic_destroy_state(struct drm_private_obj *obj,
3316                                     struct drm_private_state *state)
3317 {
3318         struct dm_atomic_state *dm_state = to_dm_atomic_state(state);
3319
3320         if (dm_state && dm_state->context)
3321                 dc_release_state(dm_state->context);
3322
3323         kfree(dm_state);
3324 }
3325
3326 static struct drm_private_state_funcs dm_atomic_state_funcs = {
3327         .atomic_duplicate_state = dm_atomic_duplicate_state,
3328         .atomic_destroy_state = dm_atomic_destroy_state,
3329 };
3330
3331 static int amdgpu_dm_mode_config_init(struct amdgpu_device *adev)
3332 {
3333         struct dm_atomic_state *state;
3334         int r;
3335
3336         adev->mode_info.mode_config_initialized = true;
3337
3338         adev_to_drm(adev)->mode_config.funcs = (void *)&amdgpu_dm_mode_funcs;
3339         adev_to_drm(adev)->mode_config.helper_private = &amdgpu_dm_mode_config_helperfuncs;
3340
3341         adev_to_drm(adev)->mode_config.max_width = 16384;
3342         adev_to_drm(adev)->mode_config.max_height = 16384;
3343
3344         adev_to_drm(adev)->mode_config.preferred_depth = 24;
3345         adev_to_drm(adev)->mode_config.prefer_shadow = 1;
3346         /* indicates support for immediate flip */
3347         adev_to_drm(adev)->mode_config.async_page_flip = true;
3348
3349         adev_to_drm(adev)->mode_config.fb_base = adev->gmc.aper_base;
3350
3351         state = kzalloc(sizeof(*state), GFP_KERNEL);
3352         if (!state)
3353                 return -ENOMEM;
3354
3355         state->context = dc_create_state(adev->dm.dc);
3356         if (!state->context) {
3357                 kfree(state);
3358                 return -ENOMEM;
3359         }
3360
3361         dc_resource_state_copy_construct_current(adev->dm.dc, state->context);
3362
3363         drm_atomic_private_obj_init(adev_to_drm(adev),
3364                                     &adev->dm.atomic_obj,
3365                                     &state->base,
3366                                     &dm_atomic_state_funcs);
3367
3368         r = amdgpu_display_modeset_create_props(adev);
3369         if (r) {
3370                 dc_release_state(state->context);
3371                 kfree(state);
3372                 return r;
3373         }
3374
3375         r = amdgpu_dm_audio_init(adev);
3376         if (r) {
3377                 dc_release_state(state->context);
3378                 kfree(state);
3379                 return r;
3380         }
3381
3382         return 0;
3383 }
3384
3385 #define AMDGPU_DM_DEFAULT_MIN_BACKLIGHT 12
3386 #define AMDGPU_DM_DEFAULT_MAX_BACKLIGHT 255
3387 #define AUX_BL_DEFAULT_TRANSITION_TIME_MS 50
3388
3389 #if defined(CONFIG_BACKLIGHT_CLASS_DEVICE) ||\
3390         defined(CONFIG_BACKLIGHT_CLASS_DEVICE_MODULE)
3391
3392 static void amdgpu_dm_update_backlight_caps(struct amdgpu_display_manager *dm)
3393 {
3394 #if defined(CONFIG_ACPI)
3395         struct amdgpu_dm_backlight_caps caps;
3396
3397         memset(&caps, 0, sizeof(caps));
3398
3399         if (dm->backlight_caps.caps_valid)
3400                 return;
3401
3402         amdgpu_acpi_get_backlight_caps(dm->adev, &caps);
3403         if (caps.caps_valid) {
3404                 dm->backlight_caps.caps_valid = true;
3405                 if (caps.aux_support)
3406                         return;
3407                 dm->backlight_caps.min_input_signal = caps.min_input_signal;
3408                 dm->backlight_caps.max_input_signal = caps.max_input_signal;
3409         } else {
3410                 dm->backlight_caps.min_input_signal =
3411                                 AMDGPU_DM_DEFAULT_MIN_BACKLIGHT;
3412                 dm->backlight_caps.max_input_signal =
3413                                 AMDGPU_DM_DEFAULT_MAX_BACKLIGHT;
3414         }
3415 #else
3416         if (dm->backlight_caps.aux_support)
3417                 return;
3418
3419         dm->backlight_caps.min_input_signal = AMDGPU_DM_DEFAULT_MIN_BACKLIGHT;
3420         dm->backlight_caps.max_input_signal = AMDGPU_DM_DEFAULT_MAX_BACKLIGHT;
3421 #endif
3422 }
3423
3424 static int get_brightness_range(const struct amdgpu_dm_backlight_caps *caps,
3425                                 unsigned *min, unsigned *max)
3426 {
3427         if (!caps)
3428                 return 0;
3429
3430         if (caps->aux_support) {
3431                 // Firmware limits are in nits, DC API wants millinits.
3432                 *max = 1000 * caps->aux_max_input_signal;
3433                 *min = 1000 * caps->aux_min_input_signal;
3434         } else {
3435                 // Firmware limits are 8-bit, PWM control is 16-bit.
3436                 *max = 0x101 * caps->max_input_signal;
3437                 *min = 0x101 * caps->min_input_signal;
3438         }
3439         return 1;
3440 }
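/*
 * Worked example for the PWM branch above: 0x101 * 0xFF = 0xFFFF, so
 * the 8-bit firmware limits 0..255 expand exactly onto the 16-bit PWM
 * range 0..65535 with no rounding loss.
 */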
3441
3442 static u32 convert_brightness_from_user(const struct amdgpu_dm_backlight_caps *caps,
3443                                         uint32_t brightness)
3444 {
3445         unsigned min, max;
3446
3447         if (!get_brightness_range(caps, &min, &max))
3448                 return brightness;
3449
3450         // Rescale 0..255 to min..max
3451         return min + DIV_ROUND_CLOSEST((max - min) * brightness,
3452                                        AMDGPU_MAX_BL_LEVEL);
3453 }
3454
3455 static u32 convert_brightness_to_user(const struct amdgpu_dm_backlight_caps *caps,
3456                                       uint32_t brightness)
3457 {
3458         unsigned min, max;
3459
3460         if (!get_brightness_range(caps, &min, &max))
3461                 return brightness;
3462
3463         if (brightness < min)
3464                 return 0;
3465         // Rescale min..max to 0..255
3466         return DIV_ROUND_CLOSEST(AMDGPU_MAX_BL_LEVEL * (brightness - min),
3467                                  max - min);
3468 }
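/*
 * Worked example (PWM path with the 12..255 defaults): min = 0x101 *
 * 12 = 3084, max = 0x101 * 255 = 65535. User brightness 128 maps to
 * 3084 + DIV_ROUND_CLOSEST(62451 * 128, 255) = 34432, and converting
 * 34432 back gives DIV_ROUND_CLOSEST(255 * 31348, 62451) = 128.
 */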
3469
3470 static int amdgpu_dm_backlight_update_status(struct backlight_device *bd)
3471 {
3472         struct amdgpu_display_manager *dm = bl_get_data(bd);
3473         struct amdgpu_dm_backlight_caps caps;
3474         struct dc_link *link[AMDGPU_DM_MAX_NUM_EDP];
3475         u32 brightness;
3476         bool rc = false;
3477         int i;
3478
3479         amdgpu_dm_update_backlight_caps(dm);
3480         caps = dm->backlight_caps;
3481
3482         for (i = 0; i < dm->num_of_edps; i++)
3483                 link[i] = (struct dc_link *)dm->backlight_link[i];
3484
3485         brightness = convert_brightness_from_user(&caps, bd->props.brightness);
3486         // Change brightness based on AUX property
3487         if (caps.aux_support) {
3488                 for (i = 0; i < dm->num_of_edps; i++) {
3489                         rc = dc_link_set_backlight_level_nits(link[i], true, brightness,
3490                                 AUX_BL_DEFAULT_TRANSITION_TIME_MS);
3491                         if (!rc) {
3492                                 DRM_ERROR("DM: Failed to update backlight via AUX on eDP[%d]\n", i);
3493                                 break;
3494                         }
3495                 }
3496         } else {
3497                 for (i = 0; i < dm->num_of_edps; i++) {
3498                         rc = dc_link_set_backlight_level(dm->backlight_link[i], brightness, 0);
3499                         if (!rc) {
3500                                 DRM_ERROR("DM: Failed to update backlight on eDP[%d]\n", i);
3501                                 break;
3502                         }
3503                 }
3504         }
3505
3506         return rc ? 0 : 1;
3507 }
3508
3509 static int amdgpu_dm_backlight_get_brightness(struct backlight_device *bd)
3510 {
3511         struct amdgpu_display_manager *dm = bl_get_data(bd);
3512         struct amdgpu_dm_backlight_caps caps;
3513
3514         amdgpu_dm_update_backlight_caps(dm);
3515         caps = dm->backlight_caps;
3516
3517         if (caps.aux_support) {
3518                 struct dc_link *link = (struct dc_link *)dm->backlight_link[0];
3519                 u32 avg, peak;
3520                 bool rc;
3521
3522                 rc = dc_link_get_backlight_level_nits(link, &avg, &peak);
3523                 if (!rc)
3524                         return bd->props.brightness;
3525                 return convert_brightness_to_user(&caps, avg);
3526         } else {
3527                 int ret = dc_link_get_backlight_level(dm->backlight_link[0]);
3528
3529                 if (ret == DC_ERROR_UNEXPECTED)
3530                         return bd->props.brightness;
3531                 return convert_brightness_to_user(&caps, ret);
3532         }
3533 }
3534
3535 static const struct backlight_ops amdgpu_dm_backlight_ops = {
3536         .options = BL_CORE_SUSPENDRESUME,
3537         .get_brightness = amdgpu_dm_backlight_get_brightness,
3538         .update_status  = amdgpu_dm_backlight_update_status,
3539 };
3540
3541 static void
3542 amdgpu_dm_register_backlight_device(struct amdgpu_display_manager *dm)
3543 {
3544         char bl_name[16];
3545         struct backlight_properties props = { 0 };
3546
3547         amdgpu_dm_update_backlight_caps(dm);
3548
3549         props.max_brightness = AMDGPU_MAX_BL_LEVEL;
3550         props.brightness = AMDGPU_MAX_BL_LEVEL;
3551         props.type = BACKLIGHT_RAW;
3552
3553         snprintf(bl_name, sizeof(bl_name), "amdgpu_bl%d",
3554                  adev_to_drm(dm->adev)->primary->index);
3555
3556         dm->backlight_dev = backlight_device_register(bl_name,
3557                                                       adev_to_drm(dm->adev)->dev,
3558                                                       dm,
3559                                                       &amdgpu_dm_backlight_ops,
3560                                                       &props);
3561
3562         if (IS_ERR(dm->backlight_dev))
3563                 DRM_ERROR("DM: Backlight registration failed!\n");
3564         else
3565                 DRM_DEBUG_DRIVER("DM: Registered Backlight device: %s\n", bl_name);
3566 }
3567
3568 #endif
3569
3570 static int initialize_plane(struct amdgpu_display_manager *dm,
3571                             struct amdgpu_mode_info *mode_info, int plane_id,
3572                             enum drm_plane_type plane_type,
3573                             const struct dc_plane_cap *plane_cap)
3574 {
3575         struct drm_plane *plane;
3576         unsigned long possible_crtcs;
3577         int ret = 0;
3578
3579         plane = kzalloc(sizeof(struct drm_plane), GFP_KERNEL);
3580         if (!plane) {
3581                 DRM_ERROR("KMS: Failed to allocate plane\n");
3582                 return -ENOMEM;
3583         }
3584         plane->type = plane_type;
3585
3586         /*
3587          * HACK: IGT tests expect that the primary plane for a CRTC
3588          * can only have one possible CRTC. Only expose support for
3589          * any CRTC if they're not going to be used as a primary plane
3590          * for a CRTC - like overlay or underlay planes.
3591          */
3592         possible_crtcs = 1 << plane_id;
3593         if (plane_id >= dm->dc->caps.max_streams)
3594                 possible_crtcs = 0xff;
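        /*
         * Example: with max_streams == 4, primary planes 0..3 get
         * possible_crtcs 0x1, 0x2, 0x4 and 0x8 (one CRTC each), while
         * an overlay plane with plane_id >= 4 may attach to any CRTC
         * via the 0xff mask.
         */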
3595
3596         ret = amdgpu_dm_plane_init(dm, plane, possible_crtcs, plane_cap);
3597
3598         if (ret) {
3599                 DRM_ERROR("KMS: Failed to initialize plane\n");
3600                 kfree(plane);
3601                 return ret;
3602         }
3603
3604         if (mode_info)
3605                 mode_info->planes[plane_id] = plane;
3606
3607         return ret;
3608 }
3609
3610
3611 static void register_backlight_device(struct amdgpu_display_manager *dm,
3612                                       struct dc_link *link)
3613 {
3614 #if defined(CONFIG_BACKLIGHT_CLASS_DEVICE) ||\
3615         defined(CONFIG_BACKLIGHT_CLASS_DEVICE_MODULE)
3616
3617         if ((link->connector_signal & (SIGNAL_TYPE_EDP | SIGNAL_TYPE_LVDS)) &&
3618             link->type != dc_connection_none) {
3619                 /*
3620                  * Even if registration fails, we should continue with
3621                  * DM initialization, because not having backlight control
3622                  * is better than a black screen.
3623                  */
3624                 if (!dm->backlight_dev)
3625                         amdgpu_dm_register_backlight_device(dm);
3626
3627                 if (dm->backlight_dev) {
3628                         dm->backlight_link[dm->num_of_edps] = link;
3629                         dm->num_of_edps++;
3630                 }
3631         }
3632 #endif
3633 }
3634
3635
3636 /*
3637  * In this architecture, the association
3638  * connector -> encoder -> crtc
3639  * is not really required. The crtc and connector will hold the
3640  * display_index as an abstraction to use with the DAL component.
3641  *
3642  * Returns 0 on success
3643  */
3644 static int amdgpu_dm_initialize_drm_device(struct amdgpu_device *adev)
3645 {
3646         struct amdgpu_display_manager *dm = &adev->dm;
3647         int32_t i;
3648         struct amdgpu_dm_connector *aconnector = NULL;
3649         struct amdgpu_encoder *aencoder = NULL;
3650         struct amdgpu_mode_info *mode_info = &adev->mode_info;
3651         uint32_t link_cnt;
3652         int32_t primary_planes;
3653         enum dc_connection_type new_connection_type = dc_connection_none;
3654         const struct dc_plane_cap *plane;
3655
3656         dm->display_indexes_num = dm->dc->caps.max_streams;
3657         /* Update the number of crtcs actually in use */
3658         adev->mode_info.num_crtc = adev->dm.display_indexes_num;
3659
3660         link_cnt = dm->dc->caps.max_links;
3661         if (amdgpu_dm_mode_config_init(dm->adev)) {
3662                 DRM_ERROR("DM: Failed to initialize mode config\n");
3663                 return -EINVAL;
3664         }
3665
3666         /* There is one primary plane per CRTC */
3667         primary_planes = dm->dc->caps.max_streams;
3668         ASSERT(primary_planes <= AMDGPU_MAX_PLANES);
3669
3670         /*
3671          * Initialize primary planes, implicit planes for legacy IOCTLS.
3672          * Order is reversed to match iteration order in atomic check.
3673          */
3674         for (i = (primary_planes - 1); i >= 0; i--) {
3675                 plane = &dm->dc->caps.planes[i];
3676
3677                 if (initialize_plane(dm, mode_info, i,
3678                                      DRM_PLANE_TYPE_PRIMARY, plane)) {
3679                         DRM_ERROR("KMS: Failed to initialize primary plane\n");
3680                         goto fail;
3681                 }
3682         }
3683
3684         /*
3685          * Initialize overlay planes, index starting after primary planes.
3686          * These planes have a higher DRM index than the primary planes since
3687          * they should be considered as having a higher z-order.
3688          * Order is reversed to match iteration order in atomic check.
3689          *
3690          * Only support DCN for now, and only expose one so we don't encourage
3691          * userspace to use up all the pipes.
3692          */
3693         for (i = 0; i < dm->dc->caps.max_planes; ++i) {
3694                 struct dc_plane_cap *plane = &dm->dc->caps.planes[i];
3695
3696                 if (plane->type != DC_PLANE_TYPE_DCN_UNIVERSAL)
3697                         continue;
3698
3699                 if (!plane->blends_with_above || !plane->blends_with_below)
3700                         continue;
3701
3702                 if (!plane->pixel_format_support.argb8888)
3703                         continue;
3704
3705                 if (initialize_plane(dm, NULL, primary_planes + i,
3706                                      DRM_PLANE_TYPE_OVERLAY, plane)) {
3707                         DRM_ERROR("KMS: Failed to initialize overlay plane\n");
3708                         goto fail;
3709                 }
3710
3711                 /* Only create one overlay plane. */
3712                 break;
3713         }
3714
3715         for (i = 0; i < dm->dc->caps.max_streams; i++)
3716                 if (amdgpu_dm_crtc_init(dm, mode_info->planes[i], i)) {
3717                         DRM_ERROR("KMS: Failed to initialize crtc\n");
3718                         goto fail;
3719                 }
3720
3721 #if defined(CONFIG_DRM_AMD_DC_DCN)
3722         /* Use Outbox interrupt */
3723         switch (adev->asic_type) {
3724         case CHIP_SIENNA_CICHLID:
3725         case CHIP_NAVY_FLOUNDER:
3726         case CHIP_RENOIR:
3727                 if (register_outbox_irq_handlers(dm->adev)) {
3728                         DRM_ERROR("DM: Failed to initialize IRQ\n");
3729                         goto fail;
3730                 }
3731                 break;
3732         default:
3733                 DRM_DEBUG_KMS("Unsupported ASIC type for outbox: 0x%X\n", adev->asic_type);
3734         }
3735 #endif
3736
3737         /* loops over all connectors on the board */
3738         for (i = 0; i < link_cnt; i++) {
3739                 struct dc_link *link = NULL;
3740
3741                 if (i > AMDGPU_DM_MAX_DISPLAY_INDEX) {
3742                         DRM_ERROR(
3743                                 "KMS: Cannot support more than %d display indexes\n",
3744                                         AMDGPU_DM_MAX_DISPLAY_INDEX);
3745                         continue;
3746                 }
3747
3748                 aconnector = kzalloc(sizeof(*aconnector), GFP_KERNEL);
3749                 if (!aconnector)
3750                         goto fail;
3751
3752                 aencoder = kzalloc(sizeof(*aencoder), GFP_KERNEL);
3753                 if (!aencoder)
3754                         goto fail;
3755
3756                 if (amdgpu_dm_encoder_init(dm->ddev, aencoder, i)) {
3757                         DRM_ERROR("KMS: Failed to initialize encoder\n");
3758                         goto fail;
3759                 }
3760
3761                 if (amdgpu_dm_connector_init(dm, aconnector, i, aencoder)) {
3762                         DRM_ERROR("KMS: Failed to initialize connector\n");
3763                         goto fail;
3764                 }
3765
3766                 link = dc_get_link_at_index(dm->dc, i);
3767
3768                 if (!dc_link_detect_sink(link, &new_connection_type))
3769                         DRM_ERROR("KMS: Failed to detect connector\n");
3770
3771                 if (aconnector->base.force && new_connection_type == dc_connection_none) {
3772                         emulated_link_detect(link);
3773                         amdgpu_dm_update_connector_after_detect(aconnector);
3774
3775                 } else if (dc_link_detect(link, DETECT_REASON_BOOT)) {
3776                         amdgpu_dm_update_connector_after_detect(aconnector);
3777                         register_backlight_device(dm, link);
3778                         if (amdgpu_dc_feature_mask & DC_PSR_MASK)
3779                                 amdgpu_dm_set_psr_caps(link);
3780                 }
3781
3782
3783         }
3784
3785         /* Software is initialized. Now we can register interrupt handlers. */
3786         switch (adev->asic_type) {
3787 #if defined(CONFIG_DRM_AMD_DC_SI)
3788         case CHIP_TAHITI:
3789         case CHIP_PITCAIRN:
3790         case CHIP_VERDE:
3791         case CHIP_OLAND:
3792                 if (dce60_register_irq_handlers(dm->adev)) {
3793                         DRM_ERROR("DM: Failed to initialize IRQ\n");
3794                         goto fail;
3795                 }
3796                 break;
3797 #endif
3798         case CHIP_BONAIRE:
3799         case CHIP_HAWAII:
3800         case CHIP_KAVERI:
3801         case CHIP_KABINI:
3802         case CHIP_MULLINS:
3803         case CHIP_TONGA:
3804         case CHIP_FIJI:
3805         case CHIP_CARRIZO:
3806         case CHIP_STONEY:
3807         case CHIP_POLARIS11:
3808         case CHIP_POLARIS10:
3809         case CHIP_POLARIS12:
3810         case CHIP_VEGAM:
3811         case CHIP_VEGA10:
3812         case CHIP_VEGA12:
3813         case CHIP_VEGA20:
3814                 if (dce110_register_irq_handlers(dm->adev)) {
3815                         DRM_ERROR("DM: Failed to initialize IRQ\n");
3816                         goto fail;
3817                 }
3818                 break;
3819 #if defined(CONFIG_DRM_AMD_DC_DCN)
3820         case CHIP_RAVEN:
3821         case CHIP_NAVI12:
3822         case CHIP_NAVI10:
3823         case CHIP_NAVI14:
3824         case CHIP_RENOIR:
3825         case CHIP_SIENNA_CICHLID:
3826         case CHIP_NAVY_FLOUNDER:
3827         case CHIP_DIMGREY_CAVEFISH:
3828         case CHIP_VANGOGH:
3829                 if (dcn10_register_irq_handlers(dm->adev)) {
3830                         DRM_ERROR("DM: Failed to initialize IRQ\n");
3831                         goto fail;
3832                 }
3833                 break;
3834 #endif
3835         default:
3836                 DRM_ERROR("Unsupported ASIC type: 0x%X\n", adev->asic_type);
3837                 goto fail;
3838         }
3839
3840         return 0;
3841 fail:
3842         kfree(aencoder);
3843         kfree(aconnector);
3844
3845         return -EINVAL;
3846 }
3847
3848 static void amdgpu_dm_destroy_drm_device(struct amdgpu_display_manager *dm)
3849 {
3850         drm_mode_config_cleanup(dm->ddev);
3851         drm_atomic_private_obj_fini(&dm->atomic_obj);
3852         return;
3853 }
3854
3855 /******************************************************************************
3856  * amdgpu_display_funcs functions
3857  *****************************************************************************/
3858
3859 /*
3860  * dm_bandwidth_update - program display watermarks
3861  *
3862  * @adev: amdgpu_device pointer
3863  *
3864  * Calculate and program the display watermarks and line buffer allocation.
3865  */
3866 static void dm_bandwidth_update(struct amdgpu_device *adev)
3867 {
3868         /* TODO: implement later */
3869 }
3870
3871 static const struct amdgpu_display_funcs dm_display_funcs = {
3872         .bandwidth_update = dm_bandwidth_update, /* called unconditionally */
3873         .vblank_get_counter = dm_vblank_get_counter,/* called unconditionally */
3874         .backlight_set_level = NULL, /* never called for DC */
3875         .backlight_get_level = NULL, /* never called for DC */
3876         .hpd_sense = NULL,/* called unconditionally */
3877         .hpd_set_polarity = NULL, /* called unconditionally */
3878         .hpd_get_gpio_reg = NULL, /* VBIOS parsing. DAL does it. */
3879         .page_flip_get_scanoutpos =
3880                 dm_crtc_get_scanoutpos,/* called unconditionally */
3881         .add_encoder = NULL, /* VBIOS parsing. DAL does it. */
3882         .add_connector = NULL, /* VBIOS parsing. DAL does it. */
3883 };
3884
3885 #if defined(CONFIG_DEBUG_KERNEL_DC)
3886
3887 static ssize_t s3_debug_store(struct device *device,
3888                               struct device_attribute *attr,
3889                               const char *buf,
3890                               size_t count)
3891 {
3892         int ret;
3893         int s3_state;
3894         struct drm_device *drm_dev = dev_get_drvdata(device);
3895         struct amdgpu_device *adev = drm_to_adev(drm_dev);
3896
3897         ret = kstrtoint(buf, 0, &s3_state);
3898
3899         if (ret == 0) {
3900                 if (s3_state) {
3901                         dm_resume(adev);
3902                         drm_kms_helper_hotplug_event(adev_to_drm(adev));
3903                 } else
3904                         dm_suspend(adev);
3905         }
3906
3907         return ret == 0 ? count : 0;
3908 }
3909
3910 DEVICE_ATTR_WO(s3_debug);
3911
3912 #endif
3913
3914 static int dm_early_init(void *handle)
3915 {
3916         struct amdgpu_device *adev = (struct amdgpu_device *)handle;
3917
3918         switch (adev->asic_type) {
3919 #if defined(CONFIG_DRM_AMD_DC_SI)
3920         case CHIP_TAHITI:
3921         case CHIP_PITCAIRN:
3922         case CHIP_VERDE:
3923                 adev->mode_info.num_crtc = 6;
3924                 adev->mode_info.num_hpd = 6;
3925                 adev->mode_info.num_dig = 6;
3926                 break;
3927         case CHIP_OLAND:
3928                 adev->mode_info.num_crtc = 2;
3929                 adev->mode_info.num_hpd = 2;
3930                 adev->mode_info.num_dig = 2;
3931                 break;
3932 #endif
3933         case CHIP_BONAIRE:
3934         case CHIP_HAWAII:
3935                 adev->mode_info.num_crtc = 6;
3936                 adev->mode_info.num_hpd = 6;
3937                 adev->mode_info.num_dig = 6;
3938                 break;
3939         case CHIP_KAVERI:
3940                 adev->mode_info.num_crtc = 4;
3941                 adev->mode_info.num_hpd = 6;
3942                 adev->mode_info.num_dig = 7;
3943                 break;
3944         case CHIP_KABINI:
3945         case CHIP_MULLINS:
3946                 adev->mode_info.num_crtc = 2;
3947                 adev->mode_info.num_hpd = 6;
3948                 adev->mode_info.num_dig = 6;
3949                 break;
3950         case CHIP_FIJI:
3951         case CHIP_TONGA:
3952                 adev->mode_info.num_crtc = 6;
3953                 adev->mode_info.num_hpd = 6;
3954                 adev->mode_info.num_dig = 7;
3955                 break;
3956         case CHIP_CARRIZO:
3957                 adev->mode_info.num_crtc = 3;
3958                 adev->mode_info.num_hpd = 6;
3959                 adev->mode_info.num_dig = 9;
3960                 break;
3961         case CHIP_STONEY:
3962                 adev->mode_info.num_crtc = 2;
3963                 adev->mode_info.num_hpd = 6;
3964                 adev->mode_info.num_dig = 9;
3965                 break;
3966         case CHIP_POLARIS11:
3967         case CHIP_POLARIS12:
3968                 adev->mode_info.num_crtc = 5;
3969                 adev->mode_info.num_hpd = 5;
3970                 adev->mode_info.num_dig = 5;
3971                 break;
3972         case CHIP_POLARIS10:
3973         case CHIP_VEGAM:
3974                 adev->mode_info.num_crtc = 6;
3975                 adev->mode_info.num_hpd = 6;
3976                 adev->mode_info.num_dig = 6;
3977                 break;
3978         case CHIP_VEGA10:
3979         case CHIP_VEGA12:
3980         case CHIP_VEGA20:
3981                 adev->mode_info.num_crtc = 6;
3982                 adev->mode_info.num_hpd = 6;
3983                 adev->mode_info.num_dig = 6;
3984                 break;
3985 #if defined(CONFIG_DRM_AMD_DC_DCN)
3986         case CHIP_RAVEN:
3987         case CHIP_RENOIR:
3988         case CHIP_VANGOGH:
3989                 adev->mode_info.num_crtc = 4;
3990                 adev->mode_info.num_hpd = 4;
3991                 adev->mode_info.num_dig = 4;
3992                 break;
3993         case CHIP_NAVI10:
3994         case CHIP_NAVI12:
3995         case CHIP_SIENNA_CICHLID:
3996         case CHIP_NAVY_FLOUNDER:
3997                 adev->mode_info.num_crtc = 6;
3998                 adev->mode_info.num_hpd = 6;
3999                 adev->mode_info.num_dig = 6;
4000                 break;
4001         case CHIP_NAVI14:
4002         case CHIP_DIMGREY_CAVEFISH:
4003                 adev->mode_info.num_crtc = 5;
4004                 adev->mode_info.num_hpd = 5;
4005                 adev->mode_info.num_dig = 5;
4006                 break;
4007 #endif
4008         default:
4009                 DRM_ERROR("Unsupported ASIC type: 0x%X\n", adev->asic_type);
4010                 return -EINVAL;
4011         }
4012
4013         amdgpu_dm_set_irq_funcs(adev);
4014
4015         if (adev->mode_info.funcs == NULL)
4016                 adev->mode_info.funcs = &dm_display_funcs;
4017
4018         /*
4019          * Note: Do NOT change adev->audio_endpt_rreg and
4020          * adev->audio_endpt_wreg because they are initialised in
4021          * amdgpu_device_init()
4022          */
4023 #if defined(CONFIG_DEBUG_KERNEL_DC)
4024         device_create_file(
4025                 adev_to_drm(adev)->dev,
4026                 &dev_attr_s3_debug);
4027 #endif
4028
4029         return 0;
4030 }
4031
4032 static bool modeset_required(struct drm_crtc_state *crtc_state,
4033                              struct dc_stream_state *new_stream,
4034                              struct dc_stream_state *old_stream)
4035 {
4036         return crtc_state->active && drm_atomic_crtc_needs_modeset(crtc_state);
4037 }
4038
4039 static bool modereset_required(struct drm_crtc_state *crtc_state)
4040 {
4041         return !crtc_state->active && drm_atomic_crtc_needs_modeset(crtc_state);
4042 }
4043
4044 static void amdgpu_dm_encoder_destroy(struct drm_encoder *encoder)
4045 {
4046         drm_encoder_cleanup(encoder);
4047         kfree(encoder);
4048 }
4049
4050 static const struct drm_encoder_funcs amdgpu_dm_encoder_funcs = {
4051         .destroy = amdgpu_dm_encoder_destroy,
4052 };
4053
4054
4055 static void get_min_max_dc_plane_scaling(struct drm_device *dev,
4056                                          struct drm_framebuffer *fb,
4057                                          int *min_downscale, int *max_upscale)
4058 {
4059         struct amdgpu_device *adev = drm_to_adev(dev);
4060         struct dc *dc = adev->dm.dc;
4061         /* Caps for all supported planes are the same on DCE and DCN 1 - 3 */
4062         struct dc_plane_cap *plane_cap = &dc->caps.planes[0];
4063
4064         switch (fb->format->format) {
4065         case DRM_FORMAT_P010:
4066         case DRM_FORMAT_NV12:
4067         case DRM_FORMAT_NV21:
4068                 *max_upscale = plane_cap->max_upscale_factor.nv12;
4069                 *min_downscale = plane_cap->max_downscale_factor.nv12;
4070                 break;
4071
4072         case DRM_FORMAT_XRGB16161616F:
4073         case DRM_FORMAT_ARGB16161616F:
4074         case DRM_FORMAT_XBGR16161616F:
4075         case DRM_FORMAT_ABGR16161616F:
4076                 *max_upscale = plane_cap->max_upscale_factor.fp16;
4077                 *min_downscale = plane_cap->max_downscale_factor.fp16;
4078                 break;
4079
4080         default:
4081                 *max_upscale = plane_cap->max_upscale_factor.argb8888;
4082                 *min_downscale = plane_cap->max_downscale_factor.argb8888;
4083                 break;
4084         }
4085
4086         /*
4087          * A factor of 1 in the plane_cap means scaling is not allowed,
4088          * i.e. use a scaling factor of 1.0 == 1000 units.
4089          */
4090         if (*max_upscale == 1)
4091                 *max_upscale = 1000;
4092
4093         if (*min_downscale == 1)
4094                 *min_downscale = 1000;
4095 }
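/*
 * Worked example of the 1000-unit convention: scaling a 1920-wide
 * source onto a 960-wide destination gives scale_w = 960 * 1000 /
 * 1920 = 500 (0.5x), which passes validation only when the format's
 * min_downscale is <= 500 (e.g. the 250 = 0.25x fallback used in
 * fill_dc_scaling_info() below).
 */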
4096
4097
4098 static int fill_dc_scaling_info(const struct drm_plane_state *state,
4099                                 struct dc_scaling_info *scaling_info)
4100 {
4101         int scale_w, scale_h, min_downscale, max_upscale;
4102
4103         memset(scaling_info, 0, sizeof(*scaling_info));
4104
4105         /* Source is fixed 16.16 but we ignore mantissa for now... */
4106         scaling_info->src_rect.x = state->src_x >> 16;
4107         scaling_info->src_rect.y = state->src_y >> 16;
4108
4109         /*
4110          * For reasons we don't (yet) fully understand, a non-zero
4111          * src_y coordinate into an NV12 buffer can cause a
4112          * system hang. To avoid hangs (and maybe be overly cautious)
4113          * let's reject both non-zero src_x and src_y.
4114          *
4115          * We currently know of only one use-case to reproduce a
4116          * scenario with non-zero src_x and src_y for NV12, which
4117          * is to gesture the YouTube Android app into full screen
4118          * on ChromeOS.
4119          */
4120         if (state->fb &&
4121             state->fb->format->format == DRM_FORMAT_NV12 &&
4122             (scaling_info->src_rect.x != 0 ||
4123              scaling_info->src_rect.y != 0))
4124                 return -EINVAL;
4125
4126         scaling_info->src_rect.width = state->src_w >> 16;
4127         if (scaling_info->src_rect.width == 0)
4128                 return -EINVAL;
4129
4130         scaling_info->src_rect.height = state->src_h >> 16;
4131         if (scaling_info->src_rect.height == 0)
4132                 return -EINVAL;
4133
4134         scaling_info->dst_rect.x = state->crtc_x;
4135         scaling_info->dst_rect.y = state->crtc_y;
4136
4137         if (state->crtc_w == 0)
4138                 return -EINVAL;
4139
4140         scaling_info->dst_rect.width = state->crtc_w;
4141
4142         if (state->crtc_h == 0)
4143                 return -EINVAL;
4144
4145         scaling_info->dst_rect.height = state->crtc_h;
4146
4147         /* DRM doesn't specify clipping on destination output. */
4148         scaling_info->clip_rect = scaling_info->dst_rect;
4149
4150         /* Validate scaling per-format with DC plane caps */
4151         if (state->plane && state->plane->dev && state->fb) {
4152                 get_min_max_dc_plane_scaling(state->plane->dev, state->fb,
4153                                              &min_downscale, &max_upscale);
4154         } else {
4155                 min_downscale = 250;
4156                 max_upscale = 16000;
4157         }
4158
4159         scale_w = scaling_info->dst_rect.width * 1000 /
4160                   scaling_info->src_rect.width;
4161
4162         if (scale_w < min_downscale || scale_w > max_upscale)
4163                 return -EINVAL;
4164
4165         scale_h = scaling_info->dst_rect.height * 1000 /
4166                   scaling_info->src_rect.height;
4167
4168         if (scale_h < min_downscale || scale_h > max_upscale)
4169                 return -EINVAL;
4170
4171         /*
4172          * The "scaling_quality" can be ignored for now, quality = 0 has DC
4173          * assume reasonable defaults based on the format.
4174          */
4175
4176         return 0;
4177 }
4178
4179 static void
4180 fill_gfx8_tiling_info_from_flags(union dc_tiling_info *tiling_info,
4181                                  uint64_t tiling_flags)
4182 {
4183         /* Fill GFX8 params */
4184         if (AMDGPU_TILING_GET(tiling_flags, ARRAY_MODE) == DC_ARRAY_2D_TILED_THIN1) {
4185                 unsigned int bankw, bankh, mtaspect, tile_split, num_banks;
4186
4187                 bankw = AMDGPU_TILING_GET(tiling_flags, BANK_WIDTH);
4188                 bankh = AMDGPU_TILING_GET(tiling_flags, BANK_HEIGHT);
4189                 mtaspect = AMDGPU_TILING_GET(tiling_flags, MACRO_TILE_ASPECT);
4190                 tile_split = AMDGPU_TILING_GET(tiling_flags, TILE_SPLIT);
4191                 num_banks = AMDGPU_TILING_GET(tiling_flags, NUM_BANKS);
4192
4193                 /* XXX fix me for VI */
4194                 tiling_info->gfx8.num_banks = num_banks;
4195                 tiling_info->gfx8.array_mode =
4196                                 DC_ARRAY_2D_TILED_THIN1;
4197                 tiling_info->gfx8.tile_split = tile_split;
4198                 tiling_info->gfx8.bank_width = bankw;
4199                 tiling_info->gfx8.bank_height = bankh;
4200                 tiling_info->gfx8.tile_aspect = mtaspect;
4201                 tiling_info->gfx8.tile_mode =
4202                                 DC_ADDR_SURF_MICRO_TILING_DISPLAY;
4203         } else if (AMDGPU_TILING_GET(tiling_flags, ARRAY_MODE)
4204                         == DC_ARRAY_1D_TILED_THIN1) {
4205                 tiling_info->gfx8.array_mode = DC_ARRAY_1D_TILED_THIN1;
4206         }
4207
4208         tiling_info->gfx8.pipe_config =
4209                         AMDGPU_TILING_GET(tiling_flags, PIPE_CONFIG);
4210 }
4211
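/*
 * Seed the GFX9+ tiling description from the device's GB_ADDR_CONFIG
 * fields (pipes, banks, shader engines, RBs and, on the listed GFX10.3
 * parts, packers). Modifier-specific bits may override these afterwards.
 */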
4212 static void
4213 fill_gfx9_tiling_info_from_device(const struct amdgpu_device *adev,
4214                                   union dc_tiling_info *tiling_info)
4215 {
4216         tiling_info->gfx9.num_pipes =
4217                 adev->gfx.config.gb_addr_config_fields.num_pipes;
4218         tiling_info->gfx9.num_banks =
4219                 adev->gfx.config.gb_addr_config_fields.num_banks;
4220         tiling_info->gfx9.pipe_interleave =
4221                 adev->gfx.config.gb_addr_config_fields.pipe_interleave_size;
4222         tiling_info->gfx9.num_shader_engines =
4223                 adev->gfx.config.gb_addr_config_fields.num_se;
4224         tiling_info->gfx9.max_compressed_frags =
4225                 adev->gfx.config.gb_addr_config_fields.max_compress_frags;
4226         tiling_info->gfx9.num_rb_per_se =
4227                 adev->gfx.config.gb_addr_config_fields.num_rb_per_se;
4228         tiling_info->gfx9.shaderEnable = 1;
4229         if (adev->asic_type == CHIP_SIENNA_CICHLID ||
4230             adev->asic_type == CHIP_NAVY_FLOUNDER ||
4231             adev->asic_type == CHIP_DIMGREY_CAVEFISH ||
4232             adev->asic_type == CHIP_VANGOGH)
4233                 tiling_info->gfx9.num_pkrs = adev->gfx.config.gb_addr_config_fields.num_pkrs;
4234 }
4235
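/*
 * Ask DC whether the requested DCC parameters can actually be scanned
 * out. Returns 0 when DCC is disabled or supported; -EINVAL when the
 * format/swizzle/scan-direction combination is not compressible, or the
 * independent 64B block setting conflicts with the hardware capability.
 */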
4236 static int
4237 validate_dcc(struct amdgpu_device *adev,
4238              const enum surface_pixel_format format,
4239              const enum dc_rotation_angle rotation,
4240              const union dc_tiling_info *tiling_info,
4241              const struct dc_plane_dcc_param *dcc,
4242              const struct dc_plane_address *address,
4243              const struct plane_size *plane_size)
4244 {
4245         struct dc *dc = adev->dm.dc;
4246         struct dc_dcc_surface_param input;
4247         struct dc_surface_dcc_cap output;
4248
4249         memset(&input, 0, sizeof(input));
4250         memset(&output, 0, sizeof(output));
4251
4252         if (!dcc->enable)
4253                 return 0;
4254
4255         if (format >= SURFACE_PIXEL_FORMAT_VIDEO_BEGIN ||
4256             !dc->cap_funcs.get_dcc_compression_cap)
4257                 return -EINVAL;
4258
4259         input.format = format;
4260         input.surface_size.width = plane_size->surface_size.width;
4261         input.surface_size.height = plane_size->surface_size.height;
4262         input.swizzle_mode = tiling_info->gfx9.swizzle;
4263
4264         if (rotation == ROTATION_ANGLE_0 || rotation == ROTATION_ANGLE_180)
4265                 input.scan = SCAN_DIRECTION_HORIZONTAL;
4266         else if (rotation == ROTATION_ANGLE_90 || rotation == ROTATION_ANGLE_270)
4267                 input.scan = SCAN_DIRECTION_VERTICAL;
4268
4269         if (!dc->cap_funcs.get_dcc_compression_cap(dc, &input, &output))
4270                 return -EINVAL;
4271
4272         if (!output.capable)
4273                 return -EINVAL;
4274
4275         if (dcc->independent_64b_blks == 0 &&
4276             output.grph.rgb.independent_64b_blks != 0)
4277                 return -EINVAL;
4278
4279         return 0;
4280 }
4281
4282 static bool
4283 modifier_has_dcc(uint64_t modifier)
4284 {
4285         return IS_AMD_FMT_MOD(modifier) && AMD_FMT_MOD_GET(DCC, modifier);
4286 }
4287
4288 static unsigned
4289 modifier_gfx9_swizzle_mode(uint64_t modifier)
4290 {
4291         if (modifier == DRM_FORMAT_MOD_LINEAR)
4292                 return 0;
4293
4294         return AMD_FMT_MOD_GET(TILE, modifier);
4295 }
4296
4297 static const struct drm_format_info *
4298 amd_get_format_info(const struct drm_mode_fb_cmd2 *cmd)
4299 {
4300         return amdgpu_lookup_format_info(cmd->pixel_format, cmd->modifier[0]);
4301 }
4302
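/*
 * Refine the device-default tiling info with the bits packed into an
 * AMD format modifier. Illustrative example: PIPE_XOR_BITS = 3 gives
 * num_pipes = 1 << 3 = 8; only the first four pipe bits count as
 * pipes, and any remainder is attributed to shader engines.
 */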
4303 static void
4304 fill_gfx9_tiling_info_from_modifier(const struct amdgpu_device *adev,
4305                                     union dc_tiling_info *tiling_info,
4306                                     uint64_t modifier)
4307 {
4308         unsigned int mod_bank_xor_bits = AMD_FMT_MOD_GET(BANK_XOR_BITS, modifier);
4309         unsigned int mod_pipe_xor_bits = AMD_FMT_MOD_GET(PIPE_XOR_BITS, modifier);
4310         unsigned int pkrs_log2 = AMD_FMT_MOD_GET(PACKERS, modifier);
4311         unsigned int pipes_log2 = min(4u, mod_pipe_xor_bits);
4312
4313         fill_gfx9_tiling_info_from_device(adev, tiling_info);
4314
4315         if (!IS_AMD_FMT_MOD(modifier))
4316                 return;
4317
4318         tiling_info->gfx9.num_pipes = 1u << pipes_log2;
4319         tiling_info->gfx9.num_shader_engines = 1u << (mod_pipe_xor_bits - pipes_log2);
4320
4321         if (adev->family >= AMDGPU_FAMILY_NV) {
4322                 tiling_info->gfx9.num_pkrs = 1u << pkrs_log2;
4323         } else {
4324                 tiling_info->gfx9.num_banks = 1u << mod_bank_xor_bits;
4325
4326                 /* for DCC we know it isn't rb aligned, so rb_per_se doesn't matter. */
4327         }
4328 }
4329
4330 enum dm_micro_swizzle {
4331         MICRO_SWIZZLE_Z = 0,
4332         MICRO_SWIZZLE_S = 1,
4333         MICRO_SWIZZLE_D = 2,
4334         MICRO_SWIZZLE_R = 3
4335 };
4336
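/*
 * Plane hook: accept a (format, modifier) pair only if the modifier is
 * LINEAR/INVALID or on the plane's advertised list, the micro swizzle
 * is legal for the format's bpp on this ASIC family, and any DCC
 * request is limited to the single-plane 32bpp case supported here.
 */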
4337 static bool dm_plane_format_mod_supported(struct drm_plane *plane,
4338                                           uint32_t format,
4339                                           uint64_t modifier)
4340 {
4341         struct amdgpu_device *adev = drm_to_adev(plane->dev);
4342         const struct drm_format_info *info = drm_format_info(format);
4343         int i;
4344
4345         enum dm_micro_swizzle microtile = modifier_gfx9_swizzle_mode(modifier) & 3;
4346
4347         if (!info)
4348                 return false;
4349
4350         /*
4351          * We always have to allow these modifiers:
4352          * 1. Core DRM checks for LINEAR support if userspace does not provide modifiers.
4353          * 2. Not passing any modifiers is the same as explicitly passing INVALID.
4354          */
4355         if (modifier == DRM_FORMAT_MOD_LINEAR ||
4356             modifier == DRM_FORMAT_MOD_INVALID) {
4357                 return true;
4358         }
4359
4360         /* Check that the modifier is on the list of the plane's supported modifiers. */
4361         for (i = 0; i < plane->modifier_count; i++) {
4362                 if (modifier == plane->modifiers[i])
4363                         break;
4364         }
4365         if (i == plane->modifier_count)
4366                 return false;
4367
4368         /*
4369          * For D swizzle the canonical modifier depends on the bpp, so check
4370          * it here.
4371          */
4372         if (AMD_FMT_MOD_GET(TILE_VERSION, modifier) == AMD_FMT_MOD_TILE_VER_GFX9 &&
4373             adev->family >= AMDGPU_FAMILY_NV) {
4374                 if (microtile == MICRO_SWIZZLE_D && info->cpp[0] == 4)
4375                         return false;
4376         }
4377
4378         if (adev->family >= AMDGPU_FAMILY_RV && microtile == MICRO_SWIZZLE_D &&
4379             info->cpp[0] < 8)
4380                 return false;
4381
4382         if (modifier_has_dcc(modifier)) {
4383                 /* Per radeonsi comments 16/64 bpp are more complicated. */
4384                 if (info->cpp[0] != 4)
4385                         return false;
4386                 /* We support multi-planar formats, but not when combined with
4387                  * additional DCC metadata planes. */
4388                 if (info->num_planes > 1)
4389                         return false;
4390         }
4391
4392         return true;
4393 }
4394
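/*
 * Append one modifier to a heap-grown array, doubling the capacity
 * when full. An allocation failure frees the array and leaves *mods
 * NULL, turning subsequent calls into no-ops, so callers need only a
 * single NULL check at the end.
 */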
4395 static void
4396 add_modifier(uint64_t **mods, uint64_t *size, uint64_t *cap, uint64_t mod)
4397 {
4398         if (!*mods)
4399                 return;
4400
4401         if (*cap - *size < 1) {
4402                 uint64_t new_cap = *cap * 2;
4403                 uint64_t *new_mods = kmalloc_array(new_cap, sizeof(uint64_t), GFP_KERNEL);
4404
4405                 if (!new_mods) {
4406                         kfree(*mods);
4407                         *mods = NULL;
4408                         return;
4409                 }
4410
4411                 memcpy(new_mods, *mods, sizeof(uint64_t) * *size);
4412                 kfree(*mods);
4413                 *mods = new_mods;
4414                 *cap = new_cap;
4415         }
4416
4417         (*mods)[*size] = mod;
4418         *size += 1;
4419 }
4420
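/*
 * Advertise the GFX9 (Vega/Raven) swizzles, most capable first. The
 * XOR bit counts come from the harvested config; e.g. a hypothetical
 * 4-pipe, single-SE part gets pipe_xor_bits = min(8, 2 + 0) = 2,
 * leaving at most the remaining 6 XOR bits for the banks.
 */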
4421 static void
4422 add_gfx9_modifiers(const struct amdgpu_device *adev,
4423                    uint64_t **mods, uint64_t *size, uint64_t *capacity)
4424 {
4425         int pipes = ilog2(adev->gfx.config.gb_addr_config_fields.num_pipes);
4426         int pipe_xor_bits = min(8, pipes +
4427                                 ilog2(adev->gfx.config.gb_addr_config_fields.num_se));
4428         int bank_xor_bits = min(8 - pipe_xor_bits,
4429                                 ilog2(adev->gfx.config.gb_addr_config_fields.num_banks));
4430         int rb = ilog2(adev->gfx.config.gb_addr_config_fields.num_se) +
4431                  ilog2(adev->gfx.config.gb_addr_config_fields.num_rb_per_se);
4432
4434         if (adev->family == AMDGPU_FAMILY_RV) {
4435                 /* Raven2 and later */
4436                 bool has_constant_encode = adev->asic_type > CHIP_RAVEN || adev->external_rev_id >= 0x81;
4437
4438                 /*
4439                  * No _D DCC swizzles yet because we only allow 32bpp, which
4440                  * doesn't support _D on DCN
4441                  */
4442
4443                 if (has_constant_encode) {
4444                         add_modifier(mods, size, capacity, AMD_FMT_MOD |
4445                                     AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S_X) |
4446                                     AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9) |
4447                                     AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
4448                                     AMD_FMT_MOD_SET(BANK_XOR_BITS, bank_xor_bits) |
4449                                     AMD_FMT_MOD_SET(DCC, 1) |
4450                                     AMD_FMT_MOD_SET(DCC_INDEPENDENT_64B, 1) |
4451                                     AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_64B) |
4452                                     AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 1));
4453                 }
4454
4455                 add_modifier(mods, size, capacity, AMD_FMT_MOD |
4456                             AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S_X) |
4457                             AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9) |
4458                             AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
4459                             AMD_FMT_MOD_SET(BANK_XOR_BITS, bank_xor_bits) |
4460                             AMD_FMT_MOD_SET(DCC, 1) |
4461                             AMD_FMT_MOD_SET(DCC_INDEPENDENT_64B, 1) |
4462                             AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_64B) |
4463                             AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 0));
4464
4465                 if (has_constant_encode) {
4466                         add_modifier(mods, size, capacity, AMD_FMT_MOD |
4467                                     AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S_X) |
4468                                     AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9) |
4469                                     AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
4470                                     AMD_FMT_MOD_SET(BANK_XOR_BITS, bank_xor_bits) |
4471                                     AMD_FMT_MOD_SET(DCC, 1) |
4472                                     AMD_FMT_MOD_SET(DCC_RETILE, 1) |
4473                                     AMD_FMT_MOD_SET(DCC_INDEPENDENT_64B, 1) |
4474                                     AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_64B) |
4476                                     AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 1) |
4477                                     AMD_FMT_MOD_SET(RB, rb) |
4478                                     AMD_FMT_MOD_SET(PIPE, pipes));
4479                 }
4480
4481                 add_modifier(mods, size, capacity, AMD_FMT_MOD |
4482                             AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S_X) |
4483                             AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9) |
4484                             AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
4485                             AMD_FMT_MOD_SET(BANK_XOR_BITS, bank_xor_bits) |
4486                             AMD_FMT_MOD_SET(DCC, 1) |
4487                             AMD_FMT_MOD_SET(DCC_RETILE, 1) |
4488                             AMD_FMT_MOD_SET(DCC_INDEPENDENT_64B, 1) |
4489                             AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_64B) |
4490                             AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 0) |
4491                             AMD_FMT_MOD_SET(RB, rb) |
4492                             AMD_FMT_MOD_SET(PIPE, pipes));
4493         }
4494
4495         /*
4496          * Only supported for 64bpp on Raven, will be filtered on format in
4497          * dm_plane_format_mod_supported.
4498          */
4499         add_modifier(mods, size, capacity, AMD_FMT_MOD |
4500                     AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_D_X) |
4501                     AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9) |
4502                     AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
4503                     AMD_FMT_MOD_SET(BANK_XOR_BITS, bank_xor_bits));
4504
4505         if (adev->family == AMDGPU_FAMILY_RV) {
4506                 add_modifier(mods, size, capacity, AMD_FMT_MOD |
4507                             AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S_X) |
4508                             AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9) |
4509                             AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
4510                             AMD_FMT_MOD_SET(BANK_XOR_BITS, bank_xor_bits));
4511         }
4512
4513         /*
4514          * Only supported for 64bpp on Raven, will be filtered on format in
4515          * dm_plane_format_mod_supported.
4516          */
4517         add_modifier(mods, size, capacity, AMD_FMT_MOD |
4518                     AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_D) |
4519                     AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9));
4520
4521         if (adev->family == AMDGPU_FAMILY_RV) {
4522                 add_modifier(mods, size, capacity, AMD_FMT_MOD |
4523                             AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S) |
4524                             AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9));
4525         }
4526 }
4527
4528 static void
4529 add_gfx10_1_modifiers(const struct amdgpu_device *adev,
4530                       uint64_t **mods, uint64_t *size, uint64_t *capacity)
4531 {
4532         int pipe_xor_bits = ilog2(adev->gfx.config.gb_addr_config_fields.num_pipes);
4533
4534         add_modifier(mods, size, capacity, AMD_FMT_MOD |
4535                     AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_R_X) |
4536                     AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10) |
4537                     AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
4538                     AMD_FMT_MOD_SET(DCC, 1) |
4539                     AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 1) |
4540                     AMD_FMT_MOD_SET(DCC_INDEPENDENT_64B, 1) |
4541                     AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_64B));
4542
4543         add_modifier(mods, size, capacity, AMD_FMT_MOD |
4544                     AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_R_X) |
4545                     AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10) |
4546                     AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
4547                     AMD_FMT_MOD_SET(DCC, 1) |
4548                     AMD_FMT_MOD_SET(DCC_RETILE, 1) |
4549                     AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 1) |
4550                     AMD_FMT_MOD_SET(DCC_INDEPENDENT_64B, 1) |
4551                     AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_64B));
4552
4553         add_modifier(mods, size, capacity, AMD_FMT_MOD |
4554                     AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_R_X) |
4555                     AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10) |
4556                     AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits));
4557
4558         add_modifier(mods, size, capacity, AMD_FMT_MOD |
4559                     AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S_X) |
4560                     AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10) |
4561                     AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits));
4562
4564         /* Only supported for 64bpp, will be filtered in dm_plane_format_mod_supported */
4565         add_modifier(mods, size, capacity, AMD_FMT_MOD |
4566                     AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_D) |
4567                     AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9));
4568
4569         add_modifier(mods, size, capacity, AMD_FMT_MOD |
4570                     AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S) |
4571                     AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9));
4572 }
4573
4574 static void
4575 add_gfx10_3_modifiers(const struct amdgpu_device *adev,
4576                       uint64_t **mods, uint64_t *size, uint64_t *capacity)
4577 {
4578         int pipe_xor_bits = ilog2(adev->gfx.config.gb_addr_config_fields.num_pipes);
4579         int pkrs = ilog2(adev->gfx.config.gb_addr_config_fields.num_pkrs);
4580
4581         add_modifier(mods, size, capacity, AMD_FMT_MOD |
4582                     AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_R_X) |
4583                     AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10_RBPLUS) |
4584                     AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
4585                     AMD_FMT_MOD_SET(PACKERS, pkrs) |
4586                     AMD_FMT_MOD_SET(DCC, 1) |
4587                     AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 1) |
4588                     AMD_FMT_MOD_SET(DCC_INDEPENDENT_64B, 1) |
4589                     AMD_FMT_MOD_SET(DCC_INDEPENDENT_128B, 1) |
4590                     AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_64B));
4591
4592         add_modifier(mods, size, capacity, AMD_FMT_MOD |
4593                     AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_R_X) |
4594                     AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10_RBPLUS) |
4595                     AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
4596                     AMD_FMT_MOD_SET(PACKERS, pkrs) |
4597                     AMD_FMT_MOD_SET(DCC, 1) |
4598                     AMD_FMT_MOD_SET(DCC_RETILE, 1) |
4599                     AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 1) |
4600                     AMD_FMT_MOD_SET(DCC_INDEPENDENT_64B, 1) |
4601                     AMD_FMT_MOD_SET(DCC_INDEPENDENT_128B, 1) |
4602                     AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_64B));
4603
4604         add_modifier(mods, size, capacity, AMD_FMT_MOD |
4605                     AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_R_X) |
4606                     AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10_RBPLUS) |
4607                     AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
4608                     AMD_FMT_MOD_SET(PACKERS, pkrs));
4609
4610         add_modifier(mods, size, capacity, AMD_FMT_MOD |
4611                     AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S_X) |
4612                     AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10_RBPLUS) |
4613                     AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
4614                     AMD_FMT_MOD_SET(PACKERS, pkrs));
4615
4616         /* Only supported for 64bpp, will be filtered in dm_plane_format_mod_supported */
4617         add_modifier(mods, size, capacity, AMD_FMT_MOD |
4618                     AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_D) |
4619                     AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9));
4620
4621         add_modifier(mods, size, capacity, AMD_FMT_MOD |
4622                     AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S) |
4623                     AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9));
4624 }
4625
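/*
 * Build the modifier list exposed to userspace for one plane, always
 * terminated by DRM_FORMAT_MOD_INVALID; cursor planes are restricted
 * to LINEAR. Returns -ENOMEM if any allocation failed along the way
 * (add_modifier() collapses *mods to NULL on failure).
 */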
4626 static int
4627 get_plane_modifiers(const struct amdgpu_device *adev, unsigned int plane_type, uint64_t **mods)
4628 {
4629         uint64_t size = 0, capacity = 128;
4630         *mods = NULL;
4631
4632         /* We have not hooked up any pre-GFX9 modifiers. */
4633         if (adev->family < AMDGPU_FAMILY_AI)
4634                 return 0;
4635
4636         *mods = kmalloc_array(capacity, sizeof(uint64_t), GFP_KERNEL);
4637
4638         if (plane_type == DRM_PLANE_TYPE_CURSOR) {
4639                 add_modifier(mods, &size, &capacity, DRM_FORMAT_MOD_LINEAR);
4640                 add_modifier(mods, &size, &capacity, DRM_FORMAT_MOD_INVALID);
4641                 return *mods ? 0 : -ENOMEM;
4642         }
4643
4644         switch (adev->family) {
4645         case AMDGPU_FAMILY_AI:
4646         case AMDGPU_FAMILY_RV:
4647                 add_gfx9_modifiers(adev, mods, &size, &capacity);
4648                 break;
4649         case AMDGPU_FAMILY_NV:
4650         case AMDGPU_FAMILY_VGH:
4651                 if (adev->asic_type >= CHIP_SIENNA_CICHLID)
4652                         add_gfx10_3_modifiers(adev, mods, &size, &capacity);
4653                 else
4654                         add_gfx10_1_modifiers(adev, mods, &size, &capacity);
4655                 break;
4656         }
4657
4658         add_modifier(mods, &size, &capacity, DRM_FORMAT_MOD_LINEAR);
4659
4660         /* INVALID marks the end of the list. */
4661         add_modifier(mods, &size, &capacity, DRM_FORMAT_MOD_INVALID);
4662
4663         if (!*mods)
4664                 return -ENOMEM;
4665
4666         return 0;
4667 }
4668
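/*
 * Derive GFX9+ tiling, DCC parameters and the DCC metadata address
 * from the framebuffer's modifier. For DCC modifiers, FB plane 1
 * (offsets[1]/pitches[1]) carries the displayable metadata surface.
 */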
4669 static int
4670 fill_gfx9_plane_attributes_from_modifiers(struct amdgpu_device *adev,
4671                                           const struct amdgpu_framebuffer *afb,
4672                                           const enum surface_pixel_format format,
4673                                           const enum dc_rotation_angle rotation,
4674                                           const struct plane_size *plane_size,
4675                                           union dc_tiling_info *tiling_info,
4676                                           struct dc_plane_dcc_param *dcc,
4677                                           struct dc_plane_address *address,
4678                                           const bool force_disable_dcc)
4679 {
4680         const uint64_t modifier = afb->base.modifier;
4681         int ret;
4682
4683         fill_gfx9_tiling_info_from_modifier(adev, tiling_info, modifier);
4684         tiling_info->gfx9.swizzle = modifier_gfx9_swizzle_mode(modifier);
4685
4686         if (modifier_has_dcc(modifier) && !force_disable_dcc) {
4687                 uint64_t dcc_address = afb->address + afb->base.offsets[1];
4688
4689                 dcc->enable = 1;
4690                 dcc->meta_pitch = afb->base.pitches[1];
4691                 dcc->independent_64b_blks = AMD_FMT_MOD_GET(DCC_INDEPENDENT_64B, modifier);
4692
4693                 address->grph.meta_addr.low_part = lower_32_bits(dcc_address);
4694                 address->grph.meta_addr.high_part = upper_32_bits(dcc_address);
4695         }
4696
4697         ret = validate_dcc(adev, format, rotation, tiling_info, dcc, address, plane_size);
4698         if (ret)
4699                 return ret;
4700
4701         return 0;
4702 }
4703
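/*
 * Fill DC's buffer-level view of the framebuffer: surface/chroma sizes
 * and pitches, the scan-out address (split into luma/chroma for video
 * formats) and generation-specific tiling/DCC information.
 */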
4704 static int
4705 fill_plane_buffer_attributes(struct amdgpu_device *adev,
4706                              const struct amdgpu_framebuffer *afb,
4707                              const enum surface_pixel_format format,
4708                              const enum dc_rotation_angle rotation,
4709                              const uint64_t tiling_flags,
4710                              union dc_tiling_info *tiling_info,
4711                              struct plane_size *plane_size,
4712                              struct dc_plane_dcc_param *dcc,
4713                              struct dc_plane_address *address,
4714                              bool tmz_surface,
4715                              bool force_disable_dcc)
4716 {
4717         const struct drm_framebuffer *fb = &afb->base;
4718         int ret;
4719
4720         memset(tiling_info, 0, sizeof(*tiling_info));
4721         memset(plane_size, 0, sizeof(*plane_size));
4722         memset(dcc, 0, sizeof(*dcc));
4723         memset(address, 0, sizeof(*address));
4724
4725         address->tmz_surface = tmz_surface;
4726
4727         if (format < SURFACE_PIXEL_FORMAT_VIDEO_BEGIN) {
4728                 uint64_t addr = afb->address + fb->offsets[0];
4729
4730                 plane_size->surface_size.x = 0;
4731                 plane_size->surface_size.y = 0;
4732                 plane_size->surface_size.width = fb->width;
4733                 plane_size->surface_size.height = fb->height;
4734                 plane_size->surface_pitch =
4735                         fb->pitches[0] / fb->format->cpp[0];
4736
4737                 address->type = PLN_ADDR_TYPE_GRAPHICS;
4738                 address->grph.addr.low_part = lower_32_bits(addr);
4739                 address->grph.addr.high_part = upper_32_bits(addr);
4740         } else if (format < SURFACE_PIXEL_FORMAT_INVALID) {
4741                 uint64_t luma_addr = afb->address + fb->offsets[0];
4742                 uint64_t chroma_addr = afb->address + fb->offsets[1];
4743
4744                 plane_size->surface_size.x = 0;
4745                 plane_size->surface_size.y = 0;
4746                 plane_size->surface_size.width = fb->width;
4747                 plane_size->surface_size.height = fb->height;
4748                 plane_size->surface_pitch =
4749                         fb->pitches[0] / fb->format->cpp[0];
4750
4751                 plane_size->chroma_size.x = 0;
4752                 plane_size->chroma_size.y = 0;
4753                 /* TODO: set these based on surface format */
4754                 plane_size->chroma_size.width = fb->width / 2;
4755                 plane_size->chroma_size.height = fb->height / 2;
4756
4757                 plane_size->chroma_pitch =
4758                         fb->pitches[1] / fb->format->cpp[1];
4759
4760                 address->type = PLN_ADDR_TYPE_VIDEO_PROGRESSIVE;
4761                 address->video_progressive.luma_addr.low_part =
4762                         lower_32_bits(luma_addr);
4763                 address->video_progressive.luma_addr.high_part =
4764                         upper_32_bits(luma_addr);
4765                 address->video_progressive.chroma_addr.low_part =
4766                         lower_32_bits(chroma_addr);
4767                 address->video_progressive.chroma_addr.high_part =
4768                         upper_32_bits(chroma_addr);
4769         }
4770
4771         if (adev->family >= AMDGPU_FAMILY_AI) {
4772                 ret = fill_gfx9_plane_attributes_from_modifiers(adev, afb, format,
4773                                                                 rotation, plane_size,
4774                                                                 tiling_info, dcc,
4775                                                                 address,
4776                                                                 force_disable_dcc);
4777                 if (ret)
4778                         return ret;
4779         } else {
4780                 fill_gfx8_tiling_info_from_flags(tiling_info, tiling_flags);
4781         }
4782
4783         return 0;
4784 }
4785
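/*
 * Map DRM blending state onto DC's alpha controls. Only overlay planes
 * blend. DRM plane alpha is 16 bits wide, so e.g. a property value of
 * 0x8080 becomes the 8-bit global alpha 0x80 via the shift below.
 */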
4786 static void
4787 fill_blending_from_plane_state(const struct drm_plane_state *plane_state,
4788                                bool *per_pixel_alpha, bool *global_alpha,
4789                                int *global_alpha_value)
4790 {
4791         *per_pixel_alpha = false;
4792         *global_alpha = false;
4793         *global_alpha_value = 0xff;
4794
4795         if (plane_state->plane->type != DRM_PLANE_TYPE_OVERLAY)
4796                 return;
4797
4798         if (plane_state->pixel_blend_mode == DRM_MODE_BLEND_PREMULTI) {
4799                 static const uint32_t alpha_formats[] = {
4800                         DRM_FORMAT_ARGB8888,
4801                         DRM_FORMAT_RGBA8888,
4802                         DRM_FORMAT_ABGR8888,
4803                 };
4804                 uint32_t format = plane_state->fb->format->format;
4805                 unsigned int i;
4806
4807                 for (i = 0; i < ARRAY_SIZE(alpha_formats); ++i) {
4808                         if (format == alpha_formats[i]) {
4809                                 *per_pixel_alpha = true;
4810                                 break;
4811                         }
4812                 }
4813         }
4814
4815         if (plane_state->alpha < 0xffff) {
4816                 *global_alpha = true;
4817                 *global_alpha_value = plane_state->alpha >> 8;
4818         }
4819 }
4820
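/*
 * Choose the DC color space for a plane: RGB surfaces stay sRGB, while
 * YCbCr surfaces honor the DRM color encoding/range properties. Note
 * that BT.2020 is only accepted in full range here.
 */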
4821 static int
4822 fill_plane_color_attributes(const struct drm_plane_state *plane_state,
4823                             const enum surface_pixel_format format,
4824                             enum dc_color_space *color_space)
4825 {
4826         bool full_range;
4827
4828         *color_space = COLOR_SPACE_SRGB;
4829
4830         /* DRM color properties only affect non-RGB formats. */
4831         if (format < SURFACE_PIXEL_FORMAT_VIDEO_BEGIN)
4832                 return 0;
4833
4834         full_range = (plane_state->color_range == DRM_COLOR_YCBCR_FULL_RANGE);
4835
4836         switch (plane_state->color_encoding) {
4837         case DRM_COLOR_YCBCR_BT601:
4838                 if (full_range)
4839                         *color_space = COLOR_SPACE_YCBCR601;
4840                 else
4841                         *color_space = COLOR_SPACE_YCBCR601_LIMITED;
4842                 break;
4843
4844         case DRM_COLOR_YCBCR_BT709:
4845                 if (full_range)
4846                         *color_space = COLOR_SPACE_YCBCR709;
4847                 else
4848                         *color_space = COLOR_SPACE_YCBCR709_LIMITED;
4849                 break;
4850
4851         case DRM_COLOR_YCBCR_BT2020:
4852                 if (full_range)
4853                         *color_space = COLOR_SPACE_2020_YCBCR;
4854                 else
4855                         return -EINVAL;
4856                 break;
4857
4858         default:
4859                 return -EINVAL;
4860         }
4861
4862         return 0;
4863 }
4864
4865 static int
4866 fill_dc_plane_info_and_addr(struct amdgpu_device *adev,
4867                             const struct drm_plane_state *plane_state,
4868                             const uint64_t tiling_flags,
4869                             struct dc_plane_info *plane_info,
4870                             struct dc_plane_address *address,
4871                             bool tmz_surface,
4872                             bool force_disable_dcc)
4873 {
4874         const struct drm_framebuffer *fb = plane_state->fb;
4875         const struct amdgpu_framebuffer *afb =
4876                 to_amdgpu_framebuffer(plane_state->fb);
4877         int ret;
4878
4879         memset(plane_info, 0, sizeof(*plane_info));
4880
4881         switch (fb->format->format) {
4882         case DRM_FORMAT_C8:
4883                 plane_info->format =
4884                         SURFACE_PIXEL_FORMAT_GRPH_PALETA_256_COLORS;
4885                 break;
4886         case DRM_FORMAT_RGB565:
4887                 plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_RGB565;
4888                 break;
4889         case DRM_FORMAT_XRGB8888:
4890         case DRM_FORMAT_ARGB8888:
4891                 plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ARGB8888;
4892                 break;
4893         case DRM_FORMAT_XRGB2101010:
4894         case DRM_FORMAT_ARGB2101010:
4895                 plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ARGB2101010;
4896                 break;
4897         case DRM_FORMAT_XBGR2101010:
4898         case DRM_FORMAT_ABGR2101010:
4899                 plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ABGR2101010;
4900                 break;
4901         case DRM_FORMAT_XBGR8888:
4902         case DRM_FORMAT_ABGR8888:
4903                 plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ABGR8888;
4904                 break;
4905         case DRM_FORMAT_NV21:
4906                 plane_info->format = SURFACE_PIXEL_FORMAT_VIDEO_420_YCbCr;
4907                 break;
4908         case DRM_FORMAT_NV12:
4909                 plane_info->format = SURFACE_PIXEL_FORMAT_VIDEO_420_YCrCb;
4910                 break;
4911         case DRM_FORMAT_P010:
4912                 plane_info->format = SURFACE_PIXEL_FORMAT_VIDEO_420_10bpc_YCrCb;
4913                 break;
4914         case DRM_FORMAT_XRGB16161616F:
4915         case DRM_FORMAT_ARGB16161616F:
4916                 plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ARGB16161616F;
4917                 break;
4918         case DRM_FORMAT_XBGR16161616F:
4919         case DRM_FORMAT_ABGR16161616F:
4920                 plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ABGR16161616F;
4921                 break;
4922         default:
4923                 DRM_ERROR(
4924                         "Unsupported screen format %p4cc\n",
4925                         &fb->format->format);
4926                 return -EINVAL;
4927         }
4928
4929         switch (plane_state->rotation & DRM_MODE_ROTATE_MASK) {
4930         case DRM_MODE_ROTATE_0:
4931                 plane_info->rotation = ROTATION_ANGLE_0;
4932                 break;
4933         case DRM_MODE_ROTATE_90:
4934                 plane_info->rotation = ROTATION_ANGLE_90;
4935                 break;
4936         case DRM_MODE_ROTATE_180:
4937                 plane_info->rotation = ROTATION_ANGLE_180;
4938                 break;
4939         case DRM_MODE_ROTATE_270:
4940                 plane_info->rotation = ROTATION_ANGLE_270;
4941                 break;
4942         default:
4943                 plane_info->rotation = ROTATION_ANGLE_0;
4944                 break;
4945         }
4946
4947         plane_info->visible = true;
4948         plane_info->stereo_format = PLANE_STEREO_FORMAT_NONE;
4949
4950         plane_info->layer_index = 0;
4951
4952         ret = fill_plane_color_attributes(plane_state, plane_info->format,
4953                                           &plane_info->color_space);
4954         if (ret)
4955                 return ret;
4956
4957         ret = fill_plane_buffer_attributes(adev, afb, plane_info->format,
4958                                            plane_info->rotation, tiling_flags,
4959                                            &plane_info->tiling_info,
4960                                            &plane_info->plane_size,
4961                                            &plane_info->dcc, address, tmz_surface,
4962                                            force_disable_dcc);
4963         if (ret)
4964                 return ret;
4965
4966         fill_blending_from_plane_state(
4967                 plane_state, &plane_info->per_pixel_alpha,
4968                 &plane_info->global_alpha, &plane_info->global_alpha_value);
4969
4970         return 0;
4971 }
4972
4973 static int fill_dc_plane_attributes(struct amdgpu_device *adev,
4974                                     struct dc_plane_state *dc_plane_state,
4975                                     struct drm_plane_state *plane_state,
4976                                     struct drm_crtc_state *crtc_state)
4977 {
4978         struct dm_crtc_state *dm_crtc_state = to_dm_crtc_state(crtc_state);
4979         struct amdgpu_framebuffer *afb = (struct amdgpu_framebuffer *)plane_state->fb;
4980         struct dc_scaling_info scaling_info;
4981         struct dc_plane_info plane_info;
4982         int ret;
4983         bool force_disable_dcc = false;
4984
4985         ret = fill_dc_scaling_info(plane_state, &scaling_info);
4986         if (ret)
4987                 return ret;
4988
4989         dc_plane_state->src_rect = scaling_info.src_rect;
4990         dc_plane_state->dst_rect = scaling_info.dst_rect;
4991         dc_plane_state->clip_rect = scaling_info.clip_rect;
4992         dc_plane_state->scaling_quality = scaling_info.scaling_quality;
4993
4994         force_disable_dcc = adev->asic_type == CHIP_RAVEN && adev->in_suspend;
4995         ret = fill_dc_plane_info_and_addr(adev, plane_state,
4996                                           afb->tiling_flags,
4997                                           &plane_info,
4998                                           &dc_plane_state->address,
4999                                           afb->tmz_surface,
5000                                           force_disable_dcc);
5001         if (ret)
5002                 return ret;
5003
5004         dc_plane_state->format = plane_info.format;
5005         dc_plane_state->color_space = plane_info.color_space;
5007         dc_plane_state->plane_size = plane_info.plane_size;
5008         dc_plane_state->rotation = plane_info.rotation;
5009         dc_plane_state->horizontal_mirror = plane_info.horizontal_mirror;
5010         dc_plane_state->stereo_format = plane_info.stereo_format;
5011         dc_plane_state->tiling_info = plane_info.tiling_info;
5012         dc_plane_state->visible = plane_info.visible;
5013         dc_plane_state->per_pixel_alpha = plane_info.per_pixel_alpha;
5014         dc_plane_state->global_alpha = plane_info.global_alpha;
5015         dc_plane_state->global_alpha_value = plane_info.global_alpha_value;
5016         dc_plane_state->dcc = plane_info.dcc;
5017         dc_plane_state->layer_index = plane_info.layer_index; /* always 0 for now */
5018         dc_plane_state->flip_int_enabled = true;
5019
5020         /*
5021          * Always set input transfer function, since plane state is refreshed
5022          * every time.
5023          */
5024         ret = amdgpu_dm_update_plane_color_mgmt(dm_crtc_state, dc_plane_state);
5025         if (ret)
5026                 return ret;
5027
5028         return 0;
5029 }
5030
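/*
 * Compute the stream's src (composition viewport) and dst (addressable
 * area) rectangles for the requested scaling mode. Aspect scaling keeps
 * the smaller ratio: e.g. a 1920x1080 source on a 1280x1024 timing
 * yields a centered 1280x720 destination, minus any underscan borders.
 */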
5031 static void update_stream_scaling_settings(const struct drm_display_mode *mode,
5032                                            const struct dm_connector_state *dm_state,
5033                                            struct dc_stream_state *stream)
5034 {
5035         enum amdgpu_rmx_type rmx_type;
5036
5037         struct rect src = { 0 }; /* viewport in composition space */
5038         struct rect dst = { 0 }; /* stream addressable area */
5039
5040         /* no mode. nothing to be done */
5041         if (!mode)
5042                 return;
5043
5044         /* Full screen scaling by default */
5045         src.width = mode->hdisplay;
5046         src.height = mode->vdisplay;
5047         dst.width = stream->timing.h_addressable;
5048         dst.height = stream->timing.v_addressable;
5049
5050         if (dm_state) {
5051                 rmx_type = dm_state->scaling;
5052                 if (rmx_type == RMX_ASPECT || rmx_type == RMX_OFF) {
5053                         if (src.width * dst.height <
5054                                         src.height * dst.width) {
5055                                 /* height needs less upscaling/more downscaling */
5056                                 dst.width = src.width *
5057                                                 dst.height / src.height;
5058                         } else {
5059                                 /* width needs less upscaling/more downscaling */
5060                                 dst.height = src.height *
5061                                                 dst.width / src.width;
5062                         }
5063                 } else if (rmx_type == RMX_CENTER) {
5064                         dst = src;
5065                 }
5066
5067                 dst.x = (stream->timing.h_addressable - dst.width) / 2;
5068                 dst.y = (stream->timing.v_addressable - dst.height) / 2;
5069
5070                 if (dm_state->underscan_enable) {
5071                         dst.x += dm_state->underscan_hborder / 2;
5072                         dst.y += dm_state->underscan_vborder / 2;
5073                         dst.width -= dm_state->underscan_hborder;
5074                         dst.height -= dm_state->underscan_vborder;
5075                 }
5076         }
5077
5078         stream->src = src;
5079         stream->dst = dst;
5080
5081         DRM_DEBUG_KMS("Destination Rectangle x:%d  y:%d  width:%d  height:%d\n",
5082                       dst.x, dst.y, dst.width, dst.height);
5084 }
5085
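/*
 * Derive the stream color depth from the EDID-reported capabilities,
 * capped by any user-requested max bpc and rounded down to an even
 * value.
 */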
5086 static enum dc_color_depth
5087 convert_color_depth_from_display_info(const struct drm_connector *connector,
5088                                       bool is_y420, int requested_bpc)
5089 {
5090         uint8_t bpc;
5091
5092         if (is_y420) {
5093                 bpc = 8;
5094
5095                 /* Cap display bpc based on HDMI 2.0 HF-VSDB */
5096                 if (connector->display_info.hdmi.y420_dc_modes & DRM_EDID_YCBCR420_DC_48)
5097                         bpc = 16;
5098                 else if (connector->display_info.hdmi.y420_dc_modes & DRM_EDID_YCBCR420_DC_36)
5099                         bpc = 12;
5100                 else if (connector->display_info.hdmi.y420_dc_modes & DRM_EDID_YCBCR420_DC_30)
5101                         bpc = 10;
5102         } else {
5103                 bpc = (uint8_t)connector->display_info.bpc;
5104                 /* Assume 8 bpc by default if no bpc is specified. */
5105                 bpc = bpc ? bpc : 8;
5106         }
5107
5108         if (requested_bpc > 0) {
5109                 /*
5110                  * Cap display bpc based on the user requested value.
5111                  *
5112                  * The value for state->max_bpc may not be correctly updated
5113                  * depending on when the connector gets added to the state
5114                  * or if this was called outside of atomic check, so it
5115                  * can't be used directly.
5116                  */
5117                 bpc = min_t(u8, bpc, requested_bpc);
5118
5119                 /* Round down to the nearest even number. */
5120                 bpc = bpc - (bpc & 1);
5121         }
5122
5123         switch (bpc) {
5124         case 0:
5125                 /*
5126                  * Temporary workaround: DRM doesn't parse color depth for
5127                  * EDID revisions before 1.4.
5128                  * TODO: Fix EDID parsing.
5129                  */
5130                 return COLOR_DEPTH_888;
5131         case 6:
5132                 return COLOR_DEPTH_666;
5133         case 8:
5134                 return COLOR_DEPTH_888;
5135         case 10:
5136                 return COLOR_DEPTH_101010;
5137         case 12:
5138                 return COLOR_DEPTH_121212;
5139         case 14:
5140                 return COLOR_DEPTH_141414;
5141         case 16:
5142                 return COLOR_DEPTH_161616;
5143         default:
5144                 return COLOR_DEPTH_UNDEFINED;
5145         }
5146 }
5147
5148 static enum dc_aspect_ratio
5149 get_aspect_ratio(const struct drm_display_mode *mode_in)
5150 {
5151         /* 1-1 mapping, since both enums follow the HDMI spec. */
5152         return (enum dc_aspect_ratio) mode_in->picture_aspect_ratio;
5153 }
5154
5155 static enum dc_color_space
5156 get_output_color_space(const struct dc_crtc_timing *dc_crtc_timing)
5157 {
5158         enum dc_color_space color_space = COLOR_SPACE_SRGB;
5159
5160         switch (dc_crtc_timing->pixel_encoding) {
5161         case PIXEL_ENCODING_YCBCR422:
5162         case PIXEL_ENCODING_YCBCR444:
5163         case PIXEL_ENCODING_YCBCR420:
5164         {
5165                 /*
5166                  * 27.03 MHz is the separation point between HDTV and SDTV
5167                  * according to the HDMI spec; use YCbCr709 and YCbCr601
5168                  * respectively.
5169                  */
5170                 if (dc_crtc_timing->pix_clk_100hz > 270300) {
5171                         if (dc_crtc_timing->flags.Y_ONLY)
5172                                 color_space =
5173                                         COLOR_SPACE_YCBCR709_LIMITED;
5174                         else
5175                                 color_space = COLOR_SPACE_YCBCR709;
5176                 } else {
5177                         if (dc_crtc_timing->flags.Y_ONLY)
5178                                 color_space =
5179                                         COLOR_SPACE_YCBCR601_LIMITED;
5180                         else
5181                                 color_space = COLOR_SPACE_YCBCR601;
5182                 }
5183
5184         }
5185         break;
5186         case PIXEL_ENCODING_RGB:
5187                 color_space = COLOR_SPACE_SRGB;
5188                 break;
5189
5190         default:
5191                 WARN_ON(1);
5192                 break;
5193         }
5194
5195         return color_space;
5196 }
5197
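/*
 * Walk down from the current color depth until the HDMI-adjusted pixel
 * clock fits under the sink's max TMDS clock. Deep color inflates the
 * clock; e.g. 10 bpc multiplies it by 30/24, a 25% increase.
 */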
5198 static bool adjust_colour_depth_from_display_info(
5199         struct dc_crtc_timing *timing_out,
5200         const struct drm_display_info *info)
5201 {
5202         enum dc_color_depth depth = timing_out->display_color_depth;
5203         int normalized_clk;
5204         do {
5205                 normalized_clk = timing_out->pix_clk_100hz / 10;
5206                 /* YCbCr 4:2:0 requires additional adjustment of 1/2 */
5207                 if (timing_out->pixel_encoding == PIXEL_ENCODING_YCBCR420)
5208                         normalized_clk /= 2;
5209                 /* Adjust the pixel clock per the HDMI spec based on colour depth */
5210                 switch (depth) {
5211                 case COLOR_DEPTH_888:
5212                         break;
5213                 case COLOR_DEPTH_101010:
5214                         normalized_clk = (normalized_clk * 30) / 24;
5215                         break;
5216                 case COLOR_DEPTH_121212:
5217                         normalized_clk = (normalized_clk * 36) / 24;
5218                         break;
5219                 case COLOR_DEPTH_161616:
5220                         normalized_clk = (normalized_clk * 48) / 24;
5221                         break;
5222                 default:
5223                         /* The above depths are the only ones valid for HDMI. */
5224                         return false;
5225                 }
5226                 if (normalized_clk <= info->max_tmds_clock) {
5227                         timing_out->display_color_depth = depth;
5228                         return true;
5229                 }
5230         } while (--depth > COLOR_DEPTH_666);
5231         return false;
5232 }
5233
5234 static void fill_stream_properties_from_drm_display_mode(
5235         struct dc_stream_state *stream,
5236         const struct drm_display_mode *mode_in,
5237         const struct drm_connector *connector,
5238         const struct drm_connector_state *connector_state,
5239         const struct dc_stream_state *old_stream,
5240         int requested_bpc)
5241 {
5242         struct dc_crtc_timing *timing_out = &stream->timing;
5243         const struct drm_display_info *info = &connector->display_info;
5244         struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
5245         struct hdmi_vendor_infoframe hv_frame;
5246         struct hdmi_avi_infoframe avi_frame;
5247
5248         memset(&hv_frame, 0, sizeof(hv_frame));
5249         memset(&avi_frame, 0, sizeof(avi_frame));
5250
5251         timing_out->h_border_left = 0;
5252         timing_out->h_border_right = 0;
5253         timing_out->v_border_top = 0;
5254         timing_out->v_border_bottom = 0;
5255         /* TODO: un-hardcode */
5256         if (drm_mode_is_420_only(info, mode_in)
5257                         && stream->signal == SIGNAL_TYPE_HDMI_TYPE_A)
5258                 timing_out->pixel_encoding = PIXEL_ENCODING_YCBCR420;
5259         else if (drm_mode_is_420_also(info, mode_in)
5260                         && aconnector->force_yuv420_output)
5261                 timing_out->pixel_encoding = PIXEL_ENCODING_YCBCR420;
5262         else if ((connector->display_info.color_formats & DRM_COLOR_FORMAT_YCRCB444)
5263                         && stream->signal == SIGNAL_TYPE_HDMI_TYPE_A)
5264                 timing_out->pixel_encoding = PIXEL_ENCODING_YCBCR444;
5265         else
5266                 timing_out->pixel_encoding = PIXEL_ENCODING_RGB;
5267
5268         timing_out->timing_3d_format = TIMING_3D_FORMAT_NONE;
5269         timing_out->display_color_depth = convert_color_depth_from_display_info(
5270                 connector,
5271                 (timing_out->pixel_encoding == PIXEL_ENCODING_YCBCR420),
5272                 requested_bpc);
5273         timing_out->scan_type = SCANNING_TYPE_NODATA;
5274         timing_out->hdmi_vic = 0;
5275
5276         if (old_stream) {
5277                 timing_out->vic = old_stream->timing.vic;
5278                 timing_out->flags.HSYNC_POSITIVE_POLARITY = old_stream->timing.flags.HSYNC_POSITIVE_POLARITY;
5279                 timing_out->flags.VSYNC_POSITIVE_POLARITY = old_stream->timing.flags.VSYNC_POSITIVE_POLARITY;
5280         } else {
5281                 timing_out->vic = drm_match_cea_mode(mode_in);
5282                 if (mode_in->flags & DRM_MODE_FLAG_PHSYNC)
5283                         timing_out->flags.HSYNC_POSITIVE_POLARITY = 1;
5284                 if (mode_in->flags & DRM_MODE_FLAG_PVSYNC)
5285                         timing_out->flags.VSYNC_POSITIVE_POLARITY = 1;
5286         }
5287
5288         if (stream->signal == SIGNAL_TYPE_HDMI_TYPE_A) {
5289                 drm_hdmi_avi_infoframe_from_display_mode(&avi_frame, (struct drm_connector *)connector, mode_in);
5290                 timing_out->vic = avi_frame.video_code;
5291                 drm_hdmi_vendor_infoframe_from_display_mode(&hv_frame, (struct drm_connector *)connector, mode_in);
5292                 timing_out->hdmi_vic = hv_frame.vic;
5293         }
5294
5295         if (is_freesync_video_mode(mode_in, aconnector)) {
5296                 timing_out->h_addressable = mode_in->hdisplay;
5297                 timing_out->h_total = mode_in->htotal;
5298                 timing_out->h_sync_width = mode_in->hsync_end - mode_in->hsync_start;
5299                 timing_out->h_front_porch = mode_in->hsync_start - mode_in->hdisplay;
5300                 timing_out->v_total = mode_in->vtotal;
5301                 timing_out->v_addressable = mode_in->vdisplay;
5302                 timing_out->v_front_porch = mode_in->vsync_start - mode_in->vdisplay;
5303                 timing_out->v_sync_width = mode_in->vsync_end - mode_in->vsync_start;
5304                 timing_out->pix_clk_100hz = mode_in->clock * 10;
5305         } else {
5306                 timing_out->h_addressable = mode_in->crtc_hdisplay;
5307                 timing_out->h_total = mode_in->crtc_htotal;
5308                 timing_out->h_sync_width = mode_in->crtc_hsync_end - mode_in->crtc_hsync_start;
5309                 timing_out->h_front_porch = mode_in->crtc_hsync_start - mode_in->crtc_hdisplay;
5310                 timing_out->v_total = mode_in->crtc_vtotal;
5311                 timing_out->v_addressable = mode_in->crtc_vdisplay;
5312                 timing_out->v_front_porch = mode_in->crtc_vsync_start - mode_in->crtc_vdisplay;
5313                 timing_out->v_sync_width = mode_in->crtc_vsync_end - mode_in->crtc_vsync_start;
5314                 timing_out->pix_clk_100hz = mode_in->crtc_clock * 10;
5315         }
5316
5317         timing_out->aspect_ratio = get_aspect_ratio(mode_in);
5318
5319         stream->output_color_space = get_output_color_space(timing_out);
5320
5321         stream->out_transfer_func->type = TF_TYPE_PREDEFINED;
5322         stream->out_transfer_func->tf = TRANSFER_FUNCTION_SRGB;
5323         if (stream->signal == SIGNAL_TYPE_HDMI_TYPE_A) {
5324                 if (!adjust_colour_depth_from_display_info(timing_out, info) &&
5325                     drm_mode_is_420_also(info, mode_in) &&
5326                     timing_out->pixel_encoding != PIXEL_ENCODING_YCBCR420) {
5327                         timing_out->pixel_encoding = PIXEL_ENCODING_YCBCR420;
5328                         adjust_colour_depth_from_display_info(timing_out, info);
5329                 }
5330         }
5331 }
5332
5333 static void fill_audio_info(struct audio_info *audio_info,
5334                             const struct drm_connector *drm_connector,
5335                             const struct dc_sink *dc_sink)
5336 {
5337         int i = 0;
5338         int cea_revision = 0;
5339         const struct dc_edid_caps *edid_caps = &dc_sink->edid_caps;
5340
5341         audio_info->manufacture_id = edid_caps->manufacturer_id;
5342         audio_info->product_id = edid_caps->product_id;
5343
5344         cea_revision = drm_connector->display_info.cea_rev;
5345
5346         strscpy(audio_info->display_name,
5347                 edid_caps->display_name,
5348                 AUDIO_INFO_DISPLAY_NAME_SIZE_IN_CHARS);
5349
5350         if (cea_revision >= 3) {
5351                 audio_info->mode_count = edid_caps->audio_mode_count;
5352
5353                 for (i = 0; i < audio_info->mode_count; ++i) {
5354                         audio_info->modes[i].format_code =
5355                                         (enum audio_format_code)
5356                                         (edid_caps->audio_modes[i].format_code);
5357                         audio_info->modes[i].channel_count =
5358                                         edid_caps->audio_modes[i].channel_count;
5359                         audio_info->modes[i].sample_rates.all =
5360                                         edid_caps->audio_modes[i].sample_rate;
5361                         audio_info->modes[i].sample_size =
5362                                         edid_caps->audio_modes[i].sample_size;
5363                 }
5364         }
5365
5366         audio_info->flags.all = edid_caps->speaker_flags;
5367
5368         /* TODO: We only check progressive mode; check interlaced mode too */
5369         if (drm_connector->latency_present[0]) {
5370                 audio_info->video_latency = drm_connector->video_latency[0];
5371                 audio_info->audio_latency = drm_connector->audio_latency[0];
5372         }
5373
5374         /* TODO: For DP, video and audio latency should be calculated from DPCD caps */
5376 }
5377
5378 static void
5379 copy_crtc_timing_for_drm_display_mode(const struct drm_display_mode *src_mode,
5380                                       struct drm_display_mode *dst_mode)
5381 {
5382         dst_mode->crtc_hdisplay = src_mode->crtc_hdisplay;
5383         dst_mode->crtc_vdisplay = src_mode->crtc_vdisplay;
5384         dst_mode->crtc_clock = src_mode->crtc_clock;
5385         dst_mode->crtc_hblank_start = src_mode->crtc_hblank_start;
5386         dst_mode->crtc_hblank_end = src_mode->crtc_hblank_end;
5387         dst_mode->crtc_hsync_start =  src_mode->crtc_hsync_start;
5388         dst_mode->crtc_hsync_end = src_mode->crtc_hsync_end;
5389         dst_mode->crtc_htotal = src_mode->crtc_htotal;
5390         dst_mode->crtc_hskew = src_mode->crtc_hskew;
5391         dst_mode->crtc_vblank_start = src_mode->crtc_vblank_start;
5392         dst_mode->crtc_vblank_end = src_mode->crtc_vblank_end;
5393         dst_mode->crtc_vsync_start = src_mode->crtc_vsync_start;
5394         dst_mode->crtc_vsync_end = src_mode->crtc_vsync_end;
5395         dst_mode->crtc_vtotal = src_mode->crtc_vtotal;
5396 }
5397
5398 static void
5399 decide_crtc_timing_for_drm_display_mode(struct drm_display_mode *drm_mode,
5400                                         const struct drm_display_mode *native_mode,
5401                                         bool scale_enabled)
5402 {
5403         if (scale_enabled) {
5404                 copy_crtc_timing_for_drm_display_mode(native_mode, drm_mode);
5405         } else if (native_mode->clock == drm_mode->clock &&
5406                         native_mode->htotal == drm_mode->htotal &&
5407                         native_mode->vtotal == drm_mode->vtotal) {
5408                 copy_crtc_timing_for_drm_display_mode(native_mode, drm_mode);
5409         } else {
5410                 /* no scaling and no amdgpu-inserted mode: nothing to patch */
5411         }
5412 }
5413
5414 static struct dc_sink *
5415 create_fake_sink(struct amdgpu_dm_connector *aconnector)
5416 {
5417         struct dc_sink_init_data sink_init_data = { 0 };
5418         struct dc_sink *sink = NULL;

5419         sink_init_data.link = aconnector->dc_link;
5420         sink_init_data.sink_signal = aconnector->dc_link->connector_signal;
5421
5422         sink = dc_sink_create(&sink_init_data);
5423         if (!sink) {
5424                 DRM_ERROR("Failed to create sink!\n");
5425                 return NULL;
5426         }
5427         sink->sink_signal = SIGNAL_TYPE_VIRTUAL;
5428
5429         return sink;
5430 }
5431
5432 static void set_multisync_trigger_params(
5433                 struct dc_stream_state *stream)
5434 {
5435         struct dc_stream_state *master = NULL;
5436
5437         if (stream->triggered_crtc_reset.enabled) {
5438                 master = stream->triggered_crtc_reset.event_source;
5439                 stream->triggered_crtc_reset.event =
5440                         master->timing.flags.VSYNC_POSITIVE_POLARITY ?
5441                         CRTC_EVENT_VSYNC_RISING : CRTC_EVENT_VSYNC_FALLING;
5442                 stream->triggered_crtc_reset.delay = TRIGGER_DELAY_NEXT_PIXEL;
5443         }
5444 }
5445
5446 static void set_master_stream(struct dc_stream_state *stream_set[],
5447                               int stream_count)
5448 {
5449         int j, highest_rfr = 0, master_stream = 0;
5450
5451         for (j = 0; j < stream_count; j++) {
5452                 if (stream_set[j] && stream_set[j]->triggered_crtc_reset.enabled) {
5453                         int refresh_rate = 0;
5454
5455                         refresh_rate = (stream_set[j]->timing.pix_clk_100hz*100)/
5456                                 (stream_set[j]->timing.h_total*stream_set[j]->timing.v_total);
5457                         if (refresh_rate > highest_rfr) {
5458                                 highest_rfr = refresh_rate;
5459                                 master_stream = j;
5460                         }
5461                 }
5462         }
5463         for (j = 0; j < stream_count; j++) {
5464                 if (stream_set[j])
5465                         stream_set[j]->triggered_crtc_reset.event_source = stream_set[master_stream];
5466         }
5467 }
5468
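/*
 * Illustrative sketch, not used by the driver: the refresh rate that
 * set_master_stream() compares above is derived from the dc timing as
 * pixel clock / (htotal * vtotal). Field names follow struct
 * dc_crtc_timing exactly as used in this file; the helper name is made up.
 */
static inline int example_refresh_rate_hz(const struct dc_crtc_timing *t)
{
        /* pix_clk_100hz is in units of 100 Hz, hence the multiply by 100 */
        return (t->pix_clk_100hz * 100) / (t->h_total * t->v_total);
}
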
5469 static void dm_enable_per_frame_crtc_master_sync(struct dc_state *context)
5470 {
5471         int i = 0;
5472         struct dc_stream_state *stream;
5473
5474         if (context->stream_count < 2)
5475                 return;
5476         for (i = 0; i < context->stream_count; i++) {
5477                 if (!context->streams[i])
5478                         continue;
5479                 /*
5480                  * TODO: add a function to read AMD VSDB bits and set
5481                  * crtc_sync_master.multi_sync_enabled flag
5482                  * For now it's set to false
5483                  */
5484         }
5485
5486         set_master_stream(context->streams, context->stream_count);
5487
5488         for (i = 0; i < context->stream_count; i++) {
5489                 stream = context->streams[i];
5490
5491                 if (!stream)
5492                         continue;
5493
5494                 set_multisync_trigger_params(stream);
5495         }
5496 }
5497
5498 static struct drm_display_mode *
5499 get_highest_refresh_rate_mode(struct amdgpu_dm_connector *aconnector,
5500                           bool use_probed_modes)
5501 {
5502         struct drm_display_mode *m, *m_pref = NULL;
5503         u16 current_refresh, highest_refresh;
5504         struct list_head *list_head = use_probed_modes ?
5505                                                     &aconnector->base.probed_modes :
5506                                                     &aconnector->base.modes;
5507
5508         if (aconnector->freesync_vid_base.clock != 0)
5509                 return &aconnector->freesync_vid_base;
5510
5511         /* Find the preferred mode */
5512         list_for_each_entry(m, list_head, head) {
5513                 if (m->type & DRM_MODE_TYPE_PREFERRED) {
5514                         m_pref = m;
5515                         break;
5516                 }
5517         }
5518
5519         if (!m_pref) {
5520                 /* Probably an EDID with no preferred mode: fall back to the first entry */
5521                 m_pref = list_first_entry_or_null(
5522                         &aconnector->base.modes, struct drm_display_mode, head);
5523                 if (!m_pref) {
5524                         DRM_DEBUG_DRIVER("No preferred mode found in EDID\n");
5525                         return NULL;
5526                 }
5527         }
5528
5529         highest_refresh = drm_mode_vrefresh(m_pref);
5530
5531         /*
5532          * Find the mode with the highest refresh rate at the same resolution.
5533          * For some monitors the preferred mode is not the one with the
5534          * highest supported refresh rate.
5535          */
5536         list_for_each_entry(m, list_head, head) {
5537                 current_refresh  = drm_mode_vrefresh(m);
5538
5539                 if (m->hdisplay == m_pref->hdisplay &&
5540                     m->vdisplay == m_pref->vdisplay &&
5541                     highest_refresh < current_refresh) {
5542                         highest_refresh = current_refresh;
5543                         m_pref = m;
5544                 }
5545         }
5546
5547         aconnector->freesync_vid_base = *m_pref;
5548         return m_pref;
5549 }
5550
5551 static bool is_freesync_video_mode(const struct drm_display_mode *mode,
5552                                    struct amdgpu_dm_connector *aconnector)
5553 {
5554         struct drm_display_mode *high_mode;
5555         int timing_diff;
5556
5557         high_mode = get_highest_refresh_rate_mode(aconnector, false);
5558         if (!high_mode || !mode)
5559                 return false;
5560
5561         timing_diff = high_mode->vtotal - mode->vtotal;
5562
5563         if (high_mode->clock == 0 || high_mode->clock != mode->clock ||
5564             high_mode->hdisplay != mode->hdisplay ||
5565             high_mode->vdisplay != mode->vdisplay ||
5566             high_mode->hsync_start != mode->hsync_start ||
5567             high_mode->hsync_end != mode->hsync_end ||
5568             high_mode->htotal != mode->htotal ||
5569             high_mode->hskew != mode->hskew ||
5570             high_mode->vscan != mode->vscan ||
5571             high_mode->vsync_start - mode->vsync_start != timing_diff ||
5572             high_mode->vsync_end - mode->vsync_end != timing_diff)
5573                 return false;
5574         else
5575                 return true;
5576 }
5577
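/*
 * Sketch of the arithmetic behind the check above, under the assumption
 * (implied by is_freesync_video_mode()) that freesync video modes keep the
 * pixel clock and all horizontal timing fixed and vary only the vertical
 * blank. The vtotal that yields a target refresh rate would then be
 * clock / (htotal * refresh). Helper name is illustrative only.
 */
static inline u32 example_freesync_vtotal(u32 clock_khz, u32 htotal, u32 refresh_hz)
{
        return (clock_khz * 1000) / (htotal * refresh_hz);
}
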
5578 static struct dc_stream_state *
5579 create_stream_for_sink(struct amdgpu_dm_connector *aconnector,
5580                        const struct drm_display_mode *drm_mode,
5581                        const struct dm_connector_state *dm_state,
5582                        const struct dc_stream_state *old_stream,
5583                        int requested_bpc)
5584 {
5585         struct drm_display_mode *preferred_mode = NULL;
5586         struct drm_connector *drm_connector;
5587         const struct drm_connector_state *con_state =
5588                 dm_state ? &dm_state->base : NULL;
5589         struct dc_stream_state *stream = NULL;
5590         struct drm_display_mode mode = *drm_mode;
5591         struct drm_display_mode saved_mode;
5592         struct drm_display_mode *freesync_mode = NULL;
5593         bool native_mode_found = false;
5594         bool recalculate_timing = dm_state ? (dm_state->scaling != RMX_OFF) : false;
5595         int mode_refresh;
5596         int preferred_refresh = 0;
5597 #if defined(CONFIG_DRM_AMD_DC_DCN)
5598         struct dsc_dec_dpcd_caps dsc_caps;
5599         uint32_t link_bandwidth_kbps;
5600 #endif
5601         struct dc_sink *sink = NULL;
5602
5603         memset(&saved_mode, 0, sizeof(saved_mode));
5604
5605         if (aconnector == NULL) {
5606                 DRM_ERROR("aconnector is NULL!\n");
5607                 return stream;
5608         }
5609
5610         drm_connector = &aconnector->base;
5611
5612         if (!aconnector->dc_sink) {
5613                 sink = create_fake_sink(aconnector);
5614                 if (!sink)
5615                         return stream;
5616         } else {
5617                 sink = aconnector->dc_sink;
5618                 dc_sink_retain(sink);
5619         }
5620
5621         stream = dc_create_stream_for_sink(sink);
5622
5623         if (stream == NULL) {
5624                 DRM_ERROR("Failed to create stream for sink!\n");
5625                 goto finish;
5626         }
5627
5628         stream->dm_stream_context = aconnector;
5629
5630         stream->timing.flags.LTE_340MCSC_SCRAMBLE =
5631                 drm_connector->display_info.hdmi.scdc.scrambling.low_rates;
5632
5633         list_for_each_entry(preferred_mode, &aconnector->base.modes, head) {
5634                 /* Search for preferred mode */
5635                 if (preferred_mode->type & DRM_MODE_TYPE_PREFERRED) {
5636                         native_mode_found = true;
5637                         break;
5638                 }
5639         }
5640         if (!native_mode_found)
5641                 preferred_mode = list_first_entry_or_null(
5642                                 &aconnector->base.modes,
5643                                 struct drm_display_mode,
5644                                 head);
5645
5646         mode_refresh = drm_mode_vrefresh(&mode);
5647
5648         if (preferred_mode == NULL) {
5649                 /*
5650                  * This may not be an error: one use case is hotplug with no
5651                  * usermode call to reset and set the mode. In that case we set
5652                  * the mode ourselves to restore the previous mode, and the mode
5653                  * list may not have been filled in yet.
5654                  */
5655                 DRM_DEBUG_DRIVER("No preferred mode found\n");
5656         } else {
5657                 recalculate_timing |= amdgpu_freesync_vid_mode &&
5658                                  is_freesync_video_mode(&mode, aconnector);
5659                 if (recalculate_timing) {
5660                         freesync_mode = get_highest_refresh_rate_mode(aconnector, false);
5661                         saved_mode = mode;
5662                         mode = *freesync_mode;
5663                 } else {
5664                         decide_crtc_timing_for_drm_display_mode(
5665                                 &mode, preferred_mode,
5666                                 dm_state ? (dm_state->scaling != RMX_OFF) : false);
5667                 }
5668
5669                 preferred_refresh = drm_mode_vrefresh(preferred_mode);
5670         }
5671
5672         if (recalculate_timing)
5673                 drm_mode_set_crtcinfo(&saved_mode, 0);
5674         else if (!dm_state)
5675                 drm_mode_set_crtcinfo(&mode, 0);
5676
5677         /*
5678          * If scaling is enabled and the refresh rate didn't change,
5679          * copy the VIC and polarities from the old timings.
5680          */
5681         if (!recalculate_timing || mode_refresh != preferred_refresh)
5682                 fill_stream_properties_from_drm_display_mode(
5683                         stream, &mode, &aconnector->base, con_state, NULL,
5684                         requested_bpc);
5685         else
5686                 fill_stream_properties_from_drm_display_mode(
5687                         stream, &mode, &aconnector->base, con_state, old_stream,
5688                         requested_bpc);
5689
5690         stream->timing.flags.DSC = 0;
5691
5692         if (aconnector->dc_link && sink->sink_signal == SIGNAL_TYPE_DISPLAY_PORT) {
5693 #if defined(CONFIG_DRM_AMD_DC_DCN)
5694                 dc_dsc_parse_dsc_dpcd(aconnector->dc_link->ctx->dc,
5695                                       aconnector->dc_link->dpcd_caps.dsc_caps.dsc_basic_caps.raw,
5696                                       aconnector->dc_link->dpcd_caps.dsc_caps.dsc_branch_decoder_caps.raw,
5697                                       &dsc_caps);
5698                 link_bandwidth_kbps = dc_link_bandwidth_kbps(aconnector->dc_link,
5699                                                              dc_link_get_link_cap(aconnector->dc_link));
5700
5701                 if (aconnector->dsc_settings.dsc_force_enable != DSC_CLK_FORCE_DISABLE && dsc_caps.is_dsc_supported) {
5702                         /* Set DSC policy according to dsc_clock_en */
5703                         dc_dsc_policy_set_enable_dsc_when_not_needed(
5704                                 aconnector->dsc_settings.dsc_force_enable == DSC_CLK_FORCE_ENABLE);
5705
5706                         if (dc_dsc_compute_config(aconnector->dc_link->ctx->dc->res_pool->dscs[0],
5707                                                   &dsc_caps,
5708                                                   aconnector->dc_link->ctx->dc->debug.dsc_min_slice_height_override,
5709                                                   0,
5710                                                   link_bandwidth_kbps,
5711                                                   &stream->timing,
5712                                                   &stream->timing.dsc_cfg))
5713                                 stream->timing.flags.DSC = 1;
5714                         /* Overwrite the stream flag if DSC is enabled through debugfs */
5715                         if (aconnector->dsc_settings.dsc_force_enable == DSC_CLK_FORCE_ENABLE)
5716                                 stream->timing.flags.DSC = 1;
5717
5718                         if (stream->timing.flags.DSC && aconnector->dsc_settings.dsc_num_slices_h)
5719                                 stream->timing.dsc_cfg.num_slices_h = aconnector->dsc_settings.dsc_num_slices_h;
5720
5721                         if (stream->timing.flags.DSC && aconnector->dsc_settings.dsc_num_slices_v)
5722                                 stream->timing.dsc_cfg.num_slices_v = aconnector->dsc_settings.dsc_num_slices_v;
5723
5724                         if (stream->timing.flags.DSC && aconnector->dsc_settings.dsc_bits_per_pixel)
5725                                 stream->timing.dsc_cfg.bits_per_pixel = aconnector->dsc_settings.dsc_bits_per_pixel;
5726                 }
5727 #endif
5728         }
5729
5730         update_stream_scaling_settings(&mode, dm_state, stream);
5731
5732         fill_audio_info(
5733                 &stream->audio_info,
5734                 drm_connector,
5735                 sink);
5736
5737         update_stream_signal(stream, sink);
5738
5739         if (stream->signal == SIGNAL_TYPE_HDMI_TYPE_A)
5740                 mod_build_hf_vsif_infopacket(stream, &stream->vsp_infopacket);
5741
5742         if (stream->link->psr_settings.psr_feature_enabled) {
5743                 /*
5744                  * Decide whether the stream supports VSC SDP colorimetry
5745                  * before building the VSC infopacket.
5746                  */
5747                 stream->use_vsc_sdp_for_colorimetry = false;
5748                 if (aconnector->dc_sink->sink_signal == SIGNAL_TYPE_DISPLAY_PORT_MST) {
5749                         stream->use_vsc_sdp_for_colorimetry =
5750                                 aconnector->dc_sink->is_vsc_sdp_colorimetry_supported;
5751                 } else {
5752                         if (stream->link->dpcd_caps.dprx_feature.bits.VSC_SDP_COLORIMETRY_SUPPORTED)
5753                                 stream->use_vsc_sdp_for_colorimetry = true;
5754                 }
5755                 mod_build_vsc_infopacket(stream, &stream->vsc_infopacket);
5756         }
5757 finish:
5758         dc_sink_release(sink);
5759
5760         return stream;
5761 }
5762
5763 static void amdgpu_dm_crtc_destroy(struct drm_crtc *crtc)
5764 {
5765         drm_crtc_cleanup(crtc);
5766         kfree(crtc);
5767 }
5768
5769 static void dm_crtc_destroy_state(struct drm_crtc *crtc,
5770                                   struct drm_crtc_state *state)
5771 {
5772         struct dm_crtc_state *cur = to_dm_crtc_state(state);
5773
5774         /* TODO: destroy dc_stream objects once the stream object is flattened */
5775         if (cur->stream)
5776                 dc_stream_release(cur->stream);
5777
5779         __drm_atomic_helper_crtc_destroy_state(state);
5780
5782         kfree(state);
5783 }
5784
5785 static void dm_crtc_reset_state(struct drm_crtc *crtc)
5786 {
5787         struct dm_crtc_state *state;
5788
5789         if (crtc->state)
5790                 dm_crtc_destroy_state(crtc, crtc->state);
5791
5792         state = kzalloc(sizeof(*state), GFP_KERNEL);
5793         if (WARN_ON(!state))
5794                 return;
5795
5796         __drm_atomic_helper_crtc_reset(crtc, &state->base);
5797 }
5798
5799 static struct drm_crtc_state *
5800 dm_crtc_duplicate_state(struct drm_crtc *crtc)
5801 {
5802         struct dm_crtc_state *state, *cur;
5803
5804         if (WARN_ON(!crtc->state))
5805                 return NULL;
5806
5807         cur = to_dm_crtc_state(crtc->state);
5808
5809         state = kzalloc(sizeof(*state), GFP_KERNEL);
5810         if (!state)
5811                 return NULL;
5812
5813         __drm_atomic_helper_crtc_duplicate_state(crtc, &state->base);
5814
5815         if (cur->stream) {
5816                 state->stream = cur->stream;
5817                 dc_stream_retain(state->stream);
5818         }
5819
5820         state->active_planes = cur->active_planes;
5821         state->vrr_infopacket = cur->vrr_infopacket;
5822         state->abm_level = cur->abm_level;
5823         state->vrr_supported = cur->vrr_supported;
5824         state->freesync_config = cur->freesync_config;
5825         state->cm_has_degamma = cur->cm_has_degamma;
5826         state->cm_is_degamma_srgb = cur->cm_is_degamma_srgb;
5827         /* TODO: duplicate dc_stream after the stream object is flattened */
5828
5829         return &state->base;
5830 }
5831
5832 #ifdef CONFIG_DRM_AMD_SECURE_DISPLAY
5833 static int amdgpu_dm_crtc_late_register(struct drm_crtc *crtc)
5834 {
5835         crtc_debugfs_init(crtc);
5836
5837         return 0;
5838 }
5839 #endif
5840
5841 static inline int dm_set_vupdate_irq(struct drm_crtc *crtc, bool enable)
5842 {
5843         enum dc_irq_source irq_source;
5844         struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
5845         struct amdgpu_device *adev = drm_to_adev(crtc->dev);
5846         int rc;
5847
5848         irq_source = IRQ_TYPE_VUPDATE + acrtc->otg_inst;
5849
5850         rc = dc_interrupt_set(adev->dm.dc, irq_source, enable) ? 0 : -EBUSY;
5851
5852         DRM_DEBUG_VBL("crtc %d - vupdate irq %sabling: r=%d\n",
5853                       acrtc->crtc_id, enable ? "en" : "dis", rc);
5854         return rc;
5855 }
5856
5857 static inline int dm_set_vblank(struct drm_crtc *crtc, bool enable)
5858 {
5859         enum dc_irq_source irq_source;
5860         struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
5861         struct amdgpu_device *adev = drm_to_adev(crtc->dev);
5862         struct dm_crtc_state *acrtc_state = to_dm_crtc_state(crtc->state);
5863 #if defined(CONFIG_DRM_AMD_DC_DCN)
5864         struct amdgpu_display_manager *dm = &adev->dm;
5865         unsigned long flags;
5866 #endif
5867         int rc = 0;
5868
5869         if (enable) {
5870                 /* vblank irq on -> Only need vupdate irq in vrr mode */
5871                 if (amdgpu_dm_vrr_active(acrtc_state))
5872                         rc = dm_set_vupdate_irq(crtc, true);
5873         } else {
5874                 /* vblank irq off -> vupdate irq off */
5875                 rc = dm_set_vupdate_irq(crtc, false);
5876         }
5877
5878         if (rc)
5879                 return rc;
5880
5881         irq_source = IRQ_TYPE_VBLANK + acrtc->otg_inst;
5882
5883         if (!dc_interrupt_set(adev->dm.dc, irq_source, enable))
5884                 return -EBUSY;
5885
5886         if (amdgpu_in_reset(adev))
5887                 return 0;
5888
5889 #if defined(CONFIG_DRM_AMD_DC_DCN)
5890         spin_lock_irqsave(&dm->vblank_lock, flags);
5891         dm->vblank_workqueue->dm = dm;
5892         dm->vblank_workqueue->otg_inst = acrtc->otg_inst;
5893         dm->vblank_workqueue->enable = enable;
5894         spin_unlock_irqrestore(&dm->vblank_lock, flags);
5895         schedule_work(&dm->vblank_workqueue->mall_work);
5896 #endif
5897
5898         return 0;
5899 }
5900
5901 static int dm_enable_vblank(struct drm_crtc *crtc)
5902 {
5903         return dm_set_vblank(crtc, true);
5904 }
5905
5906 static void dm_disable_vblank(struct drm_crtc *crtc)
5907 {
5908         dm_set_vblank(crtc, false);
5909 }
5910
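/*
 * Usage note: these two hooks are not called directly. DRM core invokes
 * them through the reference-counted drm_crtc_vblank_get()/_put() pair,
 * roughly like the (hypothetical) caller below:
 *
 *      if (drm_crtc_vblank_get(crtc) == 0) {
 *              ... consume vblank events ...
 *              drm_crtc_vblank_put(crtc);
 *      }
 */
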
5911 /* Only the options currently available to the driver are implemented */
5912 static const struct drm_crtc_funcs amdgpu_dm_crtc_funcs = {
5913         .reset = dm_crtc_reset_state,
5914         .destroy = amdgpu_dm_crtc_destroy,
5915         .set_config = drm_atomic_helper_set_config,
5916         .page_flip = drm_atomic_helper_page_flip,
5917         .atomic_duplicate_state = dm_crtc_duplicate_state,
5918         .atomic_destroy_state = dm_crtc_destroy_state,
5919         .set_crc_source = amdgpu_dm_crtc_set_crc_source,
5920         .verify_crc_source = amdgpu_dm_crtc_verify_crc_source,
5921         .get_crc_sources = amdgpu_dm_crtc_get_crc_sources,
5922         .get_vblank_counter = amdgpu_get_vblank_counter_kms,
5923         .enable_vblank = dm_enable_vblank,
5924         .disable_vblank = dm_disable_vblank,
5925         .get_vblank_timestamp = drm_crtc_vblank_helper_get_vblank_timestamp,
5926 #if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
5927         .late_register = amdgpu_dm_crtc_late_register,
5928 #endif
5929 };
5930
5931 static enum drm_connector_status
5932 amdgpu_dm_connector_detect(struct drm_connector *connector, bool force)
5933 {
5934         bool connected;
5935         struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
5936
5937         /*
5938          * Notes:
5939          * 1. This interface is NOT called in context of HPD irq.
5940          * 2. This interface *is called* in the context of a user-mode ioctl,
5941          * which makes it a bad place for *any* MST-related activity.
5942          */
5943
5944         if (aconnector->base.force == DRM_FORCE_UNSPECIFIED &&
5945             !aconnector->fake_enable)
5946                 connected = (aconnector->dc_sink != NULL);
5947         else
5948                 connected = (aconnector->base.force == DRM_FORCE_ON);
5949
5950         update_subconnector_property(aconnector);
5951
5952         return (connected ? connector_status_connected :
5953                         connector_status_disconnected);
5954 }
5955
5956 int amdgpu_dm_connector_atomic_set_property(struct drm_connector *connector,
5957                                             struct drm_connector_state *connector_state,
5958                                             struct drm_property *property,
5959                                             uint64_t val)
5960 {
5961         struct drm_device *dev = connector->dev;
5962         struct amdgpu_device *adev = drm_to_adev(dev);
5963         struct dm_connector_state *dm_old_state =
5964                 to_dm_connector_state(connector->state);
5965         struct dm_connector_state *dm_new_state =
5966                 to_dm_connector_state(connector_state);
5967
5968         int ret = -EINVAL;
5969
5970         if (property == dev->mode_config.scaling_mode_property) {
5971                 enum amdgpu_rmx_type rmx_type;
5972
5973                 switch (val) {
5974                 case DRM_MODE_SCALE_CENTER:
5975                         rmx_type = RMX_CENTER;
5976                         break;
5977                 case DRM_MODE_SCALE_ASPECT:
5978                         rmx_type = RMX_ASPECT;
5979                         break;
5980                 case DRM_MODE_SCALE_FULLSCREEN:
5981                         rmx_type = RMX_FULL;
5982                         break;
5983                 case DRM_MODE_SCALE_NONE:
5984                 default:
5985                         rmx_type = RMX_OFF;
5986                         break;
5987                 }
5988
5989                 if (dm_old_state->scaling == rmx_type)
5990                         return 0;
5991
5992                 dm_new_state->scaling = rmx_type;
5993                 ret = 0;
5994         } else if (property == adev->mode_info.underscan_hborder_property) {
5995                 dm_new_state->underscan_hborder = val;
5996                 ret = 0;
5997         } else if (property == adev->mode_info.underscan_vborder_property) {
5998                 dm_new_state->underscan_vborder = val;
5999                 ret = 0;
6000         } else if (property == adev->mode_info.underscan_property) {
6001                 dm_new_state->underscan_enable = val;
6002                 ret = 0;
6003         } else if (property == adev->mode_info.abm_level_property) {
6004                 dm_new_state->abm_level = val;
6005                 ret = 0;
6006         }
6007
6008         return ret;
6009 }
6010
6011 int amdgpu_dm_connector_atomic_get_property(struct drm_connector *connector,
6012                                             const struct drm_connector_state *state,
6013                                             struct drm_property *property,
6014                                             uint64_t *val)
6015 {
6016         struct drm_device *dev = connector->dev;
6017         struct amdgpu_device *adev = drm_to_adev(dev);
6018         struct dm_connector_state *dm_state =
6019                 to_dm_connector_state(state);
6020         int ret = -EINVAL;
6021
6022         if (property == dev->mode_config.scaling_mode_property) {
6023                 switch (dm_state->scaling) {
6024                 case RMX_CENTER:
6025                         *val = DRM_MODE_SCALE_CENTER;
6026                         break;
6027                 case RMX_ASPECT:
6028                         *val = DRM_MODE_SCALE_ASPECT;
6029                         break;
6030                 case RMX_FULL:
6031                         *val = DRM_MODE_SCALE_FULLSCREEN;
6032                         break;
6033                 case RMX_OFF:
6034                 default:
6035                         *val = DRM_MODE_SCALE_NONE;
6036                         break;
6037                 }
6038                 ret = 0;
6039         } else if (property == adev->mode_info.underscan_hborder_property) {
6040                 *val = dm_state->underscan_hborder;
6041                 ret = 0;
6042         } else if (property == adev->mode_info.underscan_vborder_property) {
6043                 *val = dm_state->underscan_vborder;
6044                 ret = 0;
6045         } else if (property == adev->mode_info.underscan_property) {
6046                 *val = dm_state->underscan_enable;
6047                 ret = 0;
6048         } else if (property == adev->mode_info.abm_level_property) {
6049                 *val = dm_state->abm_level;
6050                 ret = 0;
6051         }
6052
6053         return ret;
6054 }
6055
6056 static void amdgpu_dm_connector_unregister(struct drm_connector *connector)
6057 {
6058         struct amdgpu_dm_connector *amdgpu_dm_connector = to_amdgpu_dm_connector(connector);
6059
6060         drm_dp_aux_unregister(&amdgpu_dm_connector->dm_dp_aux.aux);
6061 }
6062
6063 static void amdgpu_dm_connector_destroy(struct drm_connector *connector)
6064 {
6065         struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
6066         const struct dc_link *link = aconnector->dc_link;
6067         struct amdgpu_device *adev = drm_to_adev(connector->dev);
6068         struct amdgpu_display_manager *dm = &adev->dm;
6069
6070         /*
6071          * Only call this if mst_mgr was initialized earlier, since that is
6072          * not done for all connector types.
6073          */
6074         if (aconnector->mst_mgr.dev)
6075                 drm_dp_mst_topology_mgr_destroy(&aconnector->mst_mgr);
6076
6077 #if defined(CONFIG_BACKLIGHT_CLASS_DEVICE) ||\
6078         defined(CONFIG_BACKLIGHT_CLASS_DEVICE_MODULE)
6079
6080         if ((link->connector_signal & (SIGNAL_TYPE_EDP | SIGNAL_TYPE_LVDS)) &&
6081             link->type != dc_connection_none &&
6082             dm->backlight_dev) {
6083                 backlight_device_unregister(dm->backlight_dev);
6084                 dm->backlight_dev = NULL;
6085         }
6086 #endif
6087
6088         if (aconnector->dc_em_sink)
6089                 dc_sink_release(aconnector->dc_em_sink);
6090         aconnector->dc_em_sink = NULL;
6091         if (aconnector->dc_sink)
6092                 dc_sink_release(aconnector->dc_sink);
6093         aconnector->dc_sink = NULL;
6094
6095         drm_dp_cec_unregister_connector(&aconnector->dm_dp_aux.aux);
6096         drm_connector_unregister(connector);
6097         drm_connector_cleanup(connector);
6098         if (aconnector->i2c) {
6099                 i2c_del_adapter(&aconnector->i2c->base);
6100                 kfree(aconnector->i2c);
6101         }
6102         kfree(aconnector->dm_dp_aux.aux.name);
6103
6104         kfree(connector);
6105 }
6106
6107 void amdgpu_dm_connector_funcs_reset(struct drm_connector *connector)
6108 {
6109         struct dm_connector_state *state =
6110                 to_dm_connector_state(connector->state);
6111
6112         if (connector->state)
6113                 __drm_atomic_helper_connector_destroy_state(connector->state);
6114
6115         kfree(state);
6116
6117         state = kzalloc(sizeof(*state), GFP_KERNEL);
6118
6119         if (state) {
6120                 state->scaling = RMX_OFF;
6121                 state->underscan_enable = false;
6122                 state->underscan_hborder = 0;
6123                 state->underscan_vborder = 0;
6124                 state->base.max_requested_bpc = 8;
6125                 state->vcpi_slots = 0;
6126                 state->pbn = 0;
6127                 if (connector->connector_type == DRM_MODE_CONNECTOR_eDP)
6128                         state->abm_level = amdgpu_dm_abm_level;
6129
6130                 __drm_atomic_helper_connector_reset(connector, &state->base);
6131         }
6132 }
6133
6134 struct drm_connector_state *
6135 amdgpu_dm_connector_atomic_duplicate_state(struct drm_connector *connector)
6136 {
6137         struct dm_connector_state *state =
6138                 to_dm_connector_state(connector->state);
6139
6140         struct dm_connector_state *new_state =
6141                         kmemdup(state, sizeof(*state), GFP_KERNEL);
6142
6143         if (!new_state)
6144                 return NULL;
6145
6146         __drm_atomic_helper_connector_duplicate_state(connector, &new_state->base);
6147
6148         new_state->freesync_capable = state->freesync_capable;
6149         new_state->abm_level = state->abm_level;
6150         new_state->scaling = state->scaling;
6151         new_state->underscan_enable = state->underscan_enable;
6152         new_state->underscan_hborder = state->underscan_hborder;
6153         new_state->underscan_vborder = state->underscan_vborder;
6154         new_state->vcpi_slots = state->vcpi_slots;
6155         new_state->pbn = state->pbn;
6156         return &new_state->base;
6157 }
6158
6159 static int
6160 amdgpu_dm_connector_late_register(struct drm_connector *connector)
6161 {
6162         struct amdgpu_dm_connector *amdgpu_dm_connector =
6163                 to_amdgpu_dm_connector(connector);
6164         int r;
6165
6166         if ((connector->connector_type == DRM_MODE_CONNECTOR_DisplayPort) ||
6167             (connector->connector_type == DRM_MODE_CONNECTOR_eDP)) {
6168                 amdgpu_dm_connector->dm_dp_aux.aux.dev = connector->kdev;
6169                 r = drm_dp_aux_register(&amdgpu_dm_connector->dm_dp_aux.aux);
6170                 if (r)
6171                         return r;
6172         }
6173
6174 #if defined(CONFIG_DEBUG_FS)
6175         connector_debugfs_init(amdgpu_dm_connector);
6176 #endif
6177
6178         return 0;
6179 }
6180
6181 static const struct drm_connector_funcs amdgpu_dm_connector_funcs = {
6182         .reset = amdgpu_dm_connector_funcs_reset,
6183         .detect = amdgpu_dm_connector_detect,
6184         .fill_modes = drm_helper_probe_single_connector_modes,
6185         .destroy = amdgpu_dm_connector_destroy,
6186         .atomic_duplicate_state = amdgpu_dm_connector_atomic_duplicate_state,
6187         .atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
6188         .atomic_set_property = amdgpu_dm_connector_atomic_set_property,
6189         .atomic_get_property = amdgpu_dm_connector_atomic_get_property,
6190         .late_register = amdgpu_dm_connector_late_register,
6191         .early_unregister = amdgpu_dm_connector_unregister
6192 };
6193
6194 static int get_modes(struct drm_connector *connector)
6195 {
6196         return amdgpu_dm_connector_get_modes(connector);
6197 }
6198
6199 static void create_eml_sink(struct amdgpu_dm_connector *aconnector)
6200 {
6201         struct dc_sink_init_data init_params = {
6202                         .link = aconnector->dc_link,
6203                         .sink_signal = SIGNAL_TYPE_VIRTUAL
6204         };
6205         struct edid *edid;
6206
6207         if (!aconnector->base.edid_blob_ptr) {
6208                 DRM_ERROR("No EDID firmware found on connector: %s, forcing to OFF!\n",
6209                                 aconnector->base.name);
6210
6211                 aconnector->base.force = DRM_FORCE_OFF;
6212                 aconnector->base.override_edid = false;
6213                 return;
6214         }
6215
6216         edid = (struct edid *) aconnector->base.edid_blob_ptr->data;
6217
6218         aconnector->edid = edid;
6219
6220         aconnector->dc_em_sink = dc_link_add_remote_sink(
6221                 aconnector->dc_link,
6222                 (uint8_t *)edid,
6223                 (edid->extensions + 1) * EDID_LENGTH,
6224                 &init_params);
6225
6226         if (aconnector->base.force == DRM_FORCE_ON) {
6227                 aconnector->dc_sink = aconnector->dc_link->local_sink ?
6228                         aconnector->dc_link->local_sink :
6229                         aconnector->dc_em_sink;
6230                 dc_sink_retain(aconnector->dc_sink);
6231         }
6232 }
6233
6234 static void handle_edid_mgmt(struct amdgpu_dm_connector *aconnector)
6235 {
6236         struct dc_link *link = (struct dc_link *)aconnector->dc_link;
6237
6238         /*
6239          * In case of a headless boot with force-on for a DP managed connector,
6240          * these settings have to be != 0 to get an initial modeset.
6241          */
6242         if (link->connector_signal == SIGNAL_TYPE_DISPLAY_PORT) {
6243                 link->verified_link_cap.lane_count = LANE_COUNT_FOUR;
6244                 link->verified_link_cap.link_rate = LINK_RATE_HIGH2;
6245         }
6246
6248         aconnector->base.override_edid = true;
6249         create_eml_sink(aconnector);
6250 }
6251
6252 static struct dc_stream_state *
6253 create_validate_stream_for_sink(struct amdgpu_dm_connector *aconnector,
6254                                 const struct drm_display_mode *drm_mode,
6255                                 const struct dm_connector_state *dm_state,
6256                                 const struct dc_stream_state *old_stream)
6257 {
6258         struct drm_connector *connector = &aconnector->base;
6259         struct amdgpu_device *adev = drm_to_adev(connector->dev);
6260         struct dc_stream_state *stream;
6261         const struct drm_connector_state *drm_state = dm_state ? &dm_state->base : NULL;
6262         int requested_bpc = drm_state ? drm_state->max_requested_bpc : 8;
6263         enum dc_status dc_result = DC_OK;
6264
6265         do {
6266                 stream = create_stream_for_sink(aconnector, drm_mode,
6267                                                 dm_state, old_stream,
6268                                                 requested_bpc);
6269                 if (stream == NULL) {
6270                         DRM_ERROR("Failed to create stream for sink!\n");
6271                         break;
6272                 }
6273
6274                 dc_result = dc_validate_stream(adev->dm.dc, stream);
6275
6276                 if (dc_result != DC_OK) {
6277                         DRM_DEBUG_KMS("Mode %dx%d (clk %d) failed DC validation with error %d (%s)\n",
6278                                       drm_mode->hdisplay,
6279                                       drm_mode->vdisplay,
6280                                       drm_mode->clock,
6281                                       dc_result,
6282                                       dc_status_to_str(dc_result));
6283
6284                         dc_stream_release(stream);
6285                         stream = NULL;
6286                         requested_bpc -= 2; /* lower bpc to retry validation */
6287                 }
6288
6289         } while (stream == NULL && requested_bpc >= 6);
6290
6291         if (dc_result == DC_FAIL_ENC_VALIDATE && !aconnector->force_yuv420_output) {
6292                 DRM_DEBUG_KMS("Retry forcing YCbCr420 encoding\n");
6293
6294                 aconnector->force_yuv420_output = true;
6295                 stream = create_validate_stream_for_sink(aconnector, drm_mode,
6296                                                 dm_state, old_stream);
6297                 aconnector->force_yuv420_output = false;
6298         }
6299
6300         return stream;
6301 }
6302
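/*
 * Illustration of the retry ladder above (hypothetical connector with
 * max_requested_bpc == 10): validation is attempted at 10, then 8, then
 * 6 bpc; if encoder validation still fails, one more full pass is made
 * with YCbCr420 forced, which roughly halves the required link bandwidth.
 */
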
6303 enum drm_mode_status amdgpu_dm_connector_mode_valid(struct drm_connector *connector,
6304                                    struct drm_display_mode *mode)
6305 {
6306         int result = MODE_ERROR;
6307         struct dc_sink *dc_sink;
6308         /* TODO: Unhardcode stream count */
6309         struct dc_stream_state *stream;
6310         struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
6311
6312         if ((mode->flags & DRM_MODE_FLAG_INTERLACE) ||
6313                         (mode->flags & DRM_MODE_FLAG_DBLSCAN))
6314                 return result;
6315
6316         /*
6317          * Only run this the first time mode_valid is called, to initialize
6318          * EDID mgmt.
6319          */
6320         if (aconnector->base.force != DRM_FORCE_UNSPECIFIED &&
6321                 !aconnector->dc_em_sink)
6322                 handle_edid_mgmt(aconnector);
6323
6324         dc_sink = to_amdgpu_dm_connector(connector)->dc_sink;
6325
6326         if (dc_sink == NULL && aconnector->base.force != DRM_FORCE_ON_DIGITAL &&
6327                                 aconnector->base.force != DRM_FORCE_ON) {
6328                 DRM_ERROR("dc_sink is NULL!\n");
6329                 goto fail;
6330         }
6331
6332         stream = create_validate_stream_for_sink(aconnector, mode, NULL, NULL);
6333         if (stream) {
6334                 dc_stream_release(stream);
6335                 result = MODE_OK;
6336         }
6337
6338 fail:
6339         /* TODO: error handling */
6340         return result;
6341 }
6342
6343 static int fill_hdr_info_packet(const struct drm_connector_state *state,
6344                                 struct dc_info_packet *out)
6345 {
6346         struct hdmi_drm_infoframe frame;
6347         unsigned char buf[30]; /* 26 + 4 */
6348         ssize_t len;
6349         int ret, i;
6350
6351         memset(out, 0, sizeof(*out));
6352
6353         if (!state->hdr_output_metadata)
6354                 return 0;
6355
6356         ret = drm_hdmi_infoframe_set_hdr_metadata(&frame, state);
6357         if (ret)
6358                 return ret;
6359
6360         len = hdmi_drm_infoframe_pack_only(&frame, buf, sizeof(buf));
6361         if (len < 0)
6362                 return (int)len;
6363
6364         /* Static metadata is a fixed 26 bytes + 4 byte header. */
6365         if (len != 30)
6366                 return -EINVAL;
6367
6368         /* Prepare the infopacket for DC. */
6369         switch (state->connector->connector_type) {
6370         case DRM_MODE_CONNECTOR_HDMIA:
6371                 out->hb0 = 0x87; /* type */
6372                 out->hb1 = 0x01; /* version */
6373                 out->hb2 = 0x1A; /* length */
6374                 out->sb[0] = buf[3]; /* checksum */
6375                 i = 1;
6376                 break;
6377
6378         case DRM_MODE_CONNECTOR_DisplayPort:
6379         case DRM_MODE_CONNECTOR_eDP:
6380                 out->hb0 = 0x00; /* sdp id, zero */
6381                 out->hb1 = 0x87; /* type */
6382                 out->hb2 = 0x1D; /* payload len - 1 */
6383                 out->hb3 = (0x13 << 2); /* sdp version */
6384                 out->sb[0] = 0x01; /* version */
6385                 out->sb[1] = 0x1A; /* length */
6386                 i = 2;
6387                 break;
6388
6389         default:
6390                 return -EINVAL;
6391         }
6392
6393         memcpy(&out->sb[i], &buf[4], 26);
6394         out->valid = true;
6395
6396         print_hex_dump(KERN_DEBUG, "HDR SB:", DUMP_PREFIX_NONE, 16, 1, out->sb,
6397                        sizeof(out->sb), false);
6398
6399         return 0;
6400 }
6401
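/*
 * Worked example of the header bytes chosen above (values taken from the
 * code, not from a live trace): for HDMI the packed infoframe starts with
 * type/version/length/checksum, while the DP SDP variant carries the SDP
 * id, type, payload length - 1 and SDP version in hb0..hb3. The array name
 * below is illustrative only.
 */
static const u8 example_hdr_hdmi_header[4] __maybe_unused = {
        0x87, 0x01, 0x1A, 0x00 /* checksum is computed by the packer */
};
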
6402 static int
6403 amdgpu_dm_connector_atomic_check(struct drm_connector *conn,
6404                                  struct drm_atomic_state *state)
6405 {
6406         struct drm_connector_state *new_con_state =
6407                 drm_atomic_get_new_connector_state(state, conn);
6408         struct drm_connector_state *old_con_state =
6409                 drm_atomic_get_old_connector_state(state, conn);
6410         struct drm_crtc *crtc = new_con_state->crtc;
6411         struct drm_crtc_state *new_crtc_state;
6412         int ret;
6413
6414         trace_amdgpu_dm_connector_atomic_check(new_con_state);
6415
6416         if (!crtc)
6417                 return 0;
6418
6419         if (!drm_connector_atomic_hdr_metadata_equal(old_con_state, new_con_state)) {
6420                 struct dc_info_packet hdr_infopacket;
6421
6422                 ret = fill_hdr_info_packet(new_con_state, &hdr_infopacket);
6423                 if (ret)
6424                         return ret;
6425
6426                 new_crtc_state = drm_atomic_get_crtc_state(state, crtc);
6427                 if (IS_ERR(new_crtc_state))
6428                         return PTR_ERR(new_crtc_state);
6429
6430                 /*
6431                  * DC considers the stream backends changed if the
6432                  * static metadata changes. Forcing the modeset also
6433                  * gives a simple way for userspace to switch from
6434                  * 8bpc to 10bpc when setting the metadata to enter
6435                  * or exit HDR.
6436                  *
6437                  * Changing the static metadata after it's been
6438                  * set is permissible, however. So only force a
6439                  * modeset if we're entering or exiting HDR.
6440                  */
6441                 new_crtc_state->mode_changed =
6442                         !old_con_state->hdr_output_metadata ||
6443                         !new_con_state->hdr_output_metadata;
6444         }
6445
6446         return 0;
6447 }
6448
6449 static const struct drm_connector_helper_funcs
6450 amdgpu_dm_connector_helper_funcs = {
6451         /*
6452          * When hotplugging a second, bigger display in FB console mode, the
6453          * bigger-resolution modes are filtered out by drm_mode_validate_size()
6454          * and are missing after the user starts lightdm. So we need to renew
6455          * the mode list in the get_modes callback, not just return the count.
6456          */
6457         .get_modes = get_modes,
6458         .mode_valid = amdgpu_dm_connector_mode_valid,
6459         .atomic_check = amdgpu_dm_connector_atomic_check,
6460 };
6461
6462 static void dm_crtc_helper_disable(struct drm_crtc *crtc)
6463 {
6464 }
6465
6466 static int count_crtc_active_planes(struct drm_crtc_state *new_crtc_state)
6467 {
6468         struct drm_atomic_state *state = new_crtc_state->state;
6469         struct drm_plane *plane;
6470         int num_active = 0;
6471
6472         drm_for_each_plane_mask(plane, state->dev, new_crtc_state->plane_mask) {
6473                 struct drm_plane_state *new_plane_state;
6474
6475                 /* Cursor planes are "fake". */
6476                 if (plane->type == DRM_PLANE_TYPE_CURSOR)
6477                         continue;
6478
6479                 new_plane_state = drm_atomic_get_new_plane_state(state, plane);
6480
6481                 if (!new_plane_state) {
6482                         /*
6483                          * The plane is enabled on the CRTC and hasn't changed
6484                          * state. This means that it previously passed
6485                          * validation and is therefore enabled.
6486                          */
6487                         num_active += 1;
6488                         continue;
6489                 }
6490
6491                 /* We need a framebuffer to be considered enabled. */
6492                 num_active += (new_plane_state->fb != NULL);
6493         }
6494
6495         return num_active;
6496 }
6497
6498 static void dm_update_crtc_active_planes(struct drm_crtc *crtc,
6499                                          struct drm_crtc_state *new_crtc_state)
6500 {
6501         struct dm_crtc_state *dm_new_crtc_state =
6502                 to_dm_crtc_state(new_crtc_state);
6503
6504         dm_new_crtc_state->active_planes = 0;
6505
6506         if (!dm_new_crtc_state->stream)
6507                 return;
6508
6509         dm_new_crtc_state->active_planes =
6510                 count_crtc_active_planes(new_crtc_state);
6511 }
6512
6513 static int dm_crtc_helper_atomic_check(struct drm_crtc *crtc,
6514                                        struct drm_atomic_state *state)
6515 {
6516         struct drm_crtc_state *crtc_state = drm_atomic_get_new_crtc_state(state,
6517                                                                           crtc);
6518         struct amdgpu_device *adev = drm_to_adev(crtc->dev);
6519         struct dc *dc = adev->dm.dc;
6520         struct dm_crtc_state *dm_crtc_state = to_dm_crtc_state(crtc_state);
6521         int ret = -EINVAL;
6522
6523         trace_amdgpu_dm_crtc_atomic_check(crtc_state);
6524
6525         dm_update_crtc_active_planes(crtc, crtc_state);
6526
6527         if (unlikely(!dm_crtc_state->stream &&
6528                      modeset_required(crtc_state, NULL, dm_crtc_state->stream))) {
6529                 WARN_ON(1);
6530                 return ret;
6531         }
6532
6533         /*
6534          * We require the primary plane to be enabled whenever the CRTC is, otherwise
6535          * drm_mode_cursor_universal may end up trying to enable the cursor plane while all other
6536          * planes are disabled, which is not supported by the hardware. And there is legacy
6537          * userspace which stops using the HW cursor altogether in response to the resulting EINVAL.
6538          */
6539         if (crtc_state->enable &&
6540             !(crtc_state->plane_mask & drm_plane_mask(crtc->primary))) {
6541                 DRM_DEBUG_ATOMIC("Can't enable a CRTC without enabling the primary plane\n");
6542                 return -EINVAL;
6543         }
6544
6545         /* In some use cases, like reset, no stream is attached */
6546         if (!dm_crtc_state->stream)
6547                 return 0;
6548
6549         if (dc_validate_stream(dc, dm_crtc_state->stream) == DC_OK)
6550                 return 0;
6551
6552         DRM_DEBUG_ATOMIC("Failed DC stream validation\n");
6553         return ret;
6554 }
6555
6556 static bool dm_crtc_helper_mode_fixup(struct drm_crtc *crtc,
6557                                       const struct drm_display_mode *mode,
6558                                       struct drm_display_mode *adjusted_mode)
6559 {
6560         return true;
6561 }
6562
6563 static const struct drm_crtc_helper_funcs amdgpu_dm_crtc_helper_funcs = {
6564         .disable = dm_crtc_helper_disable,
6565         .atomic_check = dm_crtc_helper_atomic_check,
6566         .mode_fixup = dm_crtc_helper_mode_fixup,
6567         .get_scanout_position = amdgpu_crtc_get_scanout_position,
6568 };
6569
6570 static void dm_encoder_helper_disable(struct drm_encoder *encoder)
6571 {
6572 }
6574
6575 static int convert_dc_color_depth_into_bpc(enum dc_color_depth display_color_depth)
6576 {
6577         switch (display_color_depth) {
6578         case COLOR_DEPTH_666:
6579                 return 6;
6580         case COLOR_DEPTH_888:
6581                 return 8;
6582         case COLOR_DEPTH_101010:
6583                 return 10;
6584         case COLOR_DEPTH_121212:
6585                 return 12;
6586         case COLOR_DEPTH_141414:
6587                 return 14;
6588         case COLOR_DEPTH_161616:
6589                 return 16;
6590         default:
6591                 break;
6592         }
6593         return 0;
6594 }
6595
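/*
 * Usage sketch: for three-component pixel formats the stream bpp is three
 * times the per-component depth, which is exactly how
 * dm_encoder_helper_atomic_check() below consumes this helper:
 *
 *      bpp = convert_dc_color_depth_into_bpc(color_depth) * 3;
 */
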
6596 static int dm_encoder_helper_atomic_check(struct drm_encoder *encoder,
6597                                           struct drm_crtc_state *crtc_state,
6598                                           struct drm_connector_state *conn_state)
6599 {
6600         struct drm_atomic_state *state = crtc_state->state;
6601         struct drm_connector *connector = conn_state->connector;
6602         struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
6603         struct dm_connector_state *dm_new_connector_state = to_dm_connector_state(conn_state);
6604         const struct drm_display_mode *adjusted_mode = &crtc_state->adjusted_mode;
6605         struct drm_dp_mst_topology_mgr *mst_mgr;
6606         struct drm_dp_mst_port *mst_port;
6607         enum dc_color_depth color_depth;
6608         int clock, bpp = 0;
6609         bool is_y420 = false;
6610
6611         if (!aconnector->port || !aconnector->dc_sink)
6612                 return 0;
6613
6614         mst_port = aconnector->port;
6615         mst_mgr = &aconnector->mst_port->mst_mgr;
6616
6617         if (!crtc_state->connectors_changed && !crtc_state->mode_changed)
6618                 return 0;
6619
6620         if (!state->duplicated) {
6621                 int max_bpc = conn_state->max_requested_bpc;
6622                 is_y420 = drm_mode_is_420_also(&connector->display_info, adjusted_mode) &&
6623                                 aconnector->force_yuv420_output;
6624                 color_depth = convert_color_depth_from_display_info(connector,
6625                                                                     is_y420,
6626                                                                     max_bpc);
6627                 bpp = convert_dc_color_depth_into_bpc(color_depth) * 3;
6628                 clock = adjusted_mode->clock;
6629                 dm_new_connector_state->pbn = drm_dp_calc_pbn_mode(clock, bpp, false);
6630         }
6631         dm_new_connector_state->vcpi_slots = drm_dp_atomic_find_vcpi_slots(state,
6632                                                                            mst_mgr,
6633                                                                            mst_port,
6634                                                                            dm_new_connector_state->pbn,
6635                                                                            dm_mst_get_pbn_divider(aconnector->dc_link));
6636         if (dm_new_connector_state->vcpi_slots < 0) {
6637                 DRM_DEBUG_ATOMIC("failed finding vcpi slots: %d\n", (int)dm_new_connector_state->vcpi_slots);
6638                 return dm_new_connector_state->vcpi_slots;
6639         }
6640         return 0;
6641 }
6642
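/*
 * Minimal sketch of the bandwidth bookkeeping above, assuming the standard
 * DP MST PBN definition (64/54 MTP overhead plus a ~0.6% margin) that
 * drm_dp_calc_pbn_mode() implements. Helper name is illustrative only.
 */
static inline int example_mst_pbn(int clock_khz, int bpp)
{
        /* false: bpp is in whole bits per pixel (the non-DSC case) */
        return drm_dp_calc_pbn_mode(clock_khz, bpp, false);
}
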
6643 const struct drm_encoder_helper_funcs amdgpu_dm_encoder_helper_funcs = {
6644         .disable = dm_encoder_helper_disable,
6645         .atomic_check = dm_encoder_helper_atomic_check
6646 };
6647
6648 #if defined(CONFIG_DRM_AMD_DC_DCN)
6649 static int dm_update_mst_vcpi_slots_for_dsc(struct drm_atomic_state *state,
6650                                             struct dc_state *dc_state)
6651 {
6652         struct dc_stream_state *stream = NULL;
6653         struct drm_connector *connector;
6654         struct drm_connector_state *new_con_state;
6655         struct amdgpu_dm_connector *aconnector;
6656         struct dm_connector_state *dm_conn_state;
6657         int i, j, clock, bpp;
6658         int vcpi, pbn_div, pbn = 0;
6659
6660         for_each_new_connector_in_state(state, connector, new_con_state, i) {
6662                 aconnector = to_amdgpu_dm_connector(connector);
6663
6664                 if (!aconnector->port)
6665                         continue;
6666
6667                 if (!new_con_state || !new_con_state->crtc)
6668                         continue;
6669
6670                 dm_conn_state = to_dm_connector_state(new_con_state);
6671
6672                 for (j = 0; j < dc_state->stream_count; j++) {
6673                         stream = dc_state->streams[j];
6674                         if (!stream)
6675                                 continue;
6676
6677                         if ((struct amdgpu_dm_connector *)stream->dm_stream_context == aconnector)
6678                                 break;
6679
6680                         stream = NULL;
6681                 }
6682
6683                 if (!stream)
6684                         continue;
6685
6686                 if (stream->timing.flags.DSC != 1) {
6687                         drm_dp_mst_atomic_enable_dsc(state,
6688                                                      aconnector->port,
6689                                                      dm_conn_state->pbn,
6690                                                      0,
6691                                                      false);
6692                         continue;
6693                 }
6694
6695                 pbn_div = dm_mst_get_pbn_divider(stream->link);
6696                 bpp = stream->timing.dsc_cfg.bits_per_pixel;
6697                 clock = stream->timing.pix_clk_100hz / 10;
6698                 pbn = drm_dp_calc_pbn_mode(clock, bpp, true);
6699                 vcpi = drm_dp_mst_atomic_enable_dsc(state,
6700                                                     aconnector->port,
6701                                                     pbn, pbn_div,
6702                                                     true);
6703                 if (vcpi < 0)
6704                         return vcpi;
6705
6706                 dm_conn_state->pbn = pbn;
6707                 dm_conn_state->vcpi_slots = vcpi;
6708         }
6709         return 0;
6710 }
6711 #endif
6712
6713 static void dm_drm_plane_reset(struct drm_plane *plane)
6714 {
6715         struct dm_plane_state *amdgpu_state = NULL;
6716
6717         if (plane->state)
6718                 plane->funcs->atomic_destroy_state(plane, plane->state);
6719
6720         amdgpu_state = kzalloc(sizeof(*amdgpu_state), GFP_KERNEL);
6721         WARN_ON(amdgpu_state == NULL);
6722
6723         if (amdgpu_state)
6724                 __drm_atomic_helper_plane_reset(plane, &amdgpu_state->base);
6725 }
6726
6727 static struct drm_plane_state *
6728 dm_drm_plane_duplicate_state(struct drm_plane *plane)
6729 {
6730         struct dm_plane_state *dm_plane_state, *old_dm_plane_state;
6731
6732         old_dm_plane_state = to_dm_plane_state(plane->state);
6733         dm_plane_state = kzalloc(sizeof(*dm_plane_state), GFP_KERNEL);
6734         if (!dm_plane_state)
6735                 return NULL;
6736
6737         __drm_atomic_helper_plane_duplicate_state(plane, &dm_plane_state->base);
6738
6739         if (old_dm_plane_state->dc_state) {
6740                 dm_plane_state->dc_state = old_dm_plane_state->dc_state;
6741                 dc_plane_state_retain(dm_plane_state->dc_state);
6742         }
6743
6744         return &dm_plane_state->base;
6745 }
6746
6747 static void dm_drm_plane_destroy_state(struct drm_plane *plane,
6748                                 struct drm_plane_state *state)
6749 {
6750         struct dm_plane_state *dm_plane_state = to_dm_plane_state(state);
6751
6752         if (dm_plane_state->dc_state)
6753                 dc_plane_state_release(dm_plane_state->dc_state);
6754
6755         drm_atomic_helper_plane_destroy_state(plane, state);
6756 }
6757
6758 static const struct drm_plane_funcs dm_plane_funcs = {
6759         .update_plane   = drm_atomic_helper_update_plane,
6760         .disable_plane  = drm_atomic_helper_disable_plane,
6761         .destroy        = drm_primary_helper_destroy,
6762         .reset = dm_drm_plane_reset,
6763         .atomic_duplicate_state = dm_drm_plane_duplicate_state,
6764         .atomic_destroy_state = dm_drm_plane_destroy_state,
6765         .format_mod_supported = dm_plane_format_mod_supported,
6766 };
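     /*
      * A rough sketch of the state lifecycle behind these hooks: .reset
      * allocates the initial dm_plane_state, .atomic_duplicate_state clones
      * it and takes an extra reference on the backing dc_plane_state, and
      * .atomic_destroy_state drops that reference before freeing the DRM
      * state, so the dc_plane_state lives exactly as long as the last DRM
      * plane state that points at it.
      */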
6767
6768 static int dm_plane_helper_prepare_fb(struct drm_plane *plane,
6769                                       struct drm_plane_state *new_state)
6770 {
6771         struct amdgpu_framebuffer *afb;
6772         struct drm_gem_object *obj;
6773         struct amdgpu_device *adev;
6774         struct amdgpu_bo *rbo;
6775         struct dm_plane_state *dm_plane_state_new, *dm_plane_state_old;
6776         struct list_head list;
6777         struct ttm_validate_buffer tv;
6778         struct ww_acquire_ctx ticket;
6779         uint32_t domain;
6780         int r;
6781
6782         if (!new_state->fb) {
6783                 DRM_DEBUG_KMS("No FB bound\n");
6784                 return 0;
6785         }
6786
6787         afb = to_amdgpu_framebuffer(new_state->fb);
6788         obj = new_state->fb->obj[0];
6789         rbo = gem_to_amdgpu_bo(obj);
6790         adev = amdgpu_ttm_adev(rbo->tbo.bdev);
6791         INIT_LIST_HEAD(&list);
6792
6793         tv.bo = &rbo->tbo;
6794         tv.num_shared = 1;
6795         list_add(&tv.head, &list);
6796
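             /*
              * Pinning flow, in sketch form: reserve the BO through TTM,
              * pin it into a scanout-capable domain (VRAM, or GTT where
              * supported), make sure it has a GART mapping and thus a valid
              * GPU offset, then back off the reservation. The matching unpin
              * happens in dm_plane_helper_cleanup_fb().
              */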
6797         r = ttm_eu_reserve_buffers(&ticket, &list, false, NULL);
6798         if (r) {
6799                 dev_err(adev->dev, "fail to reserve bo (%d)\n", r);
6800                 return r;
6801         }
6802
6803         if (plane->type != DRM_PLANE_TYPE_CURSOR)
6804                 domain = amdgpu_display_supported_domains(adev, rbo->flags);
6805         else
6806                 domain = AMDGPU_GEM_DOMAIN_VRAM;
6807
6808         r = amdgpu_bo_pin(rbo, domain);
6809         if (unlikely(r != 0)) {
6810                 if (r != -ERESTARTSYS)
6811                         DRM_ERROR("Failed to pin framebuffer with error %d\n", r);
6812                 ttm_eu_backoff_reservation(&ticket, &list);
6813                 return r;
6814         }
6815
6816         r = amdgpu_ttm_alloc_gart(&rbo->tbo);
6817         if (unlikely(r != 0)) {
6818                 amdgpu_bo_unpin(rbo);
6819                 ttm_eu_backoff_reservation(&ticket, &list);
6820                 DRM_ERROR("%p bind failed\n", rbo);
6821                 return r;
6822         }
6823
6824         ttm_eu_backoff_reservation(&ticket, &list);
6825
6826         afb->address = amdgpu_bo_gpu_offset(rbo);
6827
6828         amdgpu_bo_ref(rbo);
6829
6830         /*
6831          * We don't do surface updates on planes that have been newly created,
6832          * but we also don't have the afb->address during atomic check.
6833          *
6834          * Fill in buffer attributes depending on the address here, but only on
6835          * newly created planes since they're not being used by DC yet and this
6836          * won't modify global state.
6837          */
6838         dm_plane_state_old = to_dm_plane_state(plane->state);
6839         dm_plane_state_new = to_dm_plane_state(new_state);
6840
6841         if (dm_plane_state_new->dc_state &&
6842             dm_plane_state_old->dc_state != dm_plane_state_new->dc_state) {
6843                 struct dc_plane_state *plane_state =
6844                         dm_plane_state_new->dc_state;
6845                 bool force_disable_dcc = !plane_state->dcc.enable;
6846
6847                 fill_plane_buffer_attributes(
6848                         adev, afb, plane_state->format, plane_state->rotation,
6849                         afb->tiling_flags,
6850                         &plane_state->tiling_info, &plane_state->plane_size,
6851                         &plane_state->dcc, &plane_state->address,
6852                         afb->tmz_surface, force_disable_dcc);
6853         }
6854
6855         return 0;
6856 }
6857
6858 static void dm_plane_helper_cleanup_fb(struct drm_plane *plane,
6859                                        struct drm_plane_state *old_state)
6860 {
6861         struct amdgpu_bo *rbo;
6862         int r;
6863
6864         if (!old_state->fb)
6865                 return;
6866
6867         rbo = gem_to_amdgpu_bo(old_state->fb->obj[0]);
6868         r = amdgpu_bo_reserve(rbo, false);
6869         if (unlikely(r)) {
6870                 DRM_ERROR("failed to reserve rbo before unpin\n");
6871                 return;
6872         }
6873
6874         amdgpu_bo_unpin(rbo);
6875         amdgpu_bo_unreserve(rbo);
6876         amdgpu_bo_unref(&rbo);
6877 }
6878
6879 static int dm_plane_helper_check_state(struct drm_plane_state *state,
6880                                        struct drm_crtc_state *new_crtc_state)
6881 {
6882         struct drm_framebuffer *fb = state->fb;
6883         int min_downscale, max_upscale;
6884         int min_scale = 0;
6885         int max_scale = INT_MAX;
6886
6887         /* Plane enabled? Validate viewport and get scaling factors from plane caps. */
6888         if (fb && state->crtc) {
6889                 /* Validate viewport to cover the case when only the position changes */
6890                 if (state->plane->type != DRM_PLANE_TYPE_CURSOR) {
6891                         int viewport_width = state->crtc_w;
6892                         int viewport_height = state->crtc_h;
6893
6894                         if (state->crtc_x < 0)
6895                                 viewport_width += state->crtc_x;
6896                         else if (state->crtc_x + state->crtc_w > new_crtc_state->mode.crtc_hdisplay)
6897                                 viewport_width = new_crtc_state->mode.crtc_hdisplay - state->crtc_x;
6898
6899                         if (state->crtc_y < 0)
6900                                 viewport_height += state->crtc_y;
6901                         else if (state->crtc_y + state->crtc_h > new_crtc_state->mode.crtc_vdisplay)
6902                                 viewport_height = new_crtc_state->mode.crtc_vdisplay - state->crtc_y;
6903
6904                         if (viewport_width < 0 || viewport_height < 0) {
6905                                 DRM_DEBUG_ATOMIC("Plane completely outside of screen\n");
6906                                 return -EINVAL;
6907                         } else if (viewport_width < MIN_VIEWPORT_SIZE*2) { /* x2 for width is because of pipe-split. */
6908                                 DRM_DEBUG_ATOMIC("Viewport width %d smaller than %d\n", viewport_width, MIN_VIEWPORT_SIZE*2);
6909                                 return -EINVAL;
6910                         } else if (viewport_height < MIN_VIEWPORT_SIZE) {
6911                                 DRM_DEBUG_ATOMIC("Viewport height %d smaller than %d\n", viewport_height, MIN_VIEWPORT_SIZE);
6912                                 return -EINVAL;
6913                         }
6914
6915                 }
6916
6917                 /* Get min/max allowed scaling factors from plane caps. */
6918                 get_min_max_dc_plane_scaling(state->crtc->dev, fb,
6919                                              &min_downscale, &max_upscale);
6920                 /*
6921                  * Convert to drm convention: 16.16 fixed point, instead of dc's
6922                  * 1.0 == 1000. Also drm scaling is src/dst instead of dc's
6923                  * dst/src, so min_scale = 1.0 / max_upscale, etc.
6924                  */
6925                 min_scale = (1000 << 16) / max_upscale;
6926                 max_scale = (1000 << 16) / min_downscale;
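                     /*
                      * For example, with illustrative caps of max_upscale =
                      * 16000 (16x in DC's 1.0 == 1000 convention) and
                      * min_downscale = 250 (1/4x), this gives min_scale =
                      * (1000 << 16) / 16000 = 0x1000 (1/16 in 16.16) and
                      * max_scale = (1000 << 16) / 250 = 0x40000 (4.0).
                      */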
6927         }
6928
6929         return drm_atomic_helper_check_plane_state(
6930                 state, new_crtc_state, min_scale, max_scale, true, true);
6931 }
6932
6933 static int dm_plane_atomic_check(struct drm_plane *plane,
6934                                  struct drm_atomic_state *state)
6935 {
6936         struct drm_plane_state *new_plane_state = drm_atomic_get_new_plane_state(state,
6937                                                                                  plane);
6938         struct amdgpu_device *adev = drm_to_adev(plane->dev);
6939         struct dc *dc = adev->dm.dc;
6940         struct dm_plane_state *dm_plane_state;
6941         struct dc_scaling_info scaling_info;
6942         struct drm_crtc_state *new_crtc_state;
6943         int ret;
6944
6945         trace_amdgpu_dm_plane_atomic_check(new_plane_state);
6946
6947         dm_plane_state = to_dm_plane_state(new_plane_state);
6948
6949         if (!dm_plane_state->dc_state)
6950                 return 0;
6951
6952         new_crtc_state =
6953                 drm_atomic_get_new_crtc_state(state,
6954                                               new_plane_state->crtc);
6955         if (!new_crtc_state)
6956                 return -EINVAL;
6957
6958         ret = dm_plane_helper_check_state(new_plane_state, new_crtc_state);
6959         if (ret)
6960                 return ret;
6961
6962         ret = fill_dc_scaling_info(new_plane_state, &scaling_info);
6963         if (ret)
6964                 return ret;
6965
6966         if (dc_validate_plane(dc, dm_plane_state->dc_state) == DC_OK)
6967                 return 0;
6968
6969         return -EINVAL;
6970 }
6971
6972 static int dm_plane_atomic_async_check(struct drm_plane *plane,
6973                                        struct drm_atomic_state *state)
6974 {
6975         /* Only support async updates on cursor planes. */
6976         if (plane->type != DRM_PLANE_TYPE_CURSOR)
6977                 return -EINVAL;
6978
6979         return 0;
6980 }
6981
6982 static void dm_plane_atomic_async_update(struct drm_plane *plane,
6983                                          struct drm_atomic_state *state)
6984 {
6985         struct drm_plane_state *new_state = drm_atomic_get_new_plane_state(state,
6986                                                                            plane);
6987         struct drm_plane_state *old_state =
6988                 drm_atomic_get_old_plane_state(state, plane);
6989
6990         trace_amdgpu_dm_atomic_update_cursor(new_state);
6991
6992         swap(plane->state->fb, new_state->fb);
6993
6994         plane->state->src_x = new_state->src_x;
6995         plane->state->src_y = new_state->src_y;
6996         plane->state->src_w = new_state->src_w;
6997         plane->state->src_h = new_state->src_h;
6998         plane->state->crtc_x = new_state->crtc_x;
6999         plane->state->crtc_y = new_state->crtc_y;
7000         plane->state->crtc_w = new_state->crtc_w;
7001         plane->state->crtc_h = new_state->crtc_h;
7002
7003         handle_cursor_update(plane, old_state);
7004 }
7005
7006 static const struct drm_plane_helper_funcs dm_plane_helper_funcs = {
7007         .prepare_fb = dm_plane_helper_prepare_fb,
7008         .cleanup_fb = dm_plane_helper_cleanup_fb,
7009         .atomic_check = dm_plane_atomic_check,
7010         .atomic_async_check = dm_plane_atomic_async_check,
7011         .atomic_async_update = dm_plane_atomic_async_update
7012 };
7013
7014 /*
7015  * TODO: these are currently initialized to rgb formats only.
7016  * For future use cases we should either initialize them dynamically based on
7017  * plane capabilities, or initialize this array to all formats, so internal drm
7018  * check will succeed, and let DC implement proper check
7019  */
7020 static const uint32_t rgb_formats[] = {
7021         DRM_FORMAT_XRGB8888,
7022         DRM_FORMAT_ARGB8888,
7023         DRM_FORMAT_RGBA8888,
7024         DRM_FORMAT_XRGB2101010,
7025         DRM_FORMAT_XBGR2101010,
7026         DRM_FORMAT_ARGB2101010,
7027         DRM_FORMAT_ABGR2101010,
7028         DRM_FORMAT_XBGR8888,
7029         DRM_FORMAT_ABGR8888,
7030         DRM_FORMAT_RGB565,
7031 };
7032
7033 static const uint32_t overlay_formats[] = {
7034         DRM_FORMAT_XRGB8888,
7035         DRM_FORMAT_ARGB8888,
7036         DRM_FORMAT_RGBA8888,
7037         DRM_FORMAT_XBGR8888,
7038         DRM_FORMAT_ABGR8888,
7039         DRM_FORMAT_RGB565
7040 };
7041
7042 static const u32 cursor_formats[] = {
7043         DRM_FORMAT_ARGB8888
7044 };
7045
7046 static int get_plane_formats(const struct drm_plane *plane,
7047                              const struct dc_plane_cap *plane_cap,
7048                              uint32_t *formats, int max_formats)
7049 {
7050         int i, num_formats = 0;
7051
7052         /*
7053          * TODO: Query support for each group of formats directly from
7054          * DC plane caps. This will require adding more formats to the
7055          * caps list.
7056          */
7057
7058         switch (plane->type) {
7059         case DRM_PLANE_TYPE_PRIMARY:
7060                 for (i = 0; i < ARRAY_SIZE(rgb_formats); ++i) {
7061                         if (num_formats >= max_formats)
7062                                 break;
7063
7064                         formats[num_formats++] = rgb_formats[i];
7065                 }
7066
7067                 if (plane_cap && plane_cap->pixel_format_support.nv12)
7068                         formats[num_formats++] = DRM_FORMAT_NV12;
7069                 if (plane_cap && plane_cap->pixel_format_support.p010)
7070                         formats[num_formats++] = DRM_FORMAT_P010;
7071                 if (plane_cap && plane_cap->pixel_format_support.fp16) {
7072                         formats[num_formats++] = DRM_FORMAT_XRGB16161616F;
7073                         formats[num_formats++] = DRM_FORMAT_ARGB16161616F;
7074                         formats[num_formats++] = DRM_FORMAT_XBGR16161616F;
7075                         formats[num_formats++] = DRM_FORMAT_ABGR16161616F;
7076                 }
7077                 break;
7078
7079         case DRM_PLANE_TYPE_OVERLAY:
7080                 for (i = 0; i < ARRAY_SIZE(overlay_formats); ++i) {
7081                         if (num_formats >= max_formats)
7082                                 break;
7083
7084                         formats[num_formats++] = overlay_formats[i];
7085                 }
7086                 break;
7087
7088         case DRM_PLANE_TYPE_CURSOR:
7089                 for (i = 0; i < ARRAY_SIZE(cursor_formats); ++i) {
7090                         if (num_formats >= max_formats)
7091                                 break;
7092
7093                         formats[num_formats++] = cursor_formats[i];
7094                 }
7095                 break;
7096         }
7097
7098         return num_formats;
7099 }
7100
7101 static int amdgpu_dm_plane_init(struct amdgpu_display_manager *dm,
7102                                 struct drm_plane *plane,
7103                                 unsigned long possible_crtcs,
7104                                 const struct dc_plane_cap *plane_cap)
7105 {
7106         uint32_t formats[32];
7107         int num_formats;
7108         int res = -EPERM;
7109         unsigned int supported_rotations;
7110         uint64_t *modifiers = NULL;
7111
7112         num_formats = get_plane_formats(plane, plane_cap, formats,
7113                                         ARRAY_SIZE(formats));
7114
7115         res = get_plane_modifiers(dm->adev, plane->type, &modifiers);
7116         if (res)
7117                 return res;
7118
7119         res = drm_universal_plane_init(adev_to_drm(dm->adev), plane, possible_crtcs,
7120                                        &dm_plane_funcs, formats, num_formats,
7121                                        modifiers, plane->type, NULL);
7122         kfree(modifiers);
7123         if (res)
7124                 return res;
7125
7126         if (plane->type == DRM_PLANE_TYPE_OVERLAY &&
7127             plane_cap && plane_cap->per_pixel_alpha) {
7128                 unsigned int blend_caps = BIT(DRM_MODE_BLEND_PIXEL_NONE) |
7129                                           BIT(DRM_MODE_BLEND_PREMULTI);
7130
7131                 drm_plane_create_alpha_property(plane);
7132                 drm_plane_create_blend_mode_property(plane, blend_caps);
7133         }
7134
7135         if (plane->type == DRM_PLANE_TYPE_PRIMARY &&
7136             plane_cap &&
7137             (plane_cap->pixel_format_support.nv12 ||
7138              plane_cap->pixel_format_support.p010)) {
7139                 /* This only affects YUV formats. */
7140                 drm_plane_create_color_properties(
7141                         plane,
7142                         BIT(DRM_COLOR_YCBCR_BT601) |
7143                         BIT(DRM_COLOR_YCBCR_BT709) |
7144                         BIT(DRM_COLOR_YCBCR_BT2020),
7145                         BIT(DRM_COLOR_YCBCR_LIMITED_RANGE) |
7146                         BIT(DRM_COLOR_YCBCR_FULL_RANGE),
7147                         DRM_COLOR_YCBCR_BT709, DRM_COLOR_YCBCR_LIMITED_RANGE);
7148         }
7149
7150         supported_rotations =
7151                 DRM_MODE_ROTATE_0 | DRM_MODE_ROTATE_90 |
7152                 DRM_MODE_ROTATE_180 | DRM_MODE_ROTATE_270;
7153
7154         if (dm->adev->asic_type >= CHIP_BONAIRE &&
7155             plane->type != DRM_PLANE_TYPE_CURSOR)
7156                 drm_plane_create_rotation_property(plane, DRM_MODE_ROTATE_0,
7157                                                    supported_rotations);
7158
7159         drm_plane_helper_add(plane, &dm_plane_helper_funcs);
7160
7161         /* Create (reset) the plane state */
7162         if (plane->funcs->reset)
7163                 plane->funcs->reset(plane);
7164
7165         return 0;
7166 }
7167
7168 static int amdgpu_dm_crtc_init(struct amdgpu_display_manager *dm,
7169                                struct drm_plane *plane,
7170                                uint32_t crtc_index)
7171 {
7172         struct amdgpu_crtc *acrtc = NULL;
7173         struct drm_plane *cursor_plane;
7174
7175         int res = -ENOMEM;
7176
7177         cursor_plane = kzalloc(sizeof(*cursor_plane), GFP_KERNEL);
7178         if (!cursor_plane)
7179                 goto fail;
7180
7181         cursor_plane->type = DRM_PLANE_TYPE_CURSOR;
7182         res = amdgpu_dm_plane_init(dm, cursor_plane, 0, NULL);
             if (res)
                     goto fail;
7183
7184         acrtc = kzalloc(sizeof(struct amdgpu_crtc), GFP_KERNEL);
7185         if (!acrtc)
7186                 goto fail;
7187
7188         res = drm_crtc_init_with_planes(
7189                         dm->ddev,
7190                         &acrtc->base,
7191                         plane,
7192                         cursor_plane,
7193                         &amdgpu_dm_crtc_funcs, NULL);
7194
7195         if (res)
7196                 goto fail;
7197
7198         drm_crtc_helper_add(&acrtc->base, &amdgpu_dm_crtc_helper_funcs);
7199
7200         /* Create (reset) the crtc state */
7201         if (acrtc->base.funcs->reset)
7202                 acrtc->base.funcs->reset(&acrtc->base);
7203
7204         acrtc->max_cursor_width = dm->adev->dm.dc->caps.max_cursor_size;
7205         acrtc->max_cursor_height = dm->adev->dm.dc->caps.max_cursor_size;
7206
7207         acrtc->crtc_id = crtc_index;
7208         acrtc->base.enabled = false;
7209         acrtc->otg_inst = -1;
7210
7211         dm->adev->mode_info.crtcs[crtc_index] = acrtc;
7212         drm_crtc_enable_color_mgmt(&acrtc->base, MAX_COLOR_LUT_ENTRIES,
7213                                    true, MAX_COLOR_LUT_ENTRIES);
7214         drm_mode_crtc_set_gamma_size(&acrtc->base, MAX_COLOR_LEGACY_LUT_ENTRIES);
7215
7216         return 0;
7217
7218 fail:
7219         kfree(acrtc);
7220         kfree(cursor_plane);
7221         return res;
7222 }
7223
7224
7225 static int to_drm_connector_type(enum signal_type st)
7226 {
7227         switch (st) {
7228         case SIGNAL_TYPE_HDMI_TYPE_A:
7229                 return DRM_MODE_CONNECTOR_HDMIA;
7230         case SIGNAL_TYPE_EDP:
7231                 return DRM_MODE_CONNECTOR_eDP;
7232         case SIGNAL_TYPE_LVDS:
7233                 return DRM_MODE_CONNECTOR_LVDS;
7234         case SIGNAL_TYPE_RGB:
7235                 return DRM_MODE_CONNECTOR_VGA;
7236         case SIGNAL_TYPE_DISPLAY_PORT:
7237         case SIGNAL_TYPE_DISPLAY_PORT_MST:
7238                 return DRM_MODE_CONNECTOR_DisplayPort;
7239         case SIGNAL_TYPE_DVI_DUAL_LINK:
7240         case SIGNAL_TYPE_DVI_SINGLE_LINK:
7241                 return DRM_MODE_CONNECTOR_DVID;
7242         case SIGNAL_TYPE_VIRTUAL:
7243                 return DRM_MODE_CONNECTOR_VIRTUAL;
7244
7245         default:
7246                 return DRM_MODE_CONNECTOR_Unknown;
7247         }
7248 }
7249
7250 static struct drm_encoder *amdgpu_dm_connector_to_encoder(struct drm_connector *connector)
7251 {
7252         struct drm_encoder *encoder;
7253
7254         /* There is only one encoder per connector */
7255         drm_connector_for_each_possible_encoder(connector, encoder)
7256                 return encoder;
7257
7258         return NULL;
7259 }
7260
7261 static void amdgpu_dm_get_native_mode(struct drm_connector *connector)
7262 {
7263         struct drm_encoder *encoder;
7264         struct amdgpu_encoder *amdgpu_encoder;
7265
7266         encoder = amdgpu_dm_connector_to_encoder(connector);
7267
7268         if (encoder == NULL)
7269                 return;
7270
7271         amdgpu_encoder = to_amdgpu_encoder(encoder);
7272
7273         amdgpu_encoder->native_mode.clock = 0;
7274
7275         if (!list_empty(&connector->probed_modes)) {
7276                 struct drm_display_mode *preferred_mode = NULL;
7277
7278                 list_for_each_entry(preferred_mode,
7279                                     &connector->probed_modes,
7280                                     head) {
7281                         if (preferred_mode->type & DRM_MODE_TYPE_PREFERRED)
7282                                 amdgpu_encoder->native_mode = *preferred_mode;
7283
7284                         break;
7285                 }
7286
7287         }
7288 }
7289
7290 static struct drm_display_mode *
7291 amdgpu_dm_create_common_mode(struct drm_encoder *encoder,
7292                              char *name,
7293                              int hdisplay, int vdisplay)
7294 {
7295         struct drm_device *dev = encoder->dev;
7296         struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
7297         struct drm_display_mode *mode = NULL;
7298         struct drm_display_mode *native_mode = &amdgpu_encoder->native_mode;
7299
7300         mode = drm_mode_duplicate(dev, native_mode);
7301
7302         if (mode == NULL)
7303                 return NULL;
7304
7305         mode->hdisplay = hdisplay;
7306         mode->vdisplay = vdisplay;
7307         mode->type &= ~DRM_MODE_TYPE_PREFERRED;
7308         strscpy(mode->name, name, DRM_DISPLAY_MODE_LEN);
7309
7310         return mode;
7311
7312 }
7313
7314 static void amdgpu_dm_connector_add_common_modes(struct drm_encoder *encoder,
7315                                                  struct drm_connector *connector)
7316 {
7317         struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
7318         struct drm_display_mode *mode = NULL;
7319         struct drm_display_mode *native_mode = &amdgpu_encoder->native_mode;
7320         struct amdgpu_dm_connector *amdgpu_dm_connector =
7321                                 to_amdgpu_dm_connector(connector);
7322         int i;
7323         int n;
7324         struct mode_size {
7325                 char name[DRM_DISPLAY_MODE_LEN];
7326                 int w;
7327                 int h;
7328         } common_modes[] = {
7329                 {  "640x480",  640,  480},
7330                 {  "800x600",  800,  600},
7331                 { "1024x768", 1024,  768},
7332                 { "1280x720", 1280,  720},
7333                 { "1280x800", 1280,  800},
7334                 {"1280x1024", 1280, 1024},
7335                 { "1440x900", 1440,  900},
7336                 {"1680x1050", 1680, 1050},
7337                 {"1600x1200", 1600, 1200},
7338                 {"1920x1080", 1920, 1080},
7339                 {"1920x1200", 1920, 1200}
7340         };
7341
7342         n = ARRAY_SIZE(common_modes);
7343
7344         for (i = 0; i < n; i++) {
7345                 struct drm_display_mode *curmode = NULL;
7346                 bool mode_existed = false;
7347
7348                 if (common_modes[i].w > native_mode->hdisplay ||
7349                     common_modes[i].h > native_mode->vdisplay ||
7350                    (common_modes[i].w == native_mode->hdisplay &&
7351                     common_modes[i].h == native_mode->vdisplay))
7352                         continue;
7353
7354                 list_for_each_entry(curmode, &connector->probed_modes, head) {
7355                         if (common_modes[i].w == curmode->hdisplay &&
7356                             common_modes[i].h == curmode->vdisplay) {
7357                                 mode_existed = true;
7358                                 break;
7359                         }
7360                 }
7361
7362                 if (mode_existed)
7363                         continue;
7364
7365                 mode = amdgpu_dm_create_common_mode(encoder,
7366                                 common_modes[i].name, common_modes[i].w,
7367                                 common_modes[i].h);
                     if (!mode)
                             continue;

7368                 drm_mode_probed_add(connector, mode);
7369                 amdgpu_dm_connector->num_modes++;
7370         }
7371 }
7372
7373 static void amdgpu_dm_connector_ddc_get_modes(struct drm_connector *connector,
7374                                               struct edid *edid)
7375 {
7376         struct amdgpu_dm_connector *amdgpu_dm_connector =
7377                         to_amdgpu_dm_connector(connector);
7378
7379         if (edid) {
7380                 /* empty probed_modes */
7381                 INIT_LIST_HEAD(&connector->probed_modes);
7382                 amdgpu_dm_connector->num_modes =
7383                                 drm_add_edid_modes(connector, edid);
7384
7385                 /* Sort the probed modes before calling
7386                  * amdgpu_dm_get_native_mode(), since an EDID can have
7387                  * more than one preferred mode. Modes later in the
7388                  * probed mode list can have a higher preferred
7389                  * resolution: for example, 3840x2160 in the base
7390                  * EDID preferred timing and 4096x2160 in a later
7391                  * DID extension block.
7392                  */
7393                 drm_mode_sort(&connector->probed_modes);
7394                 amdgpu_dm_get_native_mode(connector);
7395
7396                 /* Freesync capabilities are reset by calling
7397                  * drm_add_edid_modes() and need to be
7398                  * restored here.
7399                  */
7400                 amdgpu_dm_update_freesync_caps(connector, edid);
7401         } else {
7402                 amdgpu_dm_connector->num_modes = 0;
7403         }
7404 }
7405
7406 static bool is_duplicate_mode(struct amdgpu_dm_connector *aconnector,
7407                               struct drm_display_mode *mode)
7408 {
7409         struct drm_display_mode *m;
7410
7411         list_for_each_entry (m, &aconnector->base.probed_modes, head) {
7412                 if (drm_mode_equal(m, mode))
7413                         return true;
7414         }
7415
7416         return false;
7417 }
7418
7419 static uint add_fs_modes(struct amdgpu_dm_connector *aconnector)
7420 {
7421         const struct drm_display_mode *m;
7422         struct drm_display_mode *new_mode;
7423         uint i;
7424         uint32_t new_modes_count = 0;
7425
7426         /* Standard FPS values
7427          *
7428          * 23.976   - TV/NTSC
7429          * 24       - Cinema
7430          * 25       - TV/PAL
7431          * 29.97    - TV/NTSC
7432          * 30       - TV/NTSC
7433          * 48       - Cinema HFR
7434          * 50       - TV/PAL
7435          * 60       - Commonly used
7436          * 48,72,96 - Multiples of 24
7437          */
7438         const uint32_t common_rates[] = { 23976, 24000, 25000, 29970, 30000,
7439                                          48000, 50000, 60000, 72000, 96000 };
7440
7441         /*
7442          * Find the mode with the highest refresh rate at the same resolution
7443          * as the preferred mode. Some monitors report a preferred mode with a
7444          * lower refresh rate than the highest rate they support.
7445          */
7446
7447         m = get_highest_refresh_rate_mode(aconnector, true);
7448         if (!m)
7449                 return 0;
7450
7451         for (i = 0; i < ARRAY_SIZE(common_rates); i++) {
7452                 uint64_t target_vtotal, target_vtotal_diff;
7453                 uint64_t num, den;
7454
7455                 if (drm_mode_vrefresh(m) * 1000 < common_rates[i])
7456                         continue;
7457
7458                 if (common_rates[i] < aconnector->min_vfreq * 1000 ||
7459                     common_rates[i] > aconnector->max_vfreq * 1000)
7460                         continue;
7461
7462                 num = (unsigned long long)m->clock * 1000 * 1000;
7463                 den = common_rates[i] * (unsigned long long)m->htotal;
7464                 target_vtotal = div_u64(num, den);
7465                 target_vtotal_diff = target_vtotal - m->vtotal;
7466
7467                 /* Check for illegal modes */
7468                 if (m->vsync_start + target_vtotal_diff < m->vdisplay ||
7469                     m->vsync_end + target_vtotal_diff < m->vsync_start ||
7470                     m->vtotal + target_vtotal_diff < m->vsync_end)
7471                         continue;
7472
7473                 new_mode = drm_mode_duplicate(aconnector->base.dev, m);
7474                 if (!new_mode)
7475                         goto out;
7476
7477                 new_mode->vtotal += (u16)target_vtotal_diff;
7478                 new_mode->vsync_start += (u16)target_vtotal_diff;
7479                 new_mode->vsync_end += (u16)target_vtotal_diff;
7480                 new_mode->type &= ~DRM_MODE_TYPE_PREFERRED;
7481                 new_mode->type |= DRM_MODE_TYPE_DRIVER;
7482
7483                 if (!is_duplicate_mode(aconnector, new_mode)) {
7484                         drm_mode_probed_add(&aconnector->base, new_mode);
7485                         new_modes_count += 1;
7486                 } else
7487                         drm_mode_destroy(aconnector->base.dev, new_mode);
7488         }
7489  out:
7490         return new_modes_count;
7491 }
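     /*
      * Worked example (illustrative numbers): starting from 1920x1080@60
      * with clock = 148500 kHz, htotal = 2200 and vtotal = 1125, the 48 Hz
      * variant gets target_vtotal = 148500 * 10^6 / (48000 * 2200) = 1406,
      * so target_vtotal_diff = 281. Stretching vtotal, vsync_start and
      * vsync_end by 281 lines leaves the pixel clock and horizontal timing
      * untouched while lowering the refresh rate to 148500000 / (2200 *
      * 1406) ~ 48.01 Hz, which is how these synthetic FreeSync modes work.
      */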
7492
7493 static void amdgpu_dm_connector_add_freesync_modes(struct drm_connector *connector,
7494                                                    struct edid *edid)
7495 {
7496         struct amdgpu_dm_connector *amdgpu_dm_connector =
7497                 to_amdgpu_dm_connector(connector);
7498
7499         if (!(amdgpu_freesync_vid_mode && edid))
7500                 return;
7501
7502         if (amdgpu_dm_connector->max_vfreq - amdgpu_dm_connector->min_vfreq > 10)
7503                 amdgpu_dm_connector->num_modes +=
7504                         add_fs_modes(amdgpu_dm_connector);
7505 }
7506
7507 static int amdgpu_dm_connector_get_modes(struct drm_connector *connector)
7508 {
7509         struct amdgpu_dm_connector *amdgpu_dm_connector =
7510                         to_amdgpu_dm_connector(connector);
7511         struct drm_encoder *encoder;
7512         struct edid *edid = amdgpu_dm_connector->edid;
7513
7514         encoder = amdgpu_dm_connector_to_encoder(connector);
7515
7516         if (!drm_edid_is_valid(edid)) {
7517                 amdgpu_dm_connector->num_modes =
7518                                 drm_add_modes_noedid(connector, 640, 480);
7519         } else {
7520                 amdgpu_dm_connector_ddc_get_modes(connector, edid);
7521                 amdgpu_dm_connector_add_common_modes(encoder, connector);
7522                 amdgpu_dm_connector_add_freesync_modes(connector, edid);
7523         }
7524         amdgpu_dm_fbc_init(connector);
7525
7526         return amdgpu_dm_connector->num_modes;
7527 }
7528
7529 void amdgpu_dm_connector_init_helper(struct amdgpu_display_manager *dm,
7530                                      struct amdgpu_dm_connector *aconnector,
7531                                      int connector_type,
7532                                      struct dc_link *link,
7533                                      int link_index)
7534 {
7535         struct amdgpu_device *adev = drm_to_adev(dm->ddev);
7536
7537         /*
7538          * Some of the properties below require access to state, like bpc.
7539          * Allocate some default initial connector state with our reset helper.
7540          */
7541         if (aconnector->base.funcs->reset)
7542                 aconnector->base.funcs->reset(&aconnector->base);
7543
7544         aconnector->connector_id = link_index;
7545         aconnector->dc_link = link;
7546         aconnector->base.interlace_allowed = false;
7547         aconnector->base.doublescan_allowed = false;
7548         aconnector->base.stereo_allowed = false;
7549         aconnector->base.dpms = DRM_MODE_DPMS_OFF;
7550         aconnector->hpd.hpd = AMDGPU_HPD_NONE; /* not used */
7551         aconnector->audio_inst = -1;
7552         mutex_init(&aconnector->hpd_lock);
7553
7554         /*
7555          * Configure HPD hot-plug support. connector->polled defaults to 0,
7556          * which means HPD hot plug is not supported.
7557          */
7558         switch (connector_type) {
7559         case DRM_MODE_CONNECTOR_HDMIA:
7560                 aconnector->base.polled = DRM_CONNECTOR_POLL_HPD;
7561                 aconnector->base.ycbcr_420_allowed =
7562                         link->link_enc->features.hdmi_ycbcr420_supported ? true : false;
7563                 break;
7564         case DRM_MODE_CONNECTOR_DisplayPort:
7565                 aconnector->base.polled = DRM_CONNECTOR_POLL_HPD;
7566                 aconnector->base.ycbcr_420_allowed =
7567                         link->link_enc->features.dp_ycbcr420_supported ? true : false;
7568                 break;
7569         case DRM_MODE_CONNECTOR_DVID:
7570                 aconnector->base.polled = DRM_CONNECTOR_POLL_HPD;
7571                 break;
7572         default:
7573                 break;
7574         }
7575
7576         drm_object_attach_property(&aconnector->base.base,
7577                                 dm->ddev->mode_config.scaling_mode_property,
7578                                 DRM_MODE_SCALE_NONE);
7579
7580         drm_object_attach_property(&aconnector->base.base,
7581                                 adev->mode_info.underscan_property,
7582                                 UNDERSCAN_OFF);
7583         drm_object_attach_property(&aconnector->base.base,
7584                                 adev->mode_info.underscan_hborder_property,
7585                                 0);
7586         drm_object_attach_property(&aconnector->base.base,
7587                                 adev->mode_info.underscan_vborder_property,
7588                                 0);
7589
7590         if (!aconnector->mst_port)
7591                 drm_connector_attach_max_bpc_property(&aconnector->base, 8, 16);
7592
7593         /* This defaults to the max in the range, but we want 8bpc for non-edp. */
7594         aconnector->base.state->max_bpc = (connector_type == DRM_MODE_CONNECTOR_eDP) ? 16 : 8;
7595         aconnector->base.state->max_requested_bpc = aconnector->base.state->max_bpc;
7596
7597         if (connector_type == DRM_MODE_CONNECTOR_eDP &&
7598             (dc_is_dmcu_initialized(adev->dm.dc) || adev->dm.dc->ctx->dmub_srv)) {
7599                 drm_object_attach_property(&aconnector->base.base,
7600                                 adev->mode_info.abm_level_property, 0);
7601         }
7602
7603         if (connector_type == DRM_MODE_CONNECTOR_HDMIA ||
7604             connector_type == DRM_MODE_CONNECTOR_DisplayPort ||
7605             connector_type == DRM_MODE_CONNECTOR_eDP) {
7606                 drm_connector_attach_hdr_output_metadata_property(&aconnector->base);
7607
7608                 if (!aconnector->mst_port)
7609                         drm_connector_attach_vrr_capable_property(&aconnector->base);
7610
7611 #ifdef CONFIG_DRM_AMD_DC_HDCP
7612                 if (adev->dm.hdcp_workqueue)
7613                         drm_connector_attach_content_protection_property(&aconnector->base, true);
7614 #endif
7615         }
7616 }
7617
7618 static int amdgpu_dm_i2c_xfer(struct i2c_adapter *i2c_adap,
7619                               struct i2c_msg *msgs, int num)
7620 {
7621         struct amdgpu_i2c_adapter *i2c = i2c_get_adapdata(i2c_adap);
7622         struct ddc_service *ddc_service = i2c->ddc_service;
7623         struct i2c_command cmd;
7624         int i;
7625         int result = -EIO;
7626
7627         cmd.payloads = kcalloc(num, sizeof(struct i2c_payload), GFP_KERNEL);
7628
7629         if (!cmd.payloads)
7630                 return result;
7631
7632         cmd.number_of_payloads = num;
7633         cmd.engine = I2C_COMMAND_ENGINE_DEFAULT;
7634         cmd.speed = 100;
7635
7636         for (i = 0; i < num; i++) {
7637                 cmd.payloads[i].write = !(msgs[i].flags & I2C_M_RD);
7638                 cmd.payloads[i].address = msgs[i].addr;
7639                 cmd.payloads[i].length = msgs[i].len;
7640                 cmd.payloads[i].data = msgs[i].buf;
7641         }
7642
7643         if (dc_submit_i2c(
7644                         ddc_service->ctx->dc,
7645                         ddc_service->ddc_pin->hw_info.ddc_channel,
7646                         &cmd))
7647                 result = num;
7648
7649         kfree(cmd.payloads);
7650         return result;
7651 }
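     /*
      * For illustration: a typical EDID fetch arrives here as two i2c_msgs,
      * a one-byte write of offset 0x00 to address 0x50 followed by a
      * 128-byte read from 0x50. The loop above maps each message to an
      * i2c_payload with matching write/address/length/data fields, and
      * dc_submit_i2c() then runs them as a single transaction on the DDC
      * channel.
      */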
7652
7653 static u32 amdgpu_dm_i2c_func(struct i2c_adapter *adap)
7654 {
7655         return I2C_FUNC_I2C | I2C_FUNC_SMBUS_EMUL;
7656 }
7657
7658 static const struct i2c_algorithm amdgpu_dm_i2c_algo = {
7659         .master_xfer = amdgpu_dm_i2c_xfer,
7660         .functionality = amdgpu_dm_i2c_func,
7661 };
7662
7663 static struct amdgpu_i2c_adapter *
7664 create_i2c(struct ddc_service *ddc_service,
7665            int link_index,
7666            int *res)
7667 {
7668         struct amdgpu_device *adev = ddc_service->ctx->driver_context;
7669         struct amdgpu_i2c_adapter *i2c;
7670
7671         i2c = kzalloc(sizeof(struct amdgpu_i2c_adapter), GFP_KERNEL);
7672         if (!i2c)
7673                 return NULL;
7674         i2c->base.owner = THIS_MODULE;
7675         i2c->base.class = I2C_CLASS_DDC;
7676         i2c->base.dev.parent = &adev->pdev->dev;
7677         i2c->base.algo = &amdgpu_dm_i2c_algo;
7678         snprintf(i2c->base.name, sizeof(i2c->base.name), "AMDGPU DM i2c hw bus %d", link_index);
7679         i2c_set_adapdata(&i2c->base, i2c);
7680         i2c->ddc_service = ddc_service;
7681         i2c->ddc_service->ddc_pin->hw_info.ddc_channel = link_index;
7682
7683         return i2c;
7684 }
7685
7686
7687 /*
7688  * Note: this function assumes that dc_link_detect() was called for the
7689  * dc_link which will be represented by this aconnector.
7690  */
7691 static int amdgpu_dm_connector_init(struct amdgpu_display_manager *dm,
7692                                     struct amdgpu_dm_connector *aconnector,
7693                                     uint32_t link_index,
7694                                     struct amdgpu_encoder *aencoder)
7695 {
7696         int res = 0;
7697         int connector_type;
7698         struct dc *dc = dm->dc;
7699         struct dc_link *link = dc_get_link_at_index(dc, link_index);
7700         struct amdgpu_i2c_adapter *i2c;
7701
7702         link->priv = aconnector;
7703
7704         DRM_DEBUG_DRIVER("%s()\n", __func__);
7705
7706         i2c = create_i2c(link->ddc, link->link_index, &res);
7707         if (!i2c) {
7708                 DRM_ERROR("Failed to create i2c adapter data\n");
7709                 return -ENOMEM;
7710         }
7711
7712         aconnector->i2c = i2c;
7713         res = i2c_add_adapter(&i2c->base);
7714
7715         if (res) {
7716                 DRM_ERROR("Failed to register hw i2c %d\n", link->link_index);
7717                 goto out_free;
7718         }
7719
7720         connector_type = to_drm_connector_type(link->connector_signal);
7721
7722         res = drm_connector_init_with_ddc(
7723                         dm->ddev,
7724                         &aconnector->base,
7725                         &amdgpu_dm_connector_funcs,
7726                         connector_type,
7727                         &i2c->base);
7728
7729         if (res) {
7730                 DRM_ERROR("connector_init failed\n");
7731                 aconnector->connector_id = -1;
7732                 goto out_free;
7733         }
7734
7735         drm_connector_helper_add(
7736                         &aconnector->base,
7737                         &amdgpu_dm_connector_helper_funcs);
7738
7739         amdgpu_dm_connector_init_helper(
7740                 dm,
7741                 aconnector,
7742                 connector_type,
7743                 link,
7744                 link_index);
7745
7746         drm_connector_attach_encoder(
7747                 &aconnector->base, &aencoder->base);
7748
7749         if (connector_type == DRM_MODE_CONNECTOR_DisplayPort
7750                 || connector_type == DRM_MODE_CONNECTOR_eDP)
7751                 amdgpu_dm_initialize_dp_connector(dm, aconnector, link->link_index);
7752
7753 out_free:
7754         if (res) {
7755                 kfree(i2c);
7756                 aconnector->i2c = NULL;
7757         }
7758         return res;
7759 }
7760
7761 int amdgpu_dm_get_encoder_crtc_mask(struct amdgpu_device *adev)
7762 {
7763         switch (adev->mode_info.num_crtc) {
7764         case 1:
7765                 return 0x1;
7766         case 2:
7767                 return 0x3;
7768         case 3:
7769                 return 0x7;
7770         case 4:
7771                 return 0xf;
7772         case 5:
7773                 return 0x1f;
7774         case 6:
7775         default:
7776                 return 0x3f;
7777         }
7778 }
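     /*
      * The switch above is equivalent to (1 << num_crtc) - 1 clamped at six
      * CRTCs: every encoder advertises one possible-CRTC bit per hardware
      * CRTC.
      */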
7779
7780 static int amdgpu_dm_encoder_init(struct drm_device *dev,
7781                                   struct amdgpu_encoder *aencoder,
7782                                   uint32_t link_index)
7783 {
7784         struct amdgpu_device *adev = drm_to_adev(dev);
7785
7786         int res = drm_encoder_init(dev,
7787                                    &aencoder->base,
7788                                    &amdgpu_dm_encoder_funcs,
7789                                    DRM_MODE_ENCODER_TMDS,
7790                                    NULL);
7791
7792         aencoder->base.possible_crtcs = amdgpu_dm_get_encoder_crtc_mask(adev);
7793
7794         if (!res)
7795                 aencoder->encoder_id = link_index;
7796         else
7797                 aencoder->encoder_id = -1;
7798
7799         drm_encoder_helper_add(&aencoder->base, &amdgpu_dm_encoder_helper_funcs);
7800
7801         return res;
7802 }
7803
7804 static void manage_dm_interrupts(struct amdgpu_device *adev,
7805                                  struct amdgpu_crtc *acrtc,
7806                                  bool enable)
7807 {
7808         /*
7809          * We have no guarantee that the frontend index maps to the same
7810          * backend index - some even map to more than one.
7811          *
7812          * TODO: Use a different interrupt or check DC itself for the mapping.
7813          */
7814         int irq_type =
7815                 amdgpu_display_crtc_idx_to_irq_type(
7816                         adev,
7817                         acrtc->crtc_id);
7818
7819         if (enable) {
7820                 drm_crtc_vblank_on(&acrtc->base);
7821                 amdgpu_irq_get(
7822                         adev,
7823                         &adev->pageflip_irq,
7824                         irq_type);
7825 #if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
7826                 amdgpu_irq_get(
7827                         adev,
7828                         &adev->vline0_irq,
7829                         irq_type);
7830 #endif
7831         } else {
7832 #if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
7833                 amdgpu_irq_put(
7834                         adev,
7835                         &adev->vline0_irq,
7836                         irq_type);
7837 #endif
7838                 amdgpu_irq_put(
7839                         adev,
7840                         &adev->pageflip_irq,
7841                         irq_type);
7842                 drm_crtc_vblank_off(&acrtc->base);
7843         }
7844 }
7845
7846 static void dm_update_pflip_irq_state(struct amdgpu_device *adev,
7847                                       struct amdgpu_crtc *acrtc)
7848 {
7849         int irq_type =
7850                 amdgpu_display_crtc_idx_to_irq_type(adev, acrtc->crtc_id);
7851
7852         /*
7853          * This reads the current state of the IRQ and forcibly reapplies
7854          * the setting to the hardware.
7855          */
7856         amdgpu_irq_update(adev, &adev->pageflip_irq, irq_type);
7857 }
7858
7859 static bool
7860 is_scaling_state_different(const struct dm_connector_state *dm_state,
7861                            const struct dm_connector_state *old_dm_state)
7862 {
7863         if (dm_state->scaling != old_dm_state->scaling)
7864                 return true;
7865         if (!dm_state->underscan_enable && old_dm_state->underscan_enable) {
7866                 if (old_dm_state->underscan_hborder != 0 && old_dm_state->underscan_vborder != 0)
7867                         return true;
7868         } else  if (dm_state->underscan_enable && !old_dm_state->underscan_enable) {
7869                 if (dm_state->underscan_hborder != 0 && dm_state->underscan_vborder != 0)
7870                         return true;
7871         } else if (dm_state->underscan_hborder != old_dm_state->underscan_hborder ||
7872                    dm_state->underscan_vborder != old_dm_state->underscan_vborder)
7873                 return true;
7874         return false;
7875 }
7876
7877 #ifdef CONFIG_DRM_AMD_DC_HDCP
7878 static bool is_content_protection_different(struct drm_connector_state *state,
7879                                             const struct drm_connector_state *old_state,
7880                                             const struct drm_connector *connector, struct hdcp_workqueue *hdcp_w)
7881 {
7882         struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
7883         struct dm_connector_state *dm_con_state = to_dm_connector_state(connector->state);
7884
7885         /* Handle: Type0/1 change */
7886         if (old_state->hdcp_content_type != state->hdcp_content_type &&
7887             state->content_protection != DRM_MODE_CONTENT_PROTECTION_UNDESIRED) {
7888                 state->content_protection = DRM_MODE_CONTENT_PROTECTION_DESIRED;
7889                 return true;
7890         }
7891
7892         /* CP is being re-enabled, so ignore this transition.
7893          *
7894          * Handles:     ENABLED -> DESIRED
7895          */
7896         if (old_state->content_protection == DRM_MODE_CONTENT_PROTECTION_ENABLED &&
7897             state->content_protection == DRM_MODE_CONTENT_PROTECTION_DESIRED) {
7898                 state->content_protection = DRM_MODE_CONTENT_PROTECTION_ENABLED;
7899                 return false;
7900         }
7901
7902         /* S3 resume case: the old state will always be 0 (UNDESIRED) and the restored state will be ENABLED.
7903          *
7904          * Handles:     UNDESIRED -> ENABLED
7905          */
7906         if (old_state->content_protection == DRM_MODE_CONTENT_PROTECTION_UNDESIRED &&
7907             state->content_protection == DRM_MODE_CONTENT_PROTECTION_ENABLED)
7908                 state->content_protection = DRM_MODE_CONTENT_PROTECTION_DESIRED;
7909
7910         /* Check that something is connected and enabled; otherwise we would
7911          * start HDCP with nothing connected (hot-plug, headless S3, DPMS).
7912          *
7913          * Handles:     DESIRED -> DESIRED (Special case)
7914          */
7915         if (dm_con_state->update_hdcp && state->content_protection == DRM_MODE_CONTENT_PROTECTION_DESIRED &&
7916             connector->dpms == DRM_MODE_DPMS_ON && aconnector->dc_sink != NULL) {
7917                 dm_con_state->update_hdcp = false;
7918                 return true;
7919         }
7920
7921         /*
7922          * Handles:     UNDESIRED -> UNDESIRED
7923          *              DESIRED -> DESIRED
7924          *              ENABLED -> ENABLED
7925          */
7926         if (old_state->content_protection == state->content_protection)
7927                 return false;
7928
7929         /*
7930          * Handles:     UNDESIRED -> DESIRED
7931          *              DESIRED -> UNDESIRED
7932          *              ENABLED -> UNDESIRED
7933          */
7934         if (state->content_protection != DRM_MODE_CONTENT_PROTECTION_ENABLED)
7935                 return true;
7936
7937         /*
7938          * Handles:     DESIRED -> ENABLED
7939          */
7940         return false;
7941 }
7942
7943 #endif
7944 static void remove_stream(struct amdgpu_device *adev,
7945                           struct amdgpu_crtc *acrtc,
7946                           struct dc_stream_state *stream)
7947 {
7948         /* this is the update mode case */
7949
7950         acrtc->otg_inst = -1;
7951         acrtc->enabled = false;
7952 }
7953
7954 static int get_cursor_position(struct drm_plane *plane, struct drm_crtc *crtc,
7955                                struct dc_cursor_position *position)
7956 {
7957         struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
7958         int x, y;
7959         int xorigin = 0, yorigin = 0;
7960
7961         if (!crtc || !plane->state->fb)
7962                 return 0;
7963
7964         if ((plane->state->crtc_w > amdgpu_crtc->max_cursor_width) ||
7965             (plane->state->crtc_h > amdgpu_crtc->max_cursor_height)) {
7966                 DRM_ERROR("%s: bad cursor width or height %d x %d\n",
7967                           __func__,
7968                           plane->state->crtc_w,
7969                           plane->state->crtc_h);
7970                 return -EINVAL;
7971         }
7972
7973         x = plane->state->crtc_x;
7974         y = plane->state->crtc_y;
7975
7976         if (x <= -amdgpu_crtc->max_cursor_width ||
7977             y <= -amdgpu_crtc->max_cursor_height)
7978                 return 0;
7979
7980         if (x < 0) {
7981                 xorigin = min(-x, amdgpu_crtc->max_cursor_width - 1);
7982                 x = 0;
7983         }
7984         if (y < 0) {
7985                 yorigin = min(-y, amdgpu_crtc->max_cursor_height - 1);
7986                 y = 0;
7987         }
7988         position->enable = true;
7989         position->translate_by_source = true;
7990         position->x = x;
7991         position->y = y;
7992         position->x_hotspot = xorigin;
7993         position->y_hotspot = yorigin;
7994
7995         return 0;
7996 }
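     /*
      * For example, a 64x64 cursor at crtc_x = -10 is partially off the left
      * edge, so the code above reports x = 0 with x_hotspot = 10; DC then
      * offsets the cursor image by the hotspot so it appears to keep sliding
      * off-screen instead of clamping at the edge.
      */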
7997
7998 static void handle_cursor_update(struct drm_plane *plane,
7999                                  struct drm_plane_state *old_plane_state)
8000 {
8001         struct amdgpu_device *adev = drm_to_adev(plane->dev);
8002         struct amdgpu_framebuffer *afb = to_amdgpu_framebuffer(plane->state->fb);
8003         struct drm_crtc *crtc = afb ? plane->state->crtc : old_plane_state->crtc;
8004         struct dm_crtc_state *crtc_state = crtc ? to_dm_crtc_state(crtc->state) : NULL;
8005         struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
8006         uint64_t address = afb ? afb->address : 0;
8007         struct dc_cursor_position position = {0};
8008         struct dc_cursor_attributes attributes;
8009         int ret;
8010
8011         if (!plane->state->fb && !old_plane_state->fb)
8012                 return;
8013
8014         DC_LOG_CURSOR("%s: crtc_id=%d with size %d to %d\n",
8015                       __func__,
8016                       amdgpu_crtc->crtc_id,
8017                       plane->state->crtc_w,
8018                       plane->state->crtc_h);
8019
8020         ret = get_cursor_position(plane, crtc, &position);
8021         if (ret)
8022                 return;
8023
8024         if (!position.enable) {
8025                 /* turn off cursor */
8026                 if (crtc_state && crtc_state->stream) {
8027                         mutex_lock(&adev->dm.dc_lock);
8028                         dc_stream_set_cursor_position(crtc_state->stream,
8029                                                       &position);
8030                         mutex_unlock(&adev->dm.dc_lock);
8031                 }
8032                 return;
8033         }
8034
8035         amdgpu_crtc->cursor_width = plane->state->crtc_w;
8036         amdgpu_crtc->cursor_height = plane->state->crtc_h;
8037
8038         memset(&attributes, 0, sizeof(attributes));
8039         attributes.address.high_part = upper_32_bits(address);
8040         attributes.address.low_part  = lower_32_bits(address);
8041         attributes.width             = plane->state->crtc_w;
8042         attributes.height            = plane->state->crtc_h;
8043         attributes.color_format      = CURSOR_MODE_COLOR_PRE_MULTIPLIED_ALPHA;
8044         attributes.rotation_angle    = 0;
8045         attributes.attribute_flags.value = 0;
8046
8047         attributes.pitch = afb->base.pitches[0] / afb->base.format->cpp[0];
8048
8049         if (crtc_state->stream) {
8050                 mutex_lock(&adev->dm.dc_lock);
8051                 if (!dc_stream_set_cursor_attributes(crtc_state->stream,
8052                                                          &attributes))
8053                         DRM_ERROR("DC failed to set cursor attributes\n");
8054
8055                 if (!dc_stream_set_cursor_position(crtc_state->stream,
8056                                                    &position))
8057                         DRM_ERROR("DC failed to set cursor position\n");
8058                 mutex_unlock(&adev->dm.dc_lock);
8059         }
8060 }
8061
8062 static void prepare_flip_isr(struct amdgpu_crtc *acrtc)
8063 {
8064
8065         assert_spin_locked(&acrtc->base.dev->event_lock);
8066         WARN_ON(acrtc->event);
8067
8068         acrtc->event = acrtc->base.state->event;
8069
8070         /* Set the flip status */
8071         acrtc->pflip_status = AMDGPU_FLIP_SUBMITTED;
8072
8073         /* Mark this event as consumed */
8074         acrtc->base.state->event = NULL;
8075
8076         DC_LOG_PFLIP("crtc:%d, pflip_stat:AMDGPU_FLIP_SUBMITTED\n",
8077                      acrtc->crtc_id);
8078 }
8079
8080 static void update_freesync_state_on_stream(
8081         struct amdgpu_display_manager *dm,
8082         struct dm_crtc_state *new_crtc_state,
8083         struct dc_stream_state *new_stream,
8084         struct dc_plane_state *surface,
8085         u32 flip_timestamp_in_us)
8086 {
8087         struct mod_vrr_params vrr_params;
8088         struct dc_info_packet vrr_infopacket = {0};
8089         struct amdgpu_device *adev = dm->adev;
8090         struct amdgpu_crtc *acrtc = to_amdgpu_crtc(new_crtc_state->base.crtc);
8091         unsigned long flags;
8092         bool pack_sdp_v1_3 = false;
8093
8094         if (!new_stream)
8095                 return;
8096
8097         /*
8098          * TODO: Determine why min/max totals and vrefresh can be 0 here.
8099          * For now it's sufficient to just guard against these conditions.
8100          */
8101
8102         if (!new_stream->timing.h_total || !new_stream->timing.v_total)
8103                 return;
8104
8105         spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);
8106         vrr_params = acrtc->dm_irq_params.vrr_params;
8107
8108         if (surface) {
8109                 mod_freesync_handle_preflip(
8110                         dm->freesync_module,
8111                         surface,
8112                         new_stream,
8113                         flip_timestamp_in_us,
8114                         &vrr_params);
8115
8116                 if (adev->family < AMDGPU_FAMILY_AI &&
8117                     amdgpu_dm_vrr_active(new_crtc_state)) {
8118                         mod_freesync_handle_v_update(dm->freesync_module,
8119                                                      new_stream, &vrr_params);
8120
8121                         /* Need to call this before the frame ends. */
8122                         dc_stream_adjust_vmin_vmax(dm->dc,
8123                                                    new_crtc_state->stream,
8124                                                    &vrr_params.adjust);
8125                 }
8126         }
8127
8128         mod_freesync_build_vrr_infopacket(
8129                 dm->freesync_module,
8130                 new_stream,
8131                 &vrr_params,
8132                 PACKET_TYPE_VRR,
8133                 TRANSFER_FUNC_UNKNOWN,
8134                 &vrr_infopacket,
8135                 pack_sdp_v1_3);
8136
8137         new_crtc_state->freesync_timing_changed |=
8138                 (memcmp(&acrtc->dm_irq_params.vrr_params.adjust,
8139                         &vrr_params.adjust,
8140                         sizeof(vrr_params.adjust)) != 0);
8141
8142         new_crtc_state->freesync_vrr_info_changed |=
8143                 (memcmp(&new_crtc_state->vrr_infopacket,
8144                         &vrr_infopacket,
8145                         sizeof(vrr_infopacket)) != 0);
8146
8147         acrtc->dm_irq_params.vrr_params = vrr_params;
8148         new_crtc_state->vrr_infopacket = vrr_infopacket;
8149
8150         new_stream->adjust = acrtc->dm_irq_params.vrr_params.adjust;
8151         new_stream->vrr_infopacket = vrr_infopacket;
8152
8153         if (new_crtc_state->freesync_vrr_info_changed)
8154                 DRM_DEBUG_KMS("VRR packet update: crtc=%u enabled=%d state=%d",
8155                               new_crtc_state->base.crtc->base.id,
8156                               (int)new_crtc_state->base.vrr_enabled,
8157                               (int)vrr_params.state);
8158
8159         spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
8160 }
8161
8162 static void update_stream_irq_parameters(
8163         struct amdgpu_display_manager *dm,
8164         struct dm_crtc_state *new_crtc_state)
8165 {
8166         struct dc_stream_state *new_stream = new_crtc_state->stream;
8167         struct mod_vrr_params vrr_params;
8168         struct mod_freesync_config config = new_crtc_state->freesync_config;
8169         struct amdgpu_device *adev = dm->adev;
8170         struct amdgpu_crtc *acrtc = to_amdgpu_crtc(new_crtc_state->base.crtc);
8171         unsigned long flags;
8172
8173         if (!new_stream)
8174                 return;
8175
8176         /*
8177          * TODO: Determine why min/max totals and vrefresh can be 0 here.
8178          * For now it's sufficient to just guard against these conditions.
8179          */
8180         if (!new_stream->timing.h_total || !new_stream->timing.v_total)
8181                 return;
8182
8183         spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);
8184         vrr_params = acrtc->dm_irq_params.vrr_params;
8185
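             /*
              * Roughly: a freesync-compatible fixed-refresh config from atomic
              * check is honored when no full modeset is needed (or fixed mode
              * was already active); otherwise the connector's vrr_enabled
              * property picks between variable and inactive, and displays
              * without VRR support are marked unsupported.
              */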
8186         if (new_crtc_state->vrr_supported &&
8187             config.min_refresh_in_uhz &&
8188             config.max_refresh_in_uhz) {
8189                 /*
8190                  * if freesync compatible mode was set, config.state will be set
8191                  * in atomic check
8192                  */
8193                 if (config.state == VRR_STATE_ACTIVE_FIXED && config.fixed_refresh_in_uhz &&
8194                     (!drm_atomic_crtc_needs_modeset(&new_crtc_state->base) ||
8195                      new_crtc_state->freesync_config.state == VRR_STATE_ACTIVE_FIXED)) {
8196                         vrr_params.max_refresh_in_uhz = config.max_refresh_in_uhz;
8197                         vrr_params.min_refresh_in_uhz = config.min_refresh_in_uhz;
8198                         vrr_params.fixed_refresh_in_uhz = config.fixed_refresh_in_uhz;
8199                         vrr_params.state = VRR_STATE_ACTIVE_FIXED;
8200                 } else {
8201                         config.state = new_crtc_state->base.vrr_enabled ?
8202                                                      VRR_STATE_ACTIVE_VARIABLE :
8203                                                      VRR_STATE_INACTIVE;
8204                 }
8205         } else {
8206                 config.state = VRR_STATE_UNSUPPORTED;
8207         }
8208
8209         mod_freesync_build_vrr_params(dm->freesync_module,
8210                                       new_stream,
8211                                       &config, &vrr_params);
8212
8213         new_crtc_state->freesync_timing_changed |=
8214                 (memcmp(&acrtc->dm_irq_params.vrr_params.adjust,
8215                         &vrr_params.adjust, sizeof(vrr_params.adjust)) != 0);
8216
8217         new_crtc_state->freesync_config = config;
8218         /* Copy state for access from DM IRQ handler */
8219         acrtc->dm_irq_params.freesync_config = config;
8220         acrtc->dm_irq_params.active_planes = new_crtc_state->active_planes;
8221         acrtc->dm_irq_params.vrr_params = vrr_params;
8222         spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
8223 }
8224
8225 static void amdgpu_dm_handle_vrr_transition(struct dm_crtc_state *old_state,
8226                                             struct dm_crtc_state *new_state)
8227 {
8228         bool old_vrr_active = amdgpu_dm_vrr_active(old_state);
8229         bool new_vrr_active = amdgpu_dm_vrr_active(new_state);
8230
8231         if (!old_vrr_active && new_vrr_active) {
8232                 /* Transition VRR inactive -> active:
8233                  * While VRR is active, we must not disable vblank irq, as a
8234                  * reenable after disable would compute bogus vblank/pflip
8235                  * timestamps if the reenable happened inside the display front porch.
8236                  *
8237                  * We also need vupdate irq for the actual core vblank handling
8238                  * at end of vblank.
8239                  */
8240                 dm_set_vupdate_irq(new_state->base.crtc, true);
8241                 drm_crtc_vblank_get(new_state->base.crtc);
8242                 DRM_DEBUG_DRIVER("%s: crtc=%u VRR off->on: Get vblank ref\n",
8243                                  __func__, new_state->base.crtc->base.id);
8244         } else if (old_vrr_active && !new_vrr_active) {
8245                 /* Transition VRR active -> inactive:
8246                  * Allow vblank irq disable again for fixed refresh rate.
8247                  */
8248                 dm_set_vupdate_irq(new_state->base.crtc, false);
8249                 drm_crtc_vblank_put(new_state->base.crtc);
8250                 DRM_DEBUG_DRIVER("%s: crtc=%u VRR on->off: Drop vblank ref\n",
8251                                  __func__, new_state->base.crtc->base.id);
8252         }
8253 }
8254
8255 static void amdgpu_dm_commit_cursors(struct drm_atomic_state *state)
8256 {
8257         struct drm_plane *plane;
8258         struct drm_plane_state *old_plane_state;
8259         int i;
8260
8261         /*
8262          * TODO: Make this per-stream so we don't issue redundant updates for
8263          * commits with multiple streams.
8264          */
8265         for_each_old_plane_in_state(state, plane, old_plane_state, i)
8266                 if (plane->type == DRM_PLANE_TYPE_CURSOR)
8267                         handle_cursor_update(plane, old_plane_state);
8268 }
8269
8270 static void amdgpu_dm_commit_planes(struct drm_atomic_state *state,
8271                                     struct dc_state *dc_state,
8272                                     struct drm_device *dev,
8273                                     struct amdgpu_display_manager *dm,
8274                                     struct drm_crtc *pcrtc,
8275                                     bool wait_for_vblank)
8276 {
8277         uint32_t i;
8278         uint64_t timestamp_ns;
8279         struct drm_plane *plane;
8280         struct drm_plane_state *old_plane_state, *new_plane_state;
8281         struct amdgpu_crtc *acrtc_attach = to_amdgpu_crtc(pcrtc);
8282         struct drm_crtc_state *new_pcrtc_state =
8283                         drm_atomic_get_new_crtc_state(state, pcrtc);
8284         struct dm_crtc_state *acrtc_state = to_dm_crtc_state(new_pcrtc_state);
8285         struct dm_crtc_state *dm_old_crtc_state =
8286                         to_dm_crtc_state(drm_atomic_get_old_crtc_state(state, pcrtc));
8287         int planes_count = 0, vpos, hpos;
8288         long r;
8289         unsigned long flags;
8290         struct amdgpu_bo *abo;
8291         uint32_t target_vblank, last_flip_vblank;
8292         bool vrr_active = amdgpu_dm_vrr_active(acrtc_state);
8293         bool pflip_present = false;
8294         struct {
8295                 struct dc_surface_update surface_updates[MAX_SURFACES];
8296                 struct dc_plane_info plane_infos[MAX_SURFACES];
8297                 struct dc_scaling_info scaling_infos[MAX_SURFACES];
8298                 struct dc_flip_addrs flip_addrs[MAX_SURFACES];
8299                 struct dc_stream_update stream_update;
8300         } *bundle;
8301
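             /*
              * The bundle carries MAX_SURFACES worth of update structs per
              * category, presumably too large for the kernel stack, hence the
              * heap allocation below.
              */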
8302         bundle = kzalloc(sizeof(*bundle), GFP_KERNEL);
8303
8304         if (!bundle) {
8305                 dm_error("Failed to allocate update bundle\n");
8306                 goto cleanup;
8307         }
8308
8309         /*
8310          * Disable the cursor first if we're disabling all the planes.
8311          * It'll remain on the screen after the planes are re-enabled
8312          * if we don't.
8313          */
8314         if (acrtc_state->active_planes == 0)
8315                 amdgpu_dm_commit_cursors(state);
8316
8317         /* update planes when needed */
8318         for_each_oldnew_plane_in_state(state, plane, old_plane_state, new_plane_state, i) {
8319                 struct drm_crtc *crtc = new_plane_state->crtc;
8320                 struct drm_crtc_state *new_crtc_state;
8321                 struct drm_framebuffer *fb = new_plane_state->fb;
8322                 struct amdgpu_framebuffer *afb = (struct amdgpu_framebuffer *)fb;
8323                 bool plane_needs_flip;
8324                 struct dc_plane_state *dc_plane;
8325                 struct dm_plane_state *dm_new_plane_state = to_dm_plane_state(new_plane_state);
8326
8327                 /* Cursor plane is handled after stream updates */
8328                 if (plane->type == DRM_PLANE_TYPE_CURSOR)
8329                         continue;
8330
8331                 if (!fb || !crtc || pcrtc != crtc)
8332                         continue;
8333
8334                 new_crtc_state = drm_atomic_get_new_crtc_state(state, crtc);
8335                 if (!new_crtc_state->active)
8336                         continue;
8337
8338                 dc_plane = dm_new_plane_state->dc_state;
8339
8340                 bundle->surface_updates[planes_count].surface = dc_plane;
8341                 if (new_pcrtc_state->color_mgmt_changed) {
8342                         bundle->surface_updates[planes_count].gamma = dc_plane->gamma_correction;
8343                         bundle->surface_updates[planes_count].in_transfer_func = dc_plane->in_transfer_func;
8344                         bundle->surface_updates[planes_count].gamut_remap_matrix = &dc_plane->gamut_remap_matrix;
8345                 }
8346
8347                 fill_dc_scaling_info(new_plane_state,
8348                                      &bundle->scaling_infos[planes_count]);
8349
8350                 bundle->surface_updates[planes_count].scaling_info =
8351                         &bundle->scaling_infos[planes_count];
8352
8353                 plane_needs_flip = old_plane_state->fb && new_plane_state->fb;
8354
8355                 pflip_present = pflip_present || plane_needs_flip;
8356
8357                 if (!plane_needs_flip) {
8358                         planes_count += 1;
8359                         continue;
8360                 }
8361
8362                 abo = gem_to_amdgpu_bo(fb->obj[0]);
8363
8364                 /*
8365                  * Wait for all fences on this FB. Do limited wait to avoid
8366                  * deadlock during GPU reset when this fence will not signal
8367                  * but we hold reservation lock for the BO.
8368                  */
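                     /*
                      * r > 0: all fences signaled (remaining jiffies),
                      * r == 0: timed out, r < 0: error. The bounded wait keeps
                      * a wedged GPU from stalling the commit forever.
                      */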
8369                 r = dma_resv_wait_timeout_rcu(abo->tbo.base.resv, true,
8370                                                         false,
8371                                                         msecs_to_jiffies(5000));
8372                 if (unlikely(r <= 0))
8373                         DRM_ERROR("Waiting for fences timed out!");
8374
8375                 fill_dc_plane_info_and_addr(
8376                         dm->adev, new_plane_state,
8377                         afb->tiling_flags,
8378                         &bundle->plane_infos[planes_count],
8379                         &bundle->flip_addrs[planes_count].address,
8380                         afb->tmz_surface, false);
8381
8382                 DRM_DEBUG_ATOMIC("plane: id=%d dcc_en=%d\n",
8383                                  new_plane_state->plane->index,
8384                                  bundle->plane_infos[planes_count].dcc.enable);
8385
8386                 bundle->surface_updates[planes_count].plane_info =
8387                         &bundle->plane_infos[planes_count];
8388
8389                 /*
8390                  * Only allow immediate flips for fast updates that don't
8391                  * change FB pitch, DCC state, rotation or mirroing.
8392                  * change FB pitch, DCC state, rotation or mirroring.
8393                 bundle->flip_addrs[planes_count].flip_immediate =
8394                         crtc->state->async_flip &&
8395                         acrtc_state->update_type == UPDATE_TYPE_FAST;
8396
8397                 timestamp_ns = ktime_get_ns();
8398                 bundle->flip_addrs[planes_count].flip_timestamp_in_us = div_u64(timestamp_ns, 1000);
8399                 bundle->surface_updates[planes_count].flip_addr = &bundle->flip_addrs[planes_count];
8400                 bundle->surface_updates[planes_count].surface = dc_plane;
8401
8402                 if (!bundle->surface_updates[planes_count].surface) {
8403                         DRM_ERROR("No surface for CRTC: id=%d\n",
8404                                         acrtc_attach->crtc_id);
8405                         continue;
8406                 }
8407
8408                 if (plane == pcrtc->primary)
8409                         update_freesync_state_on_stream(
8410                                 dm,
8411                                 acrtc_state,
8412                                 acrtc_state->stream,
8413                                 dc_plane,
8414                                 bundle->flip_addrs[planes_count].flip_timestamp_in_us);
8415
8416                 DRM_DEBUG_ATOMIC("%s Flipping to hi: 0x%x, low: 0x%x\n",
8417                                  __func__,
8418                                  bundle->flip_addrs[planes_count].address.grph.addr.high_part,
8419                                  bundle->flip_addrs[planes_count].address.grph.addr.low_part);
8420
8421                 planes_count += 1;
8422
8423         }
8424
8425         if (pflip_present) {
8426                 if (!vrr_active) {
8427                         /* Use old throttling in non-vrr fixed refresh rate mode
8428                          * to keep flip scheduling based on target vblank counts
8429                          * working in a backwards compatible way, e.g., for
8430                          * clients using the GLX_OML_sync_control extension or
8431                          * DRI3/Present extension with defined target_msc.
8432                          */
8433                         last_flip_vblank = amdgpu_get_vblank_counter_kms(pcrtc);
8434                 } else {
8436                         /* For variable refresh rate mode only:
8437                          * Get vblank of last completed flip to avoid > 1 vrr
8438                          * flips per video frame by use of throttling, but allow
8439                          * flip programming anywhere in the possibly large
8440                          * variable vrr vblank interval for fine-grained flip
8441                          * timing control and more opportunity to avoid stutter
8442                          * on late submission of flips.
8443                          */
8444                         spin_lock_irqsave(&pcrtc->dev->event_lock, flags);
8445                         last_flip_vblank = acrtc_attach->dm_irq_params.last_flip_vblank;
8446                         spin_unlock_irqrestore(&pcrtc->dev->event_lock, flags);
8447                 }
8448
8449                 target_vblank = last_flip_vblank + wait_for_vblank;
8450
8451                 /*
8452                  * Wait until we're out of the vertical blank period before the one
8453                  * targeted by the flip
8454                  */
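                     /*
                      * Poll roughly once per millisecond while the CRTC still
                      * reports a valid scanout position inside vblank and the
                      * target vblank count lies ahead; the (int) cast keeps the
                      * count comparison wraparound-safe.
                      */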
8455                 while ((acrtc_attach->enabled &&
8456                         (amdgpu_display_get_crtc_scanoutpos(dm->ddev, acrtc_attach->crtc_id,
8457                                                             0, &vpos, &hpos, NULL,
8458                                                             NULL, &pcrtc->hwmode)
8459                          & (DRM_SCANOUTPOS_VALID | DRM_SCANOUTPOS_IN_VBLANK)) ==
8460                         (DRM_SCANOUTPOS_VALID | DRM_SCANOUTPOS_IN_VBLANK) &&
8461                         (int)(target_vblank -
8462                           amdgpu_get_vblank_counter_kms(pcrtc)) > 0)) {
8463                         usleep_range(1000, 1100);
8464                 }
8465
8466                 /**
8467                  * Prepare the flip event for the pageflip interrupt to handle.
8468                  *
8469                  * This only works in the case where we've already turned on the
8470                  * appropriate hardware blocks (e.g. HUBP) so in the transition case
8471                  * from 0 -> n planes we have to skip a hardware generated event
8472                  * and rely on sending it from software.
8473                  */
8474                 if (acrtc_attach->base.state->event &&
8475                     acrtc_state->active_planes > 0) {
8476                         drm_crtc_vblank_get(pcrtc);
8477
8478                         spin_lock_irqsave(&pcrtc->dev->event_lock, flags);
8479
8480                         WARN_ON(acrtc_attach->pflip_status != AMDGPU_FLIP_NONE);
8481                         prepare_flip_isr(acrtc_attach);
8482
8483                         spin_unlock_irqrestore(&pcrtc->dev->event_lock, flags);
8484                 }
8485
8486                 if (acrtc_state->stream) {
8487                         if (acrtc_state->freesync_vrr_info_changed)
8488                                 bundle->stream_update.vrr_infopacket =
8489                                         &acrtc_state->stream->vrr_infopacket;
8490                 }
8491         }
8492
8493         /* Update the planes if changed or disable if we don't have any. */
8494         if ((planes_count || acrtc_state->active_planes == 0) &&
8495                 acrtc_state->stream) {
8496                 bundle->stream_update.stream = acrtc_state->stream;
8497                 if (new_pcrtc_state->mode_changed) {
8498                         bundle->stream_update.src = acrtc_state->stream->src;
8499                         bundle->stream_update.dst = acrtc_state->stream->dst;
8500                 }
8501
8502                 if (new_pcrtc_state->color_mgmt_changed) {
8503                         /*
8504                          * TODO: This isn't fully correct since we've actually
8505                          * already modified the stream in place.
8506                          */
8507                         bundle->stream_update.gamut_remap =
8508                                 &acrtc_state->stream->gamut_remap_matrix;
8509                         bundle->stream_update.output_csc_transform =
8510                                 &acrtc_state->stream->csc_color_matrix;
8511                         bundle->stream_update.out_transfer_func =
8512                                 acrtc_state->stream->out_transfer_func;
8513                 }
8514
8515                 acrtc_state->stream->abm_level = acrtc_state->abm_level;
8516                 if (acrtc_state->abm_level != dm_old_crtc_state->abm_level)
8517                         bundle->stream_update.abm_level = &acrtc_state->abm_level;
8518
8519                 /*
8520                  * If FreeSync state on the stream has changed then we need to
8521                  * re-adjust the min/max bounds now that DC doesn't handle this
8522                  * as part of commit.
8523                  */
8524                 if (is_dc_timing_adjust_needed(dm_old_crtc_state, acrtc_state)) {
8525                         spin_lock_irqsave(&pcrtc->dev->event_lock, flags);
8526                         dc_stream_adjust_vmin_vmax(
8527                                 dm->dc, acrtc_state->stream,
8528                                 &acrtc_attach->dm_irq_params.vrr_params.adjust);
8529                         spin_unlock_irqrestore(&pcrtc->dev->event_lock, flags);
8530                 }
8531                 mutex_lock(&dm->dc_lock);
8532                 if ((acrtc_state->update_type > UPDATE_TYPE_FAST) &&
8533                                 acrtc_state->stream->link->psr_settings.psr_allow_active)
8534                         amdgpu_dm_psr_disable(acrtc_state->stream);
8535
8536                 dc_commit_updates_for_stream(dm->dc,
8537                                                      bundle->surface_updates,
8538                                                      planes_count,
8539                                                      acrtc_state->stream,
8540                                                      &bundle->stream_update,
8541                                                      dc_state);
8542
8543                 /**
8544                  * Enable or disable the interrupts on the backend.
8545                  *
8546                  * Most pipes are put into power gating when unused.
8547                  *
8548                  * When power gating is enabled on a pipe we lose the
8549                  * interrupt enablement state when power gating is disabled.
8550                  *
8551                  * So we need to update the IRQ control state in hardware
8552                  * whenever the pipe turns on (since it could be previously
8553                  * power gated) or off (since some pipes can't be power gated
8554                  * on some ASICs).
8555                  */
8556                 if (dm_old_crtc_state->active_planes != acrtc_state->active_planes)
8557                         dm_update_pflip_irq_state(drm_to_adev(dev),
8558                                                   acrtc_attach);
8559
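                     /*
                      * PSR bring-up appears two-staged: a full (non-FAST)
                      * update sets up the link for PSR when the sink supports
                      * it but the feature isn't enabled yet, and a later FAST
                      * update enables PSR once set up and not already active.
                      */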
8560                 if ((acrtc_state->update_type > UPDATE_TYPE_FAST) &&
8561                                 acrtc_state->stream->link->psr_settings.psr_version != DC_PSR_VERSION_UNSUPPORTED &&
8562                                 !acrtc_state->stream->link->psr_settings.psr_feature_enabled)
8563                         amdgpu_dm_link_setup_psr(acrtc_state->stream);
8564                 else if ((acrtc_state->update_type == UPDATE_TYPE_FAST) &&
8565                                 acrtc_state->stream->link->psr_settings.psr_feature_enabled &&
8566                                 !acrtc_state->stream->link->psr_settings.psr_allow_active) {
8567                         amdgpu_dm_psr_enable(acrtc_state->stream);
8568                 }
8569
8570                 mutex_unlock(&dm->dc_lock);
8571         }
8572
8573         /*
8574          * Update cursor state *after* programming all the planes.
8575          * This avoids redundant programming in the case where we're going
8576          * to be disabling a single plane, since those pipes are being disabled anyway.
8577          */
8578         if (acrtc_state->active_planes)
8579                 amdgpu_dm_commit_cursors(state);
8580
8581 cleanup:
8582         kfree(bundle);
8583 }
8584
8585 static void amdgpu_dm_commit_audio(struct drm_device *dev,
8586                                    struct drm_atomic_state *state)
8587 {
8588         struct amdgpu_device *adev = drm_to_adev(dev);
8589         struct amdgpu_dm_connector *aconnector;
8590         struct drm_connector *connector;
8591         struct drm_connector_state *old_con_state, *new_con_state;
8592         struct drm_crtc_state *new_crtc_state;
8593         struct dm_crtc_state *new_dm_crtc_state;
8594         const struct dc_stream_status *status;
8595         int i, inst;
8596
8597         /* Notify device removals. */
8598         for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) {
8599                 if (old_con_state->crtc != new_con_state->crtc) {
8600                         /* CRTC changes require notification. */
8601                         goto notify;
8602                 }
8603
8604                 if (!new_con_state->crtc)
8605                         continue;
8606
8607                 new_crtc_state = drm_atomic_get_new_crtc_state(
8608                         state, new_con_state->crtc);
8609
8610                 if (!new_crtc_state)
8611                         continue;
8612
8613                 if (!drm_atomic_crtc_needs_modeset(new_crtc_state))
8614                         continue;
8615
8616         notify:
8617                 aconnector = to_amdgpu_dm_connector(connector);
8618
8619                 mutex_lock(&adev->dm.audio_lock);
8620                 inst = aconnector->audio_inst;
8621                 aconnector->audio_inst = -1;
8622                 mutex_unlock(&adev->dm.audio_lock);
8623
8624                 amdgpu_dm_audio_eld_notify(adev, inst);
8625         }
8626
8627         /* Notify audio device additions. */
8628         for_each_new_connector_in_state(state, connector, new_con_state, i) {
8629                 if (!new_con_state->crtc)
8630                         continue;
8631
8632                 new_crtc_state = drm_atomic_get_new_crtc_state(
8633                         state, new_con_state->crtc);
8634
8635                 if (!new_crtc_state)
8636                         continue;
8637
8638                 if (!drm_atomic_crtc_needs_modeset(new_crtc_state))
8639                         continue;
8640
8641                 new_dm_crtc_state = to_dm_crtc_state(new_crtc_state);
8642                 if (!new_dm_crtc_state->stream)
8643                         continue;
8644
8645                 status = dc_stream_get_status(new_dm_crtc_state->stream);
8646                 if (!status)
8647                         continue;
8648
8649                 aconnector = to_amdgpu_dm_connector(connector);
8650
8651                 mutex_lock(&adev->dm.audio_lock);
8652                 inst = status->audio_inst;
8653                 aconnector->audio_inst = inst;
8654                 mutex_unlock(&adev->dm.audio_lock);
8655
8656                 amdgpu_dm_audio_eld_notify(adev, inst);
8657         }
8658 }
8659
8660 /*
8661  * amdgpu_dm_crtc_copy_transient_flags - copy mirrored flags from DRM to DC
8662  * @crtc_state: the DRM CRTC state
8663  * @stream_state: the DC stream state.
8664  *
8665  * Copy the mirrored transient state flags from DRM, to DC. It is used to bring
8666  * a dc_stream_state's flags in sync with a drm_crtc_state's flags.
8667  */
8668 static void amdgpu_dm_crtc_copy_transient_flags(struct drm_crtc_state *crtc_state,
8669                                                 struct dc_stream_state *stream_state)
8670 {
8671         stream_state->mode_changed = drm_atomic_crtc_needs_modeset(crtc_state);
8672 }
8673
8674 /**
8675  * amdgpu_dm_atomic_commit_tail() - AMDgpu DM's commit tail implementation.
8676  * @state: The atomic state to commit
8677  *
8678  * This will tell DC to commit the constructed DC state from atomic_check,
8679  * programming the hardware. Any failure here implies a hardware failure, since
8680  * atomic check should have filtered anything non-kosher.
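      *
      * Rough flow, summarizing the body below: tear down interrupts and
      * release streams for CRTCs that are disabled or undergoing a modeset,
      * commit the global DC state, apply per-stream property updates
      * (scaling, ABM, HDR), refresh freesync/VRR IRQ parameters, re-enable
      * interrupts, flush planes per CRTC, send outstanding vblank events, and
      * drop runtime PM references for newly disabled CRTCs.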
8681  */
8682 static void amdgpu_dm_atomic_commit_tail(struct drm_atomic_state *state)
8683 {
8684         struct drm_device *dev = state->dev;
8685         struct amdgpu_device *adev = drm_to_adev(dev);
8686         struct amdgpu_display_manager *dm = &adev->dm;
8687         struct dm_atomic_state *dm_state;
8688         struct dc_state *dc_state = NULL, *dc_state_temp = NULL;
8689         uint32_t i, j;
8690         struct drm_crtc *crtc;
8691         struct drm_crtc_state *old_crtc_state, *new_crtc_state;
8692         unsigned long flags;
8693         bool wait_for_vblank = true;
8694         struct drm_connector *connector;
8695         struct drm_connector_state *old_con_state, *new_con_state;
8696         struct dm_crtc_state *dm_old_crtc_state, *dm_new_crtc_state;
8697         int crtc_disable_count = 0;
8698         bool mode_set_reset_required = false;
8699
8700         trace_amdgpu_dm_atomic_commit_tail_begin(state);
8701
8702         drm_atomic_helper_update_legacy_modeset_state(dev, state);
8703
8704         dm_state = dm_atomic_get_new_state(state);
8705         if (dm_state && dm_state->context) {
8706                 dc_state = dm_state->context;
8707         } else {
8708                 /* No state changes, retain current state. */
8709                 dc_state_temp = dc_create_state(dm->dc);
8710                 ASSERT(dc_state_temp);
8711                 dc_state = dc_state_temp;
8712                 dc_resource_state_copy_construct_current(dm->dc, dc_state);
8713         }
8714
8715         for_each_oldnew_crtc_in_state (state, crtc, old_crtc_state,
8716                                        new_crtc_state, i) {
8717                 struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
8718
8719                 dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
8720
8721                 if (old_crtc_state->active &&
8722                     (!new_crtc_state->active ||
8723                      drm_atomic_crtc_needs_modeset(new_crtc_state))) {
8724                         manage_dm_interrupts(adev, acrtc, false);
8725                         dc_stream_release(dm_old_crtc_state->stream);
8726                 }
8727         }
8728
8729         drm_atomic_helper_calc_timestamping_constants(state);
8730
8731         /* update changed items */
8732         for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
8733                 struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
8734
8735                 dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
8736                 dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
8737
8738                 DRM_DEBUG_ATOMIC(
8739                         "amdgpu_crtc id:%d crtc_state_flags: enable:%d, active:%d, "
8740                         "planes_changed:%d, mode_changed:%d, active_changed:%d, "
8741                         "connectors_changed:%d\n",
8742                         acrtc->crtc_id,
8743                         new_crtc_state->enable,
8744                         new_crtc_state->active,
8745                         new_crtc_state->planes_changed,
8746                         new_crtc_state->mode_changed,
8747                         new_crtc_state->active_changed,
8748                         new_crtc_state->connectors_changed);
8749
8750                 /* Disable cursor if disabling crtc */
8751                 if (old_crtc_state->active && !new_crtc_state->active) {
8752                         struct dc_cursor_position position;
8753
8754                         memset(&position, 0, sizeof(position));
8755                         mutex_lock(&dm->dc_lock);
8756                         dc_stream_set_cursor_position(dm_old_crtc_state->stream, &position);
8757                         mutex_unlock(&dm->dc_lock);
8758                 }
8759
8760                 /* Copy all transient state flags into dc state */
8761                 if (dm_new_crtc_state->stream) {
8762                         amdgpu_dm_crtc_copy_transient_flags(&dm_new_crtc_state->base,
8763                                                             dm_new_crtc_state->stream);
8764                 }
8765
8766                 /* handles headless hotplug case, updating new_state and
8767                  * aconnector as needed
8768                  */
8769
8770                 if (modeset_required(new_crtc_state, dm_new_crtc_state->stream, dm_old_crtc_state->stream)) {
8771
8772                         DRM_DEBUG_ATOMIC("Atomic commit: SET crtc id %d: [%p]\n", acrtc->crtc_id, acrtc);
8773
8774                         if (!dm_new_crtc_state->stream) {
8775                                 /*
8776                                  * This could happen because of issues with
8777                                  * userspace notification delivery: userspace
8778                                  * tries to set a mode on a display that is in
8779                                  * fact already disconnected, so dc_sink is
8780                                  * NULL on the aconnector.
8781                                  * We expect a mode reset to come soon.
8782                                  *
8783                                  * This can also happen when an unplug occurs
8784                                  * while the resume sequence is still completing.
8785                                  *
8786                                  * In this case, we want to pretend we still
8787                                  * have a sink to keep the pipe running so that
8788                                  * hw state is consistent with the sw state
8789                                  */
8790                                 DRM_DEBUG_DRIVER("%s: Failed to create new stream for crtc %d\n",
8791                                                 __func__, acrtc->base.base.id);
8792                                 continue;
8793                         }
8794
8795                         if (dm_old_crtc_state->stream)
8796                                 remove_stream(adev, acrtc, dm_old_crtc_state->stream);
8797
8798                         pm_runtime_get_noresume(dev->dev);
8799
8800                         acrtc->enabled = true;
8801                         acrtc->hw_mode = new_crtc_state->mode;
8802                         crtc->hwmode = new_crtc_state->mode;
8803                         mode_set_reset_required = true;
8804                 } else if (modereset_required(new_crtc_state)) {
8805                         DRM_DEBUG_ATOMIC("Atomic commit: RESET. crtc id %d:[%p]\n", acrtc->crtc_id, acrtc);
8806                         /* i.e. reset mode */
8807                         if (dm_old_crtc_state->stream)
8808                                 remove_stream(adev, acrtc, dm_old_crtc_state->stream);
8809
8810                         mode_set_reset_required = true;
8811                 }
8812         } /* for_each_crtc_in_state() */
8813
8814         if (dc_state) {
8815                 /* if there was a mode set or reset, disable eDP PSR */
8816                 if (mode_set_reset_required)
8817                         amdgpu_dm_psr_disable_all(dm);
8818
8819                 dm_enable_per_frame_crtc_master_sync(dc_state);
8820                 mutex_lock(&dm->dc_lock);
8821                 WARN_ON(!dc_commit_state(dm->dc, dc_state));
8822 #if defined(CONFIG_DRM_AMD_DC_DCN)
8823                 /* Allow idle optimization when vblank count is 0 for display off */
8824                 if (dm->active_vblank_irq_count == 0)
8825                         dc_allow_idle_optimizations(dm->dc, true);
8826 #endif
8827                 mutex_unlock(&dm->dc_lock);
8828         }
8829
8830         for_each_new_crtc_in_state(state, crtc, new_crtc_state, i) {
8831                 struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
8832
8833                 dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
8834
8835                 if (dm_new_crtc_state->stream != NULL) {
8836                         const struct dc_stream_status *status =
8837                                         dc_stream_get_status(dm_new_crtc_state->stream);
8838
8839                         if (!status)
8840                                 status = dc_stream_get_status_from_state(dc_state,
8841                                                                          dm_new_crtc_state->stream);
8842                         if (!status)
8843                                 DC_ERR("got no status for stream %p on acrtc%p\n", dm_new_crtc_state->stream, acrtc);
8844                         else
8845                                 acrtc->otg_inst = status->primary_otg_inst;
8846                 }
8847         }
8848 #ifdef CONFIG_DRM_AMD_DC_HDCP
8849         for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) {
8850                 struct dm_connector_state *dm_new_con_state = to_dm_connector_state(new_con_state);
8851                 struct amdgpu_crtc *acrtc = to_amdgpu_crtc(dm_new_con_state->base.crtc);
8852                 struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
8853
8854                 new_crtc_state = NULL;
8855
8856                 if (acrtc)
8857                         new_crtc_state = drm_atomic_get_new_crtc_state(state, &acrtc->base);
8858
8859                 dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
8860
8861                 if (dm_new_crtc_state && dm_new_crtc_state->stream == NULL &&
8862                     connector->state->content_protection == DRM_MODE_CONTENT_PROTECTION_ENABLED) {
8863                         hdcp_reset_display(adev->dm.hdcp_workqueue, aconnector->dc_link->link_index);
8864                         new_con_state->content_protection = DRM_MODE_CONTENT_PROTECTION_DESIRED;
8865                         dm_new_con_state->update_hdcp = true;
8866                         continue;
8867                 }
8868
8869                 if (is_content_protection_different(new_con_state, old_con_state, connector, adev->dm.hdcp_workqueue))
8870                         hdcp_update_display(
8871                                 adev->dm.hdcp_workqueue, aconnector->dc_link->link_index, aconnector,
8872                                 new_con_state->hdcp_content_type,
8873                                 new_con_state->content_protection == DRM_MODE_CONTENT_PROTECTION_DESIRED);
8874         }
8875 #endif
8876
8877         /* Handle connector state changes */
8878         for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) {
8879                 struct dm_connector_state *dm_new_con_state = to_dm_connector_state(new_con_state);
8880                 struct dm_connector_state *dm_old_con_state = to_dm_connector_state(old_con_state);
8881                 struct amdgpu_crtc *acrtc = to_amdgpu_crtc(dm_new_con_state->base.crtc);
8882                 struct dc_surface_update dummy_updates[MAX_SURFACES];
8883                 struct dc_stream_update stream_update;
8884                 struct dc_info_packet hdr_packet;
8885                 struct dc_stream_status *status = NULL;
8886                 bool abm_changed, hdr_changed, scaling_changed;
8887
8888                 memset(&dummy_updates, 0, sizeof(dummy_updates));
8889                 memset(&stream_update, 0, sizeof(stream_update));
8890
8891                 if (acrtc) {
8892                         new_crtc_state = drm_atomic_get_new_crtc_state(state, &acrtc->base);
8893                         old_crtc_state = drm_atomic_get_old_crtc_state(state, &acrtc->base);
8894                 }
8895
8896                 /* Skip any modesets/resets */
8897                 if (!acrtc || drm_atomic_crtc_needs_modeset(new_crtc_state))
8898                         continue;
8899
8900                 dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
8901                 dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
8902
8903                 scaling_changed = is_scaling_state_different(dm_new_con_state,
8904                                                              dm_old_con_state);
8905
8906                 abm_changed = dm_new_crtc_state->abm_level !=
8907                               dm_old_crtc_state->abm_level;
8908
8909                 hdr_changed =
8910                         !drm_connector_atomic_hdr_metadata_equal(old_con_state, new_con_state);
8911
8912                 if (!scaling_changed && !abm_changed && !hdr_changed)
8913                         continue;
8914
8915                 stream_update.stream = dm_new_crtc_state->stream;
8916                 if (scaling_changed) {
8917                         update_stream_scaling_settings(&dm_new_con_state->base.crtc->mode,
8918                                         dm_new_con_state, dm_new_crtc_state->stream);
8919
8920                         stream_update.src = dm_new_crtc_state->stream->src;
8921                         stream_update.dst = dm_new_crtc_state->stream->dst;
8922                 }
8923
8924                 if (abm_changed) {
8925                         dm_new_crtc_state->stream->abm_level = dm_new_crtc_state->abm_level;
8926
8927                         stream_update.abm_level = &dm_new_crtc_state->abm_level;
8928                 }
8929
8930                 if (hdr_changed) {
8931                         fill_hdr_info_packet(new_con_state, &hdr_packet);
8932                         stream_update.hdr_static_metadata = &hdr_packet;
8933                 }
8934
8935                 status = dc_stream_get_status(dm_new_crtc_state->stream);
8936                 WARN_ON(!status);
8937                 WARN_ON(!status->plane_count);
8938
8939                 /*
8940                  * TODO: DC refuses to perform stream updates without a dc_surface_update.
8941                  * Here we create an empty update on each plane.
8942                  * To fix this, DC should permit updating only stream properties.
8943                  */
8944                 for (j = 0; j < status->plane_count; j++)
8945                         dummy_updates[j].surface = status->plane_states[0];
8946
8948                 mutex_lock(&dm->dc_lock);
8949                 dc_commit_updates_for_stream(dm->dc,
8950                                                      dummy_updates,
8951                                                      status->plane_count,
8952                                                      dm_new_crtc_state->stream,
8953                                                      &stream_update,
8954                                                      dc_state);
8955                 mutex_unlock(&dm->dc_lock);
8956         }
8957
8958         /* Count number of newly disabled CRTCs for dropping PM refs later. */
8959         for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state,
8960                                       new_crtc_state, i) {
8961                 if (old_crtc_state->active && !new_crtc_state->active)
8962                         crtc_disable_count++;
8963
8964                 dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
8965                 dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
8966
8967                 /* For freesync config update on crtc state and params for irq */
8968                 update_stream_irq_parameters(dm, dm_new_crtc_state);
8969
8970                 /* Handle vrr on->off / off->on transitions */
8971                 amdgpu_dm_handle_vrr_transition(dm_old_crtc_state,
8972                                                 dm_new_crtc_state);
8973         }
8974
8975         /**
8976          * Enable interrupts for CRTCs that are newly enabled or went through
8977          * a modeset. It was intentionally deferred until after the front end
8978          * state was modified to wait until the OTG was on and so the IRQ
8979          * handlers didn't access stale or invalid state.
8980          */
8981         for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
8982                 struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
8983 #ifdef CONFIG_DEBUG_FS
8984                 bool configure_crc = false;
8985                 enum amdgpu_dm_pipe_crc_source cur_crc_src;
8986 #endif
8987                 dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
8988
8989                 if (new_crtc_state->active &&
8990                     (!old_crtc_state->active ||
8991                      drm_atomic_crtc_needs_modeset(new_crtc_state))) {
8992                         dc_stream_retain(dm_new_crtc_state->stream);
8993                         acrtc->dm_irq_params.stream = dm_new_crtc_state->stream;
8994                         manage_dm_interrupts(adev, acrtc, true);
8995
8996 #ifdef CONFIG_DEBUG_FS
8997                         /**
8998                          * Frontend may have changed so reapply the CRC capture
8999                          * settings for the stream.
9000                          */
9001                         dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
9002                         spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);
9003                         cur_crc_src = acrtc->dm_irq_params.crc_src;
9004                         spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
9005
9006                         if (amdgpu_dm_is_valid_crc_source(cur_crc_src)) {
9007                                 configure_crc = true;
9008 #if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
9009                                 if (amdgpu_dm_crc_window_is_activated(crtc))
9010                                         configure_crc = false;
9011 #endif
9012                         }
9013
9014                         if (configure_crc)
9015                                 amdgpu_dm_crtc_configure_crc_source(
9016                                         crtc, dm_new_crtc_state, cur_crc_src);
9017 #endif
9018                 }
9019         }
9020
9021         for_each_new_crtc_in_state(state, crtc, new_crtc_state, j)
9022                 if (new_crtc_state->async_flip)
9023                         wait_for_vblank = false;
9024
9025         /* update planes when needed per crtc */
9026         for_each_new_crtc_in_state(state, crtc, new_crtc_state, j) {
9027                 dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
9028
9029                 if (dm_new_crtc_state->stream)
9030                         amdgpu_dm_commit_planes(state, dc_state, dev,
9031                                                 dm, crtc, wait_for_vblank);
9032         }
9033
9034         /* Update audio instances for each connector. */
9035         amdgpu_dm_commit_audio(dev, state);
9036
9037         /*
9038          * send vblank event on all events not handled in flip and
9039          * mark consumed event for drm_atomic_helper_commit_hw_done
9040          */
9041         spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);
9042         for_each_new_crtc_in_state(state, crtc, new_crtc_state, i) {
9043
9044                 if (new_crtc_state->event)
9045                         drm_send_event_locked(dev, &new_crtc_state->event->base);
9046
9047                 new_crtc_state->event = NULL;
9048         }
9049         spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
9050
9051         /* Signal HW programming completion */
9052         drm_atomic_helper_commit_hw_done(state);
9053
9054         if (wait_for_vblank)
9055                 drm_atomic_helper_wait_for_flip_done(dev, state);
9056
9057         drm_atomic_helper_cleanup_planes(dev, state);
9058
9059         /* return the stolen vga memory back to VRAM */
9060         if (!adev->mman.keep_stolen_vga_memory)
9061                 amdgpu_bo_free_kernel(&adev->mman.stolen_vga_memory, NULL, NULL);
9062         amdgpu_bo_free_kernel(&adev->mman.stolen_extended_memory, NULL, NULL);
9063
9064         /*
9065          * Finally, drop a runtime PM reference for each newly disabled CRTC,
9066          * so we can put the GPU into runtime suspend if we're not driving any
9067          * displays anymore
9068          */
9069         for (i = 0; i < crtc_disable_count; i++)
9070                 pm_runtime_put_autosuspend(dev->dev);
9071         pm_runtime_mark_last_busy(dev->dev);
9072
9073         if (dc_state_temp)
9074                 dc_release_state(dc_state_temp);
9075 }
9076
9077
9078 static int dm_force_atomic_commit(struct drm_connector *connector)
9079 {
9080         int ret = 0;
9081         struct drm_device *ddev = connector->dev;
9082         struct drm_atomic_state *state = drm_atomic_state_alloc(ddev);
9083         struct amdgpu_crtc *disconnected_acrtc = to_amdgpu_crtc(connector->encoder->crtc);
9084         struct drm_plane *plane = disconnected_acrtc->base.primary;
9085         struct drm_connector_state *conn_state;
9086         struct drm_crtc_state *crtc_state;
9087         struct drm_plane_state *plane_state;
9088
9089         if (!state)
9090                 return -ENOMEM;
9091
9092         state->acquire_ctx = ddev->mode_config.acquire_ctx;
9093
9094         /* Construct an atomic state to restore previous display setting */
9095
9096         /*
9097          * Attach connectors to drm_atomic_state
9098          */
9099         conn_state = drm_atomic_get_connector_state(state, connector);
9100
9101         ret = PTR_ERR_OR_ZERO(conn_state);
9102         if (ret)
9103                 goto out;
9104
9105         /* Attach crtc to drm_atomic_state */
9106         crtc_state = drm_atomic_get_crtc_state(state, &disconnected_acrtc->base);
9107
9108         ret = PTR_ERR_OR_ZERO(crtc_state);
9109         if (ret)
9110                 goto out;
9111
9112         /* force a restore */
9113         crtc_state->mode_changed = true;
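             /*
              * Setting mode_changed makes atomic check treat this as a full
              * modeset, so the previous configuration is reprogrammed from
              * scratch rather than taken as a fast update.
              */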
9114
9115         /* Attach plane to drm_atomic_state */
9116         plane_state = drm_atomic_get_plane_state(state, plane);
9117
9118         ret = PTR_ERR_OR_ZERO(plane_state);
9119         if (ret)
9120                 goto out;
9121
9122         /* Call commit internally with the state we just constructed */
9123         ret = drm_atomic_commit(state);
9124
9125 out:
9126         drm_atomic_state_put(state);
9127         if (ret)
9128                 DRM_ERROR("Restoring old state failed with %i\n", ret);
9129
9130         return ret;
9131 }
9132
9133 /*
9134  * This function handles all cases where a set mode call does not come after a
9135  * hotplug. This includes when a display is unplugged then plugged back into
9136  * the same port, and when running without usermode desktop manager support.
9137  */
9138 void dm_restore_drm_connector_state(struct drm_device *dev,
9139                                     struct drm_connector *connector)
9140 {
9141         struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
9142         struct amdgpu_crtc *disconnected_acrtc;
9143         struct dm_crtc_state *acrtc_state;
9144
9145         if (!aconnector->dc_sink || !connector->state || !connector->encoder)
9146                 return;
9147
9148         disconnected_acrtc = to_amdgpu_crtc(connector->encoder->crtc);
9149         if (!disconnected_acrtc)
9150                 return;
9151
9152         acrtc_state = to_dm_crtc_state(disconnected_acrtc->base.state);
9153         if (!acrtc_state->stream)
9154                 return;
9155
9156         /*
9157          * If the previous sink is not released and different from the current,
9158          * we deduce we are in a state where we cannot rely on a usermode call
9159          * to turn on the display, so we do it here.
9160          */
9161         if (acrtc_state->stream->sink != aconnector->dc_sink)
9162                 dm_force_atomic_commit(&aconnector->base);
9163 }
9164
9165 /*
9166  * Grabs all modesetting locks to serialize against any blocking commits,
9167  * and waits for completion of all non-blocking commits.
9168  */
9169 static int do_aquire_global_lock(struct drm_device *dev,
9170                                  struct drm_atomic_state *state)
9171 {
9172         struct drm_crtc *crtc;
9173         struct drm_crtc_commit *commit;
9174         long ret;
9175
9176         /*
9177          * Adding all modeset locks to acquire_ctx will
9178          * ensure that when the framework releases it, the
9179          * extra locks we are taking here will get released too.
9180          */
9181         ret = drm_modeset_lock_all_ctx(dev, state->acquire_ctx);
9182         if (ret)
9183                 return ret;
9184
9185         list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
9186                 spin_lock(&crtc->commit_lock);
9187                 commit = list_first_entry_or_null(&crtc->commit_list,
9188                                 struct drm_crtc_commit, commit_entry);
9189                 if (commit)
9190                         drm_crtc_commit_get(commit);
9191                 spin_unlock(&crtc->commit_lock);
9192
9193                 if (!commit)
9194                         continue;
9195
9196                 /*
9197                  * Make sure all pending HW programming completed and
9198                  * page flips done
9199                  */
9200                 ret = wait_for_completion_interruptible_timeout(&commit->hw_done, 10*HZ);
9201
9202                 if (ret > 0)
9203                         ret = wait_for_completion_interruptible_timeout(
9204                                         &commit->flip_done, 10*HZ);
9205
9206                 if (ret == 0)
9207                         DRM_ERROR("[CRTC:%d:%s] hw_done or flip_done timed out\n",
9208                                   crtc->base.id, crtc->name);
9209
9210                 drm_crtc_commit_put(commit);
9211         }
9212
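             /*
              * wait_for_completion_interruptible_timeout() returns < 0 only on
              * interruption; propagate that. A timeout (ret == 0) was already
              * logged above and is otherwise treated as success.
              */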
9213         return ret < 0 ? ret : 0;
9214 }
9215
9216 static void get_freesync_config_for_crtc(
9217         struct dm_crtc_state *new_crtc_state,
9218         struct dm_connector_state *new_con_state)
9219 {
9220         struct mod_freesync_config config = {0};
9221         struct amdgpu_dm_connector *aconnector =
9222                         to_amdgpu_dm_connector(new_con_state->base.connector);
9223         struct drm_display_mode *mode = &new_crtc_state->base.mode;
9224         int vrefresh = drm_mode_vrefresh(mode);
9225         bool fs_vid_mode = false;
9226
9227         new_crtc_state->vrr_supported = new_con_state->freesync_capable &&
9228                                         vrefresh >= aconnector->min_vfreq &&
9229                                         vrefresh <= aconnector->max_vfreq;
9230
9231         if (new_crtc_state->vrr_supported) {
9232                 new_crtc_state->stream->ignore_msa_timing_param = true;
9233                 fs_vid_mode = new_crtc_state->freesync_config.state == VRR_STATE_ACTIVE_FIXED;
9234
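                     /*
                      * min/max_vfreq are in Hz; mod_freesync expects micro-Hz
                      * (e.g. 48 Hz -> 48000000 uHz).
                      */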
9235                 config.min_refresh_in_uhz = aconnector->min_vfreq * 1000000;
9236                 config.max_refresh_in_uhz = aconnector->max_vfreq * 1000000;
9237                 config.vsif_supported = true;
9238                 config.btr = true;
9239
9240                 if (fs_vid_mode) {
9241                         config.state = VRR_STATE_ACTIVE_FIXED;
9242                         config.fixed_refresh_in_uhz = new_crtc_state->freesync_config.fixed_refresh_in_uhz;
9243                         goto out;
9244                 } else if (new_crtc_state->base.vrr_enabled) {
9245                         config.state = VRR_STATE_ACTIVE_VARIABLE;
9246                 } else {
9247                         config.state = VRR_STATE_INACTIVE;
9248                 }
9249         }
9250 out:
9251         new_crtc_state->freesync_config = config;
9252 }
9253
9254 static void reset_freesync_config_for_crtc(
9255         struct dm_crtc_state *new_crtc_state)
9256 {
9257         new_crtc_state->vrr_supported = false;
9258
9259         memset(&new_crtc_state->vrr_infopacket, 0,
9260                sizeof(new_crtc_state->vrr_infopacket));
9261 }
9262
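     /*
      * Returns true when the two modes differ only in their vertical timing
      * (vtotal and vsync placement) while the vsync pulse width and all
      * horizontal parameters match - i.e. two refresh-rate variants of the
      * same base timing, which freesync video can switch between without a
      * full modeset.
      */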
9263 static bool
9264 is_timing_unchanged_for_freesync(struct drm_crtc_state *old_crtc_state,
9265                                  struct drm_crtc_state *new_crtc_state)
9266 {
9267         struct drm_display_mode old_mode, new_mode;
9268
9269         if (!old_crtc_state || !new_crtc_state)
9270                 return false;
9271
9272         old_mode = old_crtc_state->mode;
9273         new_mode = new_crtc_state->mode;
9274
9275         if (old_mode.clock       == new_mode.clock &&
9276             old_mode.hdisplay    == new_mode.hdisplay &&
9277             old_mode.vdisplay    == new_mode.vdisplay &&
9278             old_mode.htotal      == new_mode.htotal &&
9279             old_mode.vtotal      != new_mode.vtotal &&
9280             old_mode.hsync_start == new_mode.hsync_start &&
9281             old_mode.vsync_start != new_mode.vsync_start &&
9282             old_mode.hsync_end   == new_mode.hsync_end &&
9283             old_mode.vsync_end   != new_mode.vsync_end &&
9284             old_mode.hskew       == new_mode.hskew &&
9285             old_mode.vscan       == new_mode.vscan &&
9286             (old_mode.vsync_end - old_mode.vsync_start) ==
9287             (new_mode.vsync_end - new_mode.vsync_start))
9288                 return true;
9289
9290         return false;
9291 }
9292
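/*
 * Illustrative example of the fixed refresh computation below: for a
 * 1920x1080@60 mode with clock = 148500 (kHz), htotal = 2200 and
 * vtotal = 1125,
 *
 *   148500 * 1000 * 1000000 / (2200 * 1125) = 60000000 uHz (i.e. 60 Hz).
 */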
static void set_freesync_fixed_config(struct dm_crtc_state *dm_new_crtc_state)
{
9294         uint64_t num, den, res;
9295         struct drm_crtc_state *new_crtc_state = &dm_new_crtc_state->base;
9296
9297         dm_new_crtc_state->freesync_config.state = VRR_STATE_ACTIVE_FIXED;
9298
9299         num = (unsigned long long)new_crtc_state->mode.clock * 1000 * 1000000;
9300         den = (unsigned long long)new_crtc_state->mode.htotal *
9301               (unsigned long long)new_crtc_state->mode.vtotal;
9302
9303         res = div_u64(num, den);
9304         dm_new_crtc_state->freesync_config.fixed_refresh_in_uhz = res;
9305 }
9306
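/*
 * dm_update_crtc_state() is invoked twice per CRTC from
 * amdgpu_dm_atomic_check(): first with enable == false to remove the
 * stream of every disabled/modeset CRTC from the DC context, then with
 * enable == true to create, validate and add the new streams. Any change
 * that requires DC global validation sets *lock_and_validation_needed.
 */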
9307 static int dm_update_crtc_state(struct amdgpu_display_manager *dm,
9308                                 struct drm_atomic_state *state,
9309                                 struct drm_crtc *crtc,
9310                                 struct drm_crtc_state *old_crtc_state,
9311                                 struct drm_crtc_state *new_crtc_state,
9312                                 bool enable,
9313                                 bool *lock_and_validation_needed)
9314 {
9315         struct dm_atomic_state *dm_state = NULL;
9316         struct dm_crtc_state *dm_old_crtc_state, *dm_new_crtc_state;
9317         struct dc_stream_state *new_stream;
9318         int ret = 0;
9319
9320         /*
9321          * TODO Move this code into dm_crtc_atomic_check once we get rid of dc_validation_set
9322          * update changed items
9323          */
9324         struct amdgpu_crtc *acrtc = NULL;
9325         struct amdgpu_dm_connector *aconnector = NULL;
9326         struct drm_connector_state *drm_new_conn_state = NULL, *drm_old_conn_state = NULL;
9327         struct dm_connector_state *dm_new_conn_state = NULL, *dm_old_conn_state = NULL;
9328
9329         new_stream = NULL;
9330
9331         dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
9332         dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
9333         acrtc = to_amdgpu_crtc(crtc);
9334         aconnector = amdgpu_dm_find_first_crtc_matching_connector(state, crtc);
9335
9336         /* TODO This hack should go away */
9337         if (aconnector && enable) {
                /* Make sure a fake sink is created in the hot-plug scenario */
9339                 drm_new_conn_state = drm_atomic_get_new_connector_state(state,
9340                                                             &aconnector->base);
9341                 drm_old_conn_state = drm_atomic_get_old_connector_state(state,
9342                                                             &aconnector->base);
9343
9344                 if (IS_ERR(drm_new_conn_state)) {
                        ret = PTR_ERR(drm_new_conn_state);
9346                         goto fail;
9347                 }
9348
9349                 dm_new_conn_state = to_dm_connector_state(drm_new_conn_state);
9350                 dm_old_conn_state = to_dm_connector_state(drm_old_conn_state);
9351
9352                 if (!drm_atomic_crtc_needs_modeset(new_crtc_state))
9353                         goto skip_modeset;
9354
9355                 new_stream = create_validate_stream_for_sink(aconnector,
9356                                                              &new_crtc_state->mode,
9357                                                              dm_new_conn_state,
9358                                                              dm_old_crtc_state->stream);
9359
                /*
                 * We can end up with no stream on ACTION_SET if a display
                 * was disconnected during S3. In that case it is not an
                 * error; the OS will be updated after detection and will
                 * do the right thing on the next atomic commit.
                 */
9366
9367                 if (!new_stream) {
9368                         DRM_DEBUG_DRIVER("%s: Failed to create new stream for crtc %d\n",
9369                                         __func__, acrtc->base.base.id);
9370                         ret = -ENOMEM;
9371                         goto fail;
9372                 }
9373
9374                 /*
9375                  * TODO: Check VSDB bits to decide whether this should
9376                  * be enabled or not.
9377                  */
9378                 new_stream->triggered_crtc_reset.enabled =
9379                         dm->force_timing_sync;
9380
9381                 dm_new_crtc_state->abm_level = dm_new_conn_state->abm_level;
9382
9383                 ret = fill_hdr_info_packet(drm_new_conn_state,
9384                                            &new_stream->hdr_static_metadata);
9385                 if (ret)
9386                         goto fail;
9387
9388                 /*
9389                  * If we already removed the old stream from the context
9390                  * (and set the new stream to NULL) then we can't reuse
9391                  * the old stream even if the stream and scaling are unchanged.
                 * We'll hit the BUG_ON and get a black screen.
9393                  *
9394                  * TODO: Refactor this function to allow this check to work
9395                  * in all conditions.
9396                  */
9397                 if (amdgpu_freesync_vid_mode &&
9398                     dm_new_crtc_state->stream &&
9399                     is_timing_unchanged_for_freesync(new_crtc_state, old_crtc_state))
9400                         goto skip_modeset;
9401
9402                 if (dm_new_crtc_state->stream &&
9403                     dc_is_stream_unchanged(new_stream, dm_old_crtc_state->stream) &&
9404                     dc_is_stream_scaling_unchanged(new_stream, dm_old_crtc_state->stream)) {
9405                         new_crtc_state->mode_changed = false;
                        DRM_DEBUG_DRIVER("Mode change not required, setting mode_changed to %d\n",
                                         new_crtc_state->mode_changed);
9408                 }
9409         }
9410
        /* The mode_changed flag may have been updated above, so check again */
9412         if (!drm_atomic_crtc_needs_modeset(new_crtc_state))
9413                 goto skip_modeset;
9414
9415         DRM_DEBUG_ATOMIC(
9416                 "amdgpu_crtc id:%d crtc_state_flags: enable:%d, active:%d, "
                "planes_changed:%d, mode_changed:%d, active_changed:%d, "
9418                 "connectors_changed:%d\n",
9419                 acrtc->crtc_id,
9420                 new_crtc_state->enable,
9421                 new_crtc_state->active,
9422                 new_crtc_state->planes_changed,
9423                 new_crtc_state->mode_changed,
9424                 new_crtc_state->active_changed,
9425                 new_crtc_state->connectors_changed);
9426
9427         /* Remove stream for any changed/disabled CRTC */
9428         if (!enable) {
9430                 if (!dm_old_crtc_state->stream)
9431                         goto skip_modeset;
9432
9433                 if (amdgpu_freesync_vid_mode && dm_new_crtc_state->stream &&
9434                     is_timing_unchanged_for_freesync(new_crtc_state,
9435                                                      old_crtc_state)) {
9436                         new_crtc_state->mode_changed = false;
9437                         DRM_DEBUG_DRIVER(
9438                                 "Mode change not required for front porch change, "
                                "setting mode_changed to %d\n",
9440                                 new_crtc_state->mode_changed);
9441
9442                         set_freesync_fixed_config(dm_new_crtc_state);
9443
9444                         goto skip_modeset;
9445                 } else if (amdgpu_freesync_vid_mode && aconnector &&
9446                            is_freesync_video_mode(&new_crtc_state->mode,
9447                                                   aconnector)) {
9448                         set_freesync_fixed_config(dm_new_crtc_state);
9449                 }
9450
9451                 ret = dm_atomic_get_state(state, &dm_state);
9452                 if (ret)
9453                         goto fail;
9454
9455                 DRM_DEBUG_DRIVER("Disabling DRM crtc: %d\n",
9456                                 crtc->base.id);
9457
9458                 /* i.e. reset mode */
9459                 if (dc_remove_stream_from_ctx(
9460                                 dm->dc,
9461                                 dm_state->context,
9462                                 dm_old_crtc_state->stream) != DC_OK) {
9463                         ret = -EINVAL;
9464                         goto fail;
9465                 }
9466
9467                 dc_stream_release(dm_old_crtc_state->stream);
9468                 dm_new_crtc_state->stream = NULL;
9469
9470                 reset_freesync_config_for_crtc(dm_new_crtc_state);
9471
9472                 *lock_and_validation_needed = true;
9473
        } else { /* Add stream for any updated/enabled CRTC */
9475                 /*
                 * Quick fix to prevent a NULL pointer dereference on new_stream
                 * when newly added MST connectors are not found in the existing
                 * crtc_state in daisy-chained mode.
                 * TODO: dig out the root cause of this.
9479                  */
9480                 if (!aconnector || (!aconnector->dc_sink && aconnector->mst_port))
9481                         goto skip_modeset;
9482
9483                 if (modereset_required(new_crtc_state))
9484                         goto skip_modeset;
9485
9486                 if (modeset_required(new_crtc_state, new_stream,
9487                                      dm_old_crtc_state->stream)) {
9489                         WARN_ON(dm_new_crtc_state->stream);
9490
9491                         ret = dm_atomic_get_state(state, &dm_state);
9492                         if (ret)
9493                                 goto fail;
9494
9495                         dm_new_crtc_state->stream = new_stream;
9496
9497                         dc_stream_retain(new_stream);
9498
9499                         DRM_DEBUG_ATOMIC("Enabling DRM crtc: %d\n",
9500                                          crtc->base.id);
9501
9502                         if (dc_add_stream_to_ctx(
9503                                         dm->dc,
9504                                         dm_state->context,
9505                                         dm_new_crtc_state->stream) != DC_OK) {
9506                                 ret = -EINVAL;
9507                                 goto fail;
9508                         }
9509
9510                         *lock_and_validation_needed = true;
9511                 }
9512         }
9513
9514 skip_modeset:
9515         /* Release extra reference */
9516         if (new_stream)
                dc_stream_release(new_stream);
9518
9519         /*
9520          * We want to do dc stream updates that do not require a
9521          * full modeset below.
9522          */
9523         if (!(enable && aconnector && new_crtc_state->active))
9524                 return 0;
9525         /*
         * Given the above conditions, the dc state cannot be NULL, because:
         * 1. We're in the process of enabling CRTCs (the stream has just
         *    been added to the dc context, or is already on it),
         * 2. the CRTC has a valid connector attached, and
         * 3. the CRTC is currently active and enabled.
         * => The dc stream state currently exists.
9532          */
9533         BUG_ON(dm_new_crtc_state->stream == NULL);
9534
9535         /* Scaling or underscan settings */
9536         if (is_scaling_state_different(dm_old_conn_state, dm_new_conn_state))
9537                 update_stream_scaling_settings(
9538                         &new_crtc_state->mode, dm_new_conn_state, dm_new_crtc_state->stream);
9539
9540         /* ABM settings */
9541         dm_new_crtc_state->abm_level = dm_new_conn_state->abm_level;
9542
9543         /*
9544          * Color management settings. We also update color properties
9545          * when a modeset is needed, to ensure it gets reprogrammed.
9546          */
9547         if (dm_new_crtc_state->base.color_mgmt_changed ||
9548             drm_atomic_crtc_needs_modeset(new_crtc_state)) {
9549                 ret = amdgpu_dm_update_crtc_color_mgmt(dm_new_crtc_state);
9550                 if (ret)
9551                         goto fail;
9552         }
9553
9554         /* Update Freesync settings. */
9555         get_freesync_config_for_crtc(dm_new_crtc_state,
9556                                      dm_new_conn_state);
9557
9558         return ret;
9559
9560 fail:
9561         if (new_stream)
9562                 dc_stream_release(new_stream);
9563         return ret;
9564 }
9565
9566 static bool should_reset_plane(struct drm_atomic_state *state,
9567                                struct drm_plane *plane,
9568                                struct drm_plane_state *old_plane_state,
9569                                struct drm_plane_state *new_plane_state)
9570 {
9571         struct drm_plane *other;
9572         struct drm_plane_state *old_other_state, *new_other_state;
9573         struct drm_crtc_state *new_crtc_state;
9574         int i;
9575
9576         /*
9577          * TODO: Remove this hack once the checks below are sufficient
         * to determine when we need to reset all the planes on
9579          * the stream.
9580          */
9581         if (state->allow_modeset)
9582                 return true;
9583
9584         /* Exit early if we know that we're adding or removing the plane. */
9585         if (old_plane_state->crtc != new_plane_state->crtc)
9586                 return true;
9587
9588         /* old crtc == new_crtc == NULL, plane not in context. */
9589         if (!new_plane_state->crtc)
9590                 return false;
9591
9592         new_crtc_state =
9593                 drm_atomic_get_new_crtc_state(state, new_plane_state->crtc);
9594
9595         if (!new_crtc_state)
9596                 return true;
9597
9598         /* CRTC Degamma changes currently require us to recreate planes. */
9599         if (new_crtc_state->color_mgmt_changed)
9600                 return true;
9601
9602         if (drm_atomic_crtc_needs_modeset(new_crtc_state))
9603                 return true;
9604
9605         /*
9606          * If there are any new primary or overlay planes being added or
9607          * removed then the z-order can potentially change. To ensure
9608          * correct z-order and pipe acquisition the current DC architecture
9609          * requires us to remove and recreate all existing planes.
9610          *
9611          * TODO: Come up with a more elegant solution for this.
9612          */
9613         for_each_oldnew_plane_in_state(state, other, old_other_state, new_other_state, i) {
                struct amdgpu_framebuffer *old_afb, *new_afb;

                if (other->type == DRM_PLANE_TYPE_CURSOR)
9616                         continue;
9617
9618                 if (old_other_state->crtc != new_plane_state->crtc &&
9619                     new_other_state->crtc != new_plane_state->crtc)
9620                         continue;
9621
9622                 if (old_other_state->crtc != new_other_state->crtc)
9623                         return true;
9624
9625                 /* Src/dst size and scaling updates. */
9626                 if (old_other_state->src_w != new_other_state->src_w ||
9627                     old_other_state->src_h != new_other_state->src_h ||
9628                     old_other_state->crtc_w != new_other_state->crtc_w ||
9629                     old_other_state->crtc_h != new_other_state->crtc_h)
9630                         return true;
9631
9632                 /* Rotation / mirroring updates. */
9633                 if (old_other_state->rotation != new_other_state->rotation)
9634                         return true;
9635
9636                 /* Blending updates. */
9637                 if (old_other_state->pixel_blend_mode !=
9638                     new_other_state->pixel_blend_mode)
9639                         return true;
9640
9641                 /* Alpha updates. */
9642                 if (old_other_state->alpha != new_other_state->alpha)
9643                         return true;
9644
9645                 /* Colorspace changes. */
9646                 if (old_other_state->color_range != new_other_state->color_range ||
9647                     old_other_state->color_encoding != new_other_state->color_encoding)
9648                         return true;
9649
9650                 /* Framebuffer checks fall at the end. */
9651                 if (!old_other_state->fb || !new_other_state->fb)
9652                         continue;
9653
9654                 /* Pixel format changes can require bandwidth updates. */
9655                 if (old_other_state->fb->format != new_other_state->fb->format)
9656                         return true;
9657
9658                 old_afb = (struct amdgpu_framebuffer *)old_other_state->fb;
9659                 new_afb = (struct amdgpu_framebuffer *)new_other_state->fb;
9660
9661                 /* Tiling and DCC changes also require bandwidth updates. */
9662                 if (old_afb->tiling_flags != new_afb->tiling_flags ||
9663                     old_afb->base.modifier != new_afb->base.modifier)
9664                         return true;
9665         }
9666
9667         return false;
9668 }
9669
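/*
 * Illustrative example of a cursor FB that passes the checks below: a
 * 64x64 ARGB8888 buffer has pitches[0] = 256 bytes and cpp[0] = 4, so the
 * pitch is 256 / 4 = 64 pixels, which matches the FB width and is one of
 * the pitches (64/128/256) the cursor plane supports.
 */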
9670 static int dm_check_cursor_fb(struct amdgpu_crtc *new_acrtc,
9671                               struct drm_plane_state *new_plane_state,
9672                               struct drm_framebuffer *fb)
9673 {
9674         struct amdgpu_device *adev = drm_to_adev(new_acrtc->base.dev);
9675         struct amdgpu_framebuffer *afb = to_amdgpu_framebuffer(fb);
9676         unsigned int pitch;
9677         bool linear;
9678
9679         if (fb->width > new_acrtc->max_cursor_width ||
9680             fb->height > new_acrtc->max_cursor_height) {
9681                 DRM_DEBUG_ATOMIC("Bad cursor FB size %dx%d\n",
9682                                  new_plane_state->fb->width,
9683                                  new_plane_state->fb->height);
9684                 return -EINVAL;
9685         }
9686         if (new_plane_state->src_w != fb->width << 16 ||
9687             new_plane_state->src_h != fb->height << 16) {
9688                 DRM_DEBUG_ATOMIC("Cropping not supported for cursor plane\n");
9689                 return -EINVAL;
9690         }
9691
9692         /* Pitch in pixels */
9693         pitch = fb->pitches[0] / fb->format->cpp[0];
9694
9695         if (fb->width != pitch) {
                DRM_DEBUG_ATOMIC("Cursor FB width %d doesn't match pitch %d\n",
9697                                  fb->width, pitch);
9698                 return -EINVAL;
9699         }
9700
9701         switch (pitch) {
9702         case 64:
9703         case 128:
9704         case 256:
9705                 /* FB pitch is supported by cursor plane */
9706                 break;
9707         default:
9708                 DRM_DEBUG_ATOMIC("Bad cursor FB pitch %d px\n", pitch);
9709                 return -EINVAL;
9710         }
9711
        /*
         * Core DRM takes care of checking FB modifiers, so we only need to
         * check tiling flags when the FB doesn't have a modifier.
         */
9714         if (!(fb->flags & DRM_MODE_FB_MODIFIERS)) {
9715                 if (adev->family < AMDGPU_FAMILY_AI) {
9716                         linear = AMDGPU_TILING_GET(afb->tiling_flags, ARRAY_MODE) != DC_ARRAY_2D_TILED_THIN1 &&
9717                                  AMDGPU_TILING_GET(afb->tiling_flags, ARRAY_MODE) != DC_ARRAY_1D_TILED_THIN1 &&
9718                                  AMDGPU_TILING_GET(afb->tiling_flags, MICRO_TILE_MODE) == 0;
9719                 } else {
9720                         linear = AMDGPU_TILING_GET(afb->tiling_flags, SWIZZLE_MODE) == 0;
9721                 }
9722                 if (!linear) {
                        DRM_DEBUG_ATOMIC("Cursor FB not linear\n");
9724                         return -EINVAL;
9725                 }
9726         }
9727
9728         return 0;
9729 }
9730
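/*
 * Like dm_update_crtc_state(), this runs twice per plane: once with
 * enable == false to release the dc_plane_state of removed/changed planes,
 * and once with enable == true to create and attach new dc_plane_states.
 * Cursor planes never get a dc_plane_state; they are only validated here
 * and handled separately from DC plane management.
 */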
9731 static int dm_update_plane_state(struct dc *dc,
9732                                  struct drm_atomic_state *state,
9733                                  struct drm_plane *plane,
9734                                  struct drm_plane_state *old_plane_state,
9735                                  struct drm_plane_state *new_plane_state,
9736                                  bool enable,
9737                                  bool *lock_and_validation_needed)
9738 {
9740         struct dm_atomic_state *dm_state = NULL;
9741         struct drm_crtc *new_plane_crtc, *old_plane_crtc;
9742         struct drm_crtc_state *old_crtc_state, *new_crtc_state;
9743         struct dm_crtc_state *dm_new_crtc_state, *dm_old_crtc_state;
9744         struct dm_plane_state *dm_new_plane_state, *dm_old_plane_state;
9745         struct amdgpu_crtc *new_acrtc;
9746         bool needs_reset;
9747         int ret = 0;
9748
9750         new_plane_crtc = new_plane_state->crtc;
9751         old_plane_crtc = old_plane_state->crtc;
9752         dm_new_plane_state = to_dm_plane_state(new_plane_state);
9753         dm_old_plane_state = to_dm_plane_state(old_plane_state);
9754
9755         if (plane->type == DRM_PLANE_TYPE_CURSOR) {
9756                 if (!enable || !new_plane_crtc ||
9757                         drm_atomic_plane_disabling(plane->state, new_plane_state))
9758                         return 0;
9759
9760                 new_acrtc = to_amdgpu_crtc(new_plane_crtc);
9761
9762                 if (new_plane_state->src_x != 0 || new_plane_state->src_y != 0) {
9763                         DRM_DEBUG_ATOMIC("Cropping not supported for cursor plane\n");
9764                         return -EINVAL;
9765                 }
9766
9767                 if (new_plane_state->fb) {
9768                         ret = dm_check_cursor_fb(new_acrtc, new_plane_state,
9769                                                  new_plane_state->fb);
9770                         if (ret)
9771                                 return ret;
9772                 }
9773
9774                 return 0;
9775         }
9776
9777         needs_reset = should_reset_plane(state, plane, old_plane_state,
9778                                          new_plane_state);
9779
9780         /* Remove any changed/removed planes */
9781         if (!enable) {
9782                 if (!needs_reset)
9783                         return 0;
9784
9785                 if (!old_plane_crtc)
9786                         return 0;
9787
9788                 old_crtc_state = drm_atomic_get_old_crtc_state(
9789                                 state, old_plane_crtc);
9790                 dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
9791
9792                 if (!dm_old_crtc_state->stream)
9793                         return 0;
9794
9795                 DRM_DEBUG_ATOMIC("Disabling DRM plane: %d on DRM crtc %d\n",
9796                                 plane->base.id, old_plane_crtc->base.id);
9797
9798                 ret = dm_atomic_get_state(state, &dm_state);
9799                 if (ret)
9800                         return ret;
9801
9802                 if (!dc_remove_plane_from_context(
9803                                 dc,
9804                                 dm_old_crtc_state->stream,
9805                                 dm_old_plane_state->dc_state,
                                dm_state->context)) {
                        return -EINVAL;
                }

                dc_plane_state_release(dm_old_plane_state->dc_state);
9813                 dm_new_plane_state->dc_state = NULL;
9814
9815                 *lock_and_validation_needed = true;
9816
9817         } else { /* Add new planes */
9818                 struct dc_plane_state *dc_new_plane_state;
9819
9820                 if (drm_atomic_plane_disabling(plane->state, new_plane_state))
9821                         return 0;
9822
9823                 if (!new_plane_crtc)
9824                         return 0;
9825
9826                 new_crtc_state = drm_atomic_get_new_crtc_state(state, new_plane_crtc);
9827                 dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
9828
9829                 if (!dm_new_crtc_state->stream)
9830                         return 0;
9831
9832                 if (!needs_reset)
9833                         return 0;
9834
9835                 ret = dm_plane_helper_check_state(new_plane_state, new_crtc_state);
9836                 if (ret)
9837                         return ret;
9838
9839                 WARN_ON(dm_new_plane_state->dc_state);
9840
9841                 dc_new_plane_state = dc_create_plane_state(dc);
9842                 if (!dc_new_plane_state)
9843                         return -ENOMEM;
9844
9845                 DRM_DEBUG_ATOMIC("Enabling DRM plane: %d on DRM crtc %d\n",
9846                                  plane->base.id, new_plane_crtc->base.id);
9847
9848                 ret = fill_dc_plane_attributes(
9849                         drm_to_adev(new_plane_crtc->dev),
9850                         dc_new_plane_state,
9851                         new_plane_state,
9852                         new_crtc_state);
9853                 if (ret) {
9854                         dc_plane_state_release(dc_new_plane_state);
9855                         return ret;
9856                 }
9857
9858                 ret = dm_atomic_get_state(state, &dm_state);
9859                 if (ret) {
9860                         dc_plane_state_release(dc_new_plane_state);
9861                         return ret;
9862                 }
9863
9864                 /*
9865                  * Any atomic check errors that occur after this will
9866                  * not need a release. The plane state will be attached
9867                  * to the stream, and therefore part of the atomic
9868                  * state. It'll be released when the atomic state is
9869                  * cleaned.
9870                  */
9871                 if (!dc_add_plane_to_context(
9872                                 dc,
9873                                 dm_new_crtc_state->stream,
9874                                 dc_new_plane_state,
9875                                 dm_state->context)) {
9877                         dc_plane_state_release(dc_new_plane_state);
9878                         return -EINVAL;
9879                 }
9880
9881                 dm_new_plane_state->dc_state = dc_new_plane_state;
9882
                /*
                 * Tell DC to do a full surface update every time there
                 * is a plane change. Inefficient, but works for now.
                 */
9886                 dm_new_plane_state->dc_state->update_flags.bits.full_update = 1;
9887
9888                 *lock_and_validation_needed = true;
9889         }
9890
9892         return ret;
9893 }
9894
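/*
 * The scale factors below are in units of 1/1000th; plane src coordinates
 * are 16.16 fixed point, hence the >> 16. Illustrative example: a primary
 * plane scanned out 1:1 (src_w = 1920 << 16, crtc_w = 1920) has
 * 1920 * 1000 / 1920 = 1000, while a cursor upscaled 2x
 * (src_w = 64 << 16, crtc_w = 128) has 128 * 1000 / 64 = 2000, so the
 * combination is rejected.
 */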
9895 static int dm_check_crtc_cursor(struct drm_atomic_state *state,
9896                                 struct drm_crtc *crtc,
9897                                 struct drm_crtc_state *new_crtc_state)
9898 {
9899         struct drm_plane_state *new_cursor_state, *new_primary_state;
9900         int cursor_scale_w, cursor_scale_h, primary_scale_w, primary_scale_h;
9901
        /*
         * On DCE and DCN there is no dedicated hardware cursor plane. We get a
         * cursor per pipe, but it's going to inherit the scaling and
         * positioning from the underlying pipe. Check that the cursor plane's
         * blending properties match the primary plane's.
         */
9906
9907         new_cursor_state = drm_atomic_get_new_plane_state(state, crtc->cursor);
9908         new_primary_state = drm_atomic_get_new_plane_state(state, crtc->primary);
9909         if (!new_cursor_state || !new_primary_state ||
9910             !new_cursor_state->fb || !new_primary_state->fb) {
9911                 return 0;
9912         }
9913
9914         cursor_scale_w = new_cursor_state->crtc_w * 1000 /
9915                          (new_cursor_state->src_w >> 16);
9916         cursor_scale_h = new_cursor_state->crtc_h * 1000 /
9917                          (new_cursor_state->src_h >> 16);
9918
9919         primary_scale_w = new_primary_state->crtc_w * 1000 /
9920                          (new_primary_state->src_w >> 16);
9921         primary_scale_h = new_primary_state->crtc_h * 1000 /
9922                          (new_primary_state->src_h >> 16);
9923
9924         if (cursor_scale_w != primary_scale_w ||
9925             cursor_scale_h != primary_scale_h) {
9926                 DRM_DEBUG_ATOMIC("Cursor plane scaling doesn't match primary plane\n");
9927                 return -EINVAL;
9928         }
9929
9930         return 0;
9931 }
9932
9933 #if defined(CONFIG_DRM_AMD_DC_DCN)
9934 static int add_affected_mst_dsc_crtcs(struct drm_atomic_state *state, struct drm_crtc *crtc)
9935 {
9936         struct drm_connector *connector;
9937         struct drm_connector_state *conn_state;
9938         struct amdgpu_dm_connector *aconnector = NULL;
        int i;

        for_each_new_connector_in_state(state, connector, conn_state, i) {
9941                 if (conn_state->crtc != crtc)
9942                         continue;
9943
9944                 aconnector = to_amdgpu_dm_connector(connector);
9945                 if (!aconnector->port || !aconnector->mst_port)
9946                         aconnector = NULL;
9947                 else
9948                         break;
9949         }
9950
9951         if (!aconnector)
9952                 return 0;
9953
9954         return drm_dp_mst_add_affected_dsc_crtcs(state, &aconnector->mst_port->mst_mgr);
9955 }
9956 #endif
9957
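/*
 * Illustrative example of the containment check below: with an overlay at
 * crtc_x/y = 0/0 and crtc_w/h = 1920/1080, a primary plane at 0/0 sized
 * 1920x1080 is fully covered and passes, while a primary sized 2560x1440
 * sticks out of the overlay and fails with -EINVAL.
 */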
9958 static int validate_overlay(struct drm_atomic_state *state)
9959 {
9960         int i;
9961         struct drm_plane *plane;
9962         struct drm_plane_state *old_plane_state, *new_plane_state;
9963         struct drm_plane_state *primary_state, *overlay_state = NULL;
9964
        /* Find the new overlay plane state; the primary containment check comes below */
9966         for_each_oldnew_plane_in_state_reverse(state, plane, old_plane_state, new_plane_state, i) {
9967                 if (plane->type == DRM_PLANE_TYPE_OVERLAY) {
9968                         if (drm_atomic_plane_disabling(plane->state, new_plane_state))
9969                                 return 0;
9970
9971                         overlay_state = new_plane_state;
9972                         continue;
9973                 }
9974         }
9975
9976         /* check if we're making changes to the overlay plane */
9977         if (!overlay_state)
9978                 return 0;
9979
9980         /* check if overlay plane is enabled */
9981         if (!overlay_state->crtc)
9982                 return 0;
9983
9984         /* find the primary plane for the CRTC that the overlay is enabled on */
9985         primary_state = drm_atomic_get_plane_state(state, overlay_state->crtc->primary);
9986         if (IS_ERR(primary_state))
9987                 return PTR_ERR(primary_state);
9988
9989         /* check if primary plane is enabled */
9990         if (!primary_state->crtc)
9991                 return 0;
9992
9993         /* Perform the bounds check to ensure the overlay plane covers the primary */
9994         if (primary_state->crtc_x < overlay_state->crtc_x ||
9995             primary_state->crtc_y < overlay_state->crtc_y ||
9996             primary_state->crtc_x + primary_state->crtc_w > overlay_state->crtc_x + overlay_state->crtc_w ||
9997             primary_state->crtc_y + primary_state->crtc_h > overlay_state->crtc_y + overlay_state->crtc_h) {
9998                 DRM_DEBUG_ATOMIC("Overlay plane is enabled with hardware cursor but does not fully cover primary plane\n");
9999                 return -EINVAL;
10000         }
10001
10002         return 0;
10003 }
10004
10005 /**
 * amdgpu_dm_atomic_check() - Atomic check implementation for AMDGPU DM.
10007  * @dev: The DRM device
10008  * @state: The atomic state to commit
10009  *
10010  * Validate that the given atomic state is programmable by DC into hardware.
10011  * This involves constructing a &struct dc_state reflecting the new hardware
10012  * state we wish to commit, then querying DC to see if it is programmable. It's
10013  * important not to modify the existing DC state. Otherwise, atomic_check
10014  * may unexpectedly commit hardware changes.
10015  *
10016  * When validating the DC state, it's important that the right locks are
 * acquired. For the full update case, which removes/adds/updates streams
 * on one CRTC while flipping on another, acquiring the global lock
 * guarantees that any such full update commit will wait for the completion
 * of any outstanding flips using DRM's synchronization events.
10021  *
 * Note that DM adds the affected connectors for all CRTCs in state, even when
 * that might not seem necessary. This is because DC stream creation requires
 * the DC sink, which is tied to the DRM connector state. Cleaning this up
 * should be possible but is non-trivial - a possible TODO item.
10026  *
 * Return: 0 on success, or a negative error code if validation failed.
10028  */
10029 static int amdgpu_dm_atomic_check(struct drm_device *dev,
10030                                   struct drm_atomic_state *state)
10031 {
10032         struct amdgpu_device *adev = drm_to_adev(dev);
10033         struct dm_atomic_state *dm_state = NULL;
10034         struct dc *dc = adev->dm.dc;
10035         struct drm_connector *connector;
10036         struct drm_connector_state *old_con_state, *new_con_state;
10037         struct drm_crtc *crtc;
10038         struct drm_crtc_state *old_crtc_state, *new_crtc_state;
10039         struct drm_plane *plane;
10040         struct drm_plane_state *old_plane_state, *new_plane_state;
10041         enum dc_status status;
10042         int ret, i;
10043         bool lock_and_validation_needed = false;
10044         struct dm_crtc_state *dm_old_crtc_state;
10045
10046         trace_amdgpu_dm_atomic_check_begin(state);
10047
10048         ret = drm_atomic_helper_check_modeset(dev, state);
10049         if (ret)
10050                 goto fail;
10051
10052         /* Check connector changes */
10053         for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) {
10054                 struct dm_connector_state *dm_old_con_state = to_dm_connector_state(old_con_state);
10055                 struct dm_connector_state *dm_new_con_state = to_dm_connector_state(new_con_state);
10056
10057                 /* Skip connectors that are disabled or part of modeset already. */
10058                 if (!old_con_state->crtc && !new_con_state->crtc)
10059                         continue;
10060
10061                 if (!new_con_state->crtc)
10062                         continue;
10063
10064                 new_crtc_state = drm_atomic_get_crtc_state(state, new_con_state->crtc);
10065                 if (IS_ERR(new_crtc_state)) {
10066                         ret = PTR_ERR(new_crtc_state);
10067                         goto fail;
10068                 }
10069
10070                 if (dm_old_con_state->abm_level !=
10071                     dm_new_con_state->abm_level)
10072                         new_crtc_state->connectors_changed = true;
10073         }
10074
10075 #if defined(CONFIG_DRM_AMD_DC_DCN)
10076         if (dc_resource_is_dsc_encoding_supported(dc)) {
10077                 for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
10078                         if (drm_atomic_crtc_needs_modeset(new_crtc_state)) {
10079                                 ret = add_affected_mst_dsc_crtcs(state, crtc);
10080                                 if (ret)
10081                                         goto fail;
10082                         }
10083                 }
10084         }
10085 #endif
10086         for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
10087                 dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
10088
                if (!drm_atomic_crtc_needs_modeset(new_crtc_state) &&
                    !new_crtc_state->color_mgmt_changed &&
                    old_crtc_state->vrr_enabled == new_crtc_state->vrr_enabled &&
                    !dm_old_crtc_state->dsc_force_changed)
10093                         continue;
10094
10095                 if (!new_crtc_state->enable)
10096                         continue;
10097
10098                 ret = drm_atomic_add_affected_connectors(state, crtc);
10099                 if (ret)
                        goto fail;
10101
10102                 ret = drm_atomic_add_affected_planes(state, crtc);
10103                 if (ret)
10104                         goto fail;
10105
10106                 if (dm_old_crtc_state->dsc_force_changed)
10107                         new_crtc_state->mode_changed = true;
10108         }
10109
10110         /*
10111          * Add all primary and overlay planes on the CRTC to the state
10112          * whenever a plane is enabled to maintain correct z-ordering
10113          * and to enable fast surface updates.
10114          */
10115         drm_for_each_crtc(crtc, dev) {
10116                 bool modified = false;
10117
10118                 for_each_oldnew_plane_in_state(state, plane, old_plane_state, new_plane_state, i) {
10119                         if (plane->type == DRM_PLANE_TYPE_CURSOR)
10120                                 continue;
10121
10122                         if (new_plane_state->crtc == crtc ||
10123                             old_plane_state->crtc == crtc) {
10124                                 modified = true;
10125                                 break;
10126                         }
10127                 }
10128
10129                 if (!modified)
10130                         continue;
10131
10132                 drm_for_each_plane_mask(plane, state->dev, crtc->state->plane_mask) {
10133                         if (plane->type == DRM_PLANE_TYPE_CURSOR)
10134                                 continue;
10135
10136                         new_plane_state =
10137                                 drm_atomic_get_plane_state(state, plane);
10138
10139                         if (IS_ERR(new_plane_state)) {
10140                                 ret = PTR_ERR(new_plane_state);
10141                                 goto fail;
10142                         }
10143                 }
10144         }
10145
        /* Remove existing planes if they are modified */
10147         for_each_oldnew_plane_in_state_reverse(state, plane, old_plane_state, new_plane_state, i) {
10148                 ret = dm_update_plane_state(dc, state, plane,
10149                                             old_plane_state,
10150                                             new_plane_state,
10151                                             false,
10152                                             &lock_and_validation_needed);
10153                 if (ret)
10154                         goto fail;
10155         }
10156
10157         /* Disable all crtcs which require disable */
10158         for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
10159                 ret = dm_update_crtc_state(&adev->dm, state, crtc,
10160                                            old_crtc_state,
10161                                            new_crtc_state,
10162                                            false,
10163                                            &lock_and_validation_needed);
10164                 if (ret)
10165                         goto fail;
10166         }
10167
10168         /* Enable all crtcs which require enable */
10169         for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
10170                 ret = dm_update_crtc_state(&adev->dm, state, crtc,
10171                                            old_crtc_state,
10172                                            new_crtc_state,
10173                                            true,
10174                                            &lock_and_validation_needed);
10175                 if (ret)
10176                         goto fail;
10177         }
10178
10179         ret = validate_overlay(state);
10180         if (ret)
10181                 goto fail;
10182
10183         /* Add new/modified planes */
10184         for_each_oldnew_plane_in_state_reverse(state, plane, old_plane_state, new_plane_state, i) {
10185                 ret = dm_update_plane_state(dc, state, plane,
10186                                             old_plane_state,
10187                                             new_plane_state,
10188                                             true,
10189                                             &lock_and_validation_needed);
10190                 if (ret)
10191                         goto fail;
10192         }
10193
10194         /* Run this here since we want to validate the streams we created */
10195         ret = drm_atomic_helper_check_planes(dev, state);
10196         if (ret)
10197                 goto fail;
10198
10199         /* Check cursor planes scaling */
10200         for_each_new_crtc_in_state(state, crtc, new_crtc_state, i) {
10201                 ret = dm_check_crtc_cursor(state, crtc, new_crtc_state);
10202                 if (ret)
10203                         goto fail;
10204         }
10205
10206         if (state->legacy_cursor_update) {
10207                 /*
10208                  * This is a fast cursor update coming from the plane update
10209                  * helper, check if it can be done asynchronously for better
10210                  * performance.
10211                  */
10212                 state->async_update =
10213                         !drm_atomic_helper_async_check(dev, state);
10214
10215                 /*
10216                  * Skip the remaining global validation if this is an async
10217                  * update. Cursor updates can be done without affecting
10218                  * state or bandwidth calcs and this avoids the performance
10219                  * penalty of locking the private state object and
10220                  * allocating a new dc_state.
10221                  */
10222                 if (state->async_update)
10223                         return 0;
10224         }
10225
        /* Check scaling and underscan changes */
        /*
         * TODO: Removed scaling changes validation due to inability to commit
         * new stream into context w/o causing full reset. Need to
         * decide how to handle.
         */
10231         for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) {
10232                 struct dm_connector_state *dm_old_con_state = to_dm_connector_state(old_con_state);
10233                 struct dm_connector_state *dm_new_con_state = to_dm_connector_state(new_con_state);
10234                 struct amdgpu_crtc *acrtc = to_amdgpu_crtc(dm_new_con_state->base.crtc);
10235
10236                 /* Skip any modesets/resets */
10237                 if (!acrtc || drm_atomic_crtc_needs_modeset(
10238                                 drm_atomic_get_new_crtc_state(state, &acrtc->base)))
10239                         continue;
10240
                /* Skip anything that is not a scaling or underscan change */
10242                 if (!is_scaling_state_different(dm_new_con_state, dm_old_con_state))
10243                         continue;
10244
10245                 lock_and_validation_needed = true;
10246         }
10247
        /*
10249          * Streams and planes are reset when there are changes that affect
10250          * bandwidth. Anything that affects bandwidth needs to go through
10251          * DC global validation to ensure that the configuration can be applied
10252          * to hardware.
10253          *
10254          * We have to currently stall out here in atomic_check for outstanding
10255          * commits to finish in this case because our IRQ handlers reference
10256          * DRM state directly - we can end up disabling interrupts too early
10257          * if we don't.
10258          *
10259          * TODO: Remove this stall and drop DM state private objects.
10260          */
10261         if (lock_and_validation_needed) {
10262                 ret = dm_atomic_get_state(state, &dm_state);
10263                 if (ret)
10264                         goto fail;
10265
10266                 ret = do_aquire_global_lock(dev, state);
10267                 if (ret)
10268                         goto fail;
10269
10270 #if defined(CONFIG_DRM_AMD_DC_DCN)
                if (!compute_mst_dsc_configs_for_state(state, dm_state->context)) {
                        ret = -EINVAL;
                        goto fail;
                }
10273
10274                 ret = dm_update_mst_vcpi_slots_for_dsc(state, dm_state->context);
10275                 if (ret)
10276                         goto fail;
10277 #endif
10278
10279                 /*
10280                  * Perform validation of MST topology in the state:
                 * We need to perform the MST atomic check before calling
                 * dc_validate_global_state(), otherwise we risk getting
                 * stuck in an infinite loop and eventually hanging.
10284                  */
10285                 ret = drm_dp_mst_atomic_check(state);
10286                 if (ret)
10287                         goto fail;
10288                 status = dc_validate_global_state(dc, dm_state->context, false);
10289                 if (status != DC_OK) {
10290                         DC_LOG_WARNING("DC global validation failure: %s (%d)",
10291                                        dc_status_to_str(status), status);
10292                         ret = -EINVAL;
10293                         goto fail;
10294                 }
10295         } else {
10296                 /*
10297                  * The commit is a fast update. Fast updates shouldn't change
10298                  * the DC context, affect global validation, and can have their
10299                  * commit work done in parallel with other commits not touching
10300                  * the same resource. If we have a new DC context as part of
10301                  * the DM atomic state from validation we need to free it and
10302                  * retain the existing one instead.
10303                  *
10304                  * Furthermore, since the DM atomic state only contains the DC
10305                  * context and can safely be annulled, we can free the state
10306                  * and clear the associated private object now to free
10307                  * some memory and avoid a possible use-after-free later.
10308                  */
10309
10310                 for (i = 0; i < state->num_private_objs; i++) {
10311                         struct drm_private_obj *obj = state->private_objs[i].ptr;
10312
10313                         if (obj->funcs == adev->dm.atomic_obj.funcs) {
                                int j = state->num_private_objs - 1;
10315
10316                                 dm_atomic_destroy_state(obj,
10317                                                 state->private_objs[i].state);
10318
                                /*
                                 * If i is not at the end of the array then the
                                 * last element needs to be moved to where i was
                                 * before the array can safely be truncated.
                                 */
10323                                 if (i != j)
10324                                         state->private_objs[i] =
10325                                                 state->private_objs[j];
10326
10327                                 state->private_objs[j].ptr = NULL;
10328                                 state->private_objs[j].state = NULL;
10329                                 state->private_objs[j].old_state = NULL;
10330                                 state->private_objs[j].new_state = NULL;
10331
10332                                 state->num_private_objs = j;
10333                                 break;
10334                         }
10335                 }
10336         }
10337
10338         /* Store the overall update type for use later in atomic check. */
        for_each_new_crtc_in_state(state, crtc, new_crtc_state, i) {
10340                 struct dm_crtc_state *dm_new_crtc_state =
10341                         to_dm_crtc_state(new_crtc_state);
10342
10343                 dm_new_crtc_state->update_type = lock_and_validation_needed ?
10344                                                          UPDATE_TYPE_FULL :
10345                                                          UPDATE_TYPE_FAST;
10346         }
10347
10348         /* Must be success */
10349         WARN_ON(ret);
10350
10351         trace_amdgpu_dm_atomic_check_finish(state, ret);
10352
10353         return ret;
10354
10355 fail:
10356         if (ret == -EDEADLK)
10357                 DRM_DEBUG_DRIVER("Atomic check stopped to avoid deadlock.\n");
10358         else if (ret == -EINTR || ret == -EAGAIN || ret == -ERESTARTSYS)
10359                 DRM_DEBUG_DRIVER("Atomic check stopped due to signal.\n");
10360         else
                DRM_DEBUG_DRIVER("Atomic check failed with err: %d\n", ret);
10362
10363         trace_amdgpu_dm_atomic_check_finish(state, ret);
10364
10365         return ret;
10366 }
10367
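/*
 * Read DP_DOWN_STREAM_PORT_COUNT (DPCD 0x007) from the sink and test the
 * DP_MSA_TIMING_PAR_IGNORED bit: a sink that can ignore the MSA timing
 * parameters can track a variable refresh rate, which is what the DP/eDP
 * freesync path below keys off of.
 */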
10368 static bool is_dp_capable_without_timing_msa(struct dc *dc,
10369                                              struct amdgpu_dm_connector *amdgpu_dm_connector)
10370 {
10371         uint8_t dpcd_data;
10372         bool capable = false;
10373
10374         if (amdgpu_dm_connector->dc_link &&
10375                 dm_helpers_dp_read_dpcd(
10376                                 NULL,
10377                                 amdgpu_dm_connector->dc_link,
10378                                 DP_DOWN_STREAM_PORT_COUNT,
10379                                 &dpcd_data,
10380                                 sizeof(dpcd_data))) {
                capable = (dpcd_data & DP_MSA_TIMING_PAR_IGNORED) ? true : false;
10382         }
10383
10384         return capable;
10385 }
10386
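/*
 * Stream the CEA extension block to the DMCU/DMUB EDID parser in 8-byte
 * chunks, waiting for an ack after each chunk; once the last chunk has
 * been sent, read back whether an AMD VSDB was found and, if so, its
 * version and min/max refresh rates.
 */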
10387 static bool parse_edid_cea(struct amdgpu_dm_connector *aconnector,
10388                 uint8_t *edid_ext, int len,
10389                 struct amdgpu_hdmi_vsdb_info *vsdb_info)
10390 {
10391         int i;
10392         struct amdgpu_device *adev = drm_to_adev(aconnector->base.dev);
10393         struct dc *dc = adev->dm.dc;
10394
10395         /* send extension block to DMCU for parsing */
10396         for (i = 0; i < len; i += 8) {
10397                 bool res;
10398                 int offset;
10399
                /* send 8 bytes at a time */
10401                 if (!dc_edid_parser_send_cea(dc, i, len, &edid_ext[i], 8))
10402                         return false;
10403
                if (i + 8 == len) {
                        /* EDID block has been sent completely, expect result */
10406                         int version, min_rate, max_rate;
10407
10408                         res = dc_edid_parser_recv_amd_vsdb(dc, &version, &min_rate, &max_rate);
10409                         if (res) {
10410                                 /* amd vsdb found */
10411                                 vsdb_info->freesync_supported = 1;
10412                                 vsdb_info->amd_vsdb_version = version;
10413                                 vsdb_info->min_refresh_rate_hz = min_rate;
10414                                 vsdb_info->max_refresh_rate_hz = max_rate;
10415                                 return true;
10416                         }
10417                         /* not amd vsdb */
10418                         return false;
10419                 }
10420
                /* check for ack */
10422                 res = dc_edid_parser_recv_cea_ack(dc, &offset);
10423                 if (!res)
10424                         return false;
10425         }
10426
10427         return false;
10428 }
10429
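/*
 * EDID extension blocks are EDID_LENGTH (128) bytes each and follow the
 * 128-byte base block, so extension i starts at edid + 128 * (i + 1).
 * Find the CEA-861 extension and hand it to parse_edid_cea() to look for
 * the AMD vendor-specific data block. Returns the extension index on
 * success, -ENODEV otherwise.
 */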
10430 static int parse_hdmi_amd_vsdb(struct amdgpu_dm_connector *aconnector,
10431                 struct edid *edid, struct amdgpu_hdmi_vsdb_info *vsdb_info)
10432 {
10433         uint8_t *edid_ext = NULL;
10434         int i;
10435         bool valid_vsdb_found = false;
10436
10437         /*----- drm_find_cea_extension() -----*/
10438         /* No EDID or EDID extensions */
10439         if (edid == NULL || edid->extensions == 0)
10440                 return -ENODEV;
10441
10442         /* Find CEA extension */
10443         for (i = 0; i < edid->extensions; i++) {
10444                 edid_ext = (uint8_t *)edid + EDID_LENGTH * (i + 1);
10445                 if (edid_ext[0] == CEA_EXT)
10446                         break;
10447         }
10448
10449         if (i == edid->extensions)
10450                 return -ENODEV;
10451
10452         /*----- cea_db_offsets() -----*/
10453         if (edid_ext[0] != CEA_EXT)
10454                 return -ENODEV;
10455
10456         valid_vsdb_found = parse_edid_cea(aconnector, edid_ext, EDID_LENGTH, vsdb_info);
10457
10458         return valid_vsdb_found ? i : -ENODEV;
10459 }
10460
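/*
 * Derive the freesync capability of a connector from its EDID: for DP/eDP
 * sinks that may ignore MSA timing parameters, scan the detailed timing
 * descriptors for a monitor range descriptor (EDID_DETAIL_MONITOR_RANGE,
 * flags == 1) to get min/max vfreq; for HDMI sinks, query the AMD VSDB via
 * the DMCU/DMUB parser. A usable range wider than 10 Hz marks the connector
 * freesync-capable, which is then mirrored into the DRM "vrr_capable"
 * connector property.
 */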
10461 void amdgpu_dm_update_freesync_caps(struct drm_connector *connector,
10462                                         struct edid *edid)
10463 {
10464         int i = 0;
10465         struct detailed_timing *timing;
10466         struct detailed_non_pixel *data;
10467         struct detailed_data_monitor_range *range;
10468         struct amdgpu_dm_connector *amdgpu_dm_connector =
10469                         to_amdgpu_dm_connector(connector);
10470         struct dm_connector_state *dm_con_state = NULL;
10471
10472         struct drm_device *dev = connector->dev;
10473         struct amdgpu_device *adev = drm_to_adev(dev);
10474         bool freesync_capable = false;
10475         struct amdgpu_hdmi_vsdb_info vsdb_info = {0};
10476
10477         if (!connector->state) {
                DRM_ERROR("%s - Connector has no state\n", __func__);
10479                 goto update;
10480         }
10481
10482         if (!edid) {
10483                 dm_con_state = to_dm_connector_state(connector->state);
10484
10485                 amdgpu_dm_connector->min_vfreq = 0;
10486                 amdgpu_dm_connector->max_vfreq = 0;
10487                 amdgpu_dm_connector->pixel_clock_mhz = 0;
10488
10489                 goto update;
10490         }
10491
10492         dm_con_state = to_dm_connector_state(connector->state);
10493
10494         if (!amdgpu_dm_connector->dc_sink) {
                DRM_ERROR("dc_sink is NULL, could not add the freesync module.\n");
10496                 goto update;
10497         }
10498         if (!adev->dm.freesync_module)
10499                 goto update;
10500
10502         if (amdgpu_dm_connector->dc_sink->sink_signal == SIGNAL_TYPE_DISPLAY_PORT
10503                 || amdgpu_dm_connector->dc_sink->sink_signal == SIGNAL_TYPE_EDP) {
10504                 bool edid_check_required = false;
10505
10506                 if (edid) {
10507                         edid_check_required = is_dp_capable_without_timing_msa(
10508                                                 adev->dm.dc,
10509                                                 amdgpu_dm_connector);
10510                 }
10511
                if (edid_check_required && (edid->version > 1 ||
10513                    (edid->version == 1 && edid->revision > 1))) {
10514                         for (i = 0; i < 4; i++) {
10516                                 timing  = &edid->detailed_timings[i];
10517                                 data    = &timing->data.other_data;
10518                                 range   = &data->data.range;
10519                                 /*
10520                                  * A monitor range descriptor indicates continuous frequency support
10521                                  */
10522                                 if (data->type != EDID_DETAIL_MONITOR_RANGE)
10523                                         continue;
10524                                 /*
10525                                  * Check for flag range limits only. If flag == 1 then
10526                                  * no additional timing information provided.
10527                                  * Default GTF, GTF Secondary curve and CVT are not
10528                                  * supported
10529                                  */
10530                                 if (range->flags != 1)
10531                                         continue;
10532
10533                                 amdgpu_dm_connector->min_vfreq = range->min_vfreq;
10534                                 amdgpu_dm_connector->max_vfreq = range->max_vfreq;
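                                /*
                                 * The range descriptor stores the max pixel
                                 * clock in 10 MHz units; scale it to MHz here.
                                 */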
10535                                 amdgpu_dm_connector->pixel_clock_mhz =
10536                                         range->pixel_clock_mhz * 10;
10537
10538                                 connector->display_info.monitor_range.min_vfreq = range->min_vfreq;
10539                                 connector->display_info.monitor_range.max_vfreq = range->max_vfreq;
10540
10541                                 break;
10542                         }
10543
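                        /*
                         * Only report FreeSync capability when the sink
                         * offers a usable VRR window (more than 10 Hz wide).
                         */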
10544                         if (amdgpu_dm_connector->max_vfreq -
10545                             amdgpu_dm_connector->min_vfreq > 10)
10546                                 freesync_capable = true;
10549                 }
10550         } else if (edid && amdgpu_dm_connector->dc_sink->sink_signal == SIGNAL_TYPE_HDMI_TYPE_A) {
10551                 i = parse_hdmi_amd_vsdb(amdgpu_dm_connector, edid, &vsdb_info);
10552                 if (i >= 0 && vsdb_info.freesync_supported) {
10556                         amdgpu_dm_connector->min_vfreq = vsdb_info.min_refresh_rate_hz;
10557                         amdgpu_dm_connector->max_vfreq = vsdb_info.max_refresh_rate_hz;
10558                         if (amdgpu_dm_connector->max_vfreq - amdgpu_dm_connector->min_vfreq > 10)
10559                                 freesync_capable = true;
10560
10561                         connector->display_info.monitor_range.min_vfreq = vsdb_info.min_refresh_rate_hz;
10562                         connector->display_info.monitor_range.max_vfreq = vsdb_info.max_refresh_rate_hz;
10563                 }
10564         }
10565
10566 update:
10567         if (dm_con_state)
10568                 dm_con_state->freesync_capable = freesync_capable;
10569
10570         if (connector->vrr_capable_property)
10571                 drm_connector_set_vrr_capable_property(connector,
10572                                                        freesync_capable);
10573 }
10574
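/*
 * amdgpu_dm_set_psr_caps() - cache the sink's PSR capability
 * @link: link to query; only eDP links are considered
 *
 * Reads DP_PSR_SUPPORT from the sink's DPCD: a value of 0 marks PSR as
 * unsupported, anything else enables the PSR 1 feature path.
 */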
10575 static void amdgpu_dm_set_psr_caps(struct dc_link *link)
10576 {
10577         uint8_t dpcd_data[EDP_PSR_RECEIVER_CAP_SIZE];
10578
10579         if (!(link->connector_signal & SIGNAL_TYPE_EDP))
10580                 return;
10581         if (link->type == dc_connection_none)
10582                 return;
10583         if (dm_helpers_dp_read_dpcd(NULL, link, DP_PSR_SUPPORT,
10584                                         dpcd_data, sizeof(dpcd_data))) {
10585                 link->dpcd_caps.psr_caps.psr_version = dpcd_data[0];
10586
10587                 if (dpcd_data[0] == 0) {
10588                         link->psr_settings.psr_version = DC_PSR_VERSION_UNSUPPORTED;
10589                         link->psr_settings.psr_feature_enabled = false;
10590                 } else {
10591                         link->psr_settings.psr_version = DC_PSR_VERSION_1;
10592                         link->psr_settings.psr_feature_enabled = true;
10593                 }
10594
10595                 DRM_INFO("PSR support: %d\n", link->psr_settings.psr_feature_enabled);
10596         }
10597 }
10598
10599 /*
10600  * amdgpu_dm_link_setup_psr() - configure the PSR link for a stream
10601  * @stream: stream state
10602  *
10603  * Return: true on success
10604  */
10605 static bool amdgpu_dm_link_setup_psr(struct dc_stream_state *stream)
10606 {
10607         struct dc_link *link = NULL;
10608         struct psr_config psr_config = {0};
10609         struct psr_context psr_context = {0};
10610         bool ret = false;
10611
10612         if (stream == NULL)
10613                 return false;
10614
10615         link = stream->link;
10616
10617         psr_config.psr_version = link->dpcd_caps.psr_caps.psr_version;
10618
10619         if (psr_config.psr_version > 0) {
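                /* Driver-default PSR link tuning values */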
10620                 psr_config.psr_exit_link_training_required = 0x1;
10621                 psr_config.psr_frame_capture_indication_req = 0;
10622                 psr_config.psr_rfb_setup_time = 0x37;
10623                 psr_config.psr_sdp_transmit_line_num_deadline = 0x20;
10624                 psr_config.allow_smu_optimizations = 0x0;
10625
10626                 ret = dc_link_setup_psr(link, stream, &psr_config, &psr_context);
10627
10628         }
10629         DRM_DEBUG_DRIVER("PSR link: %d\n", link->psr_settings.psr_feature_enabled);
10630
10631         return ret;
10632 }
10633
10634 /*
10635  * amdgpu_dm_psr_enable() - enable PSR f/w
10636  * @stream: stream state
10637  *
10638  * Return: true on success
10639  */
10640 bool amdgpu_dm_psr_enable(struct dc_stream_state *stream)
10641 {
10642         struct dc_link *link = stream->link;
10643         unsigned int vsync_rate_hz = 0;
10644         struct dc_static_screen_params params = {0};
10645         /*
10646          * Calculate the number of static frames before generating an
10647          * interrupt to enter PSR; default to a fail-safe of 2 static frames.
10648          */
10649         unsigned int num_frames_static = 2;
10650
10651         DRM_DEBUG_DRIVER("Enabling psr...\n");
10652
10653         vsync_rate_hz = div64_u64(div64_u64((
10654                         stream->timing.pix_clk_100hz * 100),
10655                         stream->timing.v_total),
10656                         stream->timing.h_total);
10657
10658         /*
10659          * Calculate the number of frames (rounded up) such that at least
10660          * 30 ms of static screen time has passed.
10661          */
10662         if (vsync_rate_hz != 0) {
10663                 unsigned int frame_time_microsec = 1000000 / vsync_rate_hz;
10664                 num_frames_static = (30000 / frame_time_microsec) + 1;
10665         }
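        /*
         * Worked example (illustrative numbers): a 1080p timing with
         * pix_clk_100hz = 1485000, v_total = 1125 and h_total = 2200 gives
         * 148500000 / 1125 / 2200 = 60 Hz, i.e. a 16666 us frame time, so
         * num_frames_static = 30000 / 16666 + 1 = 2 frames of static screen
         * before PSR entry is requested.
         */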
10666
10667         params.triggers.cursor_update = true;
10668         params.triggers.overlay_update = true;
10669         params.triggers.surface_update = true;
10670         params.num_frames = num_frames_static;
10671
10672         dc_stream_set_static_screen_params(link->ctx->dc,
10673                                            &stream, 1,
10674                                            &params);
10675
10676         return dc_link_set_psr_allow_active(link, true, false, false);
10677 }
10678
10679 /*
10680  * amdgpu_dm_psr_disable() - disable psr f/w
10681  * @stream:  stream state
10682  *
10683  * Return: true if success
10684  */
10685 static bool amdgpu_dm_psr_disable(struct dc_stream_state *stream)
10686 {
10688         DRM_DEBUG_DRIVER("Disabling psr...\n");
10689
10690         return dc_link_set_psr_allow_active(stream->link, false, true, false);
10691 }
10692
10693 /*
10694  * amdgpu_dm_psr_disable_all() - disable PSR f/w if enabled on any stream
10695  * @dm: display manager state
10696  *
10697  * Return: true on success
10698  */
10699 static bool amdgpu_dm_psr_disable_all(struct amdgpu_display_manager *dm)
10700 {
10701         DRM_DEBUG_DRIVER("Disabling psr if psr is enabled on any stream\n");
10702         return dc_set_psr_allow_active(dm->dc, false);
10703 }
10704
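/*
 * amdgpu_dm_trigger_timing_sync() - re-evaluate CRTC timing synchronization
 * @dev: DRM device
 *
 * Propagates the force_timing_sync setting to every stream in the current
 * DC state and retriggers per-frame CRTC master synchronization, all under
 * the dc_lock.
 */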
10705 void amdgpu_dm_trigger_timing_sync(struct drm_device *dev)
10706 {
10707         struct amdgpu_device *adev = drm_to_adev(dev);
10708         struct dc *dc = adev->dm.dc;
10709         int i;
10710
10711         mutex_lock(&adev->dm.dc_lock);
10712         if (dc->current_state) {
10713                 for (i = 0; i < dc->current_state->stream_count; ++i)
10714                         dc->current_state->streams[i]
10715                                 ->triggered_crtc_reset.enabled =
10716                                 adev->dm.force_timing_sync;
10717
10718                 dm_enable_per_frame_crtc_master_sync(dc->current_state);
10719                 dc_trigger_sync(dc, dc->current_state);
10720         }
10721         mutex_unlock(&adev->dm.dc_lock);
10722 }
10723
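/*
 * dm_write_reg_func() - MMIO register write wrapper used by DC
 *
 * Writes @value to @address through the CGS layer and emits an
 * amdgpu_dc_wreg trace event so register traffic can be accounted.
 */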
10724 void dm_write_reg_func(const struct dc_context *ctx, uint32_t address,
10725                        uint32_t value, const char *func_name)
10726 {
10727 #ifdef DM_CHECK_ADDR_0
10728         if (address == 0) {
10729                 DC_ERR("invalid register write; address = 0\n");
10730                 return;
10731         }
10732 #endif
10733         cgs_write_register(ctx->cgs_device, address, value);
10734         trace_amdgpu_dc_wreg(&ctx->perf_trace->write_count, address, value);
10735 }
10736
10737 uint32_t dm_read_reg_func(const struct dc_context *ctx, uint32_t address,
10738                           const char *func_name)
10739 {
10740         uint32_t value;
10741 #ifdef DM_CHECK_ADDR_0
10742         if (address == 0) {
10743                 DC_ERR("invalid register read; address = 0\n");
10744                 return 0;
10745         }
10746 #endif
10747
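        /*
         * Reads cannot be serviced while the DMUB register helper is
         * gathering writes for offload (unless it is burst-writing), so
         * flag the sequencing error and return 0.
         */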
10748         if (ctx->dmub_srv &&
10749             ctx->dmub_srv->reg_helper_offload.gather_in_progress &&
10750             !ctx->dmub_srv->reg_helper_offload.should_burst_write) {
10751                 ASSERT(false);
10752                 return 0;
10753         }
10754
10755         value = cgs_read_register(ctx->cgs_device, address);
10756
10757         trace_amdgpu_dc_rreg(&ctx->perf_trace->read_count, address, value);
10758
10759         return value;
10760 }
10761
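/*
 * amdgpu_dm_process_dmub_aux_transfer_sync() - synchronous DMUB AUX transfer
 *
 * Kicks off the AUX request through DMUB and blocks for up to 10 seconds on
 * the dmub_aux_transfer_done completion. Returns the AUX reply length on
 * success and -1 on timeout, with *operation_result carrying the detailed
 * AUX return code.
 */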
10762 int amdgpu_dm_process_dmub_aux_transfer_sync(struct dc_context *ctx, unsigned int linkIndex,
10763                                 struct aux_payload *payload, enum aux_return_code_type *operation_result)
10764 {
10765         struct amdgpu_device *adev = ctx->driver_context;
10766         int ret = 0;
10767
10768         dc_process_dmub_aux_transfer_async(ctx->dc, linkIndex, payload);
10769         ret = wait_for_completion_interruptible_timeout(&adev->dm.dmub_aux_transfer_done, 10*HZ);
10770         if (ret == 0) {
10771                 *operation_result = AUX_RET_ERROR_TIMEOUT;
10772                 return -1;
10773         }
10774         *operation_result = (enum aux_return_code_type)adev->dm.dmub_notify->result;
10775
10776         if (adev->dm.dmub_notify->result == AUX_RET_SUCCESS) {
10777                 (*payload->reply) = adev->dm.dmub_notify->aux_reply.command;
10778
10779                 /* For the read case, copy the reply data into the payload */
10780                 if (!payload->write && adev->dm.dmub_notify->aux_reply.length &&
10781                     (*payload->reply == AUX_TRANSACTION_REPLY_AUX_ACK))
10782                         memcpy(payload->data, adev->dm.dmub_notify->aux_reply.data,
10783                                adev->dm.dmub_notify->aux_reply.length);
10784         }
10785
10786         return adev->dm.dmub_notify->aux_reply.length;
10787 }