drm/amdgpu/display: remove an old DCN3 guard
drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c (platform/kernel/linux-starfive.git)
/*
 * Copyright 2015 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: AMD
 *
 */

/* The caprices of the preprocessor require that this be declared right here */
#define CREATE_TRACE_POINTS

#include "dm_services_types.h"
#include "dc.h"
#include "dc/inc/core_types.h"
#include "dal_asic_id.h"
#include "dmub/dmub_srv.h"
#include "dc/inc/hw/dmcu.h"
#include "dc/inc/hw/abm.h"
#include "dc/dc_dmub_srv.h"
#include "dc/dc_edid_parser.h"
#include "dc/dc_stat.h"
#include "amdgpu_dm_trace.h"

#include "vid.h"
#include "amdgpu.h"
#include "amdgpu_display.h"
#include "amdgpu_ucode.h"
#include "atom.h"
#include "amdgpu_dm.h"
#ifdef CONFIG_DRM_AMD_DC_HDCP
#include "amdgpu_dm_hdcp.h"
#include <drm/drm_hdcp.h>
#endif
#include "amdgpu_pm.h"

#include "amd_shared.h"
#include "amdgpu_dm_irq.h"
#include "dm_helpers.h"
#include "amdgpu_dm_mst_types.h"
#if defined(CONFIG_DEBUG_FS)
#include "amdgpu_dm_debugfs.h"
#endif

#include "ivsrcid/ivsrcid_vislands30.h"

#include "i2caux_interface.h"
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/types.h>
#include <linux/pm_runtime.h>
#include <linux/pci.h>
#include <linux/firmware.h>
#include <linux/component.h>

#include <drm/drm_atomic.h>
#include <drm/drm_atomic_uapi.h>
#include <drm/drm_atomic_helper.h>
#include <drm/drm_dp_mst_helper.h>
#include <drm/drm_fb_helper.h>
#include <drm/drm_fourcc.h>
#include <drm/drm_edid.h>
#include <drm/drm_vblank.h>
#include <drm/drm_audio_component.h>

#if defined(CONFIG_DRM_AMD_DC_DCN)
#include "ivsrcid/dcn/irqsrcs_dcn_1_0.h"

#include "dcn/dcn_1_0_offset.h"
#include "dcn/dcn_1_0_sh_mask.h"
#include "soc15_hw_ip.h"
#include "vega10_ip_offset.h"

#include "soc15_common.h"
#endif

#include "modules/inc/mod_freesync.h"
#include "modules/power/power_helpers.h"
#include "modules/inc/mod_info_packet.h"

#define FIRMWARE_RENOIR_DMUB "amdgpu/renoir_dmcub.bin"
MODULE_FIRMWARE(FIRMWARE_RENOIR_DMUB);
#define FIRMWARE_SIENNA_CICHLID_DMUB "amdgpu/sienna_cichlid_dmcub.bin"
MODULE_FIRMWARE(FIRMWARE_SIENNA_CICHLID_DMUB);
#define FIRMWARE_NAVY_FLOUNDER_DMUB "amdgpu/navy_flounder_dmcub.bin"
MODULE_FIRMWARE(FIRMWARE_NAVY_FLOUNDER_DMUB);
#define FIRMWARE_GREEN_SARDINE_DMUB "amdgpu/green_sardine_dmcub.bin"
MODULE_FIRMWARE(FIRMWARE_GREEN_SARDINE_DMUB);
#define FIRMWARE_VANGOGH_DMUB "amdgpu/vangogh_dmcub.bin"
MODULE_FIRMWARE(FIRMWARE_VANGOGH_DMUB);
#define FIRMWARE_DIMGREY_CAVEFISH_DMUB "amdgpu/dimgrey_cavefish_dmcub.bin"
MODULE_FIRMWARE(FIRMWARE_DIMGREY_CAVEFISH_DMUB);

#define FIRMWARE_RAVEN_DMCU             "amdgpu/raven_dmcu.bin"
MODULE_FIRMWARE(FIRMWARE_RAVEN_DMCU);

#define FIRMWARE_NAVI12_DMCU            "amdgpu/navi12_dmcu.bin"
MODULE_FIRMWARE(FIRMWARE_NAVI12_DMCU);

/* Number of bytes in PSP header for firmware. */
#define PSP_HEADER_BYTES 0x100

/* Number of bytes in PSP footer for firmware. */
#define PSP_FOOTER_BYTES 0x100

/**
 * DOC: overview
 *
 * The AMDgpu display manager, **amdgpu_dm** (or even simpler,
 * **dm**) sits between DRM and DC. It acts as a liaison, converting DRM
 * requests into DC requests, and DC responses into DRM responses.
 *
 * The root control structure is &struct amdgpu_display_manager.
 */

/* basic init/fini API */
static int amdgpu_dm_init(struct amdgpu_device *adev);
static void amdgpu_dm_fini(struct amdgpu_device *adev);
static bool is_freesync_video_mode(const struct drm_display_mode *mode,
                                   struct amdgpu_dm_connector *aconnector);

static enum drm_mode_subconnector get_subconnector_type(struct dc_link *link)
{
        switch (link->dpcd_caps.dongle_type) {
        case DISPLAY_DONGLE_NONE:
                return DRM_MODE_SUBCONNECTOR_Native;
        case DISPLAY_DONGLE_DP_VGA_CONVERTER:
                return DRM_MODE_SUBCONNECTOR_VGA;
        case DISPLAY_DONGLE_DP_DVI_CONVERTER:
        case DISPLAY_DONGLE_DP_DVI_DONGLE:
                return DRM_MODE_SUBCONNECTOR_DVID;
        case DISPLAY_DONGLE_DP_HDMI_CONVERTER:
        case DISPLAY_DONGLE_DP_HDMI_DONGLE:
                return DRM_MODE_SUBCONNECTOR_HDMIA;
        case DISPLAY_DONGLE_DP_HDMI_MISMATCHED_DONGLE:
        default:
                return DRM_MODE_SUBCONNECTOR_Unknown;
        }
}

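/* Mirror the dongle type reported by DC into the DRM DP subconnector property. */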
static void update_subconnector_property(struct amdgpu_dm_connector *aconnector)
{
        struct dc_link *link = aconnector->dc_link;
        struct drm_connector *connector = &aconnector->base;
        enum drm_mode_subconnector subconnector = DRM_MODE_SUBCONNECTOR_Unknown;

        if (connector->connector_type != DRM_MODE_CONNECTOR_DisplayPort)
                return;

        if (aconnector->dc_sink)
                subconnector = get_subconnector_type(link);

        drm_object_property_set_value(&connector->base,
                        connector->dev->mode_config.dp_subconnector_property,
                        subconnector);
}

/*
 * Initializes drm_device display related structures, based on the information
 * provided by DAL. The drm structures are: drm_crtc, drm_connector,
 * drm_encoder and drm_mode_config.
 *
 * Returns 0 on success
 */
static int amdgpu_dm_initialize_drm_device(struct amdgpu_device *adev);
/* Removes and deallocates the drm structures created by the above function. */
static void amdgpu_dm_destroy_drm_device(struct amdgpu_display_manager *dm);

static int amdgpu_dm_plane_init(struct amdgpu_display_manager *dm,
                                struct drm_plane *plane,
                                unsigned long possible_crtcs,
                                const struct dc_plane_cap *plane_cap);
static int amdgpu_dm_crtc_init(struct amdgpu_display_manager *dm,
                               struct drm_plane *plane,
                               uint32_t link_index);
static int amdgpu_dm_connector_init(struct amdgpu_display_manager *dm,
                                    struct amdgpu_dm_connector *amdgpu_dm_connector,
                                    uint32_t link_index,
                                    struct amdgpu_encoder *amdgpu_encoder);
static int amdgpu_dm_encoder_init(struct drm_device *dev,
                                  struct amdgpu_encoder *aencoder,
                                  uint32_t link_index);

static int amdgpu_dm_connector_get_modes(struct drm_connector *connector);

static void amdgpu_dm_atomic_commit_tail(struct drm_atomic_state *state);

static int amdgpu_dm_atomic_check(struct drm_device *dev,
                                  struct drm_atomic_state *state);

static void handle_cursor_update(struct drm_plane *plane,
                                 struct drm_plane_state *old_plane_state);

static void amdgpu_dm_set_psr_caps(struct dc_link *link);
static bool amdgpu_dm_psr_enable(struct dc_stream_state *stream);
static bool amdgpu_dm_link_setup_psr(struct dc_stream_state *stream);
static bool amdgpu_dm_psr_disable(struct dc_stream_state *stream);
static bool amdgpu_dm_psr_disable_all(struct amdgpu_display_manager *dm);

static const struct drm_format_info *
amd_get_format_info(const struct drm_mode_fb_cmd2 *cmd);

static bool
is_timing_unchanged_for_freesync(struct drm_crtc_state *old_crtc_state,
                                 struct drm_crtc_state *new_crtc_state);

/**
 * dm_vblank_get_counter() - Get the counter for the number of vertical blanks
 * @adev: desired amdgpu device
 * @crtc: index of the CRTC to get the counter from
 *
 * Return: the counter for vertical blanks, or 0 on error.
 */
static u32 dm_vblank_get_counter(struct amdgpu_device *adev, int crtc)
{
        if (crtc >= adev->mode_info.num_crtc)
                return 0;
        else {
                struct amdgpu_crtc *acrtc = adev->mode_info.crtcs[crtc];

                if (acrtc->dm_irq_params.stream == NULL) {
                        DRM_ERROR("dc_stream_state is NULL for crtc '%d'!\n",
                                  crtc);
                        return 0;
                }

                return dc_stream_get_vblank_counter(acrtc->dm_irq_params.stream);
        }
}

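/*
 * Read back the current scanout position and vblank interval for a CRTC,
 * packed in the legacy register format expected by the base driver.
 */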
static int dm_crtc_get_scanoutpos(struct amdgpu_device *adev, int crtc,
                                  u32 *vbl, u32 *position)
{
        uint32_t v_blank_start, v_blank_end, h_position, v_position;

        if ((crtc < 0) || (crtc >= adev->mode_info.num_crtc))
                return -EINVAL;
        else {
                struct amdgpu_crtc *acrtc = adev->mode_info.crtcs[crtc];

                if (acrtc->dm_irq_params.stream == NULL) {
                        DRM_ERROR("dc_stream_state is NULL for crtc '%d'!\n",
                                  crtc);
                        return 0;
                }

                /*
                 * TODO rework base driver to use values directly.
                 * for now parse it back into reg-format
                 */
                dc_stream_get_scanoutpos(acrtc->dm_irq_params.stream,
                                         &v_blank_start,
                                         &v_blank_end,
                                         &h_position,
                                         &v_position);

                *position = v_position | (h_position << 16);
                *vbl = v_blank_start | (v_blank_end << 16);
        }

        return 0;
}

static bool dm_is_idle(void *handle)
{
        /* XXX todo */
        return true;
}

static int dm_wait_for_idle(void *handle)
{
        /* XXX todo */
        return 0;
}

static bool dm_check_soft_reset(void *handle)
{
        return false;
}

static int dm_soft_reset(void *handle)
{
        /* XXX todo */
        return 0;
}

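/* Look up the amdgpu_crtc driven by the given OTG (output timing generator) instance. */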
static struct amdgpu_crtc *
get_crtc_by_otg_inst(struct amdgpu_device *adev,
                     int otg_inst)
{
        struct drm_device *dev = adev_to_drm(adev);
        struct drm_crtc *crtc;
        struct amdgpu_crtc *amdgpu_crtc;

        if (otg_inst == -1) {
                WARN_ON(1);
                return adev->mode_info.crtcs[0];
        }

        list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
                amdgpu_crtc = to_amdgpu_crtc(crtc);

                if (amdgpu_crtc->otg_inst == otg_inst)
                        return amdgpu_crtc;
        }

        return NULL;
}

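/*
 * VRR state helpers: FreeSync counts as active in both the variable and the
 * fixed (below-the-range) refresh states.
 */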
static inline bool amdgpu_dm_vrr_active_irq(struct amdgpu_crtc *acrtc)
{
        return acrtc->dm_irq_params.freesync_config.state ==
                       VRR_STATE_ACTIVE_VARIABLE ||
               acrtc->dm_irq_params.freesync_config.state ==
                       VRR_STATE_ACTIVE_FIXED;
}

static inline bool amdgpu_dm_vrr_active(struct dm_crtc_state *dm_state)
{
        return dm_state->freesync_config.state == VRR_STATE_ACTIVE_VARIABLE ||
               dm_state->freesync_config.state == VRR_STATE_ACTIVE_FIXED;
}

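/* DC timing needs a re-adjust when entering fixed VRR or when the VRR active state toggles. */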
static inline bool is_dc_timing_adjust_needed(struct dm_crtc_state *old_state,
                                              struct dm_crtc_state *new_state)
{
        if (new_state->freesync_config.state == VRR_STATE_ACTIVE_FIXED)
                return true;
        else if (amdgpu_dm_vrr_active(old_state) != amdgpu_dm_vrr_active(new_state))
                return true;
        else
                return false;
}

/**
 * dm_pflip_high_irq() - Handle pageflip interrupt
 * @interrupt_params: used to retrieve the device and the pageflip IRQ source
 *
 * Handles the pageflip interrupt by notifying all interested parties
 * that the pageflip has been completed.
 */
static void dm_pflip_high_irq(void *interrupt_params)
{
        struct amdgpu_crtc *amdgpu_crtc;
        struct common_irq_params *irq_params = interrupt_params;
        struct amdgpu_device *adev = irq_params->adev;
        unsigned long flags;
        struct drm_pending_vblank_event *e;
        uint32_t vpos, hpos, v_blank_start, v_blank_end;
        bool vrr_active;

        amdgpu_crtc = get_crtc_by_otg_inst(adev, irq_params->irq_src - IRQ_TYPE_PFLIP);

        /* IRQ could occur when in initial stage */
        /* TODO work and BO cleanup */
        if (amdgpu_crtc == NULL) {
                DC_LOG_PFLIP("CRTC is null, returning.\n");
                return;
        }

        spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);

        if (amdgpu_crtc->pflip_status != AMDGPU_FLIP_SUBMITTED) {
                DC_LOG_PFLIP("amdgpu_crtc->pflip_status = %d != AMDGPU_FLIP_SUBMITTED(%d) on crtc:%d[%p]\n",
                             amdgpu_crtc->pflip_status,
                             AMDGPU_FLIP_SUBMITTED,
                             amdgpu_crtc->crtc_id,
                             amdgpu_crtc);
                spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
                return;
        }

        /* page flip completed. */
        e = amdgpu_crtc->event;
        amdgpu_crtc->event = NULL;

        WARN_ON(!e);

        vrr_active = amdgpu_dm_vrr_active_irq(amdgpu_crtc);

        /* Fixed refresh rate, or VRR scanout position outside front-porch? */
        if (!vrr_active ||
            !dc_stream_get_scanoutpos(amdgpu_crtc->dm_irq_params.stream, &v_blank_start,
                                      &v_blank_end, &hpos, &vpos) ||
            (vpos < v_blank_start)) {
                /* Update to correct count and vblank timestamp if racing with
                 * vblank irq. This also updates to the correct vblank timestamp
                 * even in VRR mode, as scanout is past the front-porch atm.
                 */
                drm_crtc_accurate_vblank_count(&amdgpu_crtc->base);

                /* Wake up userspace by sending the pageflip event with proper
                 * count and timestamp of vblank of flip completion.
                 */
                if (e) {
                        drm_crtc_send_vblank_event(&amdgpu_crtc->base, e);

                        /* Event sent, so done with vblank for this flip */
                        drm_crtc_vblank_put(&amdgpu_crtc->base);
                }
        } else if (e) {
                /* VRR active and inside front-porch: vblank count and
                 * timestamp for pageflip event will only be up to date after
                 * drm_crtc_handle_vblank() has been executed from late vblank
                 * irq handler after start of back-porch (vline 0). We queue the
                 * pageflip event for send-out by drm_crtc_handle_vblank() with
                 * updated timestamp and count, once it runs after us.
                 *
                 * We need to open-code this instead of using the helper
                 * drm_crtc_arm_vblank_event(), as that helper would
                 * call drm_crtc_accurate_vblank_count(), which we must
                 * not call in VRR mode while we are in front-porch!
                 */

                /* sequence will be replaced by real count during send-out. */
                e->sequence = drm_crtc_vblank_count(&amdgpu_crtc->base);
                e->pipe = amdgpu_crtc->crtc_id;

                list_add_tail(&e->base.link, &adev_to_drm(adev)->vblank_event_list);
                e = NULL;
        }

        /* Keep track of vblank of this flip for flip throttling. We use the
         * cooked hw counter, as that one incremented at start of this vblank
         * of pageflip completion, so last_flip_vblank is the forbidden count
         * for queueing new pageflips if vsync + VRR is enabled.
         */
        amdgpu_crtc->dm_irq_params.last_flip_vblank =
                amdgpu_get_vblank_counter_kms(&amdgpu_crtc->base);

        amdgpu_crtc->pflip_status = AMDGPU_FLIP_NONE;
        spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);

        DC_LOG_PFLIP("crtc:%d[%p], pflip_stat:AMDGPU_FLIP_NONE, vrr[%d]-fp %d\n",
                     amdgpu_crtc->crtc_id, amdgpu_crtc,
                     vrr_active, (int)!e);
}

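/**
 * dm_vupdate_high_irq() - Handles VUPDATE interrupt
 * @interrupt_params: used for determining the VUPDATE instance
 *
 * Tracks the frame duration for refresh-rate tracing and, in VRR mode,
 * performs the core vblank handling (plus BTR processing on pre-DCE12
 * ASICs) after the end of the front-porch.
 */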
static void dm_vupdate_high_irq(void *interrupt_params)
{
        struct common_irq_params *irq_params = interrupt_params;
        struct amdgpu_device *adev = irq_params->adev;
        struct amdgpu_crtc *acrtc;
        struct drm_device *drm_dev;
        struct drm_vblank_crtc *vblank;
        ktime_t frame_duration_ns, previous_timestamp;
        unsigned long flags;
        int vrr_active;

        acrtc = get_crtc_by_otg_inst(adev, irq_params->irq_src - IRQ_TYPE_VUPDATE);

        if (acrtc) {
                vrr_active = amdgpu_dm_vrr_active_irq(acrtc);
                drm_dev = acrtc->base.dev;
                vblank = &drm_dev->vblank[acrtc->base.index];
                previous_timestamp = atomic64_read(&irq_params->previous_timestamp);
                frame_duration_ns = vblank->time - previous_timestamp;

                if (frame_duration_ns > 0) {
                        trace_amdgpu_refresh_rate_track(acrtc->base.index,
                                                frame_duration_ns,
                                                ktime_divns(NSEC_PER_SEC, frame_duration_ns));
                        atomic64_set(&irq_params->previous_timestamp, vblank->time);
                }

                DC_LOG_VBLANK("crtc:%d, vupdate-vrr:%d\n",
                              acrtc->crtc_id,
                              vrr_active);

                /* Core vblank handling is done here after end of front-porch in
                 * vrr mode, as vblank timestamping will give valid results
                 * only once scanout is past the front-porch. This will also
                 * deliver page-flip completion events that were queued to us
                 * if a pageflip happened inside front-porch.
                 */
                if (vrr_active) {
                        drm_crtc_handle_vblank(&acrtc->base);

                        /* BTR processing for pre-DCE12 ASICs */
                        if (acrtc->dm_irq_params.stream &&
                            adev->family < AMDGPU_FAMILY_AI) {
                                spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);
                                mod_freesync_handle_v_update(
                                    adev->dm.freesync_module,
                                    acrtc->dm_irq_params.stream,
                                    &acrtc->dm_irq_params.vrr_params);

                                dc_stream_adjust_vmin_vmax(
                                    adev->dm.dc,
                                    acrtc->dm_irq_params.stream,
                                    &acrtc->dm_irq_params.vrr_params.adjust);
                                spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
                        }
                }
        }
}

/**
 * dm_crtc_high_irq() - Handles CRTC interrupt
 * @interrupt_params: used for determining the CRTC instance
 *
 * Handles the CRTC/VSYNC interrupt by notifying DRM's VBLANK
 * event handler.
 */
static void dm_crtc_high_irq(void *interrupt_params)
{
        struct common_irq_params *irq_params = interrupt_params;
        struct amdgpu_device *adev = irq_params->adev;
        struct amdgpu_crtc *acrtc;
        unsigned long flags;
        int vrr_active;

        acrtc = get_crtc_by_otg_inst(adev, irq_params->irq_src - IRQ_TYPE_VBLANK);
        if (!acrtc)
                return;

        vrr_active = amdgpu_dm_vrr_active_irq(acrtc);

        DC_LOG_VBLANK("crtc:%d, vupdate-vrr:%d, planes:%d\n", acrtc->crtc_id,
                      vrr_active, acrtc->dm_irq_params.active_planes);

        /* Core vblank handling at start of front-porch is only possible
         * in non-vrr mode, as only there vblank timestamping will give
         * valid results while done in front-porch. Otherwise defer it
         * to dm_vupdate_high_irq after end of front-porch.
         */
        if (!vrr_active)
                drm_crtc_handle_vblank(&acrtc->base);

        /* Following stuff must happen at start of vblank, for crc
         * computation and below-the-range btr support in vrr mode.
         */
        amdgpu_dm_crtc_handle_crc_irq(&acrtc->base);

        /* BTR updates need to happen before VUPDATE on Vega and above. */
        if (adev->family < AMDGPU_FAMILY_AI)
                return;

        spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);

        if (acrtc->dm_irq_params.stream &&
            acrtc->dm_irq_params.vrr_params.supported &&
            acrtc->dm_irq_params.freesync_config.state ==
                    VRR_STATE_ACTIVE_VARIABLE) {
                mod_freesync_handle_v_update(adev->dm.freesync_module,
                                             acrtc->dm_irq_params.stream,
                                             &acrtc->dm_irq_params.vrr_params);

                dc_stream_adjust_vmin_vmax(adev->dm.dc, acrtc->dm_irq_params.stream,
                                           &acrtc->dm_irq_params.vrr_params.adjust);
        }

        /*
         * If there aren't any active_planes then DCH HUBP may be clock-gated.
         * In that case, pageflip completion interrupts won't fire and pageflip
         * completion events won't get delivered. Prevent this by sending
         * pending pageflip events from here if a flip is still pending.
         *
         * If any planes are enabled, use dm_pflip_high_irq() instead, to
         * avoid race conditions between flip programming and completion,
         * which could cause too early flip completion events.
         */
        if (adev->family >= AMDGPU_FAMILY_RV &&
            acrtc->pflip_status == AMDGPU_FLIP_SUBMITTED &&
            acrtc->dm_irq_params.active_planes == 0) {
                if (acrtc->event) {
                        drm_crtc_send_vblank_event(&acrtc->base, acrtc->event);
                        acrtc->event = NULL;
                        drm_crtc_vblank_put(&acrtc->base);
                }
                acrtc->pflip_status = AMDGPU_FLIP_NONE;
        }

        spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
}

#if defined(CONFIG_DRM_AMD_DC_DCN)
/**
 * dm_dcn_vertical_interrupt0_high_irq() - Handles OTG Vertical interrupt0 for
 * DCN generation ASICs
 * @interrupt_params: interrupt parameters
 *
 * Used to set the CRC window/read out the CRC value at the vertical line 0
 * position.
 */
#if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
static void dm_dcn_vertical_interrupt0_high_irq(void *interrupt_params)
{
        struct common_irq_params *irq_params = interrupt_params;
        struct amdgpu_device *adev = irq_params->adev;
        struct amdgpu_crtc *acrtc;

        acrtc = get_crtc_by_otg_inst(adev, irq_params->irq_src - IRQ_TYPE_VLINE0);

        if (!acrtc)
                return;

        amdgpu_dm_crtc_handle_crc_window_irq(&acrtc->base);
}
#endif
#endif

#define DMUB_TRACE_MAX_READ 64

/**
 * dm_dmub_outbox1_low_irq() - Handles Outbox interrupt
 * @interrupt_params: used for determining the Outbox instance
 *
 * Handles the Outbox interrupt by reading pending DMUB notifications and
 * draining the DMCUB trace buffer.
 */
static void dm_dmub_outbox1_low_irq(void *interrupt_params)
{
        struct dmub_notification notify;
        struct common_irq_params *irq_params = interrupt_params;
        struct amdgpu_device *adev = irq_params->adev;
        struct amdgpu_display_manager *dm = &adev->dm;
        struct dmcub_trace_buf_entry entry = { 0 };
        uint32_t count = 0;

        if (dc_enable_dmub_notifications(adev->dm.dc)) {
                if (irq_params->irq_src == DC_IRQ_SOURCE_DMCUB_OUTBOX) {
                        do {
                                dc_stat_get_dmub_notification(adev->dm.dc, &notify);
                        } while (notify.pending_notification);

                        if (adev->dm.dmub_notify)
                                memcpy(adev->dm.dmub_notify, &notify, sizeof(struct dmub_notification));
                        if (notify.type == DMUB_NOTIFICATION_AUX_REPLY)
                                complete(&adev->dm.dmub_aux_transfer_done);
                        /* TODO: HPD implementation */

                } else {
                        DRM_ERROR("DM: Failed to receive correct outbox IRQ!\n");
                }
        }

        do {
                if (dc_dmub_srv_get_dmub_outbox0_msg(dm->dc, &entry)) {
                        trace_amdgpu_dmub_trace_high_irq(entry.trace_code, entry.tick_count,
                                                         entry.param0, entry.param1);

                        DRM_DEBUG_DRIVER("trace_code:%u, tick_count:%u, param0:%u, param1:%u\n",
                                         entry.trace_code, entry.tick_count, entry.param0, entry.param1);
                } else {
                        break;
                }

                count++;

        } while (count <= DMUB_TRACE_MAX_READ);

        ASSERT(count <= DMUB_TRACE_MAX_READ);
}

static int dm_set_clockgating_state(void *handle,
                                    enum amd_clockgating_state state)
{
        return 0;
}

static int dm_set_powergating_state(void *handle,
                                    enum amd_powergating_state state)
{
        return 0;
}

/* Prototypes of private functions */
static int dm_early_init(void *handle);

/* Allocate memory for FBC compressed data */
static void amdgpu_dm_fbc_init(struct drm_connector *connector)
{
        struct drm_device *dev = connector->dev;
        struct amdgpu_device *adev = drm_to_adev(dev);
        struct dm_compressor_info *compressor = &adev->dm.compressor;
        struct amdgpu_dm_connector *aconn = to_amdgpu_dm_connector(connector);
        struct drm_display_mode *mode;
        unsigned long max_size = 0;

        if (adev->dm.dc->fbc_compressor == NULL)
                return;

        if (aconn->dc_link->connector_signal != SIGNAL_TYPE_EDP)
                return;

        if (compressor->bo_ptr)
                return;

        list_for_each_entry(mode, &connector->modes, head) {
                if (max_size < mode->htotal * mode->vtotal)
                        max_size = mode->htotal * mode->vtotal;
        }

        if (max_size) {
                int r = amdgpu_bo_create_kernel(adev, max_size * 4, PAGE_SIZE,
                                                AMDGPU_GEM_DOMAIN_GTT, &compressor->bo_ptr,
                                                &compressor->gpu_addr, &compressor->cpu_addr);

                if (r) {
                        DRM_ERROR("DM: Failed to initialize FBC\n");
                } else {
                        adev->dm.dc->ctx->fbc_gpu_addr = compressor->gpu_addr;
                        DRM_INFO("DM: FBC alloc %lu\n", max_size * 4);
                }
        }
}

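/* Audio component callback: hand the requested pin's ELD to the HDA driver. */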
static int amdgpu_dm_audio_component_get_eld(struct device *kdev, int port,
                                             int pipe, bool *enabled,
                                             unsigned char *buf, int max_bytes)
{
        struct drm_device *dev = dev_get_drvdata(kdev);
        struct amdgpu_device *adev = drm_to_adev(dev);
        struct drm_connector *connector;
        struct drm_connector_list_iter conn_iter;
        struct amdgpu_dm_connector *aconnector;
        int ret = 0;

        *enabled = false;

        mutex_lock(&adev->dm.audio_lock);

        drm_connector_list_iter_begin(dev, &conn_iter);
        drm_for_each_connector_iter(connector, &conn_iter) {
                aconnector = to_amdgpu_dm_connector(connector);
                if (aconnector->audio_inst != port)
                        continue;

                *enabled = true;
                ret = drm_eld_size(connector->eld);
                memcpy(buf, connector->eld, min(max_bytes, ret));

                break;
        }
        drm_connector_list_iter_end(&conn_iter);

        mutex_unlock(&adev->dm.audio_lock);

        DRM_DEBUG_KMS("Get ELD : idx=%d ret=%d en=%d\n", port, ret, *enabled);

        return ret;
}

static const struct drm_audio_component_ops amdgpu_dm_audio_component_ops = {
        .get_eld = amdgpu_dm_audio_component_get_eld,
};

static int amdgpu_dm_audio_component_bind(struct device *kdev,
                                          struct device *hda_kdev, void *data)
{
        struct drm_device *dev = dev_get_drvdata(kdev);
        struct amdgpu_device *adev = drm_to_adev(dev);
        struct drm_audio_component *acomp = data;

        acomp->ops = &amdgpu_dm_audio_component_ops;
        acomp->dev = kdev;
        adev->dm.audio_component = acomp;

        return 0;
}

static void amdgpu_dm_audio_component_unbind(struct device *kdev,
                                             struct device *hda_kdev, void *data)
{
        struct drm_device *dev = dev_get_drvdata(kdev);
        struct amdgpu_device *adev = drm_to_adev(dev);
        struct drm_audio_component *acomp = data;

        acomp->ops = NULL;
        acomp->dev = NULL;
        adev->dm.audio_component = NULL;
}

static const struct component_ops amdgpu_dm_audio_component_bind_ops = {
        .bind   = amdgpu_dm_audio_component_bind,
        .unbind = amdgpu_dm_audio_component_unbind,
};

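/* Register the DM audio pins with the audio component framework. */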
static int amdgpu_dm_audio_init(struct amdgpu_device *adev)
{
        int i, ret;

        if (!amdgpu_audio)
                return 0;

        adev->mode_info.audio.enabled = true;

        adev->mode_info.audio.num_pins = adev->dm.dc->res_pool->audio_count;

        for (i = 0; i < adev->mode_info.audio.num_pins; i++) {
                adev->mode_info.audio.pin[i].channels = -1;
                adev->mode_info.audio.pin[i].rate = -1;
                adev->mode_info.audio.pin[i].bits_per_sample = -1;
                adev->mode_info.audio.pin[i].status_bits = 0;
                adev->mode_info.audio.pin[i].category_code = 0;
                adev->mode_info.audio.pin[i].connected = false;
                adev->mode_info.audio.pin[i].id =
                        adev->dm.dc->res_pool->audios[i]->inst;
                adev->mode_info.audio.pin[i].offset = 0;
        }

        ret = component_add(adev->dev, &amdgpu_dm_audio_component_bind_ops);
        if (ret < 0)
                return ret;

        adev->dm.audio_registered = true;

        return 0;
}

static void amdgpu_dm_audio_fini(struct amdgpu_device *adev)
{
        if (!amdgpu_audio)
                return;

        if (!adev->mode_info.audio.enabled)
                return;

        if (adev->dm.audio_registered) {
                component_del(adev->dev, &amdgpu_dm_audio_component_bind_ops);
                adev->dm.audio_registered = false;
        }

        /* TODO: Disable audio? */

        adev->mode_info.audio.enabled = false;
}

static void amdgpu_dm_audio_eld_notify(struct amdgpu_device *adev, int pin)
{
        struct drm_audio_component *acomp = adev->dm.audio_component;

        if (acomp && acomp->audio_ops && acomp->audio_ops->pin_eld_notify) {
                DRM_DEBUG_KMS("Notify ELD: %d\n", pin);

                acomp->audio_ops->pin_eld_notify(acomp->audio_ops->audio_ptr,
                                                 pin, -1);
        }
}

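/*
 * Copy the DMUB firmware, BSS/data and VBIOS images into their framebuffer
 * windows, then initialize the DMUB hardware and wait for its auto-load.
 */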
static int dm_dmub_hw_init(struct amdgpu_device *adev)
{
        const struct dmcub_firmware_header_v1_0 *hdr;
        struct dmub_srv *dmub_srv = adev->dm.dmub_srv;
        struct dmub_srv_fb_info *fb_info = adev->dm.dmub_fb_info;
        const struct firmware *dmub_fw = adev->dm.dmub_fw;
        struct dmcu *dmcu = adev->dm.dc->res_pool->dmcu;
        struct abm *abm = adev->dm.dc->res_pool->abm;
        struct dmub_srv_hw_params hw_params;
        enum dmub_status status;
        const unsigned char *fw_inst_const, *fw_bss_data;
        uint32_t i, fw_inst_const_size, fw_bss_data_size;
        bool has_hw_support;

        if (!dmub_srv)
                /* DMUB isn't supported on the ASIC. */
                return 0;

        if (!fb_info) {
                DRM_ERROR("No framebuffer info for DMUB service.\n");
                return -EINVAL;
        }

        if (!dmub_fw) {
                /* Firmware required for DMUB support. */
                DRM_ERROR("No firmware provided for DMUB.\n");
                return -EINVAL;
        }

        status = dmub_srv_has_hw_support(dmub_srv, &has_hw_support);
        if (status != DMUB_STATUS_OK) {
                DRM_ERROR("Error checking HW support for DMUB: %d\n", status);
                return -EINVAL;
        }

        if (!has_hw_support) {
                DRM_INFO("DMUB unsupported on ASIC\n");
                return 0;
        }

        hdr = (const struct dmcub_firmware_header_v1_0 *)dmub_fw->data;

        fw_inst_const = dmub_fw->data +
                        le32_to_cpu(hdr->header.ucode_array_offset_bytes) +
                        PSP_HEADER_BYTES;

        fw_bss_data = dmub_fw->data +
                      le32_to_cpu(hdr->header.ucode_array_offset_bytes) +
                      le32_to_cpu(hdr->inst_const_bytes);

        /* Copy firmware and bios info into FB memory. */
        fw_inst_const_size = le32_to_cpu(hdr->inst_const_bytes) -
                             PSP_HEADER_BYTES - PSP_FOOTER_BYTES;

        fw_bss_data_size = le32_to_cpu(hdr->bss_data_bytes);

        /* if adev->firmware.load_type == AMDGPU_FW_LOAD_PSP,
         * amdgpu_ucode_init_single_fw will load dmub firmware
         * fw_inst_const part to cw0; otherwise, the firmware back door load
         * will be done by dm_dmub_hw_init
         */
        if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP) {
                memcpy(fb_info->fb[DMUB_WINDOW_0_INST_CONST].cpu_addr, fw_inst_const,
                       fw_inst_const_size);
        }

        if (fw_bss_data_size)
                memcpy(fb_info->fb[DMUB_WINDOW_2_BSS_DATA].cpu_addr,
                       fw_bss_data, fw_bss_data_size);

        /* Copy firmware bios info into FB memory. */
        memcpy(fb_info->fb[DMUB_WINDOW_3_VBIOS].cpu_addr, adev->bios,
               adev->bios_size);

        /* Reset regions that need to be reset. */
        memset(fb_info->fb[DMUB_WINDOW_4_MAILBOX].cpu_addr, 0,
               fb_info->fb[DMUB_WINDOW_4_MAILBOX].size);

        memset(fb_info->fb[DMUB_WINDOW_5_TRACEBUFF].cpu_addr, 0,
               fb_info->fb[DMUB_WINDOW_5_TRACEBUFF].size);

        memset(fb_info->fb[DMUB_WINDOW_6_FW_STATE].cpu_addr, 0,
               fb_info->fb[DMUB_WINDOW_6_FW_STATE].size);

        /* Initialize hardware. */
        memset(&hw_params, 0, sizeof(hw_params));
        hw_params.fb_base = adev->gmc.fb_start;
        hw_params.fb_offset = adev->gmc.aper_base;

        /* backdoor load firmware and trigger dmub running */
        if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP)
                hw_params.load_inst_const = true;

        if (dmcu)
                hw_params.psp_version = dmcu->psp_version;

        for (i = 0; i < fb_info->num_fb; ++i)
                hw_params.fb[i] = &fb_info->fb[i];

        status = dmub_srv_hw_init(dmub_srv, &hw_params);
        if (status != DMUB_STATUS_OK) {
                DRM_ERROR("Error initializing DMUB HW: %d\n", status);
                return -EINVAL;
        }

        /* Wait for firmware load to finish. */
        status = dmub_srv_wait_for_auto_load(dmub_srv, 100000);
        if (status != DMUB_STATUS_OK)
                DRM_WARN("Wait for DMUB auto-load failed: %d\n", status);

        /* Init DMCU and ABM if available. */
        if (dmcu && abm) {
                dmcu->funcs->dmcu_init(dmcu);
                abm->dmcu_is_running = dmcu->funcs->is_dmcu_initialized(dmcu);
        }

        adev->dm.dc->ctx->dmub_srv = dc_dmub_srv_create(adev->dm.dc, dmub_srv);
        if (!adev->dm.dc->ctx->dmub_srv) {
                DRM_ERROR("Couldn't allocate DC DMUB server!\n");
                return -ENOMEM;
        }

        DRM_INFO("DMUB hardware initialized: version=0x%08X\n",
                 adev->dm.dmcub_fw_version);

        return 0;
}

#if defined(CONFIG_DRM_AMD_DC_DCN)
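/*
 * Translate the GMC view of the system aperture, framebuffer and GART page
 * tables into the dc_phy_addr_space_config that DC programs into MMHUB.
 */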
static void mmhub_read_system_context(struct amdgpu_device *adev, struct dc_phy_addr_space_config *pa_config)
{
        uint64_t pt_base;
        uint32_t logical_addr_low;
        uint32_t logical_addr_high;
        uint32_t agp_base, agp_bot, agp_top;
        PHYSICAL_ADDRESS_LOC page_table_start, page_table_end, page_table_base;

        logical_addr_low = min(adev->gmc.fb_start, adev->gmc.agp_start) >> 18;
        pt_base = amdgpu_gmc_pd_addr(adev->gart.bo);

        if (adev->apu_flags & AMD_APU_IS_RAVEN2)
                /*
                 * Raven2 has a HW issue that it is unable to use the vram which
                 * is out of MC_VM_SYSTEM_APERTURE_HIGH_ADDR. So here is the
                 * workaround that increase system aperture high address (add 1)
                 * to get rid of the VM fault and hardware hang.
                 */
                logical_addr_high = max((adev->gmc.fb_end >> 18) + 0x1, adev->gmc.agp_end >> 18);
        else
                logical_addr_high = max(adev->gmc.fb_end, adev->gmc.agp_end) >> 18;

        agp_base = 0;
        agp_bot = adev->gmc.agp_start >> 24;
        agp_top = adev->gmc.agp_end >> 24;

        page_table_start.high_part = (u32)(adev->gmc.gart_start >> 44) & 0xF;
        page_table_start.low_part = (u32)(adev->gmc.gart_start >> 12);
        page_table_end.high_part = (u32)(adev->gmc.gart_end >> 44) & 0xF;
        page_table_end.low_part = (u32)(adev->gmc.gart_end >> 12);
        page_table_base.high_part = upper_32_bits(pt_base) & 0xF;
        page_table_base.low_part = lower_32_bits(pt_base);

        pa_config->system_aperture.start_addr = (uint64_t)logical_addr_low << 18;
        pa_config->system_aperture.end_addr = (uint64_t)logical_addr_high << 18;

        pa_config->system_aperture.agp_base = (uint64_t)agp_base << 24;
        pa_config->system_aperture.agp_bot = (uint64_t)agp_bot << 24;
        pa_config->system_aperture.agp_top = (uint64_t)agp_top << 24;

        pa_config->system_aperture.fb_base = adev->gmc.fb_start;
        pa_config->system_aperture.fb_offset = adev->gmc.aper_base;
        pa_config->system_aperture.fb_top = adev->gmc.fb_end;

        pa_config->gart_config.page_table_start_addr = page_table_start.quad_part << 12;
        pa_config->gart_config.page_table_end_addr = page_table_end.quad_part << 12;
        pa_config->gart_config.page_table_base_addr = page_table_base.quad_part;

        pa_config->is_hvm_enabled = 0;
}

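/*
 * Deferred vblank work: count CRTCs with vblank IRQs enabled and only allow
 * DC idle (MALL stutter) optimizations when none are active.
 */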
static void event_mall_stutter(struct work_struct *work)
{
        struct vblank_workqueue *vblank_work = container_of(work, struct vblank_workqueue, mall_work);
        struct amdgpu_display_manager *dm = vblank_work->dm;

        mutex_lock(&dm->dc_lock);

        if (vblank_work->enable)
                dm->active_vblank_irq_count++;
        else if (dm->active_vblank_irq_count)
                dm->active_vblank_irq_count--;

        dc_allow_idle_optimizations(dm->dc, dm->active_vblank_irq_count == 0);

        DRM_DEBUG_KMS("Allow idle optimizations (MALL): %d\n", dm->active_vblank_irq_count == 0);

        mutex_unlock(&dm->dc_lock);
}

static struct vblank_workqueue *vblank_create_workqueue(struct amdgpu_device *adev, struct dc *dc)
{
        int max_caps = dc->caps.max_links;
        struct vblank_workqueue *vblank_work;
        int i = 0;

        vblank_work = kcalloc(max_caps, sizeof(*vblank_work), GFP_KERNEL);
        if (ZERO_OR_NULL_PTR(vblank_work)) {
                kfree(vblank_work);
                return NULL;
        }

        for (i = 0; i < max_caps; i++)
                INIT_WORK(&vblank_work[i].mall_work, event_mall_stutter);

        return vblank_work;
}
#endif

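/* Top-level DM bring-up: create DC, initialize DMUB, then register the DRM device structures. */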
static int amdgpu_dm_init(struct amdgpu_device *adev)
{
        struct dc_init_data init_data;
#ifdef CONFIG_DRM_AMD_DC_HDCP
        struct dc_callback_init init_params;
#endif
        int r;

        adev->dm.ddev = adev_to_drm(adev);
        adev->dm.adev = adev;

        /* Zero all the fields */
        memset(&init_data, 0, sizeof(init_data));
#ifdef CONFIG_DRM_AMD_DC_HDCP
        memset(&init_params, 0, sizeof(init_params));
#endif

        mutex_init(&adev->dm.dc_lock);
        mutex_init(&adev->dm.audio_lock);
#if defined(CONFIG_DRM_AMD_DC_DCN)
        spin_lock_init(&adev->dm.vblank_lock);
#endif

        if (amdgpu_dm_irq_init(adev)) {
                DRM_ERROR("amdgpu: failed to initialize DM IRQ support.\n");
                goto error;
        }

        init_data.asic_id.chip_family = adev->family;

        init_data.asic_id.pci_revision_id = adev->pdev->revision;
        init_data.asic_id.hw_internal_rev = adev->external_rev_id;

        init_data.asic_id.vram_width = adev->gmc.vram_width;
        /* TODO: initialize init_data.asic_id.vram_type here!!!! */
        init_data.asic_id.atombios_base_address =
                adev->mode_info.atom_context->bios;

        init_data.driver = adev;

        adev->dm.cgs_device = amdgpu_cgs_create_device(adev);

        if (!adev->dm.cgs_device) {
                DRM_ERROR("amdgpu: failed to create cgs device.\n");
                goto error;
        }

        init_data.cgs_device = adev->dm.cgs_device;

        init_data.dce_environment = DCE_ENV_PRODUCTION_DRV;

        switch (adev->asic_type) {
        case CHIP_CARRIZO:
        case CHIP_STONEY:
        case CHIP_RAVEN:
        case CHIP_RENOIR:
                init_data.flags.gpu_vm_support = true;
                if (ASICREV_IS_GREEN_SARDINE(adev->external_rev_id))
                        init_data.flags.disable_dmcu = true;
                break;
#if defined(CONFIG_DRM_AMD_DC_DCN)
        case CHIP_VANGOGH:
                init_data.flags.gpu_vm_support = true;
                break;
#endif
        default:
                break;
        }

        if (amdgpu_dc_feature_mask & DC_FBC_MASK)
                init_data.flags.fbc_support = true;

        if (amdgpu_dc_feature_mask & DC_MULTI_MON_PP_MCLK_SWITCH_MASK)
                init_data.flags.multi_mon_pp_mclk_switch = true;

        if (amdgpu_dc_feature_mask & DC_DISABLE_FRACTIONAL_PWM_MASK)
                init_data.flags.disable_fractional_pwm = true;

        init_data.flags.power_down_display_on_boot = true;

        INIT_LIST_HEAD(&adev->dm.da_list);
        /* Display Core create. */
        adev->dm.dc = dc_create(&init_data);

        if (adev->dm.dc) {
                DRM_INFO("Display Core initialized with v%s!\n", DC_VER);
        } else {
                DRM_ERROR("Display Core failed to initialize with v%s!\n", DC_VER);
                goto error;
        }

        if (amdgpu_dc_debug_mask & DC_DISABLE_PIPE_SPLIT) {
                adev->dm.dc->debug.force_single_disp_pipe_split = false;
                adev->dm.dc->debug.pipe_split_policy = MPC_SPLIT_AVOID;
        }

        if (adev->asic_type != CHIP_CARRIZO && adev->asic_type != CHIP_STONEY)
                adev->dm.dc->debug.disable_stutter = !(amdgpu_pp_feature_mask & PP_STUTTER_MODE);

        if (amdgpu_dc_debug_mask & DC_DISABLE_STUTTER)
                adev->dm.dc->debug.disable_stutter = true;

        if (amdgpu_dc_debug_mask & DC_DISABLE_DSC)
                adev->dm.dc->debug.disable_dsc = true;

        if (amdgpu_dc_debug_mask & DC_DISABLE_CLOCK_GATING)
                adev->dm.dc->debug.disable_clock_gate = true;

        r = dm_dmub_hw_init(adev);
        if (r) {
                DRM_ERROR("DMUB interface failed to initialize: status=%d\n", r);
                goto error;
        }

        dc_hardware_init(adev->dm.dc);

#if defined(CONFIG_DRM_AMD_DC_DCN)
        if (adev->apu_flags) {
                struct dc_phy_addr_space_config pa_config;

                mmhub_read_system_context(adev, &pa_config);

                /* Call the DC init_memory func */
                dc_setup_system_context(adev->dm.dc, &pa_config);
        }
#endif

        adev->dm.freesync_module = mod_freesync_create(adev->dm.dc);
        if (!adev->dm.freesync_module) {
                DRM_ERROR("amdgpu: failed to initialize freesync_module.\n");
        } else {
                DRM_DEBUG_DRIVER("amdgpu: freesync_module init done %p.\n",
                                 adev->dm.freesync_module);
        }

        amdgpu_dm_init_color_mod();

#if defined(CONFIG_DRM_AMD_DC_DCN)
        if (adev->dm.dc->caps.max_links > 0) {
                adev->dm.vblank_workqueue = vblank_create_workqueue(adev, adev->dm.dc);

                if (!adev->dm.vblank_workqueue)
                        DRM_ERROR("amdgpu: failed to initialize vblank_workqueue.\n");
                else
                        DRM_DEBUG_DRIVER("amdgpu: vblank_workqueue init done %p.\n", adev->dm.vblank_workqueue);
        }
#endif

#ifdef CONFIG_DRM_AMD_DC_HDCP
        if (adev->dm.dc->caps.max_links > 0 && adev->asic_type >= CHIP_RAVEN) {
                adev->dm.hdcp_workqueue = hdcp_create_workqueue(adev, &init_params.cp_psp, adev->dm.dc);

                if (!adev->dm.hdcp_workqueue)
                        DRM_ERROR("amdgpu: failed to initialize hdcp_workqueue.\n");
                else
                        DRM_DEBUG_DRIVER("amdgpu: hdcp_workqueue init done %p.\n", adev->dm.hdcp_workqueue);

                dc_init_callbacks(adev->dm.dc, &init_params);
        }
#endif
#if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
        adev->dm.crc_rd_wrk = amdgpu_dm_crtc_secure_display_create_work();
#endif
        if (dc_enable_dmub_notifications(adev->dm.dc)) {
                init_completion(&adev->dm.dmub_aux_transfer_done);
                adev->dm.dmub_notify = kzalloc(sizeof(struct dmub_notification), GFP_KERNEL);
                if (!adev->dm.dmub_notify) {
                        DRM_ERROR("amdgpu: failed to allocate adev->dm.dmub_notify\n");
                        goto error;
                }
                amdgpu_dm_outbox_init(adev);
        }

        if (amdgpu_dm_initialize_drm_device(adev)) {
                DRM_ERROR("amdgpu: failed to initialize sw for display support.\n");
                goto error;
        }

        /* create fake encoders for MST */
        dm_dp_create_fake_mst_encoders(adev);

        /* TODO: Add_display_info? */

        /* TODO use dynamic cursor width */
        adev_to_drm(adev)->mode_config.cursor_width = adev->dm.dc->caps.max_cursor_size;
        adev_to_drm(adev)->mode_config.cursor_height = adev->dm.dc->caps.max_cursor_size;

        if (drm_vblank_init(adev_to_drm(adev), adev->dm.display_indexes_num)) {
                DRM_ERROR("amdgpu: failed to initialize vblank for display support.\n");
                goto error;
        }

        DRM_DEBUG_DRIVER("KMS initialized.\n");

        return 0;
error:
        amdgpu_dm_fini(adev);

        return -EINVAL;
}

static void amdgpu_dm_fini(struct amdgpu_device *adev)
{
        int i;

        for (i = 0; i < adev->dm.display_indexes_num; i++) {
                drm_encoder_cleanup(&adev->dm.mst_encoders[i].base);
        }

        amdgpu_dm_audio_fini(adev);

        amdgpu_dm_destroy_drm_device(&adev->dm);

#if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
        if (adev->dm.crc_rd_wrk) {
                flush_work(&adev->dm.crc_rd_wrk->notify_ta_work);
                kfree(adev->dm.crc_rd_wrk);
                adev->dm.crc_rd_wrk = NULL;
        }
#endif
#ifdef CONFIG_DRM_AMD_DC_HDCP
        if (adev->dm.hdcp_workqueue) {
                hdcp_destroy(&adev->dev->kobj, adev->dm.hdcp_workqueue);
                adev->dm.hdcp_workqueue = NULL;
        }

        if (adev->dm.dc)
                dc_deinit_callbacks(adev->dm.dc);
#endif

#if defined(CONFIG_DRM_AMD_DC_DCN)
        if (adev->dm.vblank_workqueue) {
                adev->dm.vblank_workqueue->dm = NULL;
                kfree(adev->dm.vblank_workqueue);
                adev->dm.vblank_workqueue = NULL;
        }
#endif

        /* This function can be reached from the amdgpu_dm_init() error path
         * before DC has been created, so guard the DC-backed teardown.
         */
        if (adev->dm.dc && adev->dm.dc->ctx->dmub_srv) {
                dc_dmub_srv_destroy(&adev->dm.dc->ctx->dmub_srv);
                adev->dm.dc->ctx->dmub_srv = NULL;
        }

        if (adev->dm.dc && dc_enable_dmub_notifications(adev->dm.dc)) {
                kfree(adev->dm.dmub_notify);
                adev->dm.dmub_notify = NULL;
        }

        if (adev->dm.dmub_bo)
                amdgpu_bo_free_kernel(&adev->dm.dmub_bo,
                                      &adev->dm.dmub_bo_gpu_addr,
                                      &adev->dm.dmub_bo_cpu_addr);

        /* DC Destroy TODO: Replace destroy DAL */
        if (adev->dm.dc)
                dc_destroy(&adev->dm.dc);
        /*
         * TODO: pageflip, vblank interrupt
         *
         * amdgpu_dm_irq_fini(adev);
         */

        if (adev->dm.cgs_device) {
                amdgpu_cgs_destroy_device(adev->dm.cgs_device);
                adev->dm.cgs_device = NULL;
        }
        if (adev->dm.freesync_module) {
                mod_freesync_destroy(adev->dm.freesync_module);
                adev->dm.freesync_module = NULL;
        }

        mutex_destroy(&adev->dm.audio_lock);
        mutex_destroy(&adev->dm.dc_lock);
}

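/*
 * Request and validate DMCU firmware for the ASICs that still need it;
 * DMUB-based ASICs return early since their firmware is handled separately.
 */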
1368 static int load_dmcu_fw(struct amdgpu_device *adev)
1369 {
1370         const char *fw_name_dmcu = NULL;
1371         int r;
1372         const struct dmcu_firmware_header_v1_0 *hdr;
1373
1374         switch(adev->asic_type) {
1375 #if defined(CONFIG_DRM_AMD_DC_SI)
1376         case CHIP_TAHITI:
1377         case CHIP_PITCAIRN:
1378         case CHIP_VERDE:
1379         case CHIP_OLAND:
1380 #endif
1381         case CHIP_BONAIRE:
1382         case CHIP_HAWAII:
1383         case CHIP_KAVERI:
1384         case CHIP_KABINI:
1385         case CHIP_MULLINS:
1386         case CHIP_TONGA:
1387         case CHIP_FIJI:
1388         case CHIP_CARRIZO:
1389         case CHIP_STONEY:
1390         case CHIP_POLARIS11:
1391         case CHIP_POLARIS10:
1392         case CHIP_POLARIS12:
1393         case CHIP_VEGAM:
1394         case CHIP_VEGA10:
1395         case CHIP_VEGA12:
1396         case CHIP_VEGA20:
1397         case CHIP_NAVI10:
1398         case CHIP_NAVI14:
1399         case CHIP_RENOIR:
1400         case CHIP_SIENNA_CICHLID:
1401         case CHIP_NAVY_FLOUNDER:
1402         case CHIP_DIMGREY_CAVEFISH:
1403         case CHIP_VANGOGH:
1404                 return 0;
1405         case CHIP_NAVI12:
1406                 fw_name_dmcu = FIRMWARE_NAVI12_DMCU;
1407                 break;
1408         case CHIP_RAVEN:
1409                 if (ASICREV_IS_PICASSO(adev->external_rev_id))
1410                         fw_name_dmcu = FIRMWARE_RAVEN_DMCU;
1411                 else if (ASICREV_IS_RAVEN2(adev->external_rev_id))
1412                         fw_name_dmcu = FIRMWARE_RAVEN_DMCU;
1413                 else
1414                         return 0;
1415                 break;
1416         default:
1417                 DRM_ERROR("Unsupported ASIC type: 0x%X\n", adev->asic_type);
1418                 return -EINVAL;
1419         }
1420
1421         if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP) {
1422                 DRM_DEBUG_KMS("dm: DMCU firmware not supported on direct or SMU loading\n");
1423                 return 0;
1424         }
1425
1426         r = request_firmware_direct(&adev->dm.fw_dmcu, fw_name_dmcu, adev->dev);
1427         if (r == -ENOENT) {
1428                 /* DMCU firmware is not necessary, so don't raise a fuss if it's missing */
1429                 DRM_DEBUG_KMS("dm: DMCU firmware not found\n");
1430                 adev->dm.fw_dmcu = NULL;
1431                 return 0;
1432         }
1433         if (r) {
1434                 dev_err(adev->dev, "amdgpu_dm: Can't load firmware \"%s\"\n",
1435                         fw_name_dmcu);
1436                 return r;
1437         }
1438
1439         r = amdgpu_ucode_validate(adev->dm.fw_dmcu);
1440         if (r) {
1441                 dev_err(adev->dev, "amdgpu_dm: Can't validate firmware \"%s\"\n",
1442                         fw_name_dmcu);
1443                 release_firmware(adev->dm.fw_dmcu);
1444                 adev->dm.fw_dmcu = NULL;
1445                 return r;
1446         }
1447
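        /* The DMCU image carries the ERAM and interrupt-vector sections in a
         * single blob: both ucode entries below point at the same firmware,
         * and the header sizes split it (ERAM = ucode_size_bytes -
         * intv_size_bytes, INTV = intv_size_bytes).
         */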
1448         hdr = (const struct dmcu_firmware_header_v1_0 *)adev->dm.fw_dmcu->data;
1449         adev->firmware.ucode[AMDGPU_UCODE_ID_DMCU_ERAM].ucode_id = AMDGPU_UCODE_ID_DMCU_ERAM;
1450         adev->firmware.ucode[AMDGPU_UCODE_ID_DMCU_ERAM].fw = adev->dm.fw_dmcu;
1451         adev->firmware.fw_size +=
1452                 ALIGN(le32_to_cpu(hdr->header.ucode_size_bytes) - le32_to_cpu(hdr->intv_size_bytes), PAGE_SIZE);
1453
1454         adev->firmware.ucode[AMDGPU_UCODE_ID_DMCU_INTV].ucode_id = AMDGPU_UCODE_ID_DMCU_INTV;
1455         adev->firmware.ucode[AMDGPU_UCODE_ID_DMCU_INTV].fw = adev->dm.fw_dmcu;
1456         adev->firmware.fw_size +=
1457                 ALIGN(le32_to_cpu(hdr->intv_size_bytes), PAGE_SIZE);
1458
1459         adev->dm.dmcu_fw_version = le32_to_cpu(hdr->header.ucode_version);
1460
1461         DRM_DEBUG_KMS("PSP loading DMCU firmware\n");
1462
1463         return 0;
1464 }
1465
1466 static uint32_t amdgpu_dm_dmub_reg_read(void *ctx, uint32_t address)
1467 {
1468         struct amdgpu_device *adev = ctx;
1469
1470         return dm_read_reg(adev->dm.dc->ctx, address);
1471 }
1472
1473 static void amdgpu_dm_dmub_reg_write(void *ctx, uint32_t address,
1474                                      uint32_t value)
1475 {
1476         struct amdgpu_device *adev = ctx;
1477
1478         return dm_write_reg(adev->dm.dc->ctx, address, value);
1479 }
1480
1481 static int dm_dmub_sw_init(struct amdgpu_device *adev)
1482 {
1483         struct dmub_srv_create_params create_params;
1484         struct dmub_srv_region_params region_params;
1485         struct dmub_srv_region_info region_info;
1486         struct dmub_srv_fb_params fb_params;
1487         struct dmub_srv_fb_info *fb_info;
1488         struct dmub_srv *dmub_srv;
1489         const struct dmcub_firmware_header_v1_0 *hdr;
1490         const char *fw_name_dmub;
1491         enum dmub_asic dmub_asic;
1492         enum dmub_status status;
1493         int r;
1494
1495         switch (adev->asic_type) {
1496         case CHIP_RENOIR:
1497                 dmub_asic = DMUB_ASIC_DCN21;
1498                 fw_name_dmub = FIRMWARE_RENOIR_DMUB;
1499                 if (ASICREV_IS_GREEN_SARDINE(adev->external_rev_id))
1500                         fw_name_dmub = FIRMWARE_GREEN_SARDINE_DMUB;
1501                 break;
1502         case CHIP_SIENNA_CICHLID:
1503                 dmub_asic = DMUB_ASIC_DCN30;
1504                 fw_name_dmub = FIRMWARE_SIENNA_CICHLID_DMUB;
1505                 break;
1506         case CHIP_NAVY_FLOUNDER:
1507                 dmub_asic = DMUB_ASIC_DCN30;
1508                 fw_name_dmub = FIRMWARE_NAVY_FLOUNDER_DMUB;
1509                 break;
1510         case CHIP_VANGOGH:
1511                 dmub_asic = DMUB_ASIC_DCN301;
1512                 fw_name_dmub = FIRMWARE_VANGOGH_DMUB;
1513                 break;
1514         case CHIP_DIMGREY_CAVEFISH:
1515                 dmub_asic = DMUB_ASIC_DCN302;
1516                 fw_name_dmub = FIRMWARE_DIMGREY_CAVEFISH_DMUB;
1517                 break;
1519         default:
1520                 /* ASIC doesn't support DMUB. */
1521                 return 0;
1522         }
1523
1524         r = request_firmware_direct(&adev->dm.dmub_fw, fw_name_dmub, adev->dev);
1525         if (r) {
1526                 DRM_ERROR("DMUB firmware loading failed: %d\n", r);
1527                 return 0;
1528         }
1529
1530         r = amdgpu_ucode_validate(adev->dm.dmub_fw);
1531         if (r) {
1532                 DRM_ERROR("Couldn't validate DMUB firmware: %d\n", r);
1533                 return 0;
1534         }
1535
1536         hdr = (const struct dmcub_firmware_header_v1_0 *)adev->dm.dmub_fw->data;
1537
1538         adev->dm.dmcub_fw_version = le32_to_cpu(hdr->header.ucode_version);
1539
1540         if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) {
1541                 adev->firmware.ucode[AMDGPU_UCODE_ID_DMCUB].ucode_id =
1542                         AMDGPU_UCODE_ID_DMCUB;
1543                 adev->firmware.ucode[AMDGPU_UCODE_ID_DMCUB].fw =
1544                         adev->dm.dmub_fw;
1545                 adev->firmware.fw_size +=
1546                         ALIGN(le32_to_cpu(hdr->inst_const_bytes), PAGE_SIZE);
1547
1548                 DRM_INFO("Loading DMUB firmware via PSP: version=0x%08X\n",
1549                          adev->dm.dmcub_fw_version);
1550         }
1551
1552         adev->dm.dmub_srv = kzalloc(sizeof(*adev->dm.dmub_srv), GFP_KERNEL);
1553         dmub_srv = adev->dm.dmub_srv;
1554
1555         if (!dmub_srv) {
1556                 DRM_ERROR("Failed to allocate DMUB service!\n");
1557                 return -ENOMEM;
1558         }
1559
1560         memset(&create_params, 0, sizeof(create_params));
1561         create_params.user_ctx = adev;
1562         create_params.funcs.reg_read = amdgpu_dm_dmub_reg_read;
1563         create_params.funcs.reg_write = amdgpu_dm_dmub_reg_write;
1564         create_params.asic = dmub_asic;
1565
1566         /* Create the DMUB service. */
1567         status = dmub_srv_create(dmub_srv, &create_params);
1568         if (status != DMUB_STATUS_OK) {
1569                 DRM_ERROR("Error creating DMUB service: %d\n", status);
1570                 return -EINVAL;
1571         }
1572
1573         /* Calculate the size of all the regions for the DMUB service. */
1574         memset(&region_params, 0, sizeof(region_params));
1575
1576         region_params.inst_const_size = le32_to_cpu(hdr->inst_const_bytes) -
1577                                         PSP_HEADER_BYTES - PSP_FOOTER_BYTES;
1578         region_params.bss_data_size = le32_to_cpu(hdr->bss_data_bytes);
1579         region_params.vbios_size = adev->bios_size;
1580         region_params.fw_bss_data = region_params.bss_data_size ?
1581                 adev->dm.dmub_fw->data +
1582                 le32_to_cpu(hdr->header.ucode_array_offset_bytes) +
1583                 le32_to_cpu(hdr->inst_const_bytes) : NULL;
1584         region_params.fw_inst_const =
1585                 adev->dm.dmub_fw->data +
1586                 le32_to_cpu(hdr->header.ucode_array_offset_bytes) +
1587                 PSP_HEADER_BYTES;
1588
1589         status = dmub_srv_calc_region_info(dmub_srv, &region_params,
1590                                            &region_info);
1591
1592         if (status != DMUB_STATUS_OK) {
1593                 DRM_ERROR("Error calculating DMUB region info: %d\n", status);
1594                 return -EINVAL;
1595         }
1596
1597         /*
1598          * Allocate a framebuffer based on the total size of all the regions.
1599          * TODO: Move this into GART.
1600          */
1601         r = amdgpu_bo_create_kernel(adev, region_info.fb_size, PAGE_SIZE,
1602                                     AMDGPU_GEM_DOMAIN_VRAM, &adev->dm.dmub_bo,
1603                                     &adev->dm.dmub_bo_gpu_addr,
1604                                     &adev->dm.dmub_bo_cpu_addr);
1605         if (r)
1606                 return r;
1607
1608         /* Rebase the regions on the framebuffer address. */
1609         memset(&fb_params, 0, sizeof(fb_params));
1610         fb_params.cpu_addr = adev->dm.dmub_bo_cpu_addr;
1611         fb_params.gpu_addr = adev->dm.dmub_bo_gpu_addr;
1612         fb_params.region_info = &region_info;
1613
1614         adev->dm.dmub_fb_info =
1615                 kzalloc(sizeof(*adev->dm.dmub_fb_info), GFP_KERNEL);
1616         fb_info = adev->dm.dmub_fb_info;
1617
1618         if (!fb_info) {
1619                 DRM_ERROR(
1620                         "Failed to allocate framebuffer info for DMUB service!\n");
1621                 return -ENOMEM;
1622         }
1623
1624         status = dmub_srv_calc_fb_info(dmub_srv, &fb_params, fb_info);
1625         if (status != DMUB_STATUS_OK) {
1626                 DRM_ERROR("Error calculating DMUB FB info: %d\n", status);
1627                 return -EINVAL;
1628         }
1629
1630         return 0;
1631 }
1632
1633 static int dm_sw_init(void *handle)
1634 {
1635         struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1636         int r;
1637
1638         r = dm_dmub_sw_init(adev);
1639         if (r)
1640                 return r;
1641
1642         return load_dmcu_fw(adev);
1643 }
1644
1645 static int dm_sw_fini(void *handle)
1646 {
1647         struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1648
1649         kfree(adev->dm.dmub_fb_info);
1650         adev->dm.dmub_fb_info = NULL;
1651
1652         if (adev->dm.dmub_srv) {
1653                 dmub_srv_destroy(adev->dm.dmub_srv);
1654                 adev->dm.dmub_srv = NULL;
1655         }
1656
1657         release_firmware(adev->dm.dmub_fw);
1658         adev->dm.dmub_fw = NULL;
1659
1660         release_firmware(adev->dm.fw_dmcu);
1661         adev->dm.fw_dmcu = NULL;
1662
1663         return 0;
1664 }
1665
1666 static int detect_mst_link_for_all_connectors(struct drm_device *dev)
1667 {
1668         struct amdgpu_dm_connector *aconnector;
1669         struct drm_connector *connector;
1670         struct drm_connector_list_iter iter;
1671         int ret = 0;
1672
1673         drm_connector_list_iter_begin(dev, &iter);
1674         drm_for_each_connector_iter(connector, &iter) {
1675                 aconnector = to_amdgpu_dm_connector(connector);
1676                 if (aconnector->dc_link->type == dc_connection_mst_branch &&
1677                     aconnector->mst_mgr.aux) {
1678                         DRM_DEBUG_DRIVER("DM_MST: starting TM on aconnector: %p [id: %d]\n",
1679                                          aconnector,
1680                                          aconnector->base.base.id);
1681
1682                         ret = drm_dp_mst_topology_mgr_set_mst(&aconnector->mst_mgr, true);
1683                         if (ret < 0) {
1684                                 DRM_ERROR("DM_MST: Failed to start MST\n");
1685                                 aconnector->dc_link->type =
1686                                         dc_connection_single;
1687                                 break;
1688                         }
1689                 }
1690         }
1691         drm_connector_list_iter_end(&iter);
1692
1693         return ret;
1694 }
1695
1696 static int dm_late_init(void *handle)
1697 {
1698         struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1699
1700         struct dmcu_iram_parameters params;
1701         unsigned int linear_lut[16];
1702         int i;
1703         struct dmcu *dmcu = NULL;
1704         bool ret = true;
1705
1706         dmcu = adev->dm.dc->res_pool->dmcu;
1707
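        /* Build a linear (identity) backlight LUT: 16 evenly spaced points
         * from 0 to 0xFFFF.
         */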
1708         for (i = 0; i < 16; i++)
1709                 linear_lut[i] = 0xFFFF * i / 15;
1710
1711         params.set = 0;
1712         params.backlight_ramping_start = 0xCCCC;
1713         params.backlight_ramping_reduction = 0xCCCCCCCC;
1714         params.backlight_lut_array_size = 16;
1715         params.backlight_lut_array = linear_lut;
1716
1717         /* Min backlight level after ABM reduction; don't allow below 1%.
1718          * 0xFFFF * 0.01 = 0x28F
1719          */
1720         params.min_abm_backlight = 0x28F;
1721
1722         /* When ABM is implemented on DMCUB (ABM 2.4 and up), the dmcu
1723          * object will be NULL.
1724          */
1726         if (dmcu)
1727                 ret = dmcu_load_iram(dmcu, params);
1728         else if (adev->dm.dc->ctx->dmub_srv)
1729                 ret = dmub_init_abm_config(adev->dm.dc->res_pool, params);
1730
1731         if (!ret)
1732                 return -EINVAL;
1733
1734         return detect_mst_link_for_all_connectors(adev_to_drm(adev));
1735 }
1736
1737 static void s3_handle_mst(struct drm_device *dev, bool suspend)
1738 {
1739         struct amdgpu_dm_connector *aconnector;
1740         struct drm_connector *connector;
1741         struct drm_connector_list_iter iter;
1742         struct drm_dp_mst_topology_mgr *mgr;
1743         int ret;
1744         bool need_hotplug = false;
1745
1746         drm_connector_list_iter_begin(dev, &iter);
1747         drm_for_each_connector_iter(connector, &iter) {
1748                 aconnector = to_amdgpu_dm_connector(connector);
1749                 if (aconnector->dc_link->type != dc_connection_mst_branch ||
1750                     aconnector->mst_port)
1751                         continue;
1752
1753                 mgr = &aconnector->mst_mgr;
1754
1755                 if (suspend) {
1756                         drm_dp_mst_topology_mgr_suspend(mgr);
1757                 } else {
1758                         ret = drm_dp_mst_topology_mgr_resume(mgr, true);
1759                         if (ret < 0) {
1760                                 drm_dp_mst_topology_mgr_set_mst(mgr, false);
1761                                 need_hotplug = true;
1762                         }
1763                 }
1764         }
1765         drm_connector_list_iter_end(&iter);
1766
1767         if (need_hotplug)
1768                 drm_kms_helper_hotplug_event(dev);
1769 }
1770
1771 static int amdgpu_dm_smu_write_watermarks_table(struct amdgpu_device *adev)
1772 {
1773         struct smu_context *smu = &adev->smu;
1774         int ret = 0;
1775
1776         if (!is_support_sw_smu(adev))
1777                 return 0;
1778
1779         /* This interface is for dGPU Navi1x. The Linux dc-pplib interface
1780          * depends on the Windows driver dc implementation.
1781          * For Navi1x, the clock settings of dcn watermarks are fixed and
1782          * should be passed to smu during boot up and resume from s3.
1783          * Boot up: dc calculates the dcn watermark clock settings within
1784          * dc_create (dcn20_resource_construct), then calls the pplib
1785          * functions below to pass the settings to smu:
1786          * smu_set_watermarks_for_clock_ranges
1787          * smu_set_watermarks_table
1788          * navi10_set_watermarks_table
1789          * smu_write_watermarks_table
1790          *
1791          * For Renoir, the dcn watermark clock settings are also fixed values.
1792          * dc implements a different flow for the Windows driver:
1793          * dc_hardware_init / dc_set_power_state
1794          * dcn10_init_hw
1795          * notify_wm_ranges
1796          * set_wm_ranges
1797          * -- Linux
1798          * smu_set_watermarks_for_clock_ranges
1799          * renoir_set_watermarks_table
1800          * smu_write_watermarks_table
1801          *
1802          * For Linux,
1803          * dc_hardware_init -> amdgpu_dm_init
1804          * dc_set_power_state --> dm_resume
1805          *
1806          * Therefore, this function applies to Navi10/12/14 but not Renoir.
1807          */
1809         switch (adev->asic_type) {
1810         case CHIP_NAVI10:
1811         case CHIP_NAVI14:
1812         case CHIP_NAVI12:
1813                 break;
1814         default:
1815                 return 0;
1816         }
1817
1818         ret = smu_write_watermarks_table(smu);
1819         if (ret) {
1820                 DRM_ERROR("Failed to update WMTABLE!\n");
1821                 return ret;
1822         }
1823
1824         return 0;
1825 }
1826
1827 /**
1828  * dm_hw_init() - Initialize DC device
1829  * @handle: The base driver device containing the amdgpu_dm device.
1830  *
1831  * Initialize the &struct amdgpu_display_manager device. This involves calling
1832  * the initializers of each DM component, then populating the struct with them.
1833  *
1834  * Although the function implies hardware initialization, both hardware and
1835  * software are initialized here. Splitting them out to their relevant init
1836  * hooks is a future TODO item.
1837  *
1838  * Some notable things that are initialized here:
1839  *
1840  * - Display Core, both software and hardware
1841  * - DC modules that we need (freesync and color management)
1842  * - DRM software states
1843  * - Interrupt sources and handlers
1844  * - Vblank support
1845  * - Debug FS entries, if enabled
1846  */
1847 static int dm_hw_init(void *handle)
1848 {
1849         struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1850         /* Create DAL display manager */
1851         amdgpu_dm_init(adev);
1852         amdgpu_dm_hpd_init(adev);
1853
1854         return 0;
1855 }
1856
1857 /**
1858  * dm_hw_fini() - Teardown DC device
1859  * @handle: The base driver device containing the amdgpu_dm device.
1860  *
1861  * Teardown components within &struct amdgpu_display_manager that require
1862  * cleanup. This involves cleaning up the DRM device, DC, and any modules that
1863  * were loaded. Also flush IRQ workqueues and disable them.
1864  */
1865 static int dm_hw_fini(void *handle)
1866 {
1867         struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1868
1869         amdgpu_dm_hpd_fini(adev);
1870
1871         amdgpu_dm_irq_fini(adev);
1872         amdgpu_dm_fini(adev);
1873         return 0;
1874 }
1875
1877 static int dm_enable_vblank(struct drm_crtc *crtc);
1878 static void dm_disable_vblank(struct drm_crtc *crtc);
1879
1880 static void dm_gpureset_toggle_interrupts(struct amdgpu_device *adev,
1881                                  struct dc_state *state, bool enable)
1882 {
1883         enum dc_irq_source irq_source;
1884         struct amdgpu_crtc *acrtc;
1885         int rc = -EBUSY;
1886         int i = 0;
1887
1888         for (i = 0; i < state->stream_count; i++) {
1889                 acrtc = get_crtc_by_otg_inst(
1890                                 adev, state->stream_status[i].primary_otg_inst);
1891
1892                 if (acrtc && state->stream_status[i].plane_count != 0) {
1893                         irq_source = IRQ_TYPE_PFLIP + acrtc->otg_inst;
1894                         rc = dc_interrupt_set(adev->dm.dc, irq_source, enable) ? 0 : -EBUSY;
1895                         DRM_DEBUG_VBL("crtc %d - pflip irq %sabling: r=%d\n",
1896                                       acrtc->crtc_id, enable ? "en" : "dis", rc);
1897                         if (rc)
1898                                 DRM_WARN("Failed to %s pflip interrupts\n",
1899                                          enable ? "enable" : "disable");
1900
1901                         if (enable) {
1902                                 rc = dm_enable_vblank(&acrtc->base);
1903                                 if (rc)
1904                                         DRM_WARN("Failed to enable vblank interrupts\n");
1905                         } else {
1906                                 dm_disable_vblank(&acrtc->base);
1907                         }
1908
1909                 }
1910         }
1912 }
1913
1914 static enum dc_status amdgpu_dm_commit_zero_streams(struct dc *dc)
1915 {
1916         struct dc_state *context = NULL;
1917         enum dc_status res = DC_ERROR_UNEXPECTED;
1918         int i;
1919         struct dc_stream_state *del_streams[MAX_PIPES];
1920         int del_streams_count = 0;
1921
1922         memset(del_streams, 0, sizeof(del_streams));
1923
1924         context = dc_create_state(dc);
1925         if (context == NULL)
1926                 goto context_alloc_fail;
1927
1928         dc_resource_state_copy_construct_current(dc, context);
1929
1930         /* First remove from context all streams */
1931         for (i = 0; i < context->stream_count; i++) {
1932                 struct dc_stream_state *stream = context->streams[i];
1933
1934                 del_streams[del_streams_count++] = stream;
1935         }
1936
1937         /* Remove all planes for removed streams and then remove the streams */
1938         for (i = 0; i < del_streams_count; i++) {
1939                 if (!dc_rem_all_planes_for_stream(dc, del_streams[i], context)) {
1940                         res = DC_FAIL_DETACH_SURFACES;
1941                         goto fail;
1942                 }
1943
1944                 res = dc_remove_stream_from_ctx(dc, context, del_streams[i]);
1945                 if (res != DC_OK)
1946                         goto fail;
1947         }
1948
1950         res = dc_validate_global_state(dc, context, false);
1951
1952         if (res != DC_OK) {
1953                 DRM_ERROR("%s:resource validation failed, dc_status:%d\n", __func__, res);
1954                 goto fail;
1955         }
1956
1957         res = dc_commit_state(dc, context);
1958
1959 fail:
1960         dc_release_state(context);
1961
1962 context_alloc_fail:
1963         return res;
1964 }
1965
1966 static int dm_suspend(void *handle)
1967 {
1968         struct amdgpu_device *adev = handle;
1969         struct amdgpu_display_manager *dm = &adev->dm;
1970         int ret = 0;
1971
1972         if (amdgpu_in_reset(adev)) {
1973                 mutex_lock(&dm->dc_lock);
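                /* Note: dc_lock is intentionally left held across the GPU
                 * reset; the matching unlock is in the amdgpu_in_reset() path
                 * of dm_resume().
                 */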
1974
1975 #if defined(CONFIG_DRM_AMD_DC_DCN)
1976                 dc_allow_idle_optimizations(adev->dm.dc, false);
1977 #endif
1978
1979                 dm->cached_dc_state = dc_copy_state(dm->dc->current_state);
1980
1981                 dm_gpureset_toggle_interrupts(adev, dm->cached_dc_state, false);
1982
1983                 amdgpu_dm_commit_zero_streams(dm->dc);
1984
1985                 amdgpu_dm_irq_suspend(adev);
1986
1987                 return ret;
1988         }
1989
1990 #ifdef CONFIG_DRM_AMD_SECURE_DISPLAY
1991         amdgpu_dm_crtc_secure_display_suspend(adev);
1992 #endif
1993         WARN_ON(adev->dm.cached_state);
1994         adev->dm.cached_state = drm_atomic_helper_suspend(adev_to_drm(adev));
1995
1996         s3_handle_mst(adev_to_drm(adev), true);
1997
1998         amdgpu_dm_irq_suspend(adev);
1999
2001         dc_set_power_state(dm->dc, DC_ACPI_CM_POWER_STATE_D3);
2002
2003         return 0;
2004 }
2005
2006 static struct amdgpu_dm_connector *
2007 amdgpu_dm_find_first_crtc_matching_connector(struct drm_atomic_state *state,
2008                                              struct drm_crtc *crtc)
2009 {
2010         uint32_t i;
2011         struct drm_connector_state *new_con_state;
2012         struct drm_connector *connector;
2013         struct drm_crtc *crtc_from_state;
2014
2015         for_each_new_connector_in_state(state, connector, new_con_state, i) {
2016                 crtc_from_state = new_con_state->crtc;
2017
2018                 if (crtc_from_state == crtc)
2019                         return to_amdgpu_dm_connector(connector);
2020         }
2021
2022         return NULL;
2023 }
2024
2025 static void emulated_link_detect(struct dc_link *link)
2026 {
2027         struct dc_sink_init_data sink_init_data = { 0 };
2028         struct display_sink_capability sink_caps = { 0 };
2029         enum dc_edid_status edid_status;
2030         struct dc_context *dc_ctx = link->ctx;
2031         struct dc_sink *sink = NULL;
2032         struct dc_sink *prev_sink = NULL;
2033
2034         link->type = dc_connection_none;
2035         prev_sink = link->local_sink;
2036
2037         if (prev_sink)
2038                 dc_sink_release(prev_sink);
2039
2040         switch (link->connector_signal) {
2041         case SIGNAL_TYPE_HDMI_TYPE_A: {
2042                 sink_caps.transaction_type = DDC_TRANSACTION_TYPE_I2C;
2043                 sink_caps.signal = SIGNAL_TYPE_HDMI_TYPE_A;
2044                 break;
2045         }
2046
2047         case SIGNAL_TYPE_DVI_SINGLE_LINK: {
2048                 sink_caps.transaction_type = DDC_TRANSACTION_TYPE_I2C;
2049                 sink_caps.signal = SIGNAL_TYPE_DVI_SINGLE_LINK;
2050                 break;
2051         }
2052
2053         case SIGNAL_TYPE_DVI_DUAL_LINK: {
2054                 sink_caps.transaction_type = DDC_TRANSACTION_TYPE_I2C;
2055                 sink_caps.signal = SIGNAL_TYPE_DVI_DUAL_LINK;
2056                 break;
2057         }
2058
2059         case SIGNAL_TYPE_LVDS: {
2060                 sink_caps.transaction_type = DDC_TRANSACTION_TYPE_I2C;
2061                 sink_caps.signal = SIGNAL_TYPE_LVDS;
2062                 break;
2063         }
2064
2065         case SIGNAL_TYPE_EDP: {
2066                 sink_caps.transaction_type =
2067                         DDC_TRANSACTION_TYPE_I2C_OVER_AUX;
2068                 sink_caps.signal = SIGNAL_TYPE_EDP;
2069                 break;
2070         }
2071
2072         case SIGNAL_TYPE_DISPLAY_PORT: {
2073                 sink_caps.transaction_type =
2074                         DDC_TRANSACTION_TYPE_I2C_OVER_AUX;
2075                 sink_caps.signal = SIGNAL_TYPE_VIRTUAL;
2076                 break;
2077         }
2078
2079         default:
2080                 DC_ERROR("Invalid connector type! signal:%d\n",
2081                         link->connector_signal);
2082                 return;
2083         }
2084
2085         sink_init_data.link = link;
2086         sink_init_data.sink_signal = sink_caps.signal;
2087
2088         sink = dc_sink_create(&sink_init_data);
2089         if (!sink) {
2090                 DC_ERROR("Failed to create sink!\n");
2091                 return;
2092         }
2093
2094         /* dc_sink_create returns a new reference */
2095         link->local_sink = sink;
2096
2097         edid_status = dm_helpers_read_local_edid(
2098                         link->ctx,
2099                         link,
2100                         sink);
2101
2102         if (edid_status != EDID_OK)
2103                 DC_ERROR("Failed to read EDID\n");
2105 }
2106
2107 static void dm_gpureset_commit_state(struct dc_state *dc_state,
2108                                      struct amdgpu_display_manager *dm)
2109 {
2110         struct {
2111                 struct dc_surface_update surface_updates[MAX_SURFACES];
2112                 struct dc_plane_info plane_infos[MAX_SURFACES];
2113                 struct dc_scaling_info scaling_infos[MAX_SURFACES];
2114                 struct dc_flip_addrs flip_addrs[MAX_SURFACES];
2115                 struct dc_stream_update stream_update;
2116         } *bundle;
2117         int k, m;
2118
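        /* The update bundle is heap-allocated; with its MAX_SURFACES-sized
         * arrays it is too large to live on the kernel stack.
         */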
2119         bundle = kzalloc(sizeof(*bundle), GFP_KERNEL);
2120
2121         if (!bundle) {
2122                 dm_error("Failed to allocate update bundle\n");
2123                 goto cleanup;
2124         }
2125
2126         for (k = 0; k < dc_state->stream_count; k++) {
2127                 bundle->stream_update.stream = dc_state->streams[k];
2128
2129                 for (m = 0; m < dc_state->stream_status->plane_count; m++) {
2130                         bundle->surface_updates[m].surface =
2131                                 dc_state->stream_status->plane_states[m];
2132                         bundle->surface_updates[m].surface->force_full_update =
2133                                 true;
2134                 }
2135                 dc_commit_updates_for_stream(
2136                         dm->dc, bundle->surface_updates,
2137                         dc_state->stream_status->plane_count,
2138                         dc_state->streams[k], &bundle->stream_update, dc_state);
2139         }
2140
2141 cleanup:
2142         kfree(bundle);
2145 }
2146
2147 static void dm_set_dpms_off(struct dc_link *link)
2148 {
2149         struct dc_stream_state *stream_state;
2150         struct amdgpu_dm_connector *aconnector = link->priv;
2151         struct amdgpu_device *adev = drm_to_adev(aconnector->base.dev);
2152         struct dc_stream_update stream_update;
2153         bool dpms_off = true;
2154
2155         memset(&stream_update, 0, sizeof(stream_update));
2156         stream_update.dpms_off = &dpms_off;
2157
2158         mutex_lock(&adev->dm.dc_lock);
2159         stream_state = dc_stream_find_from_link(link);
2160
2161         if (stream_state == NULL) {
2162                 DRM_DEBUG_DRIVER("Error finding stream state associated with link!\n");
2163                 mutex_unlock(&adev->dm.dc_lock);
2164                 return;
2165         }
2166
2167         stream_update.stream = stream_state;
2168         dc_commit_updates_for_stream(stream_state->ctx->dc, NULL, 0,
2169                                      stream_state, &stream_update,
2170                                      stream_state->ctx->dc->current_state);
2171         mutex_unlock(&adev->dm.dc_lock);
2172 }
2173
2174 static int dm_resume(void *handle)
2175 {
2176         struct amdgpu_device *adev = handle;
2177         struct drm_device *ddev = adev_to_drm(adev);
2178         struct amdgpu_display_manager *dm = &adev->dm;
2179         struct amdgpu_dm_connector *aconnector;
2180         struct drm_connector *connector;
2181         struct drm_connector_list_iter iter;
2182         struct drm_crtc *crtc;
2183         struct drm_crtc_state *new_crtc_state;
2184         struct dm_crtc_state *dm_new_crtc_state;
2185         struct drm_plane *plane;
2186         struct drm_plane_state *new_plane_state;
2187         struct dm_plane_state *dm_new_plane_state;
2188         struct dm_atomic_state *dm_state = to_dm_atomic_state(dm->atomic_obj.state);
2189         enum dc_connection_type new_connection_type = dc_connection_none;
2190         struct dc_state *dc_state;
2191         int i, r, j;
2192
2193         if (amdgpu_in_reset(adev)) {
2194                 dc_state = dm->cached_dc_state;
2195
2196                 r = dm_dmub_hw_init(adev);
2197                 if (r)
2198                         DRM_ERROR("DMUB interface failed to initialize: status=%d\n", r);
2199
2200                 dc_set_power_state(dm->dc, DC_ACPI_CM_POWER_STATE_D0);
2201                 dc_resume(dm->dc);
2202
2203                 amdgpu_dm_irq_resume_early(adev);
2204
2205                 for (i = 0; i < dc_state->stream_count; i++) {
2206                         dc_state->streams[i]->mode_changed = true;
2207                         for (j = 0; j < dc_state->stream_status->plane_count; j++) {
2208                                 dc_state->stream_status->plane_states[j]->update_flags.raw
2209                                         = 0xffffffff;
2210                         }
2211                 }
2212
2213                 WARN_ON(!dc_commit_state(dm->dc, dc_state));
2214
2215                 dm_gpureset_commit_state(dm->cached_dc_state, dm);
2216
2217                 dm_gpureset_toggle_interrupts(adev, dm->cached_dc_state, true);
2218
2219                 dc_release_state(dm->cached_dc_state);
2220                 dm->cached_dc_state = NULL;
2221
2222                 amdgpu_dm_irq_resume_late(adev);
2223
2224                 mutex_unlock(&dm->dc_lock);
2225
2226                 return 0;
2227         }
2228         /* Recreate dc_state - DC invalidates it when setting power state to S3. */
2229         dc_release_state(dm_state->context);
2230         dm_state->context = dc_create_state(dm->dc);
2231         /* TODO: Remove dc_state->dccg, use dc->dccg directly. */
2232         dc_resource_state_construct(dm->dc, dm_state->context);
2233
2234         /* Before powering on DC we need to re-initialize DMUB. */
2235         r = dm_dmub_hw_init(adev);
2236         if (r)
2237                 DRM_ERROR("DMUB interface failed to initialize: status=%d\n", r);
2238
2239         /* power on hardware */
2240         dc_set_power_state(dm->dc, DC_ACPI_CM_POWER_STATE_D0);
2241
2242         /* program HPD filter */
2243         dc_resume(dm->dc);
2244
2245         /*
2246          * early enable HPD Rx IRQ, should be done before set mode as short
2247          * pulse interrupts are used for MST
2248          */
2249         amdgpu_dm_irq_resume_early(adev);
2250
2251         /* On resume we need to rewrite the MSTM control bits to enable MST */
2252         s3_handle_mst(ddev, false);
2253
2254         /* Do detection */
2255         drm_connector_list_iter_begin(ddev, &iter);
2256         drm_for_each_connector_iter(connector, &iter) {
2257                 aconnector = to_amdgpu_dm_connector(connector);
2258
2259                 /*
2260                  * this is the case when traversing through already created
2261                  * MST connectors, should be skipped
2262                  */
2263                 if (aconnector->mst_port)
2264                         continue;
2265
2266                 mutex_lock(&aconnector->hpd_lock);
2267                 if (!dc_link_detect_sink(aconnector->dc_link, &new_connection_type))
2268                         DRM_ERROR("KMS: Failed to detect connector\n");
2269
2270                 if (aconnector->base.force && new_connection_type == dc_connection_none)
2271                         emulated_link_detect(aconnector->dc_link);
2272                 else
2273                         dc_link_detect(aconnector->dc_link, DETECT_REASON_HPD);
2274
2275                 if (aconnector->fake_enable && aconnector->dc_link->local_sink)
2276                         aconnector->fake_enable = false;
2277
2278                 if (aconnector->dc_sink)
2279                         dc_sink_release(aconnector->dc_sink);
2280                 aconnector->dc_sink = NULL;
2281                 amdgpu_dm_update_connector_after_detect(aconnector);
2282                 mutex_unlock(&aconnector->hpd_lock);
2283         }
2284         drm_connector_list_iter_end(&iter);
2285
2286         /* Force mode set in atomic commit */
2287         for_each_new_crtc_in_state(dm->cached_state, crtc, new_crtc_state, i)
2288                 new_crtc_state->active_changed = true;
2289
2290         /*
2291          * atomic_check is expected to create the dc states. We need to release
2292          * them here, since they were duplicated as part of the suspend
2293          * procedure.
2294          */
2295         for_each_new_crtc_in_state(dm->cached_state, crtc, new_crtc_state, i) {
2296                 dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
2297                 if (dm_new_crtc_state->stream) {
2298                         WARN_ON(kref_read(&dm_new_crtc_state->stream->refcount) > 1);
2299                         dc_stream_release(dm_new_crtc_state->stream);
2300                         dm_new_crtc_state->stream = NULL;
2301                 }
2302         }
2303
2304         for_each_new_plane_in_state(dm->cached_state, plane, new_plane_state, i) {
2305                 dm_new_plane_state = to_dm_plane_state(new_plane_state);
2306                 if (dm_new_plane_state->dc_state) {
2307                         WARN_ON(kref_read(&dm_new_plane_state->dc_state->refcount) > 1);
2308                         dc_plane_state_release(dm_new_plane_state->dc_state);
2309                         dm_new_plane_state->dc_state = NULL;
2310                 }
2311         }
2312
2313         drm_atomic_helper_resume(ddev, dm->cached_state);
2314
2315         dm->cached_state = NULL;
2316
2317 #ifdef CONFIG_DRM_AMD_SECURE_DISPLAY
2318         amdgpu_dm_crtc_secure_display_resume(adev);
2319 #endif
2320
2321         amdgpu_dm_irq_resume_late(adev);
2322
2323         amdgpu_dm_smu_write_watermarks_table(adev);
2324
2325         return 0;
2326 }
2327
2328 /**
2329  * DOC: DM Lifecycle
2330  *
2331  * DM (and consequently DC) is registered in the amdgpu base driver as a IP
2332  * block. When CONFIG_DRM_AMD_DC is enabled, the DM device IP block is added to
2333  * the base driver's device list to be initialized and torn down accordingly.
2334  *
2335  * The functions to do so are provided as hooks in &struct amd_ip_funcs.
2336  */
2337
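/*
 * As a rough sketch (hypothetical excerpt; the real call sites are the
 * per-ASIC files such as soc15.c), the base driver registers DM with:
 *
 *	r = amdgpu_device_ip_block_add(adev, &dm_ip_block);
 *	if (r)
 *		return r;
 *
 * after which the amd_ip_funcs hooks below (sw_init, hw_init, suspend,
 * resume, ...) are invoked at the matching points of the device lifecycle.
 */
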
2338 static const struct amd_ip_funcs amdgpu_dm_funcs = {
2339         .name = "dm",
2340         .early_init = dm_early_init,
2341         .late_init = dm_late_init,
2342         .sw_init = dm_sw_init,
2343         .sw_fini = dm_sw_fini,
2344         .hw_init = dm_hw_init,
2345         .hw_fini = dm_hw_fini,
2346         .suspend = dm_suspend,
2347         .resume = dm_resume,
2348         .is_idle = dm_is_idle,
2349         .wait_for_idle = dm_wait_for_idle,
2350         .check_soft_reset = dm_check_soft_reset,
2351         .soft_reset = dm_soft_reset,
2352         .set_clockgating_state = dm_set_clockgating_state,
2353         .set_powergating_state = dm_set_powergating_state,
2354 };
2355
2356 const struct amdgpu_ip_block_version dm_ip_block =
2357 {
2358         .type = AMD_IP_BLOCK_TYPE_DCE,
2359         .major = 1,
2360         .minor = 0,
2361         .rev = 0,
2362         .funcs = &amdgpu_dm_funcs,
2363 };
2364
2366 /**
2367  * DOC: atomic
2368  *
2369  * *WIP*
2370  */
2371
2372 static const struct drm_mode_config_funcs amdgpu_dm_mode_funcs = {
2373         .fb_create = amdgpu_display_user_framebuffer_create,
2374         .get_format_info = amd_get_format_info,
2375         .output_poll_changed = drm_fb_helper_output_poll_changed,
2376         .atomic_check = amdgpu_dm_atomic_check,
2377         .atomic_commit = drm_atomic_helper_commit,
2378 };
2379
2380 static struct drm_mode_config_helper_funcs amdgpu_dm_mode_config_helperfuncs = {
2381         .atomic_commit_tail = amdgpu_dm_atomic_commit_tail
2382 };
2383
2384 static void update_connector_ext_caps(struct amdgpu_dm_connector *aconnector)
2385 {
2386         u32 max_cll, min_cll, max, min, q, r;
2387         struct amdgpu_dm_backlight_caps *caps;
2388         struct amdgpu_display_manager *dm;
2389         struct drm_connector *conn_base;
2390         struct amdgpu_device *adev;
2391         struct dc_link *link = NULL;
2392         static const u8 pre_computed_values[] = {
2393                 50, 51, 52, 53, 55, 56, 57, 58, 59, 61, 62, 63, 65, 66, 68, 69,
2394                 71, 72, 74, 75, 77, 79, 81, 82, 84, 86, 88, 90, 92, 94, 96, 98};
2395
2396         if (!aconnector || !aconnector->dc_link)
2397                 return;
2398
2399         link = aconnector->dc_link;
2400         if (link->connector_signal != SIGNAL_TYPE_EDP)
2401                 return;
2402
2403         conn_base = &aconnector->base;
2404         adev = drm_to_adev(conn_base->dev);
2405         dm = &adev->dm;
2406         caps = &dm->backlight_caps;
2407         caps->ext_caps = &aconnector->dc_link->dpcd_sink_ext_caps;
2408         caps->aux_support = false;
2409         max_cll = conn_base->hdr_sink_metadata.hdmi_type1.max_cll;
2410         min_cll = conn_base->hdr_sink_metadata.hdmi_type1.min_cll;
2411
2412         if (caps->ext_caps->bits.oled == 1 ||
2413             caps->ext_caps->bits.sdr_aux_backlight_control == 1 ||
2414             caps->ext_caps->bits.hdr_aux_backlight_control == 1)
2415                 caps->aux_support = true;
2416
2417         if (amdgpu_backlight == 0)
2418                 caps->aux_support = false;
2419         else if (amdgpu_backlight == 1)
2420                 caps->aux_support = true;
2421
2422         /* From the specification (CTA-861-G), for calculating the maximum
2423          * luminance we need to use:
2424          *      Luminance = 50*2**(CV/32)
2425          * where CV is a one-byte value.
2426          * Calculating this expression directly would require floating-point
2427          * precision; to avoid that complexity, we take advantage of the fact
2428          * that CV is divided by a constant. From Euclid's division algorithm,
2429          * we know that CV can be written as CV = 32*q + r. Substituting this
2430          * into the luminance expression gives 50*(2**q)*(2**(r/32)), so we
2431          * only need to pre-compute 50*(2**(r/32)). The pre-computation was
2432          * done with the following Ruby line:
2433          *      (0...32).each {|cv| puts (50*2**(cv/32.0)).round}
2434          * and the results are stored in pre_computed_values.
2435          */
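        /* Worked example with a hypothetical max_cll of 100:
         *      q = 100 >> 5 = 3, r = 100 % 32 = 4
         *      max = (1 << 3) * pre_computed_values[4] = 8 * 55 = 440
         * versus the exact 50*2**(100/32) ~= 436; the small difference comes
         * from rounding in the pre-computed table.
         */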
2437         q = max_cll >> 5;
2438         r = max_cll % 32;
2439         max = (1 << q) * pre_computed_values[r];
2440
2441         /* min luminance: maxLum * (CV/255)^2 / 100 */
2442         q = DIV_ROUND_CLOSEST(min_cll, 255);
2443         min = max * DIV_ROUND_CLOSEST((q * q), 100);
2444
2445         caps->aux_max_input_signal = max;
2446         caps->aux_min_input_signal = min;
2447 }
2448
2449 void amdgpu_dm_update_connector_after_detect(
2450                 struct amdgpu_dm_connector *aconnector)
2451 {
2452         struct drm_connector *connector = &aconnector->base;
2453         struct drm_device *dev = connector->dev;
2454         struct dc_sink *sink;
2455
2456         /* MST handled by drm_mst framework */
2457         if (aconnector->mst_mgr.mst_state)
2458                 return;
2459
2460         sink = aconnector->dc_link->local_sink;
2461         if (sink)
2462                 dc_sink_retain(sink);
2463
2464         /*
2465          * The EDID-managed connector gets its first update only in the mode_valid
2466          * hook; after that the connector sink is set to either a fake or a physical
2467          * sink, depending on link status. Skip if already done during boot.
2468          */
2469         if (aconnector->base.force != DRM_FORCE_UNSPECIFIED
2470                         && aconnector->dc_em_sink) {
2471
2472                 /*
2473                  * For headless S3 resume, use the emulated sink (dc_em_sink) to
2474                  * fake a stream, because connector->sink is set to NULL on resume.
2475                  */
2476                 mutex_lock(&dev->mode_config.mutex);
2477
2478                 if (sink) {
2479                         if (aconnector->dc_sink) {
2480                                 amdgpu_dm_update_freesync_caps(connector, NULL);
2481                                 /*
2482                                  * The retain and release below bump the sink's refcount,
2483                                  * because the link no longer points to it after disconnect;
2484                                  * otherwise the next CRTC-to-connector reshuffle by the UMD
2485                                  * would trigger an unwanted dc_sink release.
2486                                  */
2487                                 dc_sink_release(aconnector->dc_sink);
2488                         }
2489                         aconnector->dc_sink = sink;
2490                         dc_sink_retain(aconnector->dc_sink);
2491                         amdgpu_dm_update_freesync_caps(connector,
2492                                         aconnector->edid);
2493                 } else {
2494                         amdgpu_dm_update_freesync_caps(connector, NULL);
2495                         if (!aconnector->dc_sink) {
2496                                 aconnector->dc_sink = aconnector->dc_em_sink;
2497                                 dc_sink_retain(aconnector->dc_sink);
2498                         }
2499                 }
2500
2501                 mutex_unlock(&dev->mode_config.mutex);
2502
2503                 if (sink)
2504                         dc_sink_release(sink);
2505                 return;
2506         }
2507
2508         /*
2509          * TODO: temporary guard while looking for a proper fix.
2510          * If this sink is an MST sink, we should not do anything.
2511          */
2512         if (sink && sink->sink_signal == SIGNAL_TYPE_DISPLAY_PORT_MST) {
2513                 dc_sink_release(sink);
2514                 return;
2515         }
2516
2517         if (aconnector->dc_sink == sink) {
2518                 /*
2519                  * We got a DP short pulse (Link Loss, DP CTS, etc.).
2520                  * Do nothing.
2521                  */
2522                 DRM_DEBUG_DRIVER("DCHPD: connector_id=%d: dc_sink didn't change.\n",
2523                                 aconnector->connector_id);
2524                 if (sink)
2525                         dc_sink_release(sink);
2526                 return;
2527         }
2528
2529         DRM_DEBUG_DRIVER("DCHPD: connector_id=%d: Old sink=%p New sink=%p\n",
2530                 aconnector->connector_id, aconnector->dc_sink, sink);
2531
2532         mutex_lock(&dev->mode_config.mutex);
2533
2534         /*
2535          * 1. Update status of the drm connector
2536          * 2. Send an event and let userspace tell us what to do
2537          */
2538         if (sink) {
2539                 /*
2540                  * TODO: check if we still need the S3 mode update workaround.
2541                  * If yes, put it here.
2542                  */
2543                 if (aconnector->dc_sink) {
2544                         amdgpu_dm_update_freesync_caps(connector, NULL);
2545                         dc_sink_release(aconnector->dc_sink);
2546                 }
2547
2548                 aconnector->dc_sink = sink;
2549                 dc_sink_retain(aconnector->dc_sink);
2550                 if (sink->dc_edid.length == 0) {
2551                         aconnector->edid = NULL;
2552                         if (aconnector->dc_link->aux_mode) {
2553                                 drm_dp_cec_unset_edid(
2554                                         &aconnector->dm_dp_aux.aux);
2555                         }
2556                 } else {
2557                         aconnector->edid =
2558                                 (struct edid *)sink->dc_edid.raw_edid;
2559
2560                         drm_connector_update_edid_property(connector,
2561                                                            aconnector->edid);
2562                         if (aconnector->dc_link->aux_mode)
2563                                 drm_dp_cec_set_edid(&aconnector->dm_dp_aux.aux,
2564                                                     aconnector->edid);
2565                 }
2566
2567                 amdgpu_dm_update_freesync_caps(connector, aconnector->edid);
2568                 update_connector_ext_caps(aconnector);
2569         } else {
2570                 drm_dp_cec_unset_edid(&aconnector->dm_dp_aux.aux);
2571                 amdgpu_dm_update_freesync_caps(connector, NULL);
2572                 drm_connector_update_edid_property(connector, NULL);
2573                 aconnector->num_modes = 0;
2574                 dc_sink_release(aconnector->dc_sink);
2575                 aconnector->dc_sink = NULL;
2576                 aconnector->edid = NULL;
2577 #ifdef CONFIG_DRM_AMD_DC_HDCP
2578                 /* Set CP to DESIRED if it was ENABLED, so we can re-enable it again on hotplug */
2579                 if (connector->state->content_protection == DRM_MODE_CONTENT_PROTECTION_ENABLED)
2580                         connector->state->content_protection = DRM_MODE_CONTENT_PROTECTION_DESIRED;
2581 #endif
2582         }
2583
2584         mutex_unlock(&dev->mode_config.mutex);
2585
2586         update_subconnector_property(aconnector);
2587
2588         if (sink)
2589                 dc_sink_release(sink);
2590 }
2591
2592 static void handle_hpd_irq(void *param)
2593 {
2594         struct amdgpu_dm_connector *aconnector = (struct amdgpu_dm_connector *)param;
2595         struct drm_connector *connector = &aconnector->base;
2596         struct drm_device *dev = connector->dev;
2597         enum dc_connection_type new_connection_type = dc_connection_none;
2598         struct amdgpu_device *adev = drm_to_adev(dev);
2599 #ifdef CONFIG_DRM_AMD_DC_HDCP
2600         struct dm_connector_state *dm_con_state = to_dm_connector_state(connector->state);
2601 #endif
2602
2603         if (adev->dm.disable_hpd_irq)
2604                 return;
2605
2606         /*
2607          * In case of failure, or for MST, there is no need to update the connector
2608          * status or notify the OS, since MST handles this in its own context.
2609          */
2610         mutex_lock(&aconnector->hpd_lock);
2611
2612 #ifdef CONFIG_DRM_AMD_DC_HDCP
2613         if (adev->dm.hdcp_workqueue) {
2614                 hdcp_reset_display(adev->dm.hdcp_workqueue, aconnector->dc_link->link_index);
2615                 dm_con_state->update_hdcp = true;
2616         }
2617 #endif
2618         if (aconnector->fake_enable)
2619                 aconnector->fake_enable = false;
2620
2621         if (!dc_link_detect_sink(aconnector->dc_link, &new_connection_type))
2622                 DRM_ERROR("KMS: Failed to detect connector\n");
2623
2624         if (aconnector->base.force && new_connection_type == dc_connection_none) {
2625                 emulated_link_detect(aconnector->dc_link);
2626
2628                 drm_modeset_lock_all(dev);
2629                 dm_restore_drm_connector_state(dev, connector);
2630                 drm_modeset_unlock_all(dev);
2631
2632                 if (aconnector->base.force == DRM_FORCE_UNSPECIFIED)
2633                         drm_kms_helper_hotplug_event(dev);
2634
2635         } else if (dc_link_detect(aconnector->dc_link, DETECT_REASON_HPD)) {
2636                 if (new_connection_type == dc_connection_none &&
2637                     aconnector->dc_link->type == dc_connection_none)
2638                         dm_set_dpms_off(aconnector->dc_link);
2639
2640                 amdgpu_dm_update_connector_after_detect(aconnector);
2641
2642                 drm_modeset_lock_all(dev);
2643                 dm_restore_drm_connector_state(dev, connector);
2644                 drm_modeset_unlock_all(dev);
2645
2646                 if (aconnector->base.force == DRM_FORCE_UNSPECIFIED)
2647                         drm_kms_helper_hotplug_event(dev);
2648         }
2649         mutex_unlock(&aconnector->hpd_lock);
2651 }
2652
2653 static void dm_handle_hpd_rx_irq(struct amdgpu_dm_connector *aconnector)
2654 {
2655         uint8_t esi[DP_PSR_ERROR_STATUS - DP_SINK_COUNT_ESI] = { 0 };
2656         uint8_t dret;
2657         bool new_irq_handled = false;
2658         int dpcd_addr;
2659         int dpcd_bytes_to_read;
2660
2661         const int max_process_count = 30;
2662         int process_count = 0;
2663
2664         const struct dc_link_status *link_status = dc_link_get_status(aconnector->dc_link);
2665
2666         if (link_status->dpcd_caps->dpcd_rev.raw < 0x12) {
2667                 dpcd_bytes_to_read = DP_LANE0_1_STATUS - DP_SINK_COUNT;
2668                 /* DPCD 0x200 - 0x201 for downstream IRQ */
2669                 dpcd_addr = DP_SINK_COUNT;
2670         } else {
2671                 dpcd_bytes_to_read = DP_PSR_ERROR_STATUS - DP_SINK_COUNT_ESI;
2672                 /* DPCD 0x2002 - 0x2005 for downstream IRQ */
2673                 dpcd_addr = DP_SINK_COUNT_ESI;
2674         }
2675
2676         dret = drm_dp_dpcd_read(
2677                 &aconnector->dm_dp_aux.aux,
2678                 dpcd_addr,
2679                 esi,
2680                 dpcd_bytes_to_read);
2681
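        /* Keep servicing the ESI while full DPCD reads succeed and the
         * iteration cap is not hit; after each handled MST IRQ the ESI is
         * ACKed and re-read, and the loop stops once nothing new is signalled.
         */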
2682         while (dret == dpcd_bytes_to_read &&
2683                 process_count < max_process_count) {
2684                 uint8_t retry;
2685                 dret = 0;
2686
2687                 process_count++;
2688
2689                 DRM_DEBUG_DRIVER("ESI %02x %02x %02x\n", esi[0], esi[1], esi[2]);
2690                 /* handle HPD short pulse irq */
2691                 if (aconnector->mst_mgr.mst_state)
2692                         drm_dp_mst_hpd_irq(
2693                                 &aconnector->mst_mgr,
2694                                 esi,
2695                                 &new_irq_handled);
2696
2697                 if (new_irq_handled) {
2698                         /* ACK at the DPCD to notify the downstream device */
2699                         const int ack_dpcd_bytes_to_write =
2700                                 dpcd_bytes_to_read - 1;
2701
2702                         for (retry = 0; retry < 3; retry++) {
2703                                 uint8_t wret;
2704
2705                                 wret = drm_dp_dpcd_write(
2706                                         &aconnector->dm_dp_aux.aux,
2707                                         dpcd_addr + 1,
2708                                         &esi[1],
2709                                         ack_dpcd_bytes_to_write);
2710                                 if (wret == ack_dpcd_bytes_to_write)
2711                                         break;
2712                         }
2713
2714                         /* check if there is new irq to be handled */
2715                         dret = drm_dp_dpcd_read(
2716                                 &aconnector->dm_dp_aux.aux,
2717                                 dpcd_addr,
2718                                 esi,
2719                                 dpcd_bytes_to_read);
2720
2721                         new_irq_handled = false;
2722                 } else {
2723                         break;
2724                 }
2725         }
2726
2727         if (process_count == max_process_count)
2728                 DRM_DEBUG_DRIVER("Loop exceeded max iterations\n");
2729 }
2730
2731 static void handle_hpd_rx_irq(void *param)
2732 {
2733         struct amdgpu_dm_connector *aconnector = (struct amdgpu_dm_connector *)param;
2734         struct drm_connector *connector = &aconnector->base;
2735         struct drm_device *dev = connector->dev;
2736         struct dc_link *dc_link = aconnector->dc_link;
2737         bool is_mst_root_connector = aconnector->mst_mgr.mst_state;
2738         bool result = false;
2739         enum dc_connection_type new_connection_type = dc_connection_none;
2740         struct amdgpu_device *adev = drm_to_adev(dev);
2741         union hpd_irq_data hpd_irq_data;
2742
2743         memset(&hpd_irq_data, 0, sizeof(hpd_irq_data));
2744
2745         if (adev->dm.disable_hpd_irq)
2746                 return;
2747
2749         /*
2750          * TODO: Temporarily add a mutex to protect the HPD interrupt from
2751          * GPIO conflicts; once the i2c helper is implemented, this mutex
2752          * should be retired.
2753          */
2754         mutex_lock(&aconnector->hpd_lock);
2755
2756         read_hpd_rx_irq_data(dc_link, &hpd_irq_data);
2757
2758         if ((dc_link->cur_link_settings.lane_count != LANE_COUNT_UNKNOWN) ||
2759                 (dc_link->type == dc_connection_mst_branch)) {
2760                 if (hpd_irq_data.bytes.device_service_irq.bits.UP_REQ_MSG_RDY) {
2761                         result = true;
2762                         dm_handle_hpd_rx_irq(aconnector);
2763                         goto out;
2764                 } else if (hpd_irq_data.bytes.device_service_irq.bits.DOWN_REP_MSG_RDY) {
2765                         result = false;
2766                         dm_handle_hpd_rx_irq(aconnector);
2767                         goto out;
2768                 }
2769         }
2770
2771         if (!amdgpu_in_reset(adev)) {
2772                 mutex_lock(&adev->dm.dc_lock);
2773 #ifdef CONFIG_DRM_AMD_DC_HDCP
2774                 result = dc_link_handle_hpd_rx_irq(dc_link, &hpd_irq_data, NULL);
2775 #else
2776                 result = dc_link_handle_hpd_rx_irq(dc_link, NULL, NULL);
2777 #endif
2778                 mutex_unlock(&adev->dm.dc_lock);
2779         }
2780
2781 out:
2782         if (result && !is_mst_root_connector) {
2783                 /* Downstream Port status changed. */
2784                 if (!dc_link_detect_sink(dc_link, &new_connection_type))
2785                         DRM_ERROR("KMS: Failed to detect connector\n");
2786
2787                 if (aconnector->base.force && new_connection_type == dc_connection_none) {
2788                         emulated_link_detect(dc_link);
2789
2790                         if (aconnector->fake_enable)
2791                                 aconnector->fake_enable = false;
2792
2793                         amdgpu_dm_update_connector_after_detect(aconnector);
2794
2796                         drm_modeset_lock_all(dev);
2797                         dm_restore_drm_connector_state(dev, connector);
2798                         drm_modeset_unlock_all(dev);
2799
2800                         drm_kms_helper_hotplug_event(dev);
2801                 } else if (dc_link_detect(dc_link, DETECT_REASON_HPDRX)) {
2802
2803                         if (aconnector->fake_enable)
2804                                 aconnector->fake_enable = false;
2805
2806                         amdgpu_dm_update_connector_after_detect(aconnector);
2807
2808
2809                         drm_modeset_lock_all(dev);
2810                         dm_restore_drm_connector_state(dev, connector);
2811                         drm_modeset_unlock_all(dev);
2812
2813                         drm_kms_helper_hotplug_event(dev);
2814                 }
2815         }
2816 #ifdef CONFIG_DRM_AMD_DC_HDCP
2817         if (hpd_irq_data.bytes.device_service_irq.bits.CP_IRQ) {
2818                 if (adev->dm.hdcp_workqueue)
2819                         hdcp_handle_cpirq(adev->dm.hdcp_workqueue, aconnector->base.index);
2820         }
2821 #endif
2822
2823         if (dc_link->type != dc_connection_mst_branch)
2824                 drm_dp_cec_irq(&aconnector->dm_dp_aux.aux);
2825
2826         mutex_unlock(&aconnector->hpd_lock);
2827 }
2828
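/*
 * For every connector with valid DC irq sources, hook up handle_hpd_irq
 * (long pulse: connect/disconnect) and handle_hpd_rx_irq (short pulse:
 * DP link, MST and content-protection events).
 */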
2829 static void register_hpd_handlers(struct amdgpu_device *adev)
2830 {
2831         struct drm_device *dev = adev_to_drm(adev);
2832         struct drm_connector *connector;
2833         struct amdgpu_dm_connector *aconnector;
2834         const struct dc_link *dc_link;
2835         struct dc_interrupt_params int_params = {0};
2836
2837         int_params.requested_polarity = INTERRUPT_POLARITY_DEFAULT;
2838         int_params.current_polarity = INTERRUPT_POLARITY_DEFAULT;
2839
2840         list_for_each_entry(connector,
2841                         &dev->mode_config.connector_list, head) {
2842
2843                 aconnector = to_amdgpu_dm_connector(connector);
2844                 dc_link = aconnector->dc_link;
2845
2846                 if (DC_IRQ_SOURCE_INVALID != dc_link->irq_source_hpd) {
2847                         int_params.int_context = INTERRUPT_LOW_IRQ_CONTEXT;
2848                         int_params.irq_source = dc_link->irq_source_hpd;
2849
2850                         amdgpu_dm_irq_register_interrupt(adev, &int_params,
2851                                         handle_hpd_irq,
2852                                         (void *) aconnector);
2853                 }
2854
2855                 if (DC_IRQ_SOURCE_INVALID != dc_link->irq_source_hpd_rx) {
2856
2857                         /* Also register for DP short pulse (hpd_rx). */
2858                         int_params.int_context = INTERRUPT_LOW_IRQ_CONTEXT;
2859                         int_params.irq_source = dc_link->irq_source_hpd_rx;
2860
2861                         amdgpu_dm_irq_register_interrupt(adev, &int_params,
2862                                         handle_hpd_rx_irq,
2863                                         (void *) aconnector);
2864                 }
2865         }
2866 }
2867
2868 #if defined(CONFIG_DRM_AMD_DC_SI)
2869 /* Register IRQ sources and initialize IRQ callbacks */
2870 static int dce60_register_irq_handlers(struct amdgpu_device *adev)
2871 {
2872         struct dc *dc = adev->dm.dc;
2873         struct common_irq_params *c_irq_params;
2874         struct dc_interrupt_params int_params = {0};
2875         int r;
2876         int i;
2877         unsigned client_id = AMDGPU_IRQ_CLIENTID_LEGACY;
2878
2879         int_params.requested_polarity = INTERRUPT_POLARITY_DEFAULT;
2880         int_params.current_polarity = INTERRUPT_POLARITY_DEFAULT;
2881
2882         /*
2883          * Actions of amdgpu_irq_add_id():
2884          * 1. Register a set() function with base driver.
2885          *    Base driver will call set() function to enable/disable an
2886          *    interrupt in DC hardware.
2887          * 2. Register amdgpu_dm_irq_handler().
2888          *    Base driver will call amdgpu_dm_irq_handler() for ALL interrupts
2889          *    coming from DC hardware.
2890          *    amdgpu_dm_irq_handler() will re-direct the interrupt to DC
2891          *    for acknowledging and handling. */
2892
2893         /* Use VBLANK interrupt */
2894         for (i = 0; i < adev->mode_info.num_crtc; i++) {
2895                 r = amdgpu_irq_add_id(adev, client_id, i + 1, &adev->crtc_irq);
2896                 if (r) {
2897                         DRM_ERROR("Failed to add crtc irq id!\n");
2898                         return r;
2899                 }
2900
2901                 int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
2902                 int_params.irq_source =
2903                         dc_interrupt_to_irq_source(dc, i + 1, 0);
2904
2905                 c_irq_params = &adev->dm.vblank_params[int_params.irq_source - DC_IRQ_SOURCE_VBLANK1];
2906
2907                 c_irq_params->adev = adev;
2908                 c_irq_params->irq_src = int_params.irq_source;
2909
2910                 amdgpu_dm_irq_register_interrupt(adev, &int_params,
2911                                 dm_crtc_high_irq, c_irq_params);
2912         }
2913
2914         /* Use GRPH_PFLIP interrupt */
2915         for (i = VISLANDS30_IV_SRCID_D1_GRPH_PFLIP;
2916                         i <= VISLANDS30_IV_SRCID_D6_GRPH_PFLIP; i += 2) {
2917                 r = amdgpu_irq_add_id(adev, client_id, i, &adev->pageflip_irq);
2918                 if (r) {
2919                         DRM_ERROR("Failed to add page flip irq id!\n");
2920                         return r;
2921                 }
2922
2923                 int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
2924                 int_params.irq_source =
2925                         dc_interrupt_to_irq_source(dc, i, 0);
2926
2927                 c_irq_params = &adev->dm.pflip_params[int_params.irq_source - DC_IRQ_SOURCE_PFLIP_FIRST];
2928
2929                 c_irq_params->adev = adev;
2930                 c_irq_params->irq_src = int_params.irq_source;
2931
2932                 amdgpu_dm_irq_register_interrupt(adev, &int_params,
2933                                 dm_pflip_high_irq, c_irq_params);
2934
2935         }
2936
2937         /* HPD */
2938         r = amdgpu_irq_add_id(adev, client_id,
2939                         VISLANDS30_IV_SRCID_HOTPLUG_DETECT_A, &adev->hpd_irq);
2940         if (r) {
2941                 DRM_ERROR("Failed to add hpd irq id!\n");
2942                 return r;
2943         }
2944
2945         register_hpd_handlers(adev);
2946
2947         return 0;
2948 }
2949 #endif
2950
2951 /* Register IRQ sources and initialize IRQ callbacks */
2952 static int dce110_register_irq_handlers(struct amdgpu_device *adev)
2953 {
2954         struct dc *dc = adev->dm.dc;
2955         struct common_irq_params *c_irq_params;
2956         struct dc_interrupt_params int_params = {0};
2957         int r;
2958         int i;
2959         unsigned client_id = AMDGPU_IRQ_CLIENTID_LEGACY;
2960
2961         if (adev->asic_type >= CHIP_VEGA10)
2962                 client_id = SOC15_IH_CLIENTID_DCE;
2963
2964         int_params.requested_polarity = INTERRUPT_POLARITY_DEFAULT;
2965         int_params.current_polarity = INTERRUPT_POLARITY_DEFAULT;
2966
2967         /*
2968          * Actions of amdgpu_irq_add_id():
2969          * 1. Register a set() function with base driver.
2970          *    Base driver will call set() function to enable/disable an
2971          *    interrupt in DC hardware.
2972          * 2. Register amdgpu_dm_irq_handler().
2973          *    Base driver will call amdgpu_dm_irq_handler() for ALL interrupts
2974          *    coming from DC hardware.
2975          *    amdgpu_dm_irq_handler() will re-direct the interrupt to DC
2976          *    for acknowledging and handling. */
2977
2978         /* Use VBLANK interrupt */
2979         for (i = VISLANDS30_IV_SRCID_D1_VERTICAL_INTERRUPT0; i <= VISLANDS30_IV_SRCID_D6_VERTICAL_INTERRUPT0; i++) {
2980                 r = amdgpu_irq_add_id(adev, client_id, i, &adev->crtc_irq);
2981                 if (r) {
2982                         DRM_ERROR("Failed to add crtc irq id!\n");
2983                         return r;
2984                 }
2985
2986                 int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
2987                 int_params.irq_source =
2988                         dc_interrupt_to_irq_source(dc, i, 0);
2989
2990                 c_irq_params = &adev->dm.vblank_params[int_params.irq_source - DC_IRQ_SOURCE_VBLANK1];
2991
2992                 c_irq_params->adev = adev;
2993                 c_irq_params->irq_src = int_params.irq_source;
2994
2995                 amdgpu_dm_irq_register_interrupt(adev, &int_params,
2996                                 dm_crtc_high_irq, c_irq_params);
2997         }
2998
2999         /* Use VUPDATE interrupt */
3000         for (i = VISLANDS30_IV_SRCID_D1_V_UPDATE_INT; i <= VISLANDS30_IV_SRCID_D6_V_UPDATE_INT; i += 2) {
3001                 r = amdgpu_irq_add_id(adev, client_id, i, &adev->vupdate_irq);
3002                 if (r) {
3003                         DRM_ERROR("Failed to add vupdate irq id!\n");
3004                         return r;
3005                 }
3006
3007                 int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
3008                 int_params.irq_source =
3009                         dc_interrupt_to_irq_source(dc, i, 0);
3010
3011                 c_irq_params = &adev->dm.vupdate_params[int_params.irq_source - DC_IRQ_SOURCE_VUPDATE1];
3012
3013                 c_irq_params->adev = adev;
3014                 c_irq_params->irq_src = int_params.irq_source;
3015
3016                 amdgpu_dm_irq_register_interrupt(adev, &int_params,
3017                                 dm_vupdate_high_irq, c_irq_params);
3018         }
3019
3020         /* Use GRPH_PFLIP interrupt */
3021         for (i = VISLANDS30_IV_SRCID_D1_GRPH_PFLIP;
3022                         i <= VISLANDS30_IV_SRCID_D6_GRPH_PFLIP; i += 2) {
3023                 r = amdgpu_irq_add_id(adev, client_id, i, &adev->pageflip_irq);
3024                 if (r) {
3025                         DRM_ERROR("Failed to add page flip irq id!\n");
3026                         return r;
3027                 }
3028
3029                 int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
3030                 int_params.irq_source =
3031                         dc_interrupt_to_irq_source(dc, i, 0);
3032
3033                 c_irq_params = &adev->dm.pflip_params[int_params.irq_source - DC_IRQ_SOURCE_PFLIP_FIRST];
3034
3035                 c_irq_params->adev = adev;
3036                 c_irq_params->irq_src = int_params.irq_source;
3037
3038                 amdgpu_dm_irq_register_interrupt(adev, &int_params,
3039                                 dm_pflip_high_irq, c_irq_params);
3040
3041         }
3042
3043         /* HPD */
3044         r = amdgpu_irq_add_id(adev, client_id,
3045                         VISLANDS30_IV_SRCID_HOTPLUG_DETECT_A, &adev->hpd_irq);
3046         if (r) {
3047                 DRM_ERROR("Failed to add hpd irq id!\n");
3048                 return r;
3049         }
3050
3051         register_hpd_handlers(adev);
3052
3053         return 0;
3054 }
3055
3056 #if defined(CONFIG_DRM_AMD_DC_DCN)
3057 /* Register IRQ sources and initialize IRQ callbacks */
3058 static int dcn10_register_irq_handlers(struct amdgpu_device *adev)
3059 {
3060         struct dc *dc = adev->dm.dc;
3061         struct common_irq_params *c_irq_params;
3062         struct dc_interrupt_params int_params = {0};
3063         int r;
3064         int i;
3065 #if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
3066         static const unsigned int vrtl_int_srcid[] = {
3067                 DCN_1_0__SRCID__OTG1_VERTICAL_INTERRUPT0_CONTROL,
3068                 DCN_1_0__SRCID__OTG2_VERTICAL_INTERRUPT0_CONTROL,
3069                 DCN_1_0__SRCID__OTG3_VERTICAL_INTERRUPT0_CONTROL,
3070                 DCN_1_0__SRCID__OTG4_VERTICAL_INTERRUPT0_CONTROL,
3071                 DCN_1_0__SRCID__OTG5_VERTICAL_INTERRUPT0_CONTROL,
3072                 DCN_1_0__SRCID__OTG6_VERTICAL_INTERRUPT0_CONTROL
3073         };
3074 #endif
3075
3076         int_params.requested_polarity = INTERRUPT_POLARITY_DEFAULT;
3077         int_params.current_polarity = INTERRUPT_POLARITY_DEFAULT;
3078
3079         /*
3080          * Actions of amdgpu_irq_add_id():
3081          * 1. Register a set() function with base driver.
3082          *    Base driver will call set() function to enable/disable an
3083          *    interrupt in DC hardware.
3084          * 2. Register amdgpu_dm_irq_handler().
3085          *    Base driver will call amdgpu_dm_irq_handler() for ALL interrupts
3086          *    coming from DC hardware.
3087          *    amdgpu_dm_irq_handler() will re-direct the interrupt to DC
3088          *    for acknowledging and handling.
3089          */
3090
3091         /* Use VSTARTUP interrupt */
3092         for (i = DCN_1_0__SRCID__DC_D1_OTG_VSTARTUP;
3093                         i <= DCN_1_0__SRCID__DC_D1_OTG_VSTARTUP + adev->mode_info.num_crtc - 1;
3094                         i++) {
3095                 r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_DCE, i, &adev->crtc_irq);
3096
3097                 if (r) {
3098                         DRM_ERROR("Failed to add crtc irq id!\n");
3099                         return r;
3100                 }
3101
3102                 int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
3103                 int_params.irq_source =
3104                         dc_interrupt_to_irq_source(dc, i, 0);
3105
3106                 c_irq_params = &adev->dm.vblank_params[int_params.irq_source - DC_IRQ_SOURCE_VBLANK1];
3107
3108                 c_irq_params->adev = adev;
3109                 c_irq_params->irq_src = int_params.irq_source;
3110
3111                 amdgpu_dm_irq_register_interrupt(
3112                         adev, &int_params, dm_crtc_high_irq, c_irq_params);
3113         }
3114
3115         /* Use otg vertical line interrupt */
3116 #if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
3117         for (i = 0; i <= adev->mode_info.num_crtc - 1; i++) {
3118                 r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_DCE,
3119                                 vrtl_int_srcid[i], &adev->vline0_irq);
3120
3121                 if (r) {
3122                         DRM_ERROR("Failed to add vline0 irq id!\n");
3123                         return r;
3124                 }
3125
3126                 int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
3127                 int_params.irq_source =
3128                         dc_interrupt_to_irq_source(dc, vrtl_int_srcid[i], 0);
3129
3130                 if (int_params.irq_source == DC_IRQ_SOURCE_INVALID) {
3131                         DRM_ERROR("Failed to register vline0 irq %d!\n", vrtl_int_srcid[i]);
3132                         break;
3133                 }
3134
3135                 c_irq_params = &adev->dm.vline0_params[int_params.irq_source
3136                                         - DC_IRQ_SOURCE_DC1_VLINE0];
3137
3138                 c_irq_params->adev = adev;
3139                 c_irq_params->irq_src = int_params.irq_source;
3140
3141                 amdgpu_dm_irq_register_interrupt(adev, &int_params,
3142                                 dm_dcn_vertical_interrupt0_high_irq, c_irq_params);
3143         }
3144 #endif
3145
3146         /* Use VUPDATE_NO_LOCK interrupt on DCN, which seems to correspond to
3147          * the regular VUPDATE interrupt on DCE. We want DC_IRQ_SOURCE_VUPDATEx
3148          * to trigger at end of each vblank, regardless of state of the lock,
3149          * matching DCE behaviour.
3150          */
3151         for (i = DCN_1_0__SRCID__OTG0_IHC_V_UPDATE_NO_LOCK_INTERRUPT;
3152              i <= DCN_1_0__SRCID__OTG0_IHC_V_UPDATE_NO_LOCK_INTERRUPT + adev->mode_info.num_crtc - 1;
3153              i++) {
3154                 r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_DCE, i, &adev->vupdate_irq);
3155
3156                 if (r) {
3157                         DRM_ERROR("Failed to add vupdate irq id!\n");
3158                         return r;
3159                 }
3160
3161                 int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
3162                 int_params.irq_source =
3163                         dc_interrupt_to_irq_source(dc, i, 0);
3164
3165                 c_irq_params = &adev->dm.vupdate_params[int_params.irq_source - DC_IRQ_SOURCE_VUPDATE1];
3166
3167                 c_irq_params->adev = adev;
3168                 c_irq_params->irq_src = int_params.irq_source;
3169
3170                 amdgpu_dm_irq_register_interrupt(adev, &int_params,
3171                                 dm_vupdate_high_irq, c_irq_params);
3172         }
3173
3174         /* Use GRPH_PFLIP interrupt */
3175         for (i = DCN_1_0__SRCID__HUBP0_FLIP_INTERRUPT;
3176                         i <= DCN_1_0__SRCID__HUBP0_FLIP_INTERRUPT + adev->mode_info.num_crtc - 1;
3177                         i++) {
3178                 r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_DCE, i, &adev->pageflip_irq);
3179                 if (r) {
3180                         DRM_ERROR("Failed to add page flip irq id!\n");
3181                         return r;
3182                 }
3183
3184                 int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
3185                 int_params.irq_source =
3186                         dc_interrupt_to_irq_source(dc, i, 0);
3187
3188                 c_irq_params = &adev->dm.pflip_params[int_params.irq_source - DC_IRQ_SOURCE_PFLIP_FIRST];
3189
3190                 c_irq_params->adev = adev;
3191                 c_irq_params->irq_src = int_params.irq_source;
3192
3193                 amdgpu_dm_irq_register_interrupt(adev, &int_params,
3194                                 dm_pflip_high_irq, c_irq_params);
3195
3196         }
3197
3198         /* HPD */
3199         r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_DCE, DCN_1_0__SRCID__DC_HPD1_INT,
3200                         &adev->hpd_irq);
3201         if (r) {
3202                 DRM_ERROR("Failed to add hpd irq id!\n");
3203                 return r;
3204         }
3205
3206         register_hpd_handlers(adev);
3207
3208         return 0;
3209 }
3210 /* Register Outbox IRQ sources and initialize IRQ callbacks */
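/* The outbox is the ring through which DMCUB firmware notifies the driver. */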
3211 static int register_outbox_irq_handlers(struct amdgpu_device *adev)
3212 {
3213         struct dc *dc = adev->dm.dc;
3214         struct common_irq_params *c_irq_params;
3215         struct dc_interrupt_params int_params = {0};
3216         int r, i;
3217
3218         int_params.requested_polarity = INTERRUPT_POLARITY_DEFAULT;
3219         int_params.current_polarity = INTERRUPT_POLARITY_DEFAULT;
3220
3221         r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_DCE, DCN_1_0__SRCID__DMCUB_OUTBOX_LOW_PRIORITY_READY_INT,
3222                         &adev->dmub_outbox_irq);
3223         if (r) {
3224                 DRM_ERROR("Failed to add outbox irq id!\n");
3225                 return r;
3226         }
3227
3228         if (dc->ctx->dmub_srv) {
3229                 i = DCN_1_0__SRCID__DMCUB_OUTBOX_LOW_PRIORITY_READY_INT;
3230                 int_params.int_context = INTERRUPT_LOW_IRQ_CONTEXT;
3231                 int_params.irq_source =
3232                 dc_interrupt_to_irq_source(dc, i, 0);
3233
3234                 c_irq_params = &adev->dm.dmub_outbox_params[0];
3235
3236                 c_irq_params->adev = adev;
3237                 c_irq_params->irq_src = int_params.irq_source;
3238
3239                 amdgpu_dm_irq_register_interrupt(adev, &int_params,
3240                                 dm_dmub_outbox1_low_irq, c_irq_params);
3241         }
3242
3243         return 0;
3244 }
3245 #endif
3246
3247 /*
3248  * Acquires the lock for the atomic state object and returns
3249  * the new atomic state.
3250  *
3251  * This should only be called during atomic check.
3252  */
3253 static int dm_atomic_get_state(struct drm_atomic_state *state,
3254                                struct dm_atomic_state **dm_state)
3255 {
3256         struct drm_device *dev = state->dev;
3257         struct amdgpu_device *adev = drm_to_adev(dev);
3258         struct amdgpu_display_manager *dm = &adev->dm;
3259         struct drm_private_state *priv_state;
3260
3261         if (*dm_state)
3262                 return 0;
3263
3264         priv_state = drm_atomic_get_private_obj_state(state, &dm->atomic_obj);
3265         if (IS_ERR(priv_state))
3266                 return PTR_ERR(priv_state);
3267
3268         *dm_state = to_dm_atomic_state(priv_state);
3269
3270         return 0;
3271 }
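
/*
 * Illustrative call pattern from an atomic-check path (hypothetical caller):
 *
 *	struct dm_atomic_state *dm_state = NULL;
 *	int ret = dm_atomic_get_state(state, &dm_state);
 *
 *	if (ret)
 *		return ret;
 *	... dm_state->context can now be inspected or modified safely ...
 */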
3272
3273 static struct dm_atomic_state *
3274 dm_atomic_get_new_state(struct drm_atomic_state *state)
3275 {
3276         struct drm_device *dev = state->dev;
3277         struct amdgpu_device *adev = drm_to_adev(dev);
3278         struct amdgpu_display_manager *dm = &adev->dm;
3279         struct drm_private_obj *obj;
3280         struct drm_private_state *new_obj_state;
3281         int i;
3282
3283         for_each_new_private_obj_in_state(state, obj, new_obj_state, i) {
3284                 if (obj->funcs == dm->atomic_obj.funcs)
3285                         return to_dm_atomic_state(new_obj_state);
3286         }
3287
3288         return NULL;
3289 }
3290
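/*
 * The DM private object wraps the global DC state. Duplicating it hands
 * atomic check a scratch copy of the whole DC context to validate against,
 * leaving the committed state untouched until commit time.
 */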
3291 static struct drm_private_state *
3292 dm_atomic_duplicate_state(struct drm_private_obj *obj)
3293 {
3294         struct dm_atomic_state *old_state, *new_state;
3295
3296         new_state = kzalloc(sizeof(*new_state), GFP_KERNEL);
3297         if (!new_state)
3298                 return NULL;
3299
3300         __drm_atomic_helper_private_obj_duplicate_state(obj, &new_state->base);
3301
3302         old_state = to_dm_atomic_state(obj->state);
3303
3304         if (old_state && old_state->context)
3305                 new_state->context = dc_copy_state(old_state->context);
3306
3307         if (!new_state->context) {
3308                 kfree(new_state);
3309                 return NULL;
3310         }
3311
3312         return &new_state->base;
3313 }
3314
3315 static void dm_atomic_destroy_state(struct drm_private_obj *obj,
3316                                     struct drm_private_state *state)
3317 {
3318         struct dm_atomic_state *dm_state = to_dm_atomic_state(state);
3319
3320         if (dm_state && dm_state->context)
3321                 dc_release_state(dm_state->context);
3322
3323         kfree(dm_state);
3324 }
3325
3326 static struct drm_private_state_funcs dm_atomic_state_funcs = {
3327         .atomic_duplicate_state = dm_atomic_duplicate_state,
3328         .atomic_destroy_state = dm_atomic_destroy_state,
3329 };
3330
3331 static int amdgpu_dm_mode_config_init(struct amdgpu_device *adev)
3332 {
3333         struct dm_atomic_state *state;
3334         int r;
3335
3336         adev->mode_info.mode_config_initialized = true;
3337
3338         adev_to_drm(adev)->mode_config.funcs = (void *)&amdgpu_dm_mode_funcs;
3339         adev_to_drm(adev)->mode_config.helper_private = &amdgpu_dm_mode_config_helperfuncs;
3340
3341         adev_to_drm(adev)->mode_config.max_width = 16384;
3342         adev_to_drm(adev)->mode_config.max_height = 16384;
3343
3344         adev_to_drm(adev)->mode_config.preferred_depth = 24;
3345         adev_to_drm(adev)->mode_config.prefer_shadow = 1;
3346         /* indicates support for immediate flip */
3347         adev_to_drm(adev)->mode_config.async_page_flip = true;
3348
3349         adev_to_drm(adev)->mode_config.fb_base = adev->gmc.aper_base;
3350
3351         state = kzalloc(sizeof(*state), GFP_KERNEL);
3352         if (!state)
3353                 return -ENOMEM;
3354
3355         state->context = dc_create_state(adev->dm.dc);
3356         if (!state->context) {
3357                 kfree(state);
3358                 return -ENOMEM;
3359         }
3360
3361         dc_resource_state_copy_construct_current(adev->dm.dc, state->context);
3362
3363         drm_atomic_private_obj_init(adev_to_drm(adev),
3364                                     &adev->dm.atomic_obj,
3365                                     &state->base,
3366                                     &dm_atomic_state_funcs);
3367
3368         r = amdgpu_display_modeset_create_props(adev);
3369         if (r) {
3370                 dc_release_state(state->context);
3371                 kfree(state);
3372                 return r;
3373         }
3374
3375         r = amdgpu_dm_audio_init(adev);
3376         if (r) {
3377                 dc_release_state(state->context);
3378                 kfree(state);
3379                 return r;
3380         }
3381
3382         return 0;
3383 }
3384
3385 #define AMDGPU_DM_DEFAULT_MIN_BACKLIGHT 12
3386 #define AMDGPU_DM_DEFAULT_MAX_BACKLIGHT 255
3387 #define AUX_BL_DEFAULT_TRANSITION_TIME_MS 50
3388
3389 #if defined(CONFIG_BACKLIGHT_CLASS_DEVICE) ||\
3390         defined(CONFIG_BACKLIGHT_CLASS_DEVICE_MODULE)
3391
3392 static void amdgpu_dm_update_backlight_caps(struct amdgpu_display_manager *dm)
3393 {
3394 #if defined(CONFIG_ACPI)
3395         struct amdgpu_dm_backlight_caps caps;
3396
3397         memset(&caps, 0, sizeof(caps));
3398
3399         if (dm->backlight_caps.caps_valid)
3400                 return;
3401
3402         amdgpu_acpi_get_backlight_caps(dm->adev, &caps);
3403         if (caps.caps_valid) {
3404                 dm->backlight_caps.caps_valid = true;
3405                 if (caps.aux_support)
3406                         return;
3407                 dm->backlight_caps.min_input_signal = caps.min_input_signal;
3408                 dm->backlight_caps.max_input_signal = caps.max_input_signal;
3409         } else {
3410                 dm->backlight_caps.min_input_signal =
3411                                 AMDGPU_DM_DEFAULT_MIN_BACKLIGHT;
3412                 dm->backlight_caps.max_input_signal =
3413                                 AMDGPU_DM_DEFAULT_MAX_BACKLIGHT;
3414         }
3415 #else
3416         if (dm->backlight_caps.aux_support)
3417                 return;
3418
3419         dm->backlight_caps.min_input_signal = AMDGPU_DM_DEFAULT_MIN_BACKLIGHT;
3420         dm->backlight_caps.max_input_signal = AMDGPU_DM_DEFAULT_MAX_BACKLIGHT;
3421 #endif
3422 }
3423
3424 static int get_brightness_range(const struct amdgpu_dm_backlight_caps *caps,
3425                                 unsigned *min, unsigned *max)
3426 {
3427         if (!caps)
3428                 return 0;
3429
3430         if (caps->aux_support) {
3431                 // Firmware limits are in nits, DC API wants millinits.
3432                 *max = 1000 * caps->aux_max_input_signal;
3433                 *min = 1000 * caps->aux_min_input_signal;
3434         } else {
3435                 // Firmware limits are 8-bit, PWM control is 16-bit.
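                // 0x101 * x == (x << 8) | x: the 8-bit value is replicated into
                // both bytes, so 0xFF expands to exactly 0xFFFF.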
3436                 *max = 0x101 * caps->max_input_signal;
3437                 *min = 0x101 * caps->min_input_signal;
3438         }
3439         return 1;
3440 }
3441
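/*
 * Linear rescale from the 0..AMDGPU_MAX_BL_LEVEL (255) user range to the
 * panel's [min, max] range. E.g. with the default PWM caps above
 * (min = 12 * 0x101 = 3084, max = 255 * 0x101 = 65535), a user brightness
 * of 128 maps to 3084 + 62451 * 128 / 255 = 34433.
 */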
3442 static u32 convert_brightness_from_user(const struct amdgpu_dm_backlight_caps *caps,
3443                                         uint32_t brightness)
3444 {
3445         unsigned min, max;
3446
3447         if (!get_brightness_range(caps, &min, &max))
3448                 return brightness;
3449
3450         // Rescale 0..255 to min..max
3451         return min + DIV_ROUND_CLOSEST((max - min) * brightness,
3452                                        AMDGPU_MAX_BL_LEVEL);
3453 }
3454
3455 static u32 convert_brightness_to_user(const struct amdgpu_dm_backlight_caps *caps,
3456                                       uint32_t brightness)
3457 {
3458         unsigned min, max;
3459
3460         if (!get_brightness_range(caps, &min, &max))
3461                 return brightness;
3462
3463         if (brightness < min)
3464                 return 0;
3465         // Rescale min..max to 0..255
3466         return DIV_ROUND_CLOSEST(AMDGPU_MAX_BL_LEVEL * (brightness - min),
3467                                  max - min);
3468 }
3469
3470 static int amdgpu_dm_backlight_update_status(struct backlight_device *bd)
3471 {
3472         struct amdgpu_display_manager *dm = bl_get_data(bd);
3473         struct amdgpu_dm_backlight_caps caps;
3474         struct dc_link *link[AMDGPU_DM_MAX_NUM_EDP];
3475         u32 brightness;
3476         bool rc;
3477         int i;
3478
3479         amdgpu_dm_update_backlight_caps(dm);
3480         caps = dm->backlight_caps;
3481
3482         for (i = 0; i < dm->num_of_edps; i++)
3483                 link[i] = (struct dc_link *)dm->backlight_link[i];
3484
3485         brightness = convert_brightness_from_user(&caps, bd->props.brightness);
3486         // Change brightness based on AUX property
3487         if (caps.aux_support) {
3488                 for (i = 0; i < dm->num_of_edps; i++) {
3489                         rc = dc_link_set_backlight_level_nits(link[i], true, brightness,
3490                                 AUX_BL_DEFAULT_TRANSITION_TIME_MS);
3491                         if (!rc) {
3492                                 DRM_ERROR("DM: Failed to update backlight via AUX on eDP[%d]\n", i);
3493                                 break;
3494                         }
3495                 }
3496         } else {
3497                 for (i = 0; i < dm->num_of_edps; i++) {
3498                         rc = dc_link_set_backlight_level(dm->backlight_link[i], brightness, 0);
3499                         if (!rc) {
3500                                 DRM_ERROR("DM: Failed to update backlight on eDP[%d]\n", i);
3501                                 break;
3502                         }
3503                 }
3504         }
3505
3506         return rc ? 0 : 1;
3507 }
3508
3509 static int amdgpu_dm_backlight_get_brightness(struct backlight_device *bd)
3510 {
3511         struct amdgpu_display_manager *dm = bl_get_data(bd);
3512         struct amdgpu_dm_backlight_caps caps;
3513
3514         amdgpu_dm_update_backlight_caps(dm);
3515         caps = dm->backlight_caps;
3516
3517         if (caps.aux_support) {
3518                 struct dc_link *link = (struct dc_link *)dm->backlight_link[0];
3519                 u32 avg, peak;
3520                 bool rc;
3521
3522                 rc = dc_link_get_backlight_level_nits(link, &avg, &peak);
3523                 if (!rc)
3524                         return bd->props.brightness;
3525                 return convert_brightness_to_user(&caps, avg);
3526         } else {
3527                 int ret = dc_link_get_backlight_level(dm->backlight_link[0]);
3528
3529                 if (ret == DC_ERROR_UNEXPECTED)
3530                         return bd->props.brightness;
3531                 return convert_brightness_to_user(&caps, ret);
3532         }
3533 }
3534
3535 static const struct backlight_ops amdgpu_dm_backlight_ops = {
3536         .options = BL_CORE_SUSPENDRESUME,
3537         .get_brightness = amdgpu_dm_backlight_get_brightness,
3538         .update_status  = amdgpu_dm_backlight_update_status,
3539 };
3540
3541 static void
3542 amdgpu_dm_register_backlight_device(struct amdgpu_display_manager *dm)
3543 {
3544         char bl_name[16];
3545         struct backlight_properties props = { 0 };
3546
3547         amdgpu_dm_update_backlight_caps(dm);
3548
3549         props.max_brightness = AMDGPU_MAX_BL_LEVEL;
3550         props.brightness = AMDGPU_MAX_BL_LEVEL;
3551         props.type = BACKLIGHT_RAW;
3552
3553         snprintf(bl_name, sizeof(bl_name), "amdgpu_bl%d",
3554                  adev_to_drm(dm->adev)->primary->index);
3555
3556         dm->backlight_dev = backlight_device_register(bl_name,
3557                                                       adev_to_drm(dm->adev)->dev,
3558                                                       dm,
3559                                                       &amdgpu_dm_backlight_ops,
3560                                                       &props);
3561
3562         if (IS_ERR(dm->backlight_dev))
3563                 DRM_ERROR("DM: Backlight registration failed!\n");
3564         else
3565                 DRM_DEBUG_DRIVER("DM: Registered Backlight device: %s\n", bl_name);
3566 }
3567
3568 #endif
3569
3570 static int initialize_plane(struct amdgpu_display_manager *dm,
3571                             struct amdgpu_mode_info *mode_info, int plane_id,
3572                             enum drm_plane_type plane_type,
3573                             const struct dc_plane_cap *plane_cap)
3574 {
3575         struct drm_plane *plane;
3576         unsigned long possible_crtcs;
3577         int ret = 0;
3578
3579         plane = kzalloc(sizeof(struct drm_plane), GFP_KERNEL);
3580         if (!plane) {
3581                 DRM_ERROR("KMS: Failed to allocate plane\n");
3582                 return -ENOMEM;
3583         }
3584         plane->type = plane_type;
3585
3586         /*
3587          * HACK: IGT tests expect that the primary plane for a CRTC
3588          * can only have one possible CRTC. Only expose support for
3589          * all CRTCs on planes that will never be used as a primary
3590          * plane - i.e. overlay or underlay planes.
3591          */
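        /* e.g. plane_id 2 yields possible_crtcs 0x4, i.e. CRTC 2 only */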
3592         possible_crtcs = 1 << plane_id;
3593         if (plane_id >= dm->dc->caps.max_streams)
3594                 possible_crtcs = 0xff;
3595
3596         ret = amdgpu_dm_plane_init(dm, plane, possible_crtcs, plane_cap);
3597
3598         if (ret) {
3599                 DRM_ERROR("KMS: Failed to initialize plane\n");
3600                 kfree(plane);
3601                 return ret;
3602         }
3603
3604         if (mode_info)
3605                 mode_info->planes[plane_id] = plane;
3606
3607         return ret;
3608 }
3609
3610
3611 static void register_backlight_device(struct amdgpu_display_manager *dm,
3612                                       struct dc_link *link)
3613 {
3614 #if defined(CONFIG_BACKLIGHT_CLASS_DEVICE) ||\
3615         defined(CONFIG_BACKLIGHT_CLASS_DEVICE_MODULE)
3616
3617         if ((link->connector_signal & (SIGNAL_TYPE_EDP | SIGNAL_TYPE_LVDS)) &&
3618             link->type != dc_connection_none) {
3619                 /*
3620                  * Even if registration fails, we should continue with
3621                  * DM initialization, because not having backlight control
3622                  * is better than a black screen.
3623                  */
3624                 if (!dm->backlight_dev)
3625                         amdgpu_dm_register_backlight_device(dm);
3626
3627                 if (dm->backlight_dev) {
3628                         dm->backlight_link[dm->num_of_edps] = link;
3629                         dm->num_of_edps++;
3630                 }
3631         }
3632 #endif
3633 }
3634
3635
3636 /*
3637  * In this architecture, the association
3638  * connector -> encoder -> crtc
3639  * is not really required. The crtc and connector will hold the
3640  * display_index as an abstraction to use with the DAL component.
3641  *
3642  * Returns 0 on success
3643  */
3644 static int amdgpu_dm_initialize_drm_device(struct amdgpu_device *adev)
3645 {
3646         struct amdgpu_display_manager *dm = &adev->dm;
3647         int32_t i;
3648         struct amdgpu_dm_connector *aconnector = NULL;
3649         struct amdgpu_encoder *aencoder = NULL;
3650         struct amdgpu_mode_info *mode_info = &adev->mode_info;
3651         uint32_t link_cnt;
3652         int32_t primary_planes;
3653         enum dc_connection_type new_connection_type = dc_connection_none;
3654         const struct dc_plane_cap *plane;
3655
3656         dm->display_indexes_num = dm->dc->caps.max_streams;
3657         /* Update the actual number of CRTCs in use */
3658         adev->mode_info.num_crtc = adev->dm.display_indexes_num;
3659
3660         link_cnt = dm->dc->caps.max_links;
3661         if (amdgpu_dm_mode_config_init(dm->adev)) {
3662                 DRM_ERROR("DM: Failed to initialize mode config\n");
3663                 return -EINVAL;
3664         }
3665
3666         /* There is one primary plane per CRTC */
3667         primary_planes = dm->dc->caps.max_streams;
3668         ASSERT(primary_planes <= AMDGPU_MAX_PLANES);
3669
3670         /*
3671          * Initialize primary planes, the implicit planes used by legacy IOCTLs.
3672          * Order is reversed to match iteration order in atomic check.
3673          */
3674         for (i = (primary_planes - 1); i >= 0; i--) {
3675                 plane = &dm->dc->caps.planes[i];
3676
3677                 if (initialize_plane(dm, mode_info, i,
3678                                      DRM_PLANE_TYPE_PRIMARY, plane)) {
3679                         DRM_ERROR("KMS: Failed to initialize primary plane\n");
3680                         goto fail;
3681                 }
3682         }
3683
3684         /*
3685          * Initialize overlay planes, index starting after primary planes.
3686          * These planes have a higher DRM index than the primary planes since
3687          * they should be considered as having a higher z-order.
3688          * Order is reversed to match iteration order in atomic check.
3689          *
3690          * Only support DCN for now, and only expose one so we don't encourage
3691          * userspace to use up all the pipes.
3692          */
3693         for (i = 0; i < dm->dc->caps.max_planes; ++i) {
3694                 struct dc_plane_cap *plane = &dm->dc->caps.planes[i];
3695
3696                 if (plane->type != DC_PLANE_TYPE_DCN_UNIVERSAL)
3697                         continue;
3698
3699                 if (!plane->blends_with_above || !plane->blends_with_below)
3700                         continue;
3701
3702                 if (!plane->pixel_format_support.argb8888)
3703                         continue;
3704
3705                 if (initialize_plane(dm, NULL, primary_planes + i,
3706                                      DRM_PLANE_TYPE_OVERLAY, plane)) {
3707                         DRM_ERROR("KMS: Failed to initialize overlay plane\n");
3708                         goto fail;
3709                 }
3710
3711                 /* Only create one overlay plane. */
3712                 break;
3713         }
3714
3715         for (i = 0; i < dm->dc->caps.max_streams; i++)
3716                 if (amdgpu_dm_crtc_init(dm, mode_info->planes[i], i)) {
3717                         DRM_ERROR("KMS: Failed to initialize crtc\n");
3718                         goto fail;
3719                 }
3720
3721         /* Use Outbox interrupt */
3722         switch (adev->asic_type) {
3723         case CHIP_SIENNA_CICHLID:
3724         case CHIP_NAVY_FLOUNDER:
3725         case CHIP_RENOIR:
3726                 if (register_outbox_irq_handlers(dm->adev)) {
3727                         DRM_ERROR("DM: Failed to initialize IRQ\n");
3728                         goto fail;
3729                 }
3730                 break;
3731         default:
3732                 DRM_DEBUG_KMS("Unsupported ASIC type for outbox: 0x%X\n", adev->asic_type);
3733         }
3734
3735         /* loops over all connectors on the board */
3736         for (i = 0; i < link_cnt; i++) {
3737                 struct dc_link *link = NULL;
3738
3739                 if (i > AMDGPU_DM_MAX_DISPLAY_INDEX) {
3740                         DRM_ERROR(
3741                                 "KMS: Cannot support more than %d display indexes\n",
3742                                         AMDGPU_DM_MAX_DISPLAY_INDEX);
3743                         continue;
3744                 }
3745
3746                 aconnector = kzalloc(sizeof(*aconnector), GFP_KERNEL);
3747                 if (!aconnector)
3748                         goto fail;
3749
3750                 aencoder = kzalloc(sizeof(*aencoder), GFP_KERNEL);
3751                 if (!aencoder)
3752                         goto fail;
3753
3754                 if (amdgpu_dm_encoder_init(dm->ddev, aencoder, i)) {
3755                         DRM_ERROR("KMS: Failed to initialize encoder\n");
3756                         goto fail;
3757                 }
3758
3759                 if (amdgpu_dm_connector_init(dm, aconnector, i, aencoder)) {
3760                         DRM_ERROR("KMS: Failed to initialize connector\n");
3761                         goto fail;
3762                 }
3763
3764                 link = dc_get_link_at_index(dm->dc, i);
3765
3766                 if (!dc_link_detect_sink(link, &new_connection_type))
3767                         DRM_ERROR("KMS: Failed to detect connector\n");
3768
3769                 if (aconnector->base.force && new_connection_type == dc_connection_none) {
3770                         emulated_link_detect(link);
3771                         amdgpu_dm_update_connector_after_detect(aconnector);
3772
3773                 } else if (dc_link_detect(link, DETECT_REASON_BOOT)) {
3774                         amdgpu_dm_update_connector_after_detect(aconnector);
3775                         register_backlight_device(dm, link);
3776                         if (amdgpu_dc_feature_mask & DC_PSR_MASK)
3777                                 amdgpu_dm_set_psr_caps(link);
3778                 }
3779
3780
3781         }
3782
3783         /* Software is initialized. Now we can register interrupt handlers. */
3784         switch (adev->asic_type) {
3785 #if defined(CONFIG_DRM_AMD_DC_SI)
3786         case CHIP_TAHITI:
3787         case CHIP_PITCAIRN:
3788         case CHIP_VERDE:
3789         case CHIP_OLAND:
3790                 if (dce60_register_irq_handlers(dm->adev)) {
3791                         DRM_ERROR("DM: Failed to initialize IRQ\n");
3792                         goto fail;
3793                 }
3794                 break;
3795 #endif
3796         case CHIP_BONAIRE:
3797         case CHIP_HAWAII:
3798         case CHIP_KAVERI:
3799         case CHIP_KABINI:
3800         case CHIP_MULLINS:
3801         case CHIP_TONGA:
3802         case CHIP_FIJI:
3803         case CHIP_CARRIZO:
3804         case CHIP_STONEY:
3805         case CHIP_POLARIS11:
3806         case CHIP_POLARIS10:
3807         case CHIP_POLARIS12:
3808         case CHIP_VEGAM:
3809         case CHIP_VEGA10:
3810         case CHIP_VEGA12:
3811         case CHIP_VEGA20:
3812                 if (dce110_register_irq_handlers(dm->adev)) {
3813                         DRM_ERROR("DM: Failed to initialize IRQ\n");
3814                         goto fail;
3815                 }
3816                 break;
3817 #if defined(CONFIG_DRM_AMD_DC_DCN)
3818         case CHIP_RAVEN:
3819         case CHIP_NAVI12:
3820         case CHIP_NAVI10:
3821         case CHIP_NAVI14:
3822         case CHIP_RENOIR:
3823         case CHIP_SIENNA_CICHLID:
3824         case CHIP_NAVY_FLOUNDER:
3825         case CHIP_DIMGREY_CAVEFISH:
3826         case CHIP_VANGOGH:
3827                 if (dcn10_register_irq_handlers(dm->adev)) {
3828                         DRM_ERROR("DM: Failed to initialize IRQ\n");
3829                         goto fail;
3830                 }
3831                 break;
3832 #endif
3833         default:
3834                 DRM_ERROR("Unsupported ASIC type: 0x%X\n", adev->asic_type);
3835                 goto fail;
3836         }
3837
3838         return 0;
3839 fail:
3840         kfree(aencoder);
3841         kfree(aconnector);
3842
3843         return -EINVAL;
3844 }
3845
3846 static void amdgpu_dm_destroy_drm_device(struct amdgpu_display_manager *dm)
3847 {
3848         drm_mode_config_cleanup(dm->ddev);
3849         drm_atomic_private_obj_fini(&dm->atomic_obj);
3850         return;
3851 }
3852
3853 /******************************************************************************
3854  * amdgpu_display_funcs functions
3855  *****************************************************************************/
3856
3857 /*
3858  * dm_bandwidth_update - program display watermarks
3859  *
3860  * @adev: amdgpu_device pointer
3861  *
3862  * Calculate and program the display watermarks and line buffer allocation.
3863  */
3864 static void dm_bandwidth_update(struct amdgpu_device *adev)
3865 {
3866         /* TODO: implement later */
3867 }
3868
3869 static const struct amdgpu_display_funcs dm_display_funcs = {
3870         .bandwidth_update = dm_bandwidth_update, /* called unconditionally */
3871         .vblank_get_counter = dm_vblank_get_counter,/* called unconditionally */
3872         .backlight_set_level = NULL, /* never called for DC */
3873         .backlight_get_level = NULL, /* never called for DC */
3874         .hpd_sense = NULL,/* called unconditionally */
3875         .hpd_set_polarity = NULL, /* called unconditionally */
3876         .hpd_get_gpio_reg = NULL, /* VBIOS parsing. DAL does it. */
3877         .page_flip_get_scanoutpos =
3878                 dm_crtc_get_scanoutpos,/* called unconditionally */
3879         .add_encoder = NULL, /* VBIOS parsing. DAL does it. */
3880         .add_connector = NULL, /* VBIOS parsing. DAL does it. */
3881 };
3882
3883 #if defined(CONFIG_DEBUG_KERNEL_DC)
3884
3885 static ssize_t s3_debug_store(struct device *device,
3886                               struct device_attribute *attr,
3887                               const char *buf,
3888                               size_t count)
3889 {
3890         int ret;
3891         int s3_state;
3892         struct drm_device *drm_dev = dev_get_drvdata(device);
3893         struct amdgpu_device *adev = drm_to_adev(drm_dev);
3894
3895         ret = kstrtoint(buf, 0, &s3_state);
3896
3897         if (ret == 0) {
3898                 if (s3_state) {
3899                         dm_resume(adev);
3900                         drm_kms_helper_hotplug_event(adev_to_drm(adev));
3901                 } else
3902                         dm_suspend(adev);
3903         }
3904
3905         return ret == 0 ? count : 0;
3906 }
3907
3908 DEVICE_ATTR_WO(s3_debug);
3909
3910 #endif
3911
3912 static int dm_early_init(void *handle)
3913 {
3914         struct amdgpu_device *adev = (struct amdgpu_device *)handle;
3915
3916         switch (adev->asic_type) {
3917 #if defined(CONFIG_DRM_AMD_DC_SI)
3918         case CHIP_TAHITI:
3919         case CHIP_PITCAIRN:
3920         case CHIP_VERDE:
3921                 adev->mode_info.num_crtc = 6;
3922                 adev->mode_info.num_hpd = 6;
3923                 adev->mode_info.num_dig = 6;
3924                 break;
3925         case CHIP_OLAND:
3926                 adev->mode_info.num_crtc = 2;
3927                 adev->mode_info.num_hpd = 2;
3928                 adev->mode_info.num_dig = 2;
3929                 break;
3930 #endif
3931         case CHIP_BONAIRE:
3932         case CHIP_HAWAII:
3933                 adev->mode_info.num_crtc = 6;
3934                 adev->mode_info.num_hpd = 6;
3935                 adev->mode_info.num_dig = 6;
3936                 break;
3937         case CHIP_KAVERI:
3938                 adev->mode_info.num_crtc = 4;
3939                 adev->mode_info.num_hpd = 6;
3940                 adev->mode_info.num_dig = 7;
3941                 break;
3942         case CHIP_KABINI:
3943         case CHIP_MULLINS:
3944                 adev->mode_info.num_crtc = 2;
3945                 adev->mode_info.num_hpd = 6;
3946                 adev->mode_info.num_dig = 6;
3947                 break;
3948         case CHIP_FIJI:
3949         case CHIP_TONGA:
3950                 adev->mode_info.num_crtc = 6;
3951                 adev->mode_info.num_hpd = 6;
3952                 adev->mode_info.num_dig = 7;
3953                 break;
3954         case CHIP_CARRIZO:
3955                 adev->mode_info.num_crtc = 3;
3956                 adev->mode_info.num_hpd = 6;
3957                 adev->mode_info.num_dig = 9;
3958                 break;
3959         case CHIP_STONEY:
3960                 adev->mode_info.num_crtc = 2;
3961                 adev->mode_info.num_hpd = 6;
3962                 adev->mode_info.num_dig = 9;
3963                 break;
3964         case CHIP_POLARIS11:
3965         case CHIP_POLARIS12:
3966                 adev->mode_info.num_crtc = 5;
3967                 adev->mode_info.num_hpd = 5;
3968                 adev->mode_info.num_dig = 5;
3969                 break;
3970         case CHIP_POLARIS10:
3971         case CHIP_VEGAM:
3972                 adev->mode_info.num_crtc = 6;
3973                 adev->mode_info.num_hpd = 6;
3974                 adev->mode_info.num_dig = 6;
3975                 break;
3976         case CHIP_VEGA10:
3977         case CHIP_VEGA12:
3978         case CHIP_VEGA20:
3979                 adev->mode_info.num_crtc = 6;
3980                 adev->mode_info.num_hpd = 6;
3981                 adev->mode_info.num_dig = 6;
3982                 break;
3983 #if defined(CONFIG_DRM_AMD_DC_DCN)
3984         case CHIP_RAVEN:
3985         case CHIP_RENOIR:
3986         case CHIP_VANGOGH:
3987                 adev->mode_info.num_crtc = 4;
3988                 adev->mode_info.num_hpd = 4;
3989                 adev->mode_info.num_dig = 4;
3990                 break;
3991         case CHIP_NAVI10:
3992         case CHIP_NAVI12:
3993         case CHIP_SIENNA_CICHLID:
3994         case CHIP_NAVY_FLOUNDER:
3995                 adev->mode_info.num_crtc = 6;
3996                 adev->mode_info.num_hpd = 6;
3997                 adev->mode_info.num_dig = 6;
3998                 break;
3999         case CHIP_NAVI14:
4000         case CHIP_DIMGREY_CAVEFISH:
4001                 adev->mode_info.num_crtc = 5;
4002                 adev->mode_info.num_hpd = 5;
4003                 adev->mode_info.num_dig = 5;
4004                 break;
4005 #endif
4006         default:
4007                 DRM_ERROR("Unsupported ASIC type: 0x%X\n", adev->asic_type);
4008                 return -EINVAL;
4009         }
4010
4011         amdgpu_dm_set_irq_funcs(adev);
4012
4013         if (adev->mode_info.funcs == NULL)
4014                 adev->mode_info.funcs = &dm_display_funcs;
4015
4016         /*
4017          * Note: Do NOT change adev->audio_endpt_rreg and
4018          * adev->audio_endpt_wreg because they are initialised in
4019          * amdgpu_device_init()
4020          */
4021 #if defined(CONFIG_DEBUG_KERNEL_DC)
4022         device_create_file(
4023                 adev_to_drm(adev)->dev,
4024                 &dev_attr_s3_debug);
4025 #endif
4026
4027         return 0;
4028 }
4029
4030 static bool modeset_required(struct drm_crtc_state *crtc_state,
4031                              struct dc_stream_state *new_stream,
4032                              struct dc_stream_state *old_stream)
4033 {
4034         return crtc_state->active && drm_atomic_crtc_needs_modeset(crtc_state);
4035 }
4036
4037 static bool modereset_required(struct drm_crtc_state *crtc_state)
4038 {
4039         return !crtc_state->active && drm_atomic_crtc_needs_modeset(crtc_state);
4040 }
4041
4042 static void amdgpu_dm_encoder_destroy(struct drm_encoder *encoder)
4043 {
4044         drm_encoder_cleanup(encoder);
4045         kfree(encoder);
4046 }
4047
4048 static const struct drm_encoder_funcs amdgpu_dm_encoder_funcs = {
4049         .destroy = amdgpu_dm_encoder_destroy,
4050 };
4051
4052
4053 static void get_min_max_dc_plane_scaling(struct drm_device *dev,
4054                                          struct drm_framebuffer *fb,
4055                                          int *min_downscale, int *max_upscale)
4056 {
4057         struct amdgpu_device *adev = drm_to_adev(dev);
4058         struct dc *dc = adev->dm.dc;
4059         /* Caps for all supported planes are the same on DCE and DCN 1 - 3 */
4060         struct dc_plane_cap *plane_cap = &dc->caps.planes[0];
4061
4062         switch (fb->format->format) {
4063         case DRM_FORMAT_P010:
4064         case DRM_FORMAT_NV12:
4065         case DRM_FORMAT_NV21:
4066                 *max_upscale = plane_cap->max_upscale_factor.nv12;
4067                 *min_downscale = plane_cap->max_downscale_factor.nv12;
4068                 break;
4069
4070         case DRM_FORMAT_XRGB16161616F:
4071         case DRM_FORMAT_ARGB16161616F:
4072         case DRM_FORMAT_XBGR16161616F:
4073         case DRM_FORMAT_ABGR16161616F:
4074                 *max_upscale = plane_cap->max_upscale_factor.fp16;
4075                 *min_downscale = plane_cap->max_downscale_factor.fp16;
4076                 break;
4077
4078         default:
4079                 *max_upscale = plane_cap->max_upscale_factor.argb8888;
4080                 *min_downscale = plane_cap->max_downscale_factor.argb8888;
4081                 break;
4082         }
4083
4084         /*
4085          * A factor of 1 in the plane_cap means scaling is not allowed, i.e. use a
4086          * scaling factor of 1.0 == 1000 units.
4087          */
4088         if (*max_upscale == 1)
4089                 *max_upscale = 1000;
4090
4091         if (*min_downscale == 1)
4092                 *min_downscale = 1000;
4093 }
4094
4095
4096 static int fill_dc_scaling_info(const struct drm_plane_state *state,
4097                                 struct dc_scaling_info *scaling_info)
4098 {
4099         int scale_w, scale_h, min_downscale, max_upscale;
4100
4101         memset(scaling_info, 0, sizeof(*scaling_info));
4102
4103         /* Source is 16.16 fixed point, but we ignore the fractional part for now. */
4104         scaling_info->src_rect.x = state->src_x >> 16;
4105         scaling_info->src_rect.y = state->src_y >> 16;
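        /* e.g. a src_x of 0x18000 (1.5 in 16.16) truncates to 1 */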
4106
4107         /*
4108          * For reasons we don't (yet) fully understand a non-zero
4109          * src_y coordinate into an NV12 buffer can cause a
4110          * system hang. To avoid hangs (and maybe be overly cautious)
4111          * let's reject both non-zero src_x and src_y.
4112          *
4113          * We currently know of only one use-case to reproduce a
4114          * scenario with non-zero src_x and src_y for NV12, which
4115          * is to gesture the YouTube Android app into full screen
4116          * on ChromeOS.
4117          */
4118         if (state->fb &&
4119             state->fb->format->format == DRM_FORMAT_NV12 &&
4120             (scaling_info->src_rect.x != 0 ||
4121              scaling_info->src_rect.y != 0))
4122                 return -EINVAL;
4123
4124         scaling_info->src_rect.width = state->src_w >> 16;
4125         if (scaling_info->src_rect.width == 0)
4126                 return -EINVAL;
4127
4128         scaling_info->src_rect.height = state->src_h >> 16;
4129         if (scaling_info->src_rect.height == 0)
4130                 return -EINVAL;
4131
4132         scaling_info->dst_rect.x = state->crtc_x;
4133         scaling_info->dst_rect.y = state->crtc_y;
4134
4135         if (state->crtc_w == 0)
4136                 return -EINVAL;
4137
4138         scaling_info->dst_rect.width = state->crtc_w;
4139
4140         if (state->crtc_h == 0)
4141                 return -EINVAL;
4142
4143         scaling_info->dst_rect.height = state->crtc_h;
4144
4145         /* DRM doesn't specify clipping on destination output. */
4146         scaling_info->clip_rect = scaling_info->dst_rect;
4147
4148         /* Validate scaling per-format with DC plane caps */
4149         if (state->plane && state->plane->dev && state->fb) {
4150                 get_min_max_dc_plane_scaling(state->plane->dev, state->fb,
4151                                              &min_downscale, &max_upscale);
4152         } else {
4153                 min_downscale = 250;
4154                 max_upscale = 16000;
4155         }
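
        /*
         * Scale factors are in units of 1/1000th: e.g. a 960-wide source
         * shown on a 1920-wide plane gives scale_w = 2000 (2.0x upscale),
         * which must lie within [min_downscale, max_upscale].
         */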
4156
4157         scale_w = scaling_info->dst_rect.width * 1000 /
4158                   scaling_info->src_rect.width;
4159
4160         if (scale_w < min_downscale || scale_w > max_upscale)
4161                 return -EINVAL;
4162
4163         scale_h = scaling_info->dst_rect.height * 1000 /
4164                   scaling_info->src_rect.height;
4165
4166         if (scale_h < min_downscale || scale_h > max_upscale)
4167                 return -EINVAL;
4168
4169         /*
4170          * The "scaling_quality" can be ignored for now, quality = 0 has DC
4171          * assume reasonable defaults based on the format.
4172          */
4173
4174         return 0;
4175 }
4176
4177 static void
4178 fill_gfx8_tiling_info_from_flags(union dc_tiling_info *tiling_info,
4179                                  uint64_t tiling_flags)
4180 {
4181         /* Fill GFX8 params */
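        /* AMDGPU_TILING_GET() extracts the named bitfield from the 64-bit tiling flags */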
4182         if (AMDGPU_TILING_GET(tiling_flags, ARRAY_MODE) == DC_ARRAY_2D_TILED_THIN1) {
4183                 unsigned int bankw, bankh, mtaspect, tile_split, num_banks;
4184
4185                 bankw = AMDGPU_TILING_GET(tiling_flags, BANK_WIDTH);
4186                 bankh = AMDGPU_TILING_GET(tiling_flags, BANK_HEIGHT);
4187                 mtaspect = AMDGPU_TILING_GET(tiling_flags, MACRO_TILE_ASPECT);
4188                 tile_split = AMDGPU_TILING_GET(tiling_flags, TILE_SPLIT);
4189                 num_banks = AMDGPU_TILING_GET(tiling_flags, NUM_BANKS);
4190
4191                 /* XXX fix me for VI */
4192                 tiling_info->gfx8.num_banks = num_banks;
4193                 tiling_info->gfx8.array_mode =
4194                                 DC_ARRAY_2D_TILED_THIN1;
4195                 tiling_info->gfx8.tile_split = tile_split;
4196                 tiling_info->gfx8.bank_width = bankw;
4197                 tiling_info->gfx8.bank_height = bankh;
4198                 tiling_info->gfx8.tile_aspect = mtaspect;
4199                 tiling_info->gfx8.tile_mode =
4200                                 DC_ADDR_SURF_MICRO_TILING_DISPLAY;
4201         } else if (AMDGPU_TILING_GET(tiling_flags, ARRAY_MODE)
4202                         == DC_ARRAY_1D_TILED_THIN1) {
4203                 tiling_info->gfx8.array_mode = DC_ARRAY_1D_TILED_THIN1;
4204         }
4205
4206         tiling_info->gfx8.pipe_config =
4207                         AMDGPU_TILING_GET(tiling_flags, PIPE_CONFIG);
4208 }
4209
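/*
 * Seed GFX9+ tiling info with the chip-wide GB_ADDR_CONFIG values.
 * num_pkrs only exists on gfx10.3 parts (Sienna Cichlid and newer).
 */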
4210 static void
4211 fill_gfx9_tiling_info_from_device(const struct amdgpu_device *adev,
4212                                   union dc_tiling_info *tiling_info)
4213 {
4214         tiling_info->gfx9.num_pipes =
4215                 adev->gfx.config.gb_addr_config_fields.num_pipes;
4216         tiling_info->gfx9.num_banks =
4217                 adev->gfx.config.gb_addr_config_fields.num_banks;
4218         tiling_info->gfx9.pipe_interleave =
4219                 adev->gfx.config.gb_addr_config_fields.pipe_interleave_size;
4220         tiling_info->gfx9.num_shader_engines =
4221                 adev->gfx.config.gb_addr_config_fields.num_se;
4222         tiling_info->gfx9.max_compressed_frags =
4223                 adev->gfx.config.gb_addr_config_fields.max_compress_frags;
4224         tiling_info->gfx9.num_rb_per_se =
4225                 adev->gfx.config.gb_addr_config_fields.num_rb_per_se;
4226         tiling_info->gfx9.shaderEnable = 1;
4227         if (adev->asic_type == CHIP_SIENNA_CICHLID ||
4228             adev->asic_type == CHIP_NAVY_FLOUNDER ||
4229             adev->asic_type == CHIP_DIMGREY_CAVEFISH ||
4230             adev->asic_type == CHIP_VANGOGH)
4231                 tiling_info->gfx9.num_pkrs = adev->gfx.config.gb_addr_config_fields.num_pkrs;
4232 }
4233
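/*
 * Validate a requested DCC configuration against what DC reports as
 * supported for this format, swizzle and scan direction.  Returns 0 when
 * DCC is disabled or usable, -EINVAL otherwise.
 */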
4234 static int
4235 validate_dcc(struct amdgpu_device *adev,
4236              const enum surface_pixel_format format,
4237              const enum dc_rotation_angle rotation,
4238              const union dc_tiling_info *tiling_info,
4239              const struct dc_plane_dcc_param *dcc,
4240              const struct dc_plane_address *address,
4241              const struct plane_size *plane_size)
4242 {
4243         struct dc *dc = adev->dm.dc;
4244         struct dc_dcc_surface_param input;
4245         struct dc_surface_dcc_cap output;
4246
4247         memset(&input, 0, sizeof(input));
4248         memset(&output, 0, sizeof(output));
4249
4250         if (!dcc->enable)
4251                 return 0;
4252
4253         if (format >= SURFACE_PIXEL_FORMAT_VIDEO_BEGIN ||
4254             !dc->cap_funcs.get_dcc_compression_cap)
4255                 return -EINVAL;
4256
4257         input.format = format;
4258         input.surface_size.width = plane_size->surface_size.width;
4259         input.surface_size.height = plane_size->surface_size.height;
4260         input.swizzle_mode = tiling_info->gfx9.swizzle;
4261
4262         if (rotation == ROTATION_ANGLE_0 || rotation == ROTATION_ANGLE_180)
4263                 input.scan = SCAN_DIRECTION_HORIZONTAL;
4264         else if (rotation == ROTATION_ANGLE_90 || rotation == ROTATION_ANGLE_270)
4265                 input.scan = SCAN_DIRECTION_VERTICAL;
4266
4267         if (!dc->cap_funcs.get_dcc_compression_cap(dc, &input, &output))
4268                 return -EINVAL;
4269
4270         if (!output.capable)
4271                 return -EINVAL;
4272
4273         if (dcc->independent_64b_blks == 0 &&
4274             output.grph.rgb.independent_64b_blks != 0)
4275                 return -EINVAL;
4276
4277         return 0;
4278 }
4279
4280 static bool
4281 modifier_has_dcc(uint64_t modifier)
4282 {
4283         return IS_AMD_FMT_MOD(modifier) && AMD_FMT_MOD_GET(DCC, modifier);
4284 }
4285
4286 static unsigned
4287 modifier_gfx9_swizzle_mode(uint64_t modifier)
4288 {
4289         if (modifier == DRM_FORMAT_MOD_LINEAR)
4290                 return 0;
4291
4292         return AMD_FMT_MOD_GET(TILE, modifier);
4293 }
4294
4295 static const struct drm_format_info *
4296 amd_get_format_info(const struct drm_mode_fb_cmd2 *cmd)
4297 {
4298         return amdgpu_lookup_format_info(cmd->pixel_format, cmd->modifier[0]);
4299 }
4300
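/*
 * Derive the tiling parameters encoded in an AMD format modifier.  Device
 * defaults are filled in first so that non-AMD (i.e. linear) modifiers
 * pass through unchanged.
 */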
4301 static void
4302 fill_gfx9_tiling_info_from_modifier(const struct amdgpu_device *adev,
4303                                     union dc_tiling_info *tiling_info,
4304                                     uint64_t modifier)
4305 {
4306         unsigned int mod_bank_xor_bits = AMD_FMT_MOD_GET(BANK_XOR_BITS, modifier);
4307         unsigned int mod_pipe_xor_bits = AMD_FMT_MOD_GET(PIPE_XOR_BITS, modifier);
4308         unsigned int pkrs_log2 = AMD_FMT_MOD_GET(PACKERS, modifier);
4309         unsigned int pipes_log2 = min(4u, mod_pipe_xor_bits);
4310
4311         fill_gfx9_tiling_info_from_device(adev, tiling_info);
4312
4313         if (!IS_AMD_FMT_MOD(modifier))
4314                 return;
4315
4316         tiling_info->gfx9.num_pipes = 1u << pipes_log2;
4317         tiling_info->gfx9.num_shader_engines = 1u << (mod_pipe_xor_bits - pipes_log2);
4318
4319         if (adev->family >= AMDGPU_FAMILY_NV) {
4320                 tiling_info->gfx9.num_pkrs = 1u << pkrs_log2;
4321         } else {
4322                 tiling_info->gfx9.num_banks = 1u << mod_bank_xor_bits;
4323
4324                 /* for DCC we know it isn't rb aligned, so rb_per_se doesn't matter. */
4325         }
4326 }
4327
4328 enum dm_micro_swizzle {
4329         MICRO_SWIZZLE_Z = 0,
4330         MICRO_SWIZZLE_S = 1,
4331         MICRO_SWIZZLE_D = 2,
4332         MICRO_SWIZZLE_R = 3
4333 };
4334
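/*
 * Plane hook: decide whether a format/modifier pair can be scanned out.
 * LINEAR and INVALID are always accepted; anything else must be on the
 * plane's modifier list and pass the per-family swizzle and DCC checks.
 */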
4335 static bool dm_plane_format_mod_supported(struct drm_plane *plane,
4336                                           uint32_t format,
4337                                           uint64_t modifier)
4338 {
4339         struct amdgpu_device *adev = drm_to_adev(plane->dev);
4340         const struct drm_format_info *info = drm_format_info(format);
4341         int i;
4342
4343         enum dm_micro_swizzle microtile = modifier_gfx9_swizzle_mode(modifier) & 3;
4344
4345         if (!info)
4346                 return false;
4347
4348         /*
4349          * We always have to allow these modifiers:
4350          * 1. Core DRM checks for LINEAR support if userspace does not provide modifiers.
4351          * 2. Not passing any modifiers is the same as explicitly passing INVALID.
4352          */
4353         if (modifier == DRM_FORMAT_MOD_LINEAR ||
4354             modifier == DRM_FORMAT_MOD_INVALID) {
4355                 return true;
4356         }
4357
4358         /* Check that the modifier is on the list of the plane's supported modifiers. */
4359         for (i = 0; i < plane->modifier_count; i++) {
4360                 if (modifier == plane->modifiers[i])
4361                         break;
4362         }
4363         if (i == plane->modifier_count)
4364                 return false;
4365
4366         /*
4367          * For D swizzle the canonical modifier depends on the bpp, so check
4368          * it here.
4369          */
4370         if (AMD_FMT_MOD_GET(TILE_VERSION, modifier) == AMD_FMT_MOD_TILE_VER_GFX9 &&
4371             adev->family >= AMDGPU_FAMILY_NV) {
4372                 if (microtile == MICRO_SWIZZLE_D && info->cpp[0] == 4)
4373                         return false;
4374         }
4375
4376         if (adev->family >= AMDGPU_FAMILY_RV && microtile == MICRO_SWIZZLE_D &&
4377             info->cpp[0] < 8)
4378                 return false;
4379
4380         if (modifier_has_dcc(modifier)) {
4381                 /* Per radeonsi comments 16/64 bpp are more complicated. */
4382                 if (info->cpp[0] != 4)
4383                         return false;
4384                 /* We support multi-planar formats, but not when combined with
4385                  * additional DCC metadata planes. */
4386                 if (info->num_planes > 1)
4387                         return false;
4388         }
4389
4390         return true;
4391 }
4392
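/*
 * Append one modifier to a kmalloc'd array, doubling its capacity when
 * full.  On allocation failure the list is freed and *mods set to NULL,
 * which get_plane_modifiers() turns into -ENOMEM.
 */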
4393 static void
4394 add_modifier(uint64_t **mods, uint64_t *size, uint64_t *cap, uint64_t mod)
4395 {
4396         if (!*mods)
4397                 return;
4398
4399         if (*cap - *size < 1) {
4400                 uint64_t new_cap = *cap * 2;
4401                 uint64_t *new_mods = kmalloc(new_cap * sizeof(uint64_t), GFP_KERNEL);
4402
4403                 if (!new_mods) {
4404                         kfree(*mods);
4405                         *mods = NULL;
4406                         return;
4407                 }
4408
4409                 memcpy(new_mods, *mods, sizeof(uint64_t) * *size);
4410                 kfree(*mods);
4411                 *mods = new_mods;
4412                 *cap = new_cap;
4413         }
4414
4415         (*mods)[*size] = mod;
4416         *size += 1;
4417 }
4418
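/*
 * Build the GFX9 (Vega/Raven) modifier list, most capable first.  DCC
 * variants are only exposed on the Raven family; constant encoding
 * additionally requires Raven2 (external rev 0x81) or a later ASIC.
 */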
4419 static void
4420 add_gfx9_modifiers(const struct amdgpu_device *adev,
4421                    uint64_t **mods, uint64_t *size, uint64_t *capacity)
4422 {
4423         int pipes = ilog2(adev->gfx.config.gb_addr_config_fields.num_pipes);
4424         int pipe_xor_bits = min(8, pipes +
4425                                 ilog2(adev->gfx.config.gb_addr_config_fields.num_se));
4426         int bank_xor_bits = min(8 - pipe_xor_bits,
4427                                 ilog2(adev->gfx.config.gb_addr_config_fields.num_banks));
4428         int rb = ilog2(adev->gfx.config.gb_addr_config_fields.num_se) +
4429                  ilog2(adev->gfx.config.gb_addr_config_fields.num_rb_per_se);
4430
4432         if (adev->family == AMDGPU_FAMILY_RV) {
4433                 /* Raven2 and later */
4434                 bool has_constant_encode = adev->asic_type > CHIP_RAVEN || adev->external_rev_id >= 0x81;
4435
4436                 /*
4437                  * No _D DCC swizzles yet because we only allow 32bpp, which
4438                  * doesn't support _D on DCN
4439                  */
4440
4441                 if (has_constant_encode) {
4442                         add_modifier(mods, size, capacity, AMD_FMT_MOD |
4443                                     AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S_X) |
4444                                     AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9) |
4445                                     AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
4446                                     AMD_FMT_MOD_SET(BANK_XOR_BITS, bank_xor_bits) |
4447                                     AMD_FMT_MOD_SET(DCC, 1) |
4448                                     AMD_FMT_MOD_SET(DCC_INDEPENDENT_64B, 1) |
4449                                     AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_64B) |
4450                                     AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 1));
4451                 }
4452
4453                 add_modifier(mods, size, capacity, AMD_FMT_MOD |
4454                             AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S_X) |
4455                             AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9) |
4456                             AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
4457                             AMD_FMT_MOD_SET(BANK_XOR_BITS, bank_xor_bits) |
4458                             AMD_FMT_MOD_SET(DCC, 1) |
4459                             AMD_FMT_MOD_SET(DCC_INDEPENDENT_64B, 1) |
4460                             AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_64B) |
4461                             AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 0));
4462
4463                 if (has_constant_encode) {
4464                         add_modifier(mods, size, capacity, AMD_FMT_MOD |
4465                                     AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S_X) |
4466                                     AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9) |
4467                                     AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
4468                                     AMD_FMT_MOD_SET(BANK_XOR_BITS, bank_xor_bits) |
4469                                     AMD_FMT_MOD_SET(DCC, 1) |
4470                                     AMD_FMT_MOD_SET(DCC_RETILE, 1) |
4471                                     AMD_FMT_MOD_SET(DCC_INDEPENDENT_64B, 1) |
4472                                     AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_64B) |
4474                                     AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 1) |
4475                                     AMD_FMT_MOD_SET(RB, rb) |
4476                                     AMD_FMT_MOD_SET(PIPE, pipes));
4477                 }
4478
4479                 add_modifier(mods, size, capacity, AMD_FMT_MOD |
4480                             AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S_X) |
4481                             AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9) |
4482                             AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
4483                             AMD_FMT_MOD_SET(BANK_XOR_BITS, bank_xor_bits) |
4484                             AMD_FMT_MOD_SET(DCC, 1) |
4485                             AMD_FMT_MOD_SET(DCC_RETILE, 1) |
4486                             AMD_FMT_MOD_SET(DCC_INDEPENDENT_64B, 1) |
4487                             AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_64B) |
4488                             AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 0) |
4489                             AMD_FMT_MOD_SET(RB, rb) |
4490                             AMD_FMT_MOD_SET(PIPE, pipes));
4491         }
4492
4493         /*
4494          * Only supported for 64bpp on Raven, will be filtered on format in
4495          * dm_plane_format_mod_supported.
4496          */
4497         add_modifier(mods, size, capacity, AMD_FMT_MOD |
4498                     AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_D_X) |
4499                     AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9) |
4500                     AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
4501                     AMD_FMT_MOD_SET(BANK_XOR_BITS, bank_xor_bits));
4502
4503         if (adev->family == AMDGPU_FAMILY_RV) {
4504                 add_modifier(mods, size, capacity, AMD_FMT_MOD |
4505                             AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S_X) |
4506                             AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9) |
4507                             AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
4508                             AMD_FMT_MOD_SET(BANK_XOR_BITS, bank_xor_bits));
4509         }
4510
4511         /*
4512          * Only supported for 64bpp on Raven, will be filtered on format in
4513          * dm_plane_format_mod_supported.
4514          */
4515         add_modifier(mods, size, capacity, AMD_FMT_MOD |
4516                     AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_D) |
4517                     AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9));
4518
4519         if (adev->family == AMDGPU_FAMILY_RV) {
4520                 add_modifier(mods, size, capacity, AMD_FMT_MOD |
4521                             AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S) |
4522                             AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9));
4523         }
4524 }
4525
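/*
 * GFX10.1 (Navi1x) list: R_X with DCC (direct and retiled), plain R_X
 * and S_X, then the GFX9 D/S swizzles kept as 64bpp fallbacks.
 */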
4526 static void
4527 add_gfx10_1_modifiers(const struct amdgpu_device *adev,
4528                       uint64_t **mods, uint64_t *size, uint64_t *capacity)
4529 {
4530         int pipe_xor_bits = ilog2(adev->gfx.config.gb_addr_config_fields.num_pipes);
4531
4532         add_modifier(mods, size, capacity, AMD_FMT_MOD |
4533                     AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_R_X) |
4534                     AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10) |
4535                     AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
4536                     AMD_FMT_MOD_SET(DCC, 1) |
4537                     AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 1) |
4538                     AMD_FMT_MOD_SET(DCC_INDEPENDENT_64B, 1) |
4539                     AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_64B));
4540
4541         add_modifier(mods, size, capacity, AMD_FMT_MOD |
4542                     AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_R_X) |
4543                     AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10) |
4544                     AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
4545                     AMD_FMT_MOD_SET(DCC, 1) |
4546                     AMD_FMT_MOD_SET(DCC_RETILE, 1) |
4547                     AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 1) |
4548                     AMD_FMT_MOD_SET(DCC_INDEPENDENT_64B, 1) |
4549                     AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_64B));
4550
4551         add_modifier(mods, size, capacity, AMD_FMT_MOD |
4552                     AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_R_X) |
4553                     AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10) |
4554                     AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits));
4555
4556         add_modifier(mods, size, capacity, AMD_FMT_MOD |
4557                     AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S_X) |
4558                     AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10) |
4559                     AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits));
4560
4562         /* Only supported for 64bpp, will be filtered in dm_plane_format_mod_supported */
4563         add_modifier(mods, size, capacity, AMD_FMT_MOD |
4564                     AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_D) |
4565                     AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9));
4566
4567         add_modifier(mods, size, capacity, AMD_FMT_MOD |
4568                     AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S) |
4569                     AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9));
4570 }
4571
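/*
 * GFX10.3 (Sienna Cichlid and newer) list: same shape as GFX10.1, but
 * with the PACKERS field and with both 64B and 128B independent blocks
 * set for the DCC variants.
 */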
4572 static void
4573 add_gfx10_3_modifiers(const struct amdgpu_device *adev,
4574                       uint64_t **mods, uint64_t *size, uint64_t *capacity)
4575 {
4576         int pipe_xor_bits = ilog2(adev->gfx.config.gb_addr_config_fields.num_pipes);
4577         int pkrs = ilog2(adev->gfx.config.gb_addr_config_fields.num_pkrs);
4578
4579         add_modifier(mods, size, capacity, AMD_FMT_MOD |
4580                     AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_R_X) |
4581                     AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10_RBPLUS) |
4582                     AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
4583                     AMD_FMT_MOD_SET(PACKERS, pkrs) |
4584                     AMD_FMT_MOD_SET(DCC, 1) |
4585                     AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 1) |
4586                     AMD_FMT_MOD_SET(DCC_INDEPENDENT_64B, 1) |
4587                     AMD_FMT_MOD_SET(DCC_INDEPENDENT_128B, 1) |
4588                     AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_64B));
4589
4590         add_modifier(mods, size, capacity, AMD_FMT_MOD |
4591                     AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_R_X) |
4592                     AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10_RBPLUS) |
4593                     AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
4594                     AMD_FMT_MOD_SET(PACKERS, pkrs) |
4595                     AMD_FMT_MOD_SET(DCC, 1) |
4596                     AMD_FMT_MOD_SET(DCC_RETILE, 1) |
4597                     AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 1) |
4598                     AMD_FMT_MOD_SET(DCC_INDEPENDENT_64B, 1) |
4599                     AMD_FMT_MOD_SET(DCC_INDEPENDENT_128B, 1) |
4600                     AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_64B));
4601
4602         add_modifier(mods, size, capacity, AMD_FMT_MOD |
4603                     AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_R_X) |
4604                     AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10_RBPLUS) |
4605                     AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
4606                     AMD_FMT_MOD_SET(PACKERS, pkrs));
4607
4608         add_modifier(mods, size, capacity, AMD_FMT_MOD |
4609                     AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S_X) |
4610                     AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10_RBPLUS) |
4611                     AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
4612                     AMD_FMT_MOD_SET(PACKERS, pkrs));
4613
4614         /* Only supported for 64bpp, will be filtered in dm_plane_format_mod_supported */
4615         add_modifier(mods, size, capacity, AMD_FMT_MOD |
4616                     AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_D) |
4617                     AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9));
4618
4619         add_modifier(mods, size, capacity, AMD_FMT_MOD |
4620                     AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S) |
4621                     AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9));
4622 }
4623
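/*
 * Assemble the modifier list advertised for a plane.  Cursor planes only
 * get LINEAR, pre-GFX9 parts get no list at all (legacy tiling flags are
 * used instead), and INVALID always terminates the array.
 */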
4624 static int
4625 get_plane_modifiers(const struct amdgpu_device *adev, unsigned int plane_type, uint64_t **mods)
4626 {
4627         uint64_t size = 0, capacity = 128;
4628         *mods = NULL;
4629
4630         /* We have not hooked up any pre-GFX9 modifiers. */
4631         if (adev->family < AMDGPU_FAMILY_AI)
4632                 return 0;
4633
4634         *mods = kmalloc(capacity * sizeof(uint64_t), GFP_KERNEL);
4635
4636         if (plane_type == DRM_PLANE_TYPE_CURSOR) {
4637                 add_modifier(mods, &size, &capacity, DRM_FORMAT_MOD_LINEAR);
4638                 add_modifier(mods, &size, &capacity, DRM_FORMAT_MOD_INVALID);
4639                 return *mods ? 0 : -ENOMEM;
4640         }
4641
4642         switch (adev->family) {
4643         case AMDGPU_FAMILY_AI:
4644         case AMDGPU_FAMILY_RV:
4645                 add_gfx9_modifiers(adev, mods, &size, &capacity);
4646                 break;
4647         case AMDGPU_FAMILY_NV:
4648         case AMDGPU_FAMILY_VGH:
4649                 if (adev->asic_type >= CHIP_SIENNA_CICHLID)
4650                         add_gfx10_3_modifiers(adev, mods, &size, &capacity);
4651                 else
4652                         add_gfx10_1_modifiers(adev, mods, &size, &capacity);
4653                 break;
4654         }
4655
4656         add_modifier(mods, &size, &capacity, DRM_FORMAT_MOD_LINEAR);
4657
4658         /* INVALID marks the end of the list. */
4659         add_modifier(mods, &size, &capacity, DRM_FORMAT_MOD_INVALID);
4660
4661         if (!*mods)
4662                 return -ENOMEM;
4663
4664         return 0;
4665 }
4666
4667 static int
4668 fill_gfx9_plane_attributes_from_modifiers(struct amdgpu_device *adev,
4669                                           const struct amdgpu_framebuffer *afb,
4670                                           const enum surface_pixel_format format,
4671                                           const enum dc_rotation_angle rotation,
4672                                           const struct plane_size *plane_size,
4673                                           union dc_tiling_info *tiling_info,
4674                                           struct dc_plane_dcc_param *dcc,
4675                                           struct dc_plane_address *address,
4676                                           const bool force_disable_dcc)
4677 {
4678         const uint64_t modifier = afb->base.modifier;
4679         int ret;
4680
4681         fill_gfx9_tiling_info_from_modifier(adev, tiling_info, modifier);
4682         tiling_info->gfx9.swizzle = modifier_gfx9_swizzle_mode(modifier);
4683
4684         if (modifier_has_dcc(modifier) && !force_disable_dcc) {
4685                 uint64_t dcc_address = afb->address + afb->base.offsets[1];
4686
4687                 dcc->enable = 1;
4688                 dcc->meta_pitch = afb->base.pitches[1];
4689                 dcc->independent_64b_blks = AMD_FMT_MOD_GET(DCC_INDEPENDENT_64B, modifier);
4690
4691                 address->grph.meta_addr.low_part = lower_32_bits(dcc_address);
4692                 address->grph.meta_addr.high_part = upper_32_bits(dcc_address);
4693         }
4694
4695         ret = validate_dcc(adev, format, rotation, tiling_info, dcc, address, plane_size);
4696         if (ret)
4697                 return ret;
4698
4699         return 0;
4700 }
4701
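/*
 * Fill DC's view of the framebuffer memory: surface/chroma sizes,
 * pitches and GPU addresses for graphics vs. video formats, followed by
 * the tiling/DCC description (modifier-based on GFX9+, flag-based
 * before that).
 */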
4702 static int
4703 fill_plane_buffer_attributes(struct amdgpu_device *adev,
4704                              const struct amdgpu_framebuffer *afb,
4705                              const enum surface_pixel_format format,
4706                              const enum dc_rotation_angle rotation,
4707                              const uint64_t tiling_flags,
4708                              union dc_tiling_info *tiling_info,
4709                              struct plane_size *plane_size,
4710                              struct dc_plane_dcc_param *dcc,
4711                              struct dc_plane_address *address,
4712                              bool tmz_surface,
4713                              bool force_disable_dcc)
4714 {
4715         const struct drm_framebuffer *fb = &afb->base;
4716         int ret;
4717
4718         memset(tiling_info, 0, sizeof(*tiling_info));
4719         memset(plane_size, 0, sizeof(*plane_size));
4720         memset(dcc, 0, sizeof(*dcc));
4721         memset(address, 0, sizeof(*address));
4722
4723         address->tmz_surface = tmz_surface;
4724
4725         if (format < SURFACE_PIXEL_FORMAT_VIDEO_BEGIN) {
4726                 uint64_t addr = afb->address + fb->offsets[0];
4727
4728                 plane_size->surface_size.x = 0;
4729                 plane_size->surface_size.y = 0;
4730                 plane_size->surface_size.width = fb->width;
4731                 plane_size->surface_size.height = fb->height;
4732                 plane_size->surface_pitch =
4733                         fb->pitches[0] / fb->format->cpp[0];
4734
4735                 address->type = PLN_ADDR_TYPE_GRAPHICS;
4736                 address->grph.addr.low_part = lower_32_bits(addr);
4737                 address->grph.addr.high_part = upper_32_bits(addr);
4738         } else if (format < SURFACE_PIXEL_FORMAT_INVALID) {
4739                 uint64_t luma_addr = afb->address + fb->offsets[0];
4740                 uint64_t chroma_addr = afb->address + fb->offsets[1];
4741
4742                 plane_size->surface_size.x = 0;
4743                 plane_size->surface_size.y = 0;
4744                 plane_size->surface_size.width = fb->width;
4745                 plane_size->surface_size.height = fb->height;
4746                 plane_size->surface_pitch =
4747                         fb->pitches[0] / fb->format->cpp[0];
4748
4749                 plane_size->chroma_size.x = 0;
4750                 plane_size->chroma_size.y = 0;
4751                 /* TODO: set these based on surface format */
4752                 plane_size->chroma_size.width = fb->width / 2;
4753                 plane_size->chroma_size.height = fb->height / 2;
4754
4755                 plane_size->chroma_pitch =
4756                         fb->pitches[1] / fb->format->cpp[1];
4757
4758                 address->type = PLN_ADDR_TYPE_VIDEO_PROGRESSIVE;
4759                 address->video_progressive.luma_addr.low_part =
4760                         lower_32_bits(luma_addr);
4761                 address->video_progressive.luma_addr.high_part =
4762                         upper_32_bits(luma_addr);
4763                 address->video_progressive.chroma_addr.low_part =
4764                         lower_32_bits(chroma_addr);
4765                 address->video_progressive.chroma_addr.high_part =
4766                         upper_32_bits(chroma_addr);
4767         }
4768
4769         if (adev->family >= AMDGPU_FAMILY_AI) {
4770                 ret = fill_gfx9_plane_attributes_from_modifiers(adev, afb, format,
4771                                                                 rotation, plane_size,
4772                                                                 tiling_info, dcc,
4773                                                                 address,
4774                                                                 force_disable_dcc);
4775                 if (ret)
4776                         return ret;
4777         } else {
4778                 fill_gfx8_tiling_info_from_flags(tiling_info, tiling_flags);
4779         }
4780
4781         return 0;
4782 }
4783
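/*
 * Map DRM blending state onto DC's flags.  Only overlay planes blend:
 * per-pixel alpha requires a premultiplied blend mode with an
 * alpha-capable format, and a plane alpha below 0xffff becomes an 8-bit
 * global alpha.
 */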
4784 static void
4785 fill_blending_from_plane_state(const struct drm_plane_state *plane_state,
4786                                bool *per_pixel_alpha, bool *global_alpha,
4787                                int *global_alpha_value)
4788 {
4789         *per_pixel_alpha = false;
4790         *global_alpha = false;
4791         *global_alpha_value = 0xff;
4792
4793         if (plane_state->plane->type != DRM_PLANE_TYPE_OVERLAY)
4794                 return;
4795
4796         if (plane_state->pixel_blend_mode == DRM_MODE_BLEND_PREMULTI) {
4797                 static const uint32_t alpha_formats[] = {
4798                         DRM_FORMAT_ARGB8888,
4799                         DRM_FORMAT_RGBA8888,
4800                         DRM_FORMAT_ABGR8888,
4801                 };
4802                 uint32_t format = plane_state->fb->format->format;
4803                 unsigned int i;
4804
4805                 for (i = 0; i < ARRAY_SIZE(alpha_formats); ++i) {
4806                         if (format == alpha_formats[i]) {
4807                                 *per_pixel_alpha = true;
4808                                 break;
4809                         }
4810                 }
4811         }
4812
4813         if (plane_state->alpha < 0xffff) {
4814                 *global_alpha = true;
4815                 *global_alpha_value = plane_state->alpha >> 8;
4816         }
4817 }
4818
4819 static int
4820 fill_plane_color_attributes(const struct drm_plane_state *plane_state,
4821                             const enum surface_pixel_format format,
4822                             enum dc_color_space *color_space)
4823 {
4824         bool full_range;
4825
4826         *color_space = COLOR_SPACE_SRGB;
4827
4828         /* DRM color properties only affect non-RGB formats. */
4829         if (format < SURFACE_PIXEL_FORMAT_VIDEO_BEGIN)
4830                 return 0;
4831
4832         full_range = (plane_state->color_range == DRM_COLOR_YCBCR_FULL_RANGE);
4833
4834         switch (plane_state->color_encoding) {
4835         case DRM_COLOR_YCBCR_BT601:
4836                 if (full_range)
4837                         *color_space = COLOR_SPACE_YCBCR601;
4838                 else
4839                         *color_space = COLOR_SPACE_YCBCR601_LIMITED;
4840                 break;
4841
4842         case DRM_COLOR_YCBCR_BT709:
4843                 if (full_range)
4844                         *color_space = COLOR_SPACE_YCBCR709;
4845                 else
4846                         *color_space = COLOR_SPACE_YCBCR709_LIMITED;
4847                 break;
4848
4849         case DRM_COLOR_YCBCR_BT2020:
4850                 if (full_range)
4851                         *color_space = COLOR_SPACE_2020_YCBCR;
4852                 else
4853                         return -EINVAL;
4854                 break;
4855
4856         default:
4857                 return -EINVAL;
4858         }
4859
4860         return 0;
4861 }
4862
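/*
 * Convert a drm_plane_state into DC's plane_info: pixel format,
 * rotation, colour space, buffer/tiling attributes and blending flags.
 */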
4863 static int
4864 fill_dc_plane_info_and_addr(struct amdgpu_device *adev,
4865                             const struct drm_plane_state *plane_state,
4866                             const uint64_t tiling_flags,
4867                             struct dc_plane_info *plane_info,
4868                             struct dc_plane_address *address,
4869                             bool tmz_surface,
4870                             bool force_disable_dcc)
4871 {
4872         const struct drm_framebuffer *fb = plane_state->fb;
4873         const struct amdgpu_framebuffer *afb =
4874                 to_amdgpu_framebuffer(plane_state->fb);
4875         int ret;
4876
4877         memset(plane_info, 0, sizeof(*plane_info));
4878
4879         switch (fb->format->format) {
4880         case DRM_FORMAT_C8:
4881                 plane_info->format =
4882                         SURFACE_PIXEL_FORMAT_GRPH_PALETA_256_COLORS;
4883                 break;
4884         case DRM_FORMAT_RGB565:
4885                 plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_RGB565;
4886                 break;
4887         case DRM_FORMAT_XRGB8888:
4888         case DRM_FORMAT_ARGB8888:
4889                 plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ARGB8888;
4890                 break;
4891         case DRM_FORMAT_XRGB2101010:
4892         case DRM_FORMAT_ARGB2101010:
4893                 plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ARGB2101010;
4894                 break;
4895         case DRM_FORMAT_XBGR2101010:
4896         case DRM_FORMAT_ABGR2101010:
4897                 plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ABGR2101010;
4898                 break;
4899         case DRM_FORMAT_XBGR8888:
4900         case DRM_FORMAT_ABGR8888:
4901                 plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ABGR8888;
4902                 break;
4903         case DRM_FORMAT_NV21:
4904                 plane_info->format = SURFACE_PIXEL_FORMAT_VIDEO_420_YCbCr;
4905                 break;
4906         case DRM_FORMAT_NV12:
4907                 plane_info->format = SURFACE_PIXEL_FORMAT_VIDEO_420_YCrCb;
4908                 break;
4909         case DRM_FORMAT_P010:
4910                 plane_info->format = SURFACE_PIXEL_FORMAT_VIDEO_420_10bpc_YCrCb;
4911                 break;
4912         case DRM_FORMAT_XRGB16161616F:
4913         case DRM_FORMAT_ARGB16161616F:
4914                 plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ARGB16161616F;
4915                 break;
4916         case DRM_FORMAT_XBGR16161616F:
4917         case DRM_FORMAT_ABGR16161616F:
4918                 plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ABGR16161616F;
4919                 break;
4920         default:
4921                 DRM_ERROR(
4922                         "Unsupported screen format %p4cc\n",
4923                         &fb->format->format);
4924                 return -EINVAL;
4925         }
4926
4927         switch (plane_state->rotation & DRM_MODE_ROTATE_MASK) {
4928         case DRM_MODE_ROTATE_0:
4929                 plane_info->rotation = ROTATION_ANGLE_0;
4930                 break;
4931         case DRM_MODE_ROTATE_90:
4932                 plane_info->rotation = ROTATION_ANGLE_90;
4933                 break;
4934         case DRM_MODE_ROTATE_180:
4935                 plane_info->rotation = ROTATION_ANGLE_180;
4936                 break;
4937         case DRM_MODE_ROTATE_270:
4938                 plane_info->rotation = ROTATION_ANGLE_270;
4939                 break;
4940         default:
4941                 plane_info->rotation = ROTATION_ANGLE_0;
4942                 break;
4943         }
4944
4945         plane_info->visible = true;
4946         plane_info->stereo_format = PLANE_STEREO_FORMAT_NONE;
4947
4948         plane_info->layer_index = 0;
4949
4950         ret = fill_plane_color_attributes(plane_state, plane_info->format,
4951                                           &plane_info->color_space);
4952         if (ret)
4953                 return ret;
4954
4955         ret = fill_plane_buffer_attributes(adev, afb, plane_info->format,
4956                                            plane_info->rotation, tiling_flags,
4957                                            &plane_info->tiling_info,
4958                                            &plane_info->plane_size,
4959                                            &plane_info->dcc, address, tmz_surface,
4960                                            force_disable_dcc);
4961         if (ret)
4962                 return ret;
4963
4964         fill_blending_from_plane_state(
4965                 plane_state, &plane_info->per_pixel_alpha,
4966                 &plane_info->global_alpha, &plane_info->global_alpha_value);
4967
4968         return 0;
4969 }
4970
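/*
 * Top-level plane conversion: populate a dc_plane_state from the DRM
 * plane and CRTC state, including scaling rects, buffer address and
 * colour management.  DCC is force-disabled on Raven while suspending.
 */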
4971 static int fill_dc_plane_attributes(struct amdgpu_device *adev,
4972                                     struct dc_plane_state *dc_plane_state,
4973                                     struct drm_plane_state *plane_state,
4974                                     struct drm_crtc_state *crtc_state)
4975 {
4976         struct dm_crtc_state *dm_crtc_state = to_dm_crtc_state(crtc_state);
4977         struct amdgpu_framebuffer *afb = (struct amdgpu_framebuffer *)plane_state->fb;
4978         struct dc_scaling_info scaling_info;
4979         struct dc_plane_info plane_info;
4980         int ret;
4981         bool force_disable_dcc = false;
4982
4983         ret = fill_dc_scaling_info(plane_state, &scaling_info);
4984         if (ret)
4985                 return ret;
4986
4987         dc_plane_state->src_rect = scaling_info.src_rect;
4988         dc_plane_state->dst_rect = scaling_info.dst_rect;
4989         dc_plane_state->clip_rect = scaling_info.clip_rect;
4990         dc_plane_state->scaling_quality = scaling_info.scaling_quality;
4991
4992         force_disable_dcc = adev->asic_type == CHIP_RAVEN && adev->in_suspend;
4993         ret = fill_dc_plane_info_and_addr(adev, plane_state,
4994                                           afb->tiling_flags,
4995                                           &plane_info,
4996                                           &dc_plane_state->address,
4997                                           afb->tmz_surface,
4998                                           force_disable_dcc);
4999         if (ret)
5000                 return ret;
5001
5002         dc_plane_state->format = plane_info.format;
5003         dc_plane_state->color_space = plane_info.color_space;
5005         dc_plane_state->plane_size = plane_info.plane_size;
5006         dc_plane_state->rotation = plane_info.rotation;
5007         dc_plane_state->horizontal_mirror = plane_info.horizontal_mirror;
5008         dc_plane_state->stereo_format = plane_info.stereo_format;
5009         dc_plane_state->tiling_info = plane_info.tiling_info;
5010         dc_plane_state->visible = plane_info.visible;
5011         dc_plane_state->per_pixel_alpha = plane_info.per_pixel_alpha;
5012         dc_plane_state->global_alpha = plane_info.global_alpha;
5013         dc_plane_state->global_alpha_value = plane_info.global_alpha_value;
5014         dc_plane_state->dcc = plane_info.dcc;
5015         dc_plane_state->layer_index = plane_info.layer_index; /* currently always 0 */
5016         dc_plane_state->flip_int_enabled = true;
5017
5018         /*
5019          * Always set input transfer function, since plane state is refreshed
5020          * every time.
5021          */
5022         ret = amdgpu_dm_update_plane_color_mgmt(dm_crtc_state, dc_plane_state);
5023         if (ret)
5024                 return ret;
5025
5026         return 0;
5027 }
5028
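/*
 * Compute the stream src/dst rects from the connector scaling property:
 * full-screen stretch by default, aspect-preserving fit for
 * RMX_ASPECT/RMX_OFF, 1:1 centering for RMX_CENTER, with optional
 * underscan borders applied on top.
 */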
5029 static void update_stream_scaling_settings(const struct drm_display_mode *mode,
5030                                            const struct dm_connector_state *dm_state,
5031                                            struct dc_stream_state *stream)
5032 {
5033         enum amdgpu_rmx_type rmx_type;
5034
5035         struct rect src = { 0 }; /* viewport in composition space */
5036         struct rect dst = { 0 }; /* stream addressable area */
5037
5038         /* No mode, nothing to be done. */
5039         if (!mode)
5040                 return;
5041
5042         /* Full screen scaling by default */
5043         src.width = mode->hdisplay;
5044         src.height = mode->vdisplay;
5045         dst.width = stream->timing.h_addressable;
5046         dst.height = stream->timing.v_addressable;
5047
5048         if (dm_state) {
5049                 rmx_type = dm_state->scaling;
5050                 if (rmx_type == RMX_ASPECT || rmx_type == RMX_OFF) {
5051                         if (src.width * dst.height <
5052                                         src.height * dst.width) {
5053                                 /* height needs less upscaling/more downscaling */
5054                                 dst.width = src.width *
5055                                                 dst.height / src.height;
5056                         } else {
5057                                 /* width needs less upscaling/more downscaling */
5058                                 dst.height = src.height *
5059                                                 dst.width / src.width;
5060                         }
5061                 } else if (rmx_type == RMX_CENTER) {
5062                         dst = src;
5063                 }
5064
5065                 dst.x = (stream->timing.h_addressable - dst.width) / 2;
5066                 dst.y = (stream->timing.v_addressable - dst.height) / 2;
5067
5068                 if (dm_state->underscan_enable) {
5069                         dst.x += dm_state->underscan_hborder / 2;
5070                         dst.y += dm_state->underscan_vborder / 2;
5071                         dst.width -= dm_state->underscan_hborder;
5072                         dst.height -= dm_state->underscan_vborder;
5073                 }
5074         }
5075
5076         stream->src = src;
5077         stream->dst = dst;
5078
5079         DRM_DEBUG_KMS("Destination Rectangle x:%d  y:%d  width:%d  height:%d\n",
5080                       dst.x, dst.y, dst.width, dst.height);
5082 }
5083
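/*
 * Pick a colour depth from the sink's EDID caps, capped by the
 * connector's requested bpc (rounded down to even).  YCbCr 4:2:0 uses
 * the HDMI HF-VSDB deep-colour bits instead of the base EDID bpc.
 */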
5084 static enum dc_color_depth
5085 convert_color_depth_from_display_info(const struct drm_connector *connector,
5086                                       bool is_y420, int requested_bpc)
5087 {
5088         uint8_t bpc;
5089
5090         if (is_y420) {
5091                 bpc = 8;
5092
5093                 /* Cap display bpc based on HDMI 2.0 HF-VSDB */
5094                 if (connector->display_info.hdmi.y420_dc_modes & DRM_EDID_YCBCR420_DC_48)
5095                         bpc = 16;
5096                 else if (connector->display_info.hdmi.y420_dc_modes & DRM_EDID_YCBCR420_DC_36)
5097                         bpc = 12;
5098                 else if (connector->display_info.hdmi.y420_dc_modes & DRM_EDID_YCBCR420_DC_30)
5099                         bpc = 10;
5100         } else {
5101                 bpc = (uint8_t)connector->display_info.bpc;
5102                 /* Assume 8 bpc by default if no bpc is specified. */
5103                 bpc = bpc ? bpc : 8;
5104         }
5105
5106         if (requested_bpc > 0) {
5107                 /*
5108                  * Cap display bpc based on the user requested value.
5109                  *
5110                  * The value for state->max_bpc may not be correctly updated
5111                  * depending on when the connector gets added to the state,
5112                  * or if this was called outside of an atomic check, so it
5113                  * can't be used directly.
5114                  */
5115                 bpc = min_t(u8, bpc, requested_bpc);
5116
5117                 /* Round down to the nearest even number. */
5118                 bpc = bpc - (bpc & 1);
5119         }
5120
5121         switch (bpc) {
5122         case 0:
5123                 /*
5124                  * Temporary workaround: DRM doesn't parse color depth for
5125                  * EDID revisions before 1.4.
5126                  * TODO: fix EDID parsing.
5127                  */
5128                 return COLOR_DEPTH_888;
5129         case 6:
5130                 return COLOR_DEPTH_666;
5131         case 8:
5132                 return COLOR_DEPTH_888;
5133         case 10:
5134                 return COLOR_DEPTH_101010;
5135         case 12:
5136                 return COLOR_DEPTH_121212;
5137         case 14:
5138                 return COLOR_DEPTH_141414;
5139         case 16:
5140                 return COLOR_DEPTH_161616;
5141         default:
5142                 return COLOR_DEPTH_UNDEFINED;
5143         }
5144 }
5145
5146 static enum dc_aspect_ratio
5147 get_aspect_ratio(const struct drm_display_mode *mode_in)
5148 {
5149         /* 1-1 mapping, since both enums follow the HDMI spec. */
5150         return (enum dc_aspect_ratio) mode_in->picture_aspect_ratio;
5151 }
5152
5153 static enum dc_color_space
5154 get_output_color_space(const struct dc_crtc_timing *dc_crtc_timing)
5155 {
5156         enum dc_color_space color_space = COLOR_SPACE_SRGB;
5157
5158         switch (dc_crtc_timing->pixel_encoding) {
5159         case PIXEL_ENCODING_YCBCR422:
5160         case PIXEL_ENCODING_YCBCR444:
5161         case PIXEL_ENCODING_YCBCR420:
5162         {
5163                 /*
5164                  * 27030 kHz is the separation point between HDTV and SDTV
5165                  * according to the HDMI spec; use YCbCr709 above it and
5166                  * YCbCr601 below it.
5167                  */
5168                 if (dc_crtc_timing->pix_clk_100hz > 270300) {
5169                         if (dc_crtc_timing->flags.Y_ONLY)
5170                                 color_space =
5171                                         COLOR_SPACE_YCBCR709_LIMITED;
5172                         else
5173                                 color_space = COLOR_SPACE_YCBCR709;
5174                 } else {
5175                         if (dc_crtc_timing->flags.Y_ONLY)
5176                                 color_space =
5177                                         COLOR_SPACE_YCBCR601_LIMITED;
5178                         else
5179                                 color_space = COLOR_SPACE_YCBCR601;
5180                 }
5181
5182         }
5183         break;
5184         case PIXEL_ENCODING_RGB:
5185                 color_space = COLOR_SPACE_SRGB;
5186                 break;
5187
5188         default:
5189                 WARN_ON(1);
5190                 break;
5191         }
5192
5193         return color_space;
5194 }
5195
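/*
 * Walk the colour depth down until the normalized pixel clock fits under
 * the sink's max TMDS clock: 4:2:0 halves the clock and deep colour
 * scales it by bpc/24.
 */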
5196 static bool adjust_colour_depth_from_display_info(
5197         struct dc_crtc_timing *timing_out,
5198         const struct drm_display_info *info)
5199 {
5200         enum dc_color_depth depth = timing_out->display_color_depth;
5201         int normalized_clk;
5202         do {
5203                 normalized_clk = timing_out->pix_clk_100hz / 10;
5204                 /* YCbCr 4:2:0 requires additional adjustment of 1/2 */
5205                 if (timing_out->pixel_encoding == PIXEL_ENCODING_YCBCR420)
5206                         normalized_clk /= 2;
5207                 /* Adjust the pixel clock per the HDMI spec based on colour depth */
5208                 switch (depth) {
5209                 case COLOR_DEPTH_888:
5210                         break;
5211                 case COLOR_DEPTH_101010:
5212                         normalized_clk = (normalized_clk * 30) / 24;
5213                         break;
5214                 case COLOR_DEPTH_121212:
5215                         normalized_clk = (normalized_clk * 36) / 24;
5216                         break;
5217                 case COLOR_DEPTH_161616:
5218                         normalized_clk = (normalized_clk * 48) / 24;
5219                         break;
5220                 default:
5221                         /* The above depths are the only ones valid for HDMI. */
5222                         return false;
5223                 }
5224                 if (normalized_clk <= info->max_tmds_clock) {
5225                         timing_out->display_color_depth = depth;
5226                         return true;
5227                 }
5228         } while (--depth > COLOR_DEPTH_666);
5229         return false;
5230 }
5231
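/*
 * Build the DC CRTC timing from a DRM display mode: pixel encoding
 * (4:2:0/4:4:4 where required or advertised), colour depth, VIC and
 * HDMI VIC from the infoframes, plus the raw timing values (freesync
 * video modes use the non-CRTC fields).
 */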
5232 static void fill_stream_properties_from_drm_display_mode(
5233         struct dc_stream_state *stream,
5234         const struct drm_display_mode *mode_in,
5235         const struct drm_connector *connector,
5236         const struct drm_connector_state *connector_state,
5237         const struct dc_stream_state *old_stream,
5238         int requested_bpc)
5239 {
5240         struct dc_crtc_timing *timing_out = &stream->timing;
5241         const struct drm_display_info *info = &connector->display_info;
5242         struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
5243         struct hdmi_vendor_infoframe hv_frame;
5244         struct hdmi_avi_infoframe avi_frame;
5245
5246         memset(&hv_frame, 0, sizeof(hv_frame));
5247         memset(&avi_frame, 0, sizeof(avi_frame));
5248
5249         timing_out->h_border_left = 0;
5250         timing_out->h_border_right = 0;
5251         timing_out->v_border_top = 0;
5252         timing_out->v_border_bottom = 0;
5253         /* TODO: un-hardcode */
5254         if (drm_mode_is_420_only(info, mode_in)
5255                         && stream->signal == SIGNAL_TYPE_HDMI_TYPE_A)
5256                 timing_out->pixel_encoding = PIXEL_ENCODING_YCBCR420;
5257         else if (drm_mode_is_420_also(info, mode_in)
5258                         && aconnector->force_yuv420_output)
5259                 timing_out->pixel_encoding = PIXEL_ENCODING_YCBCR420;
5260         else if ((connector->display_info.color_formats & DRM_COLOR_FORMAT_YCRCB444)
5261                         && stream->signal == SIGNAL_TYPE_HDMI_TYPE_A)
5262                 timing_out->pixel_encoding = PIXEL_ENCODING_YCBCR444;
5263         else
5264                 timing_out->pixel_encoding = PIXEL_ENCODING_RGB;
5265
5266         timing_out->timing_3d_format = TIMING_3D_FORMAT_NONE;
5267         timing_out->display_color_depth = convert_color_depth_from_display_info(
5268                 connector,
5269                 (timing_out->pixel_encoding == PIXEL_ENCODING_YCBCR420),
5270                 requested_bpc);
5271         timing_out->scan_type = SCANNING_TYPE_NODATA;
5272         timing_out->hdmi_vic = 0;
5273
5274         if (old_stream) {
5275                 timing_out->vic = old_stream->timing.vic;
5276                 timing_out->flags.HSYNC_POSITIVE_POLARITY = old_stream->timing.flags.HSYNC_POSITIVE_POLARITY;
5277                 timing_out->flags.VSYNC_POSITIVE_POLARITY = old_stream->timing.flags.VSYNC_POSITIVE_POLARITY;
5278         } else {
5279                 timing_out->vic = drm_match_cea_mode(mode_in);
5280                 if (mode_in->flags & DRM_MODE_FLAG_PHSYNC)
5281                         timing_out->flags.HSYNC_POSITIVE_POLARITY = 1;
5282                 if (mode_in->flags & DRM_MODE_FLAG_PVSYNC)
5283                         timing_out->flags.VSYNC_POSITIVE_POLARITY = 1;
5284         }
5285
5286         if (stream->signal == SIGNAL_TYPE_HDMI_TYPE_A) {
5287                 drm_hdmi_avi_infoframe_from_display_mode(&avi_frame, (struct drm_connector *)connector, mode_in);
5288                 timing_out->vic = avi_frame.video_code;
5289                 drm_hdmi_vendor_infoframe_from_display_mode(&hv_frame, (struct drm_connector *)connector, mode_in);
5290                 timing_out->hdmi_vic = hv_frame.vic;
5291         }
5292
5293         if (is_freesync_video_mode(mode_in, aconnector)) {
5294                 timing_out->h_addressable = mode_in->hdisplay;
5295                 timing_out->h_total = mode_in->htotal;
5296                 timing_out->h_sync_width = mode_in->hsync_end - mode_in->hsync_start;
5297                 timing_out->h_front_porch = mode_in->hsync_start - mode_in->hdisplay;
5298                 timing_out->v_total = mode_in->vtotal;
5299                 timing_out->v_addressable = mode_in->vdisplay;
5300                 timing_out->v_front_porch = mode_in->vsync_start - mode_in->vdisplay;
5301                 timing_out->v_sync_width = mode_in->vsync_end - mode_in->vsync_start;
5302                 timing_out->pix_clk_100hz = mode_in->clock * 10;
5303         } else {
5304                 timing_out->h_addressable = mode_in->crtc_hdisplay;
5305                 timing_out->h_total = mode_in->crtc_htotal;
5306                 timing_out->h_sync_width = mode_in->crtc_hsync_end - mode_in->crtc_hsync_start;
5307                 timing_out->h_front_porch = mode_in->crtc_hsync_start - mode_in->crtc_hdisplay;
5308                 timing_out->v_total = mode_in->crtc_vtotal;
5309                 timing_out->v_addressable = mode_in->crtc_vdisplay;
5310                 timing_out->v_front_porch = mode_in->crtc_vsync_start - mode_in->crtc_vdisplay;
5311                 timing_out->v_sync_width = mode_in->crtc_vsync_end - mode_in->crtc_vsync_start;
5312                 timing_out->pix_clk_100hz = mode_in->crtc_clock * 10;
5313         }
5314
5315         timing_out->aspect_ratio = get_aspect_ratio(mode_in);
5316
5317         stream->output_color_space = get_output_color_space(timing_out);
5318
5319         stream->out_transfer_func->type = TF_TYPE_PREDEFINED;
5320         stream->out_transfer_func->tf = TRANSFER_FUNCTION_SRGB;
5321         if (stream->signal == SIGNAL_TYPE_HDMI_TYPE_A) {
5322                 if (!adjust_colour_depth_from_display_info(timing_out, info) &&
5323                     drm_mode_is_420_also(info, mode_in) &&
5324                     timing_out->pixel_encoding != PIXEL_ENCODING_YCBCR420) {
5325                         timing_out->pixel_encoding = PIXEL_ENCODING_YCBCR420;
5326                         adjust_colour_depth_from_display_info(timing_out, info);
5327                 }
5328         }
5329 }
5330
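/*
 * Translate the sink's EDID audio capabilities (CEA revision 3+) into
 * DC's audio_info, including per-mode format, channel count, sample
 * rates/size and the speaker allocation flags.
 */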
5331 static void fill_audio_info(struct audio_info *audio_info,
5332                             const struct drm_connector *drm_connector,
5333                             const struct dc_sink *dc_sink)
5334 {
5335         int i = 0;
5336         int cea_revision = 0;
5337         const struct dc_edid_caps *edid_caps = &dc_sink->edid_caps;
5338
5339         audio_info->manufacture_id = edid_caps->manufacturer_id;
5340         audio_info->product_id = edid_caps->product_id;
5341
5342         cea_revision = drm_connector->display_info.cea_rev;
5343
5344         strscpy(audio_info->display_name,
5345                 edid_caps->display_name,
5346                 AUDIO_INFO_DISPLAY_NAME_SIZE_IN_CHARS);
5347
5348         if (cea_revision >= 3) {
5349                 audio_info->mode_count = edid_caps->audio_mode_count;
5350
5351                 for (i = 0; i < audio_info->mode_count; ++i) {
5352                         audio_info->modes[i].format_code =
5353                                         (enum audio_format_code)
5354                                         (edid_caps->audio_modes[i].format_code);
5355                         audio_info->modes[i].channel_count =
5356                                         edid_caps->audio_modes[i].channel_count;
5357                         audio_info->modes[i].sample_rates.all =
5358                                         edid_caps->audio_modes[i].sample_rate;
5359                         audio_info->modes[i].sample_size =
5360                                         edid_caps->audio_modes[i].sample_size;
5361                 }
5362         }
5363
5364         audio_info->flags.all = edid_caps->speaker_flags;
5365
5366         /* TODO: we only check the progressive mode; check the interlace mode too */
5367         if (drm_connector->latency_present[0]) {
5368                 audio_info->video_latency = drm_connector->video_latency[0];
5369                 audio_info->audio_latency = drm_connector->audio_latency[0];
5370         }
5371
5372         /* TODO: For DP, video and audio latency should be calculated from DPCD caps */
5373
5374 }
5375
5376 static void
5377 copy_crtc_timing_for_drm_display_mode(const struct drm_display_mode *src_mode,
5378                                       struct drm_display_mode *dst_mode)
5379 {
5380         dst_mode->crtc_hdisplay = src_mode->crtc_hdisplay;
5381         dst_mode->crtc_vdisplay = src_mode->crtc_vdisplay;
5382         dst_mode->crtc_clock = src_mode->crtc_clock;
5383         dst_mode->crtc_hblank_start = src_mode->crtc_hblank_start;
5384         dst_mode->crtc_hblank_end = src_mode->crtc_hblank_end;
5385         dst_mode->crtc_hsync_start =  src_mode->crtc_hsync_start;
5386         dst_mode->crtc_hsync_end = src_mode->crtc_hsync_end;
5387         dst_mode->crtc_htotal = src_mode->crtc_htotal;
5388         dst_mode->crtc_hskew = src_mode->crtc_hskew;
5389         dst_mode->crtc_vblank_start = src_mode->crtc_vblank_start;
5390         dst_mode->crtc_vblank_end = src_mode->crtc_vblank_end;
5391         dst_mode->crtc_vsync_start = src_mode->crtc_vsync_start;
5392         dst_mode->crtc_vsync_end = src_mode->crtc_vsync_end;
5393         dst_mode->crtc_vtotal = src_mode->crtc_vtotal;
5394 }
5395
5396 static void
5397 decide_crtc_timing_for_drm_display_mode(struct drm_display_mode *drm_mode,
5398                                         const struct drm_display_mode *native_mode,
5399                                         bool scale_enabled)
5400 {
5401         if (scale_enabled) {
5402                 copy_crtc_timing_for_drm_display_mode(native_mode, drm_mode);
5403         } else if (native_mode->clock == drm_mode->clock &&
5404                         native_mode->htotal == drm_mode->htotal &&
5405                         native_mode->vtotal == drm_mode->vtotal) {
5406                 copy_crtc_timing_for_drm_display_mode(native_mode, drm_mode);
5407         } else {
5408                 /* no scaling and no amdgpu-inserted mode; nothing to patch */
5409         }
5410 }
5411
5412 static struct dc_sink *
5413 create_fake_sink(struct amdgpu_dm_connector *aconnector)
5414 {
5415         struct dc_sink_init_data sink_init_data = { 0 };
5416         struct dc_sink *sink = NULL;
5417         sink_init_data.link = aconnector->dc_link;
5418         sink_init_data.sink_signal = aconnector->dc_link->connector_signal;
5419
5420         sink = dc_sink_create(&sink_init_data);
5421         if (!sink) {
5422                 DRM_ERROR("Failed to create sink!\n");
5423                 return NULL;
5424         }
5425         sink->sink_signal = SIGNAL_TYPE_VIRTUAL;
5426
5427         return sink;
5428 }
5429
5430 static void set_multisync_trigger_params(
5431                 struct dc_stream_state *stream)
5432 {
5433         struct dc_stream_state *master = NULL;
5434
5435         if (stream->triggered_crtc_reset.enabled) {
5436                 master = stream->triggered_crtc_reset.event_source;
5437                 stream->triggered_crtc_reset.event =
5438                         master->timing.flags.VSYNC_POSITIVE_POLARITY ?
5439                         CRTC_EVENT_VSYNC_RISING : CRTC_EVENT_VSYNC_FALLING;
5440                 stream->triggered_crtc_reset.delay = TRIGGER_DELAY_NEXT_PIXEL;
5441         }
5442 }
5443
5444 static void set_master_stream(struct dc_stream_state *stream_set[],
5445                               int stream_count)
5446 {
5447         int j, highest_rfr = 0, master_stream = 0;
5448
5449         for (j = 0;  j < stream_count; j++) {
5450                 if (stream_set[j] && stream_set[j]->triggered_crtc_reset.enabled) {
5451                         int refresh_rate = 0;
5452
5453                         refresh_rate = (stream_set[j]->timing.pix_clk_100hz * 100) /
5454                                 (stream_set[j]->timing.h_total * stream_set[j]->timing.v_total);
5455                         if (refresh_rate > highest_rfr) {
5456                                 highest_rfr = refresh_rate;
5457                                 master_stream = j;
5458                         }
5459                 }
5460         }
5461         for (j = 0;  j < stream_count; j++) {
5462                 if (stream_set[j])
5463                         stream_set[j]->triggered_crtc_reset.event_source = stream_set[master_stream];
5464         }
5465 }
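
/*
 * Annotation, not driver code: a worked example of the refresh-rate
 * computation in set_master_stream() above, using hypothetical CEA-861
 * 1080p60 raster values. Guarded out with #if 0.
 */
#if 0
static int example_refresh_rate_hz(void)
{
        unsigned int pix_clk_100hz = 1485000;   /* 148.5 MHz pixel clock */
        unsigned int h_total = 2200;            /* horizontal total, pixels */
        unsigned int v_total = 1125;            /* vertical total, lines */

        /* (1485000 * 100) / (2200 * 1125) = 148500000 / 2475000 = 60 Hz */
        return (pix_clk_100hz * 100) / (h_total * v_total);
}
#endif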
5466
5467 static void dm_enable_per_frame_crtc_master_sync(struct dc_state *context)
5468 {
5469         int i = 0;
5470         struct dc_stream_state *stream;
5471
5472         if (context->stream_count < 2)
5473                 return;
5474         for (i = 0; i < context->stream_count ; i++) {
5475                 if (!context->streams[i])
5476                         continue;
5477                 /*
5478                  * TODO: add a function to read AMD VSDB bits and set
5479                  * crtc_sync_master.multi_sync_enabled flag
5480                  * For now it's set to false
5481                  */
5482         }
5483
5484         set_master_stream(context->streams, context->stream_count);
5485
5486         for (i = 0; i < context->stream_count ; i++) {
5487                 stream = context->streams[i];
5488
5489                 if (!stream)
5490                         continue;
5491
5492                 set_multisync_trigger_params(stream);
5493         }
5494 }
5495
5496 static struct drm_display_mode *
5497 get_highest_refresh_rate_mode(struct amdgpu_dm_connector *aconnector,
5498                           bool use_probed_modes)
5499 {
5500         struct drm_display_mode *m, *m_pref = NULL;
5501         u16 current_refresh, highest_refresh;
5502         struct list_head *list_head = use_probed_modes ?
5503                                                     &aconnector->base.probed_modes :
5504                                                     &aconnector->base.modes;
5505
5506         if (aconnector->freesync_vid_base.clock != 0)
5507                 return &aconnector->freesync_vid_base;
5508
5509         /* Find the preferred mode */
5510         list_for_each_entry(m, list_head, head) {
5511                 if (m->type & DRM_MODE_TYPE_PREFERRED) {
5512                         m_pref = m;
5513                         break;
5514                 }
5515         }
5516
5517         if (!m_pref) {
5518                 /* Probably an EDID with no preferred mode. Fall back to first entry */
5519                 m_pref = list_first_entry_or_null(
5520                         &aconnector->base.modes, struct drm_display_mode, head);
5521                 if (!m_pref) {
5522                         DRM_DEBUG_DRIVER("No preferred mode found in EDID\n");
5523                         return NULL;
5524                 }
5525         }
5526
5527         highest_refresh = drm_mode_vrefresh(m_pref);
5528
5529         /*
5530          * Find the mode with highest refresh rate with same resolution.
5531          * For some monitors, preferred mode is not the mode with highest
5532          * supported refresh rate.
5533          */
5534         list_for_each_entry(m, list_head, head) {
5535                 current_refresh  = drm_mode_vrefresh(m);
5536
5537                 if (m->hdisplay == m_pref->hdisplay &&
5538                     m->vdisplay == m_pref->vdisplay &&
5539                     highest_refresh < current_refresh) {
5540                         highest_refresh = current_refresh;
5541                         m_pref = m;
5542                 }
5543         }
5544
5545         aconnector->freesync_vid_base = *m_pref;
5546         return m_pref;
5547 }
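
/*
 * Annotation, not driver code: the selection rule used by
 * get_highest_refresh_rate_mode() above, reduced to a minimal sketch
 * over a hypothetical flattened mode list. Guarded out with #if 0.
 */
#if 0
struct example_mode { int hdisplay, vdisplay, vrefresh; };

static const struct example_mode *
example_pick_highest(const struct example_mode *modes, int count,
                     const struct example_mode *pref)
{
        int i;

        /* Keep the preferred resolution, chase the highest refresh rate. */
        for (i = 0; i < count; i++)
                if (modes[i].hdisplay == pref->hdisplay &&
                    modes[i].vdisplay == pref->vdisplay &&
                    modes[i].vrefresh > pref->vrefresh)
                        pref = &modes[i];
        return pref;
}
#endif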
5548
5549 static bool is_freesync_video_mode(const struct drm_display_mode *mode,
5550                                    struct amdgpu_dm_connector *aconnector)
5551 {
5552         struct drm_display_mode *high_mode;
5553         int timing_diff;
5554
5555         high_mode = get_highest_refresh_rate_mode(aconnector, false);
5556         if (!high_mode || !mode)
5557                 return false;
5558
5559         timing_diff = high_mode->vtotal - mode->vtotal;
5560
5561         if (high_mode->clock == 0 || high_mode->clock != mode->clock ||
5562             high_mode->hdisplay != mode->hdisplay ||
5563             high_mode->vdisplay != mode->vdisplay ||
5564             high_mode->hsync_start != mode->hsync_start ||
5565             high_mode->hsync_end != mode->hsync_end ||
5566             high_mode->htotal != mode->htotal ||
5567             high_mode->hskew != mode->hskew ||
5568             high_mode->vscan != mode->vscan ||
5569             high_mode->vsync_start - mode->vsync_start != timing_diff ||
5570             high_mode->vsync_end - mode->vsync_end != timing_diff)
5571                 return false;
5572
5573         return true;
5574 }
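
/*
 * Annotation, not driver code: the invariant is_freesync_video_mode()
 * checks above is that a freesync video mode equals the base mode with
 * a stretched vertical blank, so vsync_start, vsync_end and vtotal all
 * shift by the same delta while the pixel clock and horizontal timing
 * stay identical. A minimal sketch with hypothetical timings, guarded
 * out with #if 0.
 */
#if 0
struct example_vt { int clock, vtotal, vsync_start, vsync_end; };

static bool example_same_base_timing(const struct example_vt *high,
                                     const struct example_vt *m)
{
        int diff = high->vtotal - m->vtotal;

        /* e.g. a high-refresh base mode vs. the same mode replayed with
         * a doubled vtotal: diff is negative and both vsync edges must
         * move by exactly that amount. */
        return high->clock == m->clock &&
               high->vsync_start - m->vsync_start == diff &&
               high->vsync_end - m->vsync_end == diff;
}
#endif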
5575
5576 static struct dc_stream_state *
5577 create_stream_for_sink(struct amdgpu_dm_connector *aconnector,
5578                        const struct drm_display_mode *drm_mode,
5579                        const struct dm_connector_state *dm_state,
5580                        const struct dc_stream_state *old_stream,
5581                        int requested_bpc)
5582 {
5583         struct drm_display_mode *preferred_mode = NULL;
5584         struct drm_connector *drm_connector;
5585         const struct drm_connector_state *con_state =
5586                 dm_state ? &dm_state->base : NULL;
5587         struct dc_stream_state *stream = NULL;
5588         struct drm_display_mode mode = *drm_mode;
5589         struct drm_display_mode saved_mode;
5590         struct drm_display_mode *freesync_mode = NULL;
5591         bool native_mode_found = false;
5592         bool recalculate_timing = dm_state ? (dm_state->scaling != RMX_OFF) : false;
5593         int mode_refresh;
5594         int preferred_refresh = 0;
5595 #if defined(CONFIG_DRM_AMD_DC_DCN)
5596         struct dsc_dec_dpcd_caps dsc_caps;
5597         uint32_t link_bandwidth_kbps;
5598 #endif
5599         struct dc_sink *sink = NULL;
5600
5601         memset(&saved_mode, 0, sizeof(saved_mode));
5602
5603         if (aconnector == NULL) {
5604                 DRM_ERROR("aconnector is NULL!\n");
5605                 return stream;
5606         }
5607
5608         drm_connector = &aconnector->base;
5609
5610         if (!aconnector->dc_sink) {
5611                 sink = create_fake_sink(aconnector);
5612                 if (!sink)
5613                         return stream;
5614         } else {
5615                 sink = aconnector->dc_sink;
5616                 dc_sink_retain(sink);
5617         }
5618
5619         stream = dc_create_stream_for_sink(sink);
5620
5621         if (stream == NULL) {
5622                 DRM_ERROR("Failed to create stream for sink!\n");
5623                 goto finish;
5624         }
5625
5626         stream->dm_stream_context = aconnector;
5627
5628         stream->timing.flags.LTE_340MCSC_SCRAMBLE =
5629                 drm_connector->display_info.hdmi.scdc.scrambling.low_rates;
5630
5631         list_for_each_entry(preferred_mode, &aconnector->base.modes, head) {
5632                 /* Search for preferred mode */
5633                 if (preferred_mode->type & DRM_MODE_TYPE_PREFERRED) {
5634                         native_mode_found = true;
5635                         break;
5636                 }
5637         }
5638         if (!native_mode_found)
5639                 preferred_mode = list_first_entry_or_null(
5640                                 &aconnector->base.modes,
5641                                 struct drm_display_mode,
5642                                 head);
5643
5644         mode_refresh = drm_mode_vrefresh(&mode);
5645
5646         if (preferred_mode == NULL) {
5647                 /*
5648                  * This may not be an error. The use case is when we have no
5649                  * usermode calls to reset and set the mode upon hotplug. In
5650                  * this case, we call set mode ourselves to restore the
5651                  * previous mode, and the mode list may not be filled in yet.
5652                  */
5653                 DRM_DEBUG_DRIVER("No preferred mode found\n");
5654         } else {
5655                 recalculate_timing |= amdgpu_freesync_vid_mode &&
5656                                  is_freesync_video_mode(&mode, aconnector);
5657                 if (recalculate_timing) {
5658                         freesync_mode = get_highest_refresh_rate_mode(aconnector, false);
5659                         saved_mode = mode;
5660                         mode = *freesync_mode;
5661                 } else {
5662                         decide_crtc_timing_for_drm_display_mode(
5663                                 &mode, preferred_mode,
5664                                 dm_state ? (dm_state->scaling != RMX_OFF) : false);
5665                 }
5666
5667                 preferred_refresh = drm_mode_vrefresh(preferred_mode);
5668         }
5669
5670         if (recalculate_timing)
5671                 drm_mode_set_crtcinfo(&saved_mode, 0);
5672         else if (!dm_state)
5673                 drm_mode_set_crtcinfo(&mode, 0);
5674
5675         /*
5676          * If scaling is enabled and the refresh rate didn't change,
5677          * we copy the VIC and polarities of the old timings.
5678          */
5679         if (!recalculate_timing || mode_refresh != preferred_refresh)
5680                 fill_stream_properties_from_drm_display_mode(
5681                         stream, &mode, &aconnector->base, con_state, NULL,
5682                         requested_bpc);
5683         else
5684                 fill_stream_properties_from_drm_display_mode(
5685                         stream, &mode, &aconnector->base, con_state, old_stream,
5686                         requested_bpc);
5687
5688         stream->timing.flags.DSC = 0;
5689
5690         if (aconnector->dc_link && sink->sink_signal == SIGNAL_TYPE_DISPLAY_PORT) {
5691 #if defined(CONFIG_DRM_AMD_DC_DCN)
5692                 dc_dsc_parse_dsc_dpcd(aconnector->dc_link->ctx->dc,
5693                                       aconnector->dc_link->dpcd_caps.dsc_caps.dsc_basic_caps.raw,
5694                                       aconnector->dc_link->dpcd_caps.dsc_caps.dsc_branch_decoder_caps.raw,
5695                                       &dsc_caps);
5696                 link_bandwidth_kbps = dc_link_bandwidth_kbps(aconnector->dc_link,
5697                                                              dc_link_get_link_cap(aconnector->dc_link));
5698
5699                 if (aconnector->dsc_settings.dsc_force_enable != DSC_CLK_FORCE_DISABLE && dsc_caps.is_dsc_supported) {
5700                         /* Set DSC policy according to dsc_clock_en */
5701                         dc_dsc_policy_set_enable_dsc_when_not_needed(
5702                                 aconnector->dsc_settings.dsc_force_enable == DSC_CLK_FORCE_ENABLE);
5703
5704                         if (dc_dsc_compute_config(aconnector->dc_link->ctx->dc->res_pool->dscs[0],
5705                                                   &dsc_caps,
5706                                                   aconnector->dc_link->ctx->dc->debug.dsc_min_slice_height_override,
5707                                                   0,
5708                                                   link_bandwidth_kbps,
5709                                                   &stream->timing,
5710                                                   &stream->timing.dsc_cfg))
5711                                 stream->timing.flags.DSC = 1;
5712                         /* Overwrite the stream flag if DSC is enabled through debugfs */
5713                         if (aconnector->dsc_settings.dsc_force_enable == DSC_CLK_FORCE_ENABLE)
5714                                 stream->timing.flags.DSC = 1;
5715
5716                         if (stream->timing.flags.DSC && aconnector->dsc_settings.dsc_num_slices_h)
5717                                 stream->timing.dsc_cfg.num_slices_h = aconnector->dsc_settings.dsc_num_slices_h;
5718
5719                         if (stream->timing.flags.DSC && aconnector->dsc_settings.dsc_num_slices_v)
5720                                 stream->timing.dsc_cfg.num_slices_v = aconnector->dsc_settings.dsc_num_slices_v;
5721
5722                         if (stream->timing.flags.DSC && aconnector->dsc_settings.dsc_bits_per_pixel)
5723                                 stream->timing.dsc_cfg.bits_per_pixel = aconnector->dsc_settings.dsc_bits_per_pixel;
5724                 }
5725 #endif
5726         }
5727
5728         update_stream_scaling_settings(&mode, dm_state, stream);
5729
5730         fill_audio_info(
5731                 &stream->audio_info,
5732                 drm_connector,
5733                 sink);
5734
5735         update_stream_signal(stream, sink);
5736
5737         if (stream->signal == SIGNAL_TYPE_HDMI_TYPE_A)
5738                 mod_build_hf_vsif_infopacket(stream, &stream->vsp_infopacket);
5739
5740         if (stream->link->psr_settings.psr_feature_enabled) {
5741                 /*
5742                  * Decide whether the stream supports VSC SDP colorimetry
5743                  * before building the VSC info packet.
5744                  */
5745                 stream->use_vsc_sdp_for_colorimetry = false;
5746                 if (aconnector->dc_sink->sink_signal == SIGNAL_TYPE_DISPLAY_PORT_MST) {
5747                         stream->use_vsc_sdp_for_colorimetry =
5748                                 aconnector->dc_sink->is_vsc_sdp_colorimetry_supported;
5749                 } else {
5750                         if (stream->link->dpcd_caps.dprx_feature.bits.VSC_SDP_COLORIMETRY_SUPPORTED)
5751                                 stream->use_vsc_sdp_for_colorimetry = true;
5752                 }
5753                 mod_build_vsc_infopacket(stream, &stream->vsc_infopacket);
5754         }
5755 finish:
5756         dc_sink_release(sink);
5757
5758         return stream;
5759 }
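
/*
 * Annotation, not driver code: rough bandwidth arithmetic behind the
 * DSC decision in create_stream_for_sink() above. The figures are
 * hypothetical round numbers, not values read from any link or sink.
 * Guarded out with #if 0.
 */
#if 0
static bool example_stream_needs_dsc(void)
{
        unsigned long long pix_clk_khz = 1188000;       /* ~4k120 */
        unsigned int bpp = 30;                          /* 10 bpc RGB */
        unsigned long long link_kbps = 25920000;        /* HBR3 x4 after 8b/10b */

        /* 35.64 Gbps of raw pixels > 25.92 Gbps of link: DSC required */
        return pix_clk_khz * bpp > link_kbps;
}
#endif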
5760
5761 static void amdgpu_dm_crtc_destroy(struct drm_crtc *crtc)
5762 {
5763         drm_crtc_cleanup(crtc);
5764         kfree(crtc);
5765 }
5766
5767 static void dm_crtc_destroy_state(struct drm_crtc *crtc,
5768                                   struct drm_crtc_state *state)
5769 {
5770         struct dm_crtc_state *cur = to_dm_crtc_state(state);
5771
5772         /* TODO: Destroy dc_stream objects when the stream object is flattened */
5773         if (cur->stream)
5774                 dc_stream_release(cur->stream);
5775
5776
5777         __drm_atomic_helper_crtc_destroy_state(state);
5778
5779
5780         kfree(state);
5781 }
5782
5783 static void dm_crtc_reset_state(struct drm_crtc *crtc)
5784 {
5785         struct dm_crtc_state *state;
5786
5787         if (crtc->state)
5788                 dm_crtc_destroy_state(crtc, crtc->state);
5789
5790         state = kzalloc(sizeof(*state), GFP_KERNEL);
5791         if (WARN_ON(!state))
5792                 return;
5793
5794         __drm_atomic_helper_crtc_reset(crtc, &state->base);
5795 }
5796
5797 static struct drm_crtc_state *
5798 dm_crtc_duplicate_state(struct drm_crtc *crtc)
5799 {
5800         struct dm_crtc_state *state, *cur;
5801
5802         if (WARN_ON(!crtc->state))
5803                 return NULL;
5804
5805         cur = to_dm_crtc_state(crtc->state);
5806
5807         state = kzalloc(sizeof(*state), GFP_KERNEL);
5808         if (!state)
5809                 return NULL;
5810
5811         __drm_atomic_helper_crtc_duplicate_state(crtc, &state->base);
5812
5813         if (cur->stream) {
5814                 state->stream = cur->stream;
5815                 dc_stream_retain(state->stream);
5816         }
5817
5818         state->active_planes = cur->active_planes;
5819         state->vrr_infopacket = cur->vrr_infopacket;
5820         state->abm_level = cur->abm_level;
5821         state->vrr_supported = cur->vrr_supported;
5822         state->freesync_config = cur->freesync_config;
5823         state->cm_has_degamma = cur->cm_has_degamma;
5824         state->cm_is_degamma_srgb = cur->cm_is_degamma_srgb;
5825         /* TODO: Duplicate dc_stream after the stream object is flattened */
5826
5827         return &state->base;
5828 }
5829
5830 #ifdef CONFIG_DRM_AMD_SECURE_DISPLAY
5831 static int amdgpu_dm_crtc_late_register(struct drm_crtc *crtc)
5832 {
5833         crtc_debugfs_init(crtc);
5834
5835         return 0;
5836 }
5837 #endif
5838
5839 static inline int dm_set_vupdate_irq(struct drm_crtc *crtc, bool enable)
5840 {
5841         enum dc_irq_source irq_source;
5842         struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
5843         struct amdgpu_device *adev = drm_to_adev(crtc->dev);
5844         int rc;
5845
5846         irq_source = IRQ_TYPE_VUPDATE + acrtc->otg_inst;
5847
5848         rc = dc_interrupt_set(adev->dm.dc, irq_source, enable) ? 0 : -EBUSY;
5849
5850         DRM_DEBUG_VBL("crtc %d - vupdate irq %sabling: r=%d\n",
5851                       acrtc->crtc_id, enable ? "en" : "dis", rc);
5852         return rc;
5853 }
5854
5855 static inline int dm_set_vblank(struct drm_crtc *crtc, bool enable)
5856 {
5857         enum dc_irq_source irq_source;
5858         struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
5859         struct amdgpu_device *adev = drm_to_adev(crtc->dev);
5860         struct dm_crtc_state *acrtc_state = to_dm_crtc_state(crtc->state);
5861 #if defined(CONFIG_DRM_AMD_DC_DCN)
5862         struct amdgpu_display_manager *dm = &adev->dm;
5863         unsigned long flags;
5864 #endif
5865         int rc = 0;
5866
5867         if (enable) {
5868                 /* vblank irq on -> Only need vupdate irq in vrr mode */
5869                 if (amdgpu_dm_vrr_active(acrtc_state))
5870                         rc = dm_set_vupdate_irq(crtc, true);
5871         } else {
5872                 /* vblank irq off -> vupdate irq off */
5873                 rc = dm_set_vupdate_irq(crtc, false);
5874         }
5875
5876         if (rc)
5877                 return rc;
5878
5879         irq_source = IRQ_TYPE_VBLANK + acrtc->otg_inst;
5880
5881         if (!dc_interrupt_set(adev->dm.dc, irq_source, enable))
5882                 return -EBUSY;
5883
5884         if (amdgpu_in_reset(adev))
5885                 return 0;
5886
5887 #if defined(CONFIG_DRM_AMD_DC_DCN)
5888         spin_lock_irqsave(&dm->vblank_lock, flags);
5889         dm->vblank_workqueue->dm = dm;
5890         dm->vblank_workqueue->otg_inst = acrtc->otg_inst;
5891         dm->vblank_workqueue->enable = enable;
5892         spin_unlock_irqrestore(&dm->vblank_lock, flags);
5893         schedule_work(&dm->vblank_workqueue->mall_work);
5894 #endif
5895
5896         return 0;
5897 }
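
/*
 * Annotation, not driver code: the pairing rule dm_set_vblank() and
 * dm_set_vupdate_irq() implement above. VUPDATE is only needed while
 * VRR is active and must never stay enabled once VBLANK is off.
 * Guarded out with #if 0.
 */
#if 0
static bool example_want_vupdate_irq(bool vblank_enabled, bool vrr_active)
{
        return vblank_enabled && vrr_active;
}
#endif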
5898
5899 static int dm_enable_vblank(struct drm_crtc *crtc)
5900 {
5901         return dm_set_vblank(crtc, true);
5902 }
5903
5904 static void dm_disable_vblank(struct drm_crtc *crtc)
5905 {
5906         dm_set_vblank(crtc, false);
5907 }
5908
5909 /* Only the options currently available to the driver are implemented */
5910 static const struct drm_crtc_funcs amdgpu_dm_crtc_funcs = {
5911         .reset = dm_crtc_reset_state,
5912         .destroy = amdgpu_dm_crtc_destroy,
5913         .set_config = drm_atomic_helper_set_config,
5914         .page_flip = drm_atomic_helper_page_flip,
5915         .atomic_duplicate_state = dm_crtc_duplicate_state,
5916         .atomic_destroy_state = dm_crtc_destroy_state,
5917         .set_crc_source = amdgpu_dm_crtc_set_crc_source,
5918         .verify_crc_source = amdgpu_dm_crtc_verify_crc_source,
5919         .get_crc_sources = amdgpu_dm_crtc_get_crc_sources,
5920         .get_vblank_counter = amdgpu_get_vblank_counter_kms,
5921         .enable_vblank = dm_enable_vblank,
5922         .disable_vblank = dm_disable_vblank,
5923         .get_vblank_timestamp = drm_crtc_vblank_helper_get_vblank_timestamp,
5924 #if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
5925         .late_register = amdgpu_dm_crtc_late_register,
5926 #endif
5927 };
5928
5929 static enum drm_connector_status
5930 amdgpu_dm_connector_detect(struct drm_connector *connector, bool force)
5931 {
5932         bool connected;
5933         struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
5934
5935         /*
5936          * Notes:
5937          * 1. This interface is NOT called in the context of the HPD irq.
5938          * 2. This interface *is called* in the context of a user-mode ioctl,
5939          * which makes it a bad place for *any* MST-related activity.
5940          */
5941
5942         if (aconnector->base.force == DRM_FORCE_UNSPECIFIED &&
5943             !aconnector->fake_enable)
5944                 connected = (aconnector->dc_sink != NULL);
5945         else
5946                 connected = (aconnector->base.force == DRM_FORCE_ON);
5947
5948         update_subconnector_property(aconnector);
5949
5950         return (connected ? connector_status_connected :
5951                         connector_status_disconnected);
5952 }
5953
5954 int amdgpu_dm_connector_atomic_set_property(struct drm_connector *connector,
5955                                             struct drm_connector_state *connector_state,
5956                                             struct drm_property *property,
5957                                             uint64_t val)
5958 {
5959         struct drm_device *dev = connector->dev;
5960         struct amdgpu_device *adev = drm_to_adev(dev);
5961         struct dm_connector_state *dm_old_state =
5962                 to_dm_connector_state(connector->state);
5963         struct dm_connector_state *dm_new_state =
5964                 to_dm_connector_state(connector_state);
5965
5966         int ret = -EINVAL;
5967
5968         if (property == dev->mode_config.scaling_mode_property) {
5969                 enum amdgpu_rmx_type rmx_type;
5970
5971                 switch (val) {
5972                 case DRM_MODE_SCALE_CENTER:
5973                         rmx_type = RMX_CENTER;
5974                         break;
5975                 case DRM_MODE_SCALE_ASPECT:
5976                         rmx_type = RMX_ASPECT;
5977                         break;
5978                 case DRM_MODE_SCALE_FULLSCREEN:
5979                         rmx_type = RMX_FULL;
5980                         break;
5981                 case DRM_MODE_SCALE_NONE:
5982                 default:
5983                         rmx_type = RMX_OFF;
5984                         break;
5985                 }
5986
5987                 if (dm_old_state->scaling == rmx_type)
5988                         return 0;
5989
5990                 dm_new_state->scaling = rmx_type;
5991                 ret = 0;
5992         } else if (property == adev->mode_info.underscan_hborder_property) {
5993                 dm_new_state->underscan_hborder = val;
5994                 ret = 0;
5995         } else if (property == adev->mode_info.underscan_vborder_property) {
5996                 dm_new_state->underscan_vborder = val;
5997                 ret = 0;
5998         } else if (property == adev->mode_info.underscan_property) {
5999                 dm_new_state->underscan_enable = val;
6000                 ret = 0;
6001         } else if (property == adev->mode_info.abm_level_property) {
6002                 dm_new_state->abm_level = val;
6003                 ret = 0;
6004         }
6005
6006         return ret;
6007 }
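
/*
 * Annotation, not driver code: the userspace side of the property
 * handling above, as a minimal libdrm sketch. conn_id and prop_id are
 * hypothetical and would come from drmModeGetConnector() and
 * drmModeObjectGetProperties(). Guarded out with #if 0.
 */
#if 0
#include <xf86drmMode.h>

static int example_request_aspect_scaling(int fd, uint32_t conn_id,
                                          uint32_t prop_id)
{
        /* Lands in amdgpu_dm_connector_atomic_set_property() as RMX_ASPECT. */
        return drmModeObjectSetProperty(fd, conn_id,
                                        DRM_MODE_OBJECT_CONNECTOR,
                                        prop_id, DRM_MODE_SCALE_ASPECT);
}
#endif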
6008
6009 int amdgpu_dm_connector_atomic_get_property(struct drm_connector *connector,
6010                                             const struct drm_connector_state *state,
6011                                             struct drm_property *property,
6012                                             uint64_t *val)
6013 {
6014         struct drm_device *dev = connector->dev;
6015         struct amdgpu_device *adev = drm_to_adev(dev);
6016         struct dm_connector_state *dm_state =
6017                 to_dm_connector_state(state);
6018         int ret = -EINVAL;
6019
6020         if (property == dev->mode_config.scaling_mode_property) {
6021                 switch (dm_state->scaling) {
6022                 case RMX_CENTER:
6023                         *val = DRM_MODE_SCALE_CENTER;
6024                         break;
6025                 case RMX_ASPECT:
6026                         *val = DRM_MODE_SCALE_ASPECT;
6027                         break;
6028                 case RMX_FULL:
6029                         *val = DRM_MODE_SCALE_FULLSCREEN;
6030                         break;
6031                 case RMX_OFF:
6032                 default:
6033                         *val = DRM_MODE_SCALE_NONE;
6034                         break;
6035                 }
6036                 ret = 0;
6037         } else if (property == adev->mode_info.underscan_hborder_property) {
6038                 *val = dm_state->underscan_hborder;
6039                 ret = 0;
6040         } else if (property == adev->mode_info.underscan_vborder_property) {
6041                 *val = dm_state->underscan_vborder;
6042                 ret = 0;
6043         } else if (property == adev->mode_info.underscan_property) {
6044                 *val = dm_state->underscan_enable;
6045                 ret = 0;
6046         } else if (property == adev->mode_info.abm_level_property) {
6047                 *val = dm_state->abm_level;
6048                 ret = 0;
6049         }
6050
6051         return ret;
6052 }
6053
6054 static void amdgpu_dm_connector_unregister(struct drm_connector *connector)
6055 {
6056         struct amdgpu_dm_connector *amdgpu_dm_connector = to_amdgpu_dm_connector(connector);
6057
6058         drm_dp_aux_unregister(&amdgpu_dm_connector->dm_dp_aux.aux);
6059 }
6060
6061 static void amdgpu_dm_connector_destroy(struct drm_connector *connector)
6062 {
6063         struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
6064         const struct dc_link *link = aconnector->dc_link;
6065         struct amdgpu_device *adev = drm_to_adev(connector->dev);
6066         struct amdgpu_display_manager *dm = &adev->dm;
6067
6068         /*
6069          * Call only if mst_mgr was initialized before, since it's not done
6070          * for all connector types.
6071          */
6072         if (aconnector->mst_mgr.dev)
6073                 drm_dp_mst_topology_mgr_destroy(&aconnector->mst_mgr);
6074
6075 #if defined(CONFIG_BACKLIGHT_CLASS_DEVICE) ||\
6076         defined(CONFIG_BACKLIGHT_CLASS_DEVICE_MODULE)
6077
6078         if ((link->connector_signal & (SIGNAL_TYPE_EDP | SIGNAL_TYPE_LVDS)) &&
6079             link->type != dc_connection_none &&
6080             dm->backlight_dev) {
6081                 backlight_device_unregister(dm->backlight_dev);
6082                 dm->backlight_dev = NULL;
6083         }
6084 #endif
6085
6086         if (aconnector->dc_em_sink)
6087                 dc_sink_release(aconnector->dc_em_sink);
6088         aconnector->dc_em_sink = NULL;
6089         if (aconnector->dc_sink)
6090                 dc_sink_release(aconnector->dc_sink);
6091         aconnector->dc_sink = NULL;
6092
6093         drm_dp_cec_unregister_connector(&aconnector->dm_dp_aux.aux);
6094         drm_connector_unregister(connector);
6095         drm_connector_cleanup(connector);
6096         if (aconnector->i2c) {
6097                 i2c_del_adapter(&aconnector->i2c->base);
6098                 kfree(aconnector->i2c);
6099         }
6100         kfree(aconnector->dm_dp_aux.aux.name);
6101
6102         kfree(connector);
6103 }
6104
6105 void amdgpu_dm_connector_funcs_reset(struct drm_connector *connector)
6106 {
6107         struct dm_connector_state *state =
6108                 to_dm_connector_state(connector->state);
6109
6110         if (connector->state)
6111                 __drm_atomic_helper_connector_destroy_state(connector->state);
6112
6113         kfree(state);
6114
6115         state = kzalloc(sizeof(*state), GFP_KERNEL);
6116
6117         if (state) {
6118                 state->scaling = RMX_OFF;
6119                 state->underscan_enable = false;
6120                 state->underscan_hborder = 0;
6121                 state->underscan_vborder = 0;
6122                 state->base.max_requested_bpc = 8;
6123                 state->vcpi_slots = 0;
6124                 state->pbn = 0;
6125                 if (connector->connector_type == DRM_MODE_CONNECTOR_eDP)
6126                         state->abm_level = amdgpu_dm_abm_level;
6127
6128                 __drm_atomic_helper_connector_reset(connector, &state->base);
6129         }
6130 }
6131
6132 struct drm_connector_state *
6133 amdgpu_dm_connector_atomic_duplicate_state(struct drm_connector *connector)
6134 {
6135         struct dm_connector_state *state =
6136                 to_dm_connector_state(connector->state);
6137
6138         struct dm_connector_state *new_state =
6139                         kmemdup(state, sizeof(*state), GFP_KERNEL);
6140
6141         if (!new_state)
6142                 return NULL;
6143
6144         __drm_atomic_helper_connector_duplicate_state(connector, &new_state->base);
6145
6146         new_state->freesync_capable = state->freesync_capable;
6147         new_state->abm_level = state->abm_level;
6148         new_state->scaling = state->scaling;
6149         new_state->underscan_enable = state->underscan_enable;
6150         new_state->underscan_hborder = state->underscan_hborder;
6151         new_state->underscan_vborder = state->underscan_vborder;
6152         new_state->vcpi_slots = state->vcpi_slots;
6153         new_state->pbn = state->pbn;
6154         return &new_state->base;
6155 }
6156
6157 static int
6158 amdgpu_dm_connector_late_register(struct drm_connector *connector)
6159 {
6160         struct amdgpu_dm_connector *amdgpu_dm_connector =
6161                 to_amdgpu_dm_connector(connector);
6162         int r;
6163
6164         if ((connector->connector_type == DRM_MODE_CONNECTOR_DisplayPort) ||
6165             (connector->connector_type == DRM_MODE_CONNECTOR_eDP)) {
6166                 amdgpu_dm_connector->dm_dp_aux.aux.dev = connector->kdev;
6167                 r = drm_dp_aux_register(&amdgpu_dm_connector->dm_dp_aux.aux);
6168                 if (r)
6169                         return r;
6170         }
6171
6172 #if defined(CONFIG_DEBUG_FS)
6173         connector_debugfs_init(amdgpu_dm_connector);
6174 #endif
6175
6176         return 0;
6177 }
6178
6179 static const struct drm_connector_funcs amdgpu_dm_connector_funcs = {
6180         .reset = amdgpu_dm_connector_funcs_reset,
6181         .detect = amdgpu_dm_connector_detect,
6182         .fill_modes = drm_helper_probe_single_connector_modes,
6183         .destroy = amdgpu_dm_connector_destroy,
6184         .atomic_duplicate_state = amdgpu_dm_connector_atomic_duplicate_state,
6185         .atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
6186         .atomic_set_property = amdgpu_dm_connector_atomic_set_property,
6187         .atomic_get_property = amdgpu_dm_connector_atomic_get_property,
6188         .late_register = amdgpu_dm_connector_late_register,
6189         .early_unregister = amdgpu_dm_connector_unregister
6190 };
6191
6192 static int get_modes(struct drm_connector *connector)
6193 {
6194         return amdgpu_dm_connector_get_modes(connector);
6195 }
6196
6197 static void create_eml_sink(struct amdgpu_dm_connector *aconnector)
6198 {
6199         struct dc_sink_init_data init_params = {
6200                         .link = aconnector->dc_link,
6201                         .sink_signal = SIGNAL_TYPE_VIRTUAL
6202         };
6203         struct edid *edid;
6204
6205         if (!aconnector->base.edid_blob_ptr) {
6206                 DRM_ERROR("No EDID firmware found on connector: %s, forcing to OFF!\n",
6207                                 aconnector->base.name);
6208
6209                 aconnector->base.force = DRM_FORCE_OFF;
6210                 aconnector->base.override_edid = false;
6211                 return;
6212         }
6213
6214         edid = (struct edid *) aconnector->base.edid_blob_ptr->data;
6215
6216         aconnector->edid = edid;
6217
6218         aconnector->dc_em_sink = dc_link_add_remote_sink(
6219                 aconnector->dc_link,
6220                 (uint8_t *)edid,
6221                 (edid->extensions + 1) * EDID_LENGTH,
6222                 &init_params);
6223
6224         if (aconnector->base.force == DRM_FORCE_ON) {
6225                 aconnector->dc_sink = aconnector->dc_link->local_sink ?
6226                 aconnector->dc_link->local_sink :
6227                 aconnector->dc_em_sink;
6228                 dc_sink_retain(aconnector->dc_sink);
6229         }
6230 }
6231
6232 static void handle_edid_mgmt(struct amdgpu_dm_connector *aconnector)
6233 {
6234         struct dc_link *link = (struct dc_link *)aconnector->dc_link;
6235
6236         /*
6237          * In case of a headless boot with force-on for a DP managed connector,
6238          * those settings have to be != 0 to get an initial modeset.
6239          */
6240         if (link->connector_signal == SIGNAL_TYPE_DISPLAY_PORT) {
6241                 link->verified_link_cap.lane_count = LANE_COUNT_FOUR;
6242                 link->verified_link_cap.link_rate = LINK_RATE_HIGH2;
6243         }
6244
6245
6246         aconnector->base.override_edid = true;
6247         create_eml_sink(aconnector);
6248 }
6249
6250 static struct dc_stream_state *
6251 create_validate_stream_for_sink(struct amdgpu_dm_connector *aconnector,
6252                                 const struct drm_display_mode *drm_mode,
6253                                 const struct dm_connector_state *dm_state,
6254                                 const struct dc_stream_state *old_stream)
6255 {
6256         struct drm_connector *connector = &aconnector->base;
6257         struct amdgpu_device *adev = drm_to_adev(connector->dev);
6258         struct dc_stream_state *stream;
6259         const struct drm_connector_state *drm_state = dm_state ? &dm_state->base : NULL;
6260         int requested_bpc = drm_state ? drm_state->max_requested_bpc : 8;
6261         enum dc_status dc_result = DC_OK;
6262
6263         do {
6264                 stream = create_stream_for_sink(aconnector, drm_mode,
6265                                                 dm_state, old_stream,
6266                                                 requested_bpc);
6267                 if (stream == NULL) {
6268                         DRM_ERROR("Failed to create stream for sink!\n");
6269                         break;
6270                 }
6271
6272                 dc_result = dc_validate_stream(adev->dm.dc, stream);
6273
6274                 if (dc_result != DC_OK) {
6275                         DRM_DEBUG_KMS("Mode %dx%d (clk %d) failed DC validation with error %d (%s)\n",
6276                                       drm_mode->hdisplay,
6277                                       drm_mode->vdisplay,
6278                                       drm_mode->clock,
6279                                       dc_result,
6280                                       dc_status_to_str(dc_result));
6281
6282                         dc_stream_release(stream);
6283                         stream = NULL;
6284                         requested_bpc -= 2; /* lower bpc to retry validation */
6285                 }
6286
6287         } while (stream == NULL && requested_bpc >= 6);
6288
6289         if (dc_result == DC_FAIL_ENC_VALIDATE && !aconnector->force_yuv420_output) {
6290                 DRM_DEBUG_KMS("Retry forcing YCbCr420 encoding\n");
6291
6292                 aconnector->force_yuv420_output = true;
6293                 stream = create_validate_stream_for_sink(aconnector, drm_mode,
6294                                                 dm_state, old_stream);
6295                 aconnector->force_yuv420_output = false;
6296         }
6297
6298         return stream;
6299 }
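
/*
 * Annotation, not driver code: the retry ladder implemented by
 * create_validate_stream_for_sink() above, 10 -> 8 -> 6 bpc, with the
 * validation callback left abstract. Guarded out with #if 0.
 */
#if 0
static int example_pick_bpc(int requested_bpc, bool (*validates)(int))
{
        while (requested_bpc >= 6) {
                if (validates(requested_bpc))
                        return requested_bpc;
                requested_bpc -= 2;     /* lower bpc and retry */
        }
        return -1;                      /* nothing validated; the caller may
                                         * still retry with YCbCr 4:2:0 */
}
#endif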
6300
6301 enum drm_mode_status amdgpu_dm_connector_mode_valid(struct drm_connector *connector,
6302                                    struct drm_display_mode *mode)
6303 {
6304         int result = MODE_ERROR;
6305         struct dc_sink *dc_sink;
6306         /* TODO: Unhardcode stream count */
6307         struct dc_stream_state *stream;
6308         struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
6309
6310         if ((mode->flags & DRM_MODE_FLAG_INTERLACE) ||
6311                         (mode->flags & DRM_MODE_FLAG_DBLSCAN))
6312                 return result;
6313
6314         /*
6315          * Only run this the first time mode_valid is called to initialize
6316          * EDID mgmt
6317          */
6318         if (aconnector->base.force != DRM_FORCE_UNSPECIFIED &&
6319                 !aconnector->dc_em_sink)
6320                 handle_edid_mgmt(aconnector);
6321
6322         dc_sink = to_amdgpu_dm_connector(connector)->dc_sink;
6323
6324         if (dc_sink == NULL && aconnector->base.force != DRM_FORCE_ON_DIGITAL &&
6325                                 aconnector->base.force != DRM_FORCE_ON) {
6326                 DRM_ERROR("dc_sink is NULL!\n");
6327                 goto fail;
6328         }
6329
6330         stream = create_validate_stream_for_sink(aconnector, mode, NULL, NULL);
6331         if (stream) {
6332                 dc_stream_release(stream);
6333                 result = MODE_OK;
6334         }
6335
6336 fail:
6337         /* TODO: error handling */
6338         return result;
6339 }
6340
6341 static int fill_hdr_info_packet(const struct drm_connector_state *state,
6342                                 struct dc_info_packet *out)
6343 {
6344         struct hdmi_drm_infoframe frame;
6345         unsigned char buf[30]; /* 26 + 4 */
6346         ssize_t len;
6347         int ret, i;
6348
6349         memset(out, 0, sizeof(*out));
6350
6351         if (!state->hdr_output_metadata)
6352                 return 0;
6353
6354         ret = drm_hdmi_infoframe_set_hdr_metadata(&frame, state);
6355         if (ret)
6356                 return ret;
6357
6358         len = hdmi_drm_infoframe_pack_only(&frame, buf, sizeof(buf));
6359         if (len < 0)
6360                 return (int)len;
6361
6362         /* Static metadata is a fixed 26 bytes + 4 byte header. */
6363         if (len != 30)
6364                 return -EINVAL;
6365
6366         /* Prepare the infopacket for DC. */
6367         switch (state->connector->connector_type) {
6368         case DRM_MODE_CONNECTOR_HDMIA:
6369                 out->hb0 = 0x87; /* type */
6370                 out->hb1 = 0x01; /* version */
6371                 out->hb2 = 0x1A; /* length */
6372                 out->sb[0] = buf[3]; /* checksum */
6373                 i = 1;
6374                 break;
6375
6376         case DRM_MODE_CONNECTOR_DisplayPort:
6377         case DRM_MODE_CONNECTOR_eDP:
6378                 out->hb0 = 0x00; /* sdp id, zero */
6379                 out->hb1 = 0x87; /* type */
6380                 out->hb2 = 0x1D; /* payload len - 1 */
6381                 out->hb3 = (0x13 << 2); /* sdp version */
6382                 out->sb[0] = 0x01; /* version */
6383                 out->sb[1] = 0x1A; /* length */
6384                 i = 2;
6385                 break;
6386
6387         default:
6388                 return -EINVAL;
6389         }
6390
6391         memcpy(&out->sb[i], &buf[4], 26);
6392         out->valid = true;
6393
6394         print_hex_dump(KERN_DEBUG, "HDR SB:", DUMP_PREFIX_NONE, 16, 1, out->sb,
6395                        sizeof(out->sb), false);
6396
6397         return 0;
6398 }
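
/*
 * Annotation, not driver code: the two wrappers fill_hdr_info_packet()
 * builds above around the same 26-byte HDR static metadata payload.
 * Guarded out with #if 0.
 */
#if 0
static void example_hdr_headers(unsigned char hdmi[4], unsigned char dp[4])
{
        hdmi[0] = 0x87;         /* infoframe type: Dynamic Range and Mastering */
        hdmi[1] = 0x01;         /* infoframe version */
        hdmi[2] = 0x1A;         /* payload length: 26 bytes */
        hdmi[3] = 0x00;         /* checksum byte, computed over the payload */

        dp[0] = 0x00;           /* SDP id */
        dp[1] = 0x87;           /* SDP type, same code as the HDMI infoframe */
        dp[2] = 0x1D;           /* payload length minus one */
        dp[3] = 0x13 << 2;      /* SDP version */
}
#endif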
6399
6400 static bool
6401 is_hdr_metadata_different(const struct drm_connector_state *old_state,
6402                           const struct drm_connector_state *new_state)
6403 {
6404         struct drm_property_blob *old_blob = old_state->hdr_output_metadata;
6405         struct drm_property_blob *new_blob = new_state->hdr_output_metadata;
6406
6407         if (old_blob != new_blob) {
6408                 if (old_blob && new_blob &&
6409                     old_blob->length == new_blob->length)
6410                         return memcmp(old_blob->data, new_blob->data,
6411                                       old_blob->length);
6412
6413                 return true;
6414         }
6415
6416         return false;
6417 }
6418
6419 static int
6420 amdgpu_dm_connector_atomic_check(struct drm_connector *conn,
6421                                  struct drm_atomic_state *state)
6422 {
6423         struct drm_connector_state *new_con_state =
6424                 drm_atomic_get_new_connector_state(state, conn);
6425         struct drm_connector_state *old_con_state =
6426                 drm_atomic_get_old_connector_state(state, conn);
6427         struct drm_crtc *crtc = new_con_state->crtc;
6428         struct drm_crtc_state *new_crtc_state;
6429         int ret;
6430
6431         trace_amdgpu_dm_connector_atomic_check(new_con_state);
6432
6433         if (!crtc)
6434                 return 0;
6435
6436         if (is_hdr_metadata_different(old_con_state, new_con_state)) {
6437                 struct dc_info_packet hdr_infopacket;
6438
6439                 ret = fill_hdr_info_packet(new_con_state, &hdr_infopacket);
6440                 if (ret)
6441                         return ret;
6442
6443                 new_crtc_state = drm_atomic_get_crtc_state(state, crtc);
6444                 if (IS_ERR(new_crtc_state))
6445                         return PTR_ERR(new_crtc_state);
6446
6447                 /*
6448                  * DC considers the stream backends changed if the
6449                  * static metadata changes. Forcing the modeset also
6450                  * gives a simple way for userspace to switch from
6451                  * 8bpc to 10bpc when setting the metadata to enter
6452                  * or exit HDR.
6453                  *
6454                  * Changing the static metadata after it's been
6455                  * set is permissible, however. So only force a
6456                  * modeset if we're entering or exiting HDR.
6457                  */
6458                 new_crtc_state->mode_changed =
6459                         !old_con_state->hdr_output_metadata ||
6460                         !new_con_state->hdr_output_metadata;
6461         }
6462
6463         return 0;
6464 }
6465
6466 static const struct drm_connector_helper_funcs
6467 amdgpu_dm_connector_helper_funcs = {
6468         /*
6469          * If hotplugging a second, bigger display in FB console mode, bigger
6470          * resolution modes will be filtered by drm_mode_validate_size() and
6471          * go missing after the user starts lightdm. So we need to renew the
6472          * mode list in the get_modes callback, not just return the mode count.
6473          */
6474         .get_modes = get_modes,
6475         .mode_valid = amdgpu_dm_connector_mode_valid,
6476         .atomic_check = amdgpu_dm_connector_atomic_check,
6477 };
6478
6479 static void dm_crtc_helper_disable(struct drm_crtc *crtc)
6480 {
6481 }
6482
6483 static int count_crtc_active_planes(struct drm_crtc_state *new_crtc_state)
6484 {
6485         struct drm_atomic_state *state = new_crtc_state->state;
6486         struct drm_plane *plane;
6487         int num_active = 0;
6488
6489         drm_for_each_plane_mask(plane, state->dev, new_crtc_state->plane_mask) {
6490                 struct drm_plane_state *new_plane_state;
6491
6492                 /* Cursor planes are "fake". */
6493                 if (plane->type == DRM_PLANE_TYPE_CURSOR)
6494                         continue;
6495
6496                 new_plane_state = drm_atomic_get_new_plane_state(state, plane);
6497
6498                 if (!new_plane_state) {
6499                         /*
6500                          * The plane is enabled on the CRTC and hasn't changed
6501                          * state. This means that it previously passed
6502                          * validation and is therefore enabled.
6503                          */
6504                         num_active += 1;
6505                         continue;
6506                 }
6507
6508                 /* We need a framebuffer to be considered enabled. */
6509                 num_active += (new_plane_state->fb != NULL);
6510         }
6511
6512         return num_active;
6513 }
6514
6515 static void dm_update_crtc_active_planes(struct drm_crtc *crtc,
6516                                          struct drm_crtc_state *new_crtc_state)
6517 {
6518         struct dm_crtc_state *dm_new_crtc_state =
6519                 to_dm_crtc_state(new_crtc_state);
6520
6521         dm_new_crtc_state->active_planes = 0;
6522
6523         if (!dm_new_crtc_state->stream)
6524                 return;
6525
6526         dm_new_crtc_state->active_planes =
6527                 count_crtc_active_planes(new_crtc_state);
6528 }
6529
6530 static int dm_crtc_helper_atomic_check(struct drm_crtc *crtc,
6531                                        struct drm_atomic_state *state)
6532 {
6533         struct drm_crtc_state *crtc_state = drm_atomic_get_new_crtc_state(state,
6534                                                                           crtc);
6535         struct amdgpu_device *adev = drm_to_adev(crtc->dev);
6536         struct dc *dc = adev->dm.dc;
6537         struct dm_crtc_state *dm_crtc_state = to_dm_crtc_state(crtc_state);
6538         int ret = -EINVAL;
6539
6540         trace_amdgpu_dm_crtc_atomic_check(crtc_state);
6541
6542         dm_update_crtc_active_planes(crtc, crtc_state);
6543
6544         if (unlikely(!dm_crtc_state->stream &&
6545                      modeset_required(crtc_state, NULL, dm_crtc_state->stream))) {
6546                 WARN_ON(1);
6547                 return ret;
6548         }
6549
6550         /*
6551          * We require the primary plane to be enabled whenever the CRTC is, otherwise
6552          * drm_mode_cursor_universal may end up trying to enable the cursor plane while all other
6553          * planes are disabled, which is not supported by the hardware. And there is legacy
6554          * userspace which stops using the HW cursor altogether in response to the resulting EINVAL.
6555          */
6556         if (crtc_state->enable &&
6557             !(crtc_state->plane_mask & drm_plane_mask(crtc->primary))) {
6558                 DRM_DEBUG_ATOMIC("Can't enable a CRTC without enabling the primary plane\n");
6559                 return -EINVAL;
6560         }
6561
6562         /* In some use cases, like reset, no stream is attached */
6563         if (!dm_crtc_state->stream)
6564                 return 0;
6565
6566         if (dc_validate_stream(dc, dm_crtc_state->stream) == DC_OK)
6567                 return 0;
6568
6569         DRM_DEBUG_ATOMIC("Failed DC stream validation\n");
6570         return ret;
6571 }
6572
6573 static bool dm_crtc_helper_mode_fixup(struct drm_crtc *crtc,
6574                                       const struct drm_display_mode *mode,
6575                                       struct drm_display_mode *adjusted_mode)
6576 {
6577         return true;
6578 }
6579
6580 static const struct drm_crtc_helper_funcs amdgpu_dm_crtc_helper_funcs = {
6581         .disable = dm_crtc_helper_disable,
6582         .atomic_check = dm_crtc_helper_atomic_check,
6583         .mode_fixup = dm_crtc_helper_mode_fixup,
6584         .get_scanout_position = amdgpu_crtc_get_scanout_position,
6585 };
6586
6587 static void dm_encoder_helper_disable(struct drm_encoder *encoder)
6588 {
6589
6590 }
6591
6592 static int convert_dc_color_depth_into_bpc(enum dc_color_depth display_color_depth)
6593 {
6594         switch (display_color_depth) {
6595         case COLOR_DEPTH_666:
6596                 return 6;
6597         case COLOR_DEPTH_888:
6598                 return 8;
6599         case COLOR_DEPTH_101010:
6600                 return 10;
6601         case COLOR_DEPTH_121212:
6602                 return 12;
6603         case COLOR_DEPTH_141414:
6604                 return 14;
6605         case COLOR_DEPTH_161616:
6606                 return 16;
6607         default:
6608                 break;
6609         }
6610         return 0;
6611 }
6612
6613 static int dm_encoder_helper_atomic_check(struct drm_encoder *encoder,
6614                                           struct drm_crtc_state *crtc_state,
6615                                           struct drm_connector_state *conn_state)
6616 {
6617         struct drm_atomic_state *state = crtc_state->state;
6618         struct drm_connector *connector = conn_state->connector;
6619         struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
6620         struct dm_connector_state *dm_new_connector_state = to_dm_connector_state(conn_state);
6621         const struct drm_display_mode *adjusted_mode = &crtc_state->adjusted_mode;
6622         struct drm_dp_mst_topology_mgr *mst_mgr;
6623         struct drm_dp_mst_port *mst_port;
6624         enum dc_color_depth color_depth;
6625         int clock, bpp = 0;
6626         bool is_y420 = false;
6627
6628         if (!aconnector->port || !aconnector->dc_sink)
6629                 return 0;
6630
6631         mst_port = aconnector->port;
6632         mst_mgr = &aconnector->mst_port->mst_mgr;
6633
6634         if (!crtc_state->connectors_changed && !crtc_state->mode_changed)
6635                 return 0;
6636
6637         if (!state->duplicated) {
6638                 int max_bpc = conn_state->max_requested_bpc;
6639                 is_y420 = drm_mode_is_420_also(&connector->display_info, adjusted_mode) &&
6640                                 aconnector->force_yuv420_output;
6641                 color_depth = convert_color_depth_from_display_info(connector,
6642                                                                     is_y420,
6643                                                                     max_bpc);
6644                 bpp = convert_dc_color_depth_into_bpc(color_depth) * 3;
6645                 clock = adjusted_mode->clock;
6646                 dm_new_connector_state->pbn = drm_dp_calc_pbn_mode(clock, bpp, false);
6647         }
6648         dm_new_connector_state->vcpi_slots = drm_dp_atomic_find_vcpi_slots(state,
6649                                                                            mst_mgr,
6650                                                                            mst_port,
6651                                                                            dm_new_connector_state->pbn,
6652                                                                            dm_mst_get_pbn_divider(aconnector->dc_link));
6653         if (dm_new_connector_state->vcpi_slots < 0) {
6654                 DRM_DEBUG_ATOMIC("failed finding vcpi slots: %d\n", (int)dm_new_connector_state->vcpi_slots);
6655                 return dm_new_connector_state->vcpi_slots;
6656         }
6657         return 0;
6658 }
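
/*
 * Annotation, not driver code: approximate PBN arithmetic behind the
 * drm_dp_calc_pbn_mode() call above. One PBN is 54/64 MBps of MST link
 * budget and a 0.6% margin is added; the kernel rounds up where this
 * sketch truncates. Values are a hypothetical 1080p60 RGB stream.
 * Guarded out with #if 0.
 */
#if 0
static unsigned int example_pbn_estimate(void)
{
        unsigned long long clock_khz = 148500;  /* 1080p60 pixel clock */
        unsigned int bpp = 24;                  /* 8 bpc RGB */

        /* 445500 kBps * 64/54 * 1.006 / 1000 ~= 531 PBN */
        return (clock_khz * bpp / 8) * 64 * 1006 / (54 * 1000 * 1000);
}
#endif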
6659
6660 const struct drm_encoder_helper_funcs amdgpu_dm_encoder_helper_funcs = {
6661         .disable = dm_encoder_helper_disable,
6662         .atomic_check = dm_encoder_helper_atomic_check
6663 };
6664
6665 #if defined(CONFIG_DRM_AMD_DC_DCN)
6666 static int dm_update_mst_vcpi_slots_for_dsc(struct drm_atomic_state *state,
6667                                             struct dc_state *dc_state)
6668 {
6669         struct dc_stream_state *stream = NULL;
6670         struct drm_connector *connector;
6671         struct drm_connector_state *new_con_state;
6672         struct amdgpu_dm_connector *aconnector;
6673         struct dm_connector_state *dm_conn_state;
6674         int i, j, clock, bpp;
6675         int vcpi, pbn_div, pbn = 0;
6676
6677         for_each_new_connector_in_state(state, connector, new_con_state, i) {
6678
6679                 aconnector = to_amdgpu_dm_connector(connector);
6680
6681                 if (!aconnector->port)
6682                         continue;
6683
6684                 if (!new_con_state || !new_con_state->crtc)
6685                         continue;
6686
6687                 dm_conn_state = to_dm_connector_state(new_con_state);
6688
6689                 for (j = 0; j < dc_state->stream_count; j++) {
6690                         stream = dc_state->streams[j];
6691                         if (!stream)
6692                                 continue;
6693
6694                         if ((struct amdgpu_dm_connector *)stream->dm_stream_context == aconnector)
6695                                 break;
6696
6697                         stream = NULL;
6698                 }
6699
6700                 if (!stream)
6701                         continue;
6702
6703                 if (stream->timing.flags.DSC != 1) {
6704                         drm_dp_mst_atomic_enable_dsc(state,
6705                                                      aconnector->port,
6706                                                      dm_conn_state->pbn,
6707                                                      0,
6708                                                      false);
6709                         continue;
6710                 }
6711
6712                 pbn_div = dm_mst_get_pbn_divider(stream->link);
6713                 bpp = stream->timing.dsc_cfg.bits_per_pixel;
6714                 clock = stream->timing.pix_clk_100hz / 10;
6715                 pbn = drm_dp_calc_pbn_mode(clock, bpp, true);
6716                 vcpi = drm_dp_mst_atomic_enable_dsc(state,
6717                                                     aconnector->port,
6718                                                     pbn, pbn_div,
6719                                                     true);
6720                 if (vcpi < 0)
6721                         return vcpi;
6722
6723                 dm_conn_state->pbn = pbn;
6724                 dm_conn_state->vcpi_slots = vcpi;
6725         }
6726         return 0;
6727 }
6728 #endif
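
/*
 * Annotation, not driver code: with DSC enabled, the bits_per_pixel
 * used above is a fixed-point value in 1/16 bpp units, which is why
 * drm_dp_calc_pbn_mode() is called with its dsc argument set to true.
 * A minimal conversion sketch, guarded out with #if 0.
 */
#if 0
static unsigned int example_dsc_bpp_x16_to_bpp(unsigned int bpp_x16)
{
        return bpp_x16 / 16;    /* e.g. 192 -> 12 bpp target */
}
#endif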
6729
6730 static void dm_drm_plane_reset(struct drm_plane *plane)
6731 {
6732         struct dm_plane_state *amdgpu_state = NULL;
6733
6734         if (plane->state)
6735                 plane->funcs->atomic_destroy_state(plane, plane->state);
6736
6737         amdgpu_state = kzalloc(sizeof(*amdgpu_state), GFP_KERNEL);
6738         WARN_ON(amdgpu_state == NULL);
6739
6740         if (amdgpu_state)
6741                 __drm_atomic_helper_plane_reset(plane, &amdgpu_state->base);
6742 }
6743
6744 static struct drm_plane_state *
6745 dm_drm_plane_duplicate_state(struct drm_plane *plane)
6746 {
6747         struct dm_plane_state *dm_plane_state, *old_dm_plane_state;
6748
6749         old_dm_plane_state = to_dm_plane_state(plane->state);
6750         dm_plane_state = kzalloc(sizeof(*dm_plane_state), GFP_KERNEL);
6751         if (!dm_plane_state)
6752                 return NULL;
6753
6754         __drm_atomic_helper_plane_duplicate_state(plane, &dm_plane_state->base);
6755
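        /*
         * dc_plane_state is reference counted; take an extra reference for the
         * duplicated state here.  It is dropped again in
         * dm_drm_plane_destroy_state() via dc_plane_state_release().
         */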
6756         if (old_dm_plane_state->dc_state) {
6757                 dm_plane_state->dc_state = old_dm_plane_state->dc_state;
6758                 dc_plane_state_retain(dm_plane_state->dc_state);
6759         }
6760
6761         return &dm_plane_state->base;
6762 }
6763
6764 static void dm_drm_plane_destroy_state(struct drm_plane *plane,
6765                                 struct drm_plane_state *state)
6766 {
6767         struct dm_plane_state *dm_plane_state = to_dm_plane_state(state);
6768
6769         if (dm_plane_state->dc_state)
6770                 dc_plane_state_release(dm_plane_state->dc_state);
6771
6772         drm_atomic_helper_plane_destroy_state(plane, state);
6773 }
6774
6775 static const struct drm_plane_funcs dm_plane_funcs = {
6776         .update_plane   = drm_atomic_helper_update_plane,
6777         .disable_plane  = drm_atomic_helper_disable_plane,
6778         .destroy        = drm_primary_helper_destroy,
6779         .reset = dm_drm_plane_reset,
6780         .atomic_duplicate_state = dm_drm_plane_duplicate_state,
6781         .atomic_destroy_state = dm_drm_plane_destroy_state,
6782         .format_mod_supported = dm_plane_format_mod_supported,
6783 };
6784
6785 static int dm_plane_helper_prepare_fb(struct drm_plane *plane,
6786                                       struct drm_plane_state *new_state)
6787 {
6788         struct amdgpu_framebuffer *afb;
6789         struct drm_gem_object *obj;
6790         struct amdgpu_device *adev;
6791         struct amdgpu_bo *rbo;
6792         struct dm_plane_state *dm_plane_state_new, *dm_plane_state_old;
6793         struct list_head list;
6794         struct ttm_validate_buffer tv;
6795         struct ww_acquire_ctx ticket;
6796         uint32_t domain;
6797         int r;
6798
6799         if (!new_state->fb) {
6800                 DRM_DEBUG_KMS("No FB bound\n");
6801                 return 0;
6802         }
6803
6804         afb = to_amdgpu_framebuffer(new_state->fb);
6805         obj = new_state->fb->obj[0];
6806         rbo = gem_to_amdgpu_bo(obj);
6807         adev = amdgpu_ttm_adev(rbo->tbo.bdev);
6808         INIT_LIST_HEAD(&list);
6809
6810         tv.bo = &rbo->tbo;
6811         tv.num_shared = 1;
6812         list_add(&tv.head, &list);
6813
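        /*
         * Reserve the BO through TTM's execbuf utility (a ww_mutex acquire
         * context), so the pin and GART bind below run under the buffer's
         * reservation lock; num_shared = 1 pre-allocates one shared-fence
         * slot.
         */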
6814         r = ttm_eu_reserve_buffers(&ticket, &list, false, NULL);
6815         if (r) {
6816                 dev_err(adev->dev, "failed to reserve bo (%d)\n", r);
6817                 return r;
6818         }
6819
6820         if (plane->type != DRM_PLANE_TYPE_CURSOR)
6821                 domain = amdgpu_display_supported_domains(adev, rbo->flags);
6822         else
6823                 domain = AMDGPU_GEM_DOMAIN_VRAM;
6824
6825         r = amdgpu_bo_pin(rbo, domain);
6826         if (unlikely(r != 0)) {
6827                 if (r != -ERESTARTSYS)
6828                         DRM_ERROR("Failed to pin framebuffer with error %d\n", r);
6829                 ttm_eu_backoff_reservation(&ticket, &list);
6830                 return r;
6831         }
6832
6833         r = amdgpu_ttm_alloc_gart(&rbo->tbo);
6834         if (unlikely(r != 0)) {
6835                 amdgpu_bo_unpin(rbo);
6836                 ttm_eu_backoff_reservation(&ticket, &list);
6837                 DRM_ERROR("%p bind failed\n", rbo);
6838                 return r;
6839         }
6840
6841         ttm_eu_backoff_reservation(&ticket, &list);
6842
6843         afb->address = amdgpu_bo_gpu_offset(rbo);
6844
6845         amdgpu_bo_ref(rbo);
6846
6847         /*
6848          * We don't do surface updates on planes that have been newly created,
6849          * but we also don't have the afb->address during atomic check.
6850          *
6851          * Fill in buffer attributes depending on the address here, but only on
6852          * newly created planes since they're not being used by DC yet and this
6853          * won't modify global state.
6854          */
6855         dm_plane_state_old = to_dm_plane_state(plane->state);
6856         dm_plane_state_new = to_dm_plane_state(new_state);
6857
6858         if (dm_plane_state_new->dc_state &&
6859             dm_plane_state_old->dc_state != dm_plane_state_new->dc_state) {
6860                 struct dc_plane_state *plane_state =
6861                         dm_plane_state_new->dc_state;
6862                 bool force_disable_dcc = !plane_state->dcc.enable;
6863
6864                 fill_plane_buffer_attributes(
6865                         adev, afb, plane_state->format, plane_state->rotation,
6866                         afb->tiling_flags,
6867                         &plane_state->tiling_info, &plane_state->plane_size,
6868                         &plane_state->dcc, &plane_state->address,
6869                         afb->tmz_surface, force_disable_dcc);
6870         }
6871
6872         return 0;
6873 }
6874
6875 static void dm_plane_helper_cleanup_fb(struct drm_plane *plane,
6876                                        struct drm_plane_state *old_state)
6877 {
6878         struct amdgpu_bo *rbo;
6879         int r;
6880
6881         if (!old_state->fb)
6882                 return;
6883
6884         rbo = gem_to_amdgpu_bo(old_state->fb->obj[0]);
6885         r = amdgpu_bo_reserve(rbo, false);
6886         if (unlikely(r)) {
6887                 DRM_ERROR("failed to reserve rbo before unpin\n");
6888                 return;
6889         }
6890
6891         amdgpu_bo_unpin(rbo);
6892         amdgpu_bo_unreserve(rbo);
6893         amdgpu_bo_unref(&rbo);
6894 }
6895
6896 static int dm_plane_helper_check_state(struct drm_plane_state *state,
6897                                        struct drm_crtc_state *new_crtc_state)
6898 {
6899         struct drm_framebuffer *fb = state->fb;
6900         int min_downscale, max_upscale;
6901         int min_scale = 0;
6902         int max_scale = INT_MAX;
6903
6904         /* Plane enabled? Validate viewport and get scaling factors from plane caps. */
6905         if (fb && state->crtc) {
6906                 /* Validate viewport to cover the case when only the position changes */
6907                 if (state->plane->type != DRM_PLANE_TYPE_CURSOR) {
6908                         int viewport_width = state->crtc_w;
6909                         int viewport_height = state->crtc_h;
6910
6911                         if (state->crtc_x < 0)
6912                                 viewport_width += state->crtc_x;
6913                         else if (state->crtc_x + state->crtc_w > new_crtc_state->mode.crtc_hdisplay)
6914                                 viewport_width = new_crtc_state->mode.crtc_hdisplay - state->crtc_x;
6915
6916                         if (state->crtc_y < 0)
6917                                 viewport_height += state->crtc_y;
6918                         else if (state->crtc_y + state->crtc_h > new_crtc_state->mode.crtc_vdisplay)
6919                                 viewport_height = new_crtc_state->mode.crtc_vdisplay - state->crtc_y;
6920
6921                         if (viewport_width < 0 || viewport_height < 0) {
6922                                 DRM_DEBUG_ATOMIC("Plane completely outside of screen\n");
6923                                 return -EINVAL;
6924                         } else if (viewport_width < MIN_VIEWPORT_SIZE*2) { /* x2 for width is because of pipe-split. */
6925                                 DRM_DEBUG_ATOMIC("Viewport width %d smaller than %d\n", viewport_width, MIN_VIEWPORT_SIZE*2);
6926                                 return -EINVAL;
6927                         } else if (viewport_height < MIN_VIEWPORT_SIZE) {
6928                                 DRM_DEBUG_ATOMIC("Viewport height %d smaller than %d\n", viewport_height, MIN_VIEWPORT_SIZE);
6929                                 return -EINVAL;
6930                         }
6931
6932                 }
6933
6934                 /* Get min/max allowed scaling factors from plane caps. */
6935                 get_min_max_dc_plane_scaling(state->crtc->dev, fb,
6936                                              &min_downscale, &max_upscale);
6937                 /*
6938                  * Convert to drm convention: 16.16 fixed point, instead of dc's
6939                  * 1.0 == 1000. Also drm scaling is src/dst instead of dc's
6940                  * dst/src, so min_scale = 1.0 / max_upscale, etc.
6941                  */
6942                 min_scale = (1000 << 16) / max_upscale;
6943                 max_scale = (1000 << 16) / min_downscale;
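                /*
                 * Worked example (illustrative caps): a plane that supports 4x
                 * upscaling reports max_upscale = 4000, so min_scale =
                 * (1000 << 16) / 4000 = 0x4000, i.e. 0.25 in 16.16 fixed point
                 * (src may be as small as a quarter of dst).  Likewise,
                 * min_downscale = 250 yields max_scale = 0x40000, i.e. 4.0.
                 */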
6944         }
6945
6946         return drm_atomic_helper_check_plane_state(
6947                 state, new_crtc_state, min_scale, max_scale, true, true);
6948 }
6949
6950 static int dm_plane_atomic_check(struct drm_plane *plane,
6951                                  struct drm_atomic_state *state)
6952 {
6953         struct drm_plane_state *new_plane_state = drm_atomic_get_new_plane_state(state,
6954                                                                                  plane);
6955         struct amdgpu_device *adev = drm_to_adev(plane->dev);
6956         struct dc *dc = adev->dm.dc;
6957         struct dm_plane_state *dm_plane_state;
6958         struct dc_scaling_info scaling_info;
6959         struct drm_crtc_state *new_crtc_state;
6960         int ret;
6961
6962         trace_amdgpu_dm_plane_atomic_check(new_plane_state);
6963
6964         dm_plane_state = to_dm_plane_state(new_plane_state);
6965
6966         if (!dm_plane_state->dc_state)
6967                 return 0;
6968
6969         new_crtc_state =
6970                 drm_atomic_get_new_crtc_state(state,
6971                                               new_plane_state->crtc);
6972         if (!new_crtc_state)
6973                 return -EINVAL;
6974
6975         ret = dm_plane_helper_check_state(new_plane_state, new_crtc_state);
6976         if (ret)
6977                 return ret;
6978
6979         ret = fill_dc_scaling_info(new_plane_state, &scaling_info);
6980         if (ret)
6981                 return ret;
6982
6983         if (dc_validate_plane(dc, dm_plane_state->dc_state) == DC_OK)
6984                 return 0;
6985
6986         return -EINVAL;
6987 }
6988
6989 static int dm_plane_atomic_async_check(struct drm_plane *plane,
6990                                        struct drm_atomic_state *state)
6991 {
6992         /* Only support async updates on cursor planes. */
6993         if (plane->type != DRM_PLANE_TYPE_CURSOR)
6994                 return -EINVAL;
6995
6996         return 0;
6997 }
6998
6999 static void dm_plane_atomic_async_update(struct drm_plane *plane,
7000                                          struct drm_atomic_state *state)
7001 {
7002         struct drm_plane_state *new_state = drm_atomic_get_new_plane_state(state,
7003                                                                            plane);
7004         struct drm_plane_state *old_state =
7005                 drm_atomic_get_old_plane_state(state, plane);
7006
7007         trace_amdgpu_dm_atomic_update_cursor(new_state);
7008
7009         swap(plane->state->fb, new_state->fb);
7010
7011         plane->state->src_x = new_state->src_x;
7012         plane->state->src_y = new_state->src_y;
7013         plane->state->src_w = new_state->src_w;
7014         plane->state->src_h = new_state->src_h;
7015         plane->state->crtc_x = new_state->crtc_x;
7016         plane->state->crtc_y = new_state->crtc_y;
7017         plane->state->crtc_w = new_state->crtc_w;
7018         plane->state->crtc_h = new_state->crtc_h;
7019
7020         handle_cursor_update(plane, old_state);
7021 }
7022
7023 static const struct drm_plane_helper_funcs dm_plane_helper_funcs = {
7024         .prepare_fb = dm_plane_helper_prepare_fb,
7025         .cleanup_fb = dm_plane_helper_cleanup_fb,
7026         .atomic_check = dm_plane_atomic_check,
7027         .atomic_async_check = dm_plane_atomic_async_check,
7028         .atomic_async_update = dm_plane_atomic_async_update
7029 };
7030
7031 /*
7032  * TODO: these are currently initialized to rgb formats only.
7033  * For future use cases we should either initialize them dynamically based on
7034  * plane capabilities, or initialize this array to all formats, so internal drm
7035  * check will succeed, and let DC implement proper check
7036  */
7037 static const uint32_t rgb_formats[] = {
7038         DRM_FORMAT_XRGB8888,
7039         DRM_FORMAT_ARGB8888,
7040         DRM_FORMAT_RGBA8888,
7041         DRM_FORMAT_XRGB2101010,
7042         DRM_FORMAT_XBGR2101010,
7043         DRM_FORMAT_ARGB2101010,
7044         DRM_FORMAT_ABGR2101010,
7045         DRM_FORMAT_XBGR8888,
7046         DRM_FORMAT_ABGR8888,
7047         DRM_FORMAT_RGB565,
7048 };
7049
7050 static const uint32_t overlay_formats[] = {
7051         DRM_FORMAT_XRGB8888,
7052         DRM_FORMAT_ARGB8888,
7053         DRM_FORMAT_RGBA8888,
7054         DRM_FORMAT_XBGR8888,
7055         DRM_FORMAT_ABGR8888,
7056         DRM_FORMAT_RGB565
7057 };
7058
7059 static const u32 cursor_formats[] = {
7060         DRM_FORMAT_ARGB8888
7061 };
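/*
 * Only pre-multiplied ARGB8888 is exposed for cursors; handle_cursor_update()
 * below programs DC with CURSOR_MODE_COLOR_PRE_MULTIPLIED_ALPHA to match.
 */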
7062
7063 static int get_plane_formats(const struct drm_plane *plane,
7064                              const struct dc_plane_cap *plane_cap,
7065                              uint32_t *formats, int max_formats)
7066 {
7067         int i, num_formats = 0;
7068
7069         /*
7070          * TODO: Query support for each group of formats directly from
7071          * DC plane caps. This will require adding more formats to the
7072          * caps list.
7073          */
7074
7075         switch (plane->type) {
7076         case DRM_PLANE_TYPE_PRIMARY:
7077                 for (i = 0; i < ARRAY_SIZE(rgb_formats); ++i) {
7078                         if (num_formats >= max_formats)
7079                                 break;
7080
7081                         formats[num_formats++] = rgb_formats[i];
7082                 }
7083
7084                 if (plane_cap && plane_cap->pixel_format_support.nv12)
7085                         formats[num_formats++] = DRM_FORMAT_NV12;
7086                 if (plane_cap && plane_cap->pixel_format_support.p010)
7087                         formats[num_formats++] = DRM_FORMAT_P010;
7088                 if (plane_cap && plane_cap->pixel_format_support.fp16) {
7089                         formats[num_formats++] = DRM_FORMAT_XRGB16161616F;
7090                         formats[num_formats++] = DRM_FORMAT_ARGB16161616F;
7091                         formats[num_formats++] = DRM_FORMAT_XBGR16161616F;
7092                         formats[num_formats++] = DRM_FORMAT_ABGR16161616F;
7093                 }
7094                 break;
7095
7096         case DRM_PLANE_TYPE_OVERLAY:
7097                 for (i = 0; i < ARRAY_SIZE(overlay_formats); ++i) {
7098                         if (num_formats >= max_formats)
7099                                 break;
7100
7101                         formats[num_formats++] = overlay_formats[i];
7102                 }
7103                 break;
7104
7105         case DRM_PLANE_TYPE_CURSOR:
7106                 for (i = 0; i < ARRAY_SIZE(cursor_formats); ++i) {
7107                         if (num_formats >= max_formats)
7108                                 break;
7109
7110                         formats[num_formats++] = cursor_formats[i];
7111                 }
7112                 break;
7113         }
7114
7115         return num_formats;
7116 }
7117
7118 static int amdgpu_dm_plane_init(struct amdgpu_display_manager *dm,
7119                                 struct drm_plane *plane,
7120                                 unsigned long possible_crtcs,
7121                                 const struct dc_plane_cap *plane_cap)
7122 {
7123         uint32_t formats[32];
7124         int num_formats;
7125         int res = -EPERM;
7126         unsigned int supported_rotations;
7127         uint64_t *modifiers = NULL;
7128
7129         num_formats = get_plane_formats(plane, plane_cap, formats,
7130                                         ARRAY_SIZE(formats));
7131
7132         res = get_plane_modifiers(dm->adev, plane->type, &modifiers);
7133         if (res)
7134                 return res;
7135
7136         res = drm_universal_plane_init(adev_to_drm(dm->adev), plane, possible_crtcs,
7137                                        &dm_plane_funcs, formats, num_formats,
7138                                        modifiers, plane->type, NULL);
7139         kfree(modifiers);
7140         if (res)
7141                 return res;
7142
7143         if (plane->type == DRM_PLANE_TYPE_OVERLAY &&
7144             plane_cap && plane_cap->per_pixel_alpha) {
7145                 unsigned int blend_caps = BIT(DRM_MODE_BLEND_PIXEL_NONE) |
7146                                           BIT(DRM_MODE_BLEND_PREMULTI);
7147
7148                 drm_plane_create_alpha_property(plane);
7149                 drm_plane_create_blend_mode_property(plane, blend_caps);
7150         }
7151
7152         if (plane->type == DRM_PLANE_TYPE_PRIMARY &&
7153             plane_cap &&
7154             (plane_cap->pixel_format_support.nv12 ||
7155              plane_cap->pixel_format_support.p010)) {
7156                 /* This only affects YUV formats. */
7157                 drm_plane_create_color_properties(
7158                         plane,
7159                         BIT(DRM_COLOR_YCBCR_BT601) |
7160                         BIT(DRM_COLOR_YCBCR_BT709) |
7161                         BIT(DRM_COLOR_YCBCR_BT2020),
7162                         BIT(DRM_COLOR_YCBCR_LIMITED_RANGE) |
7163                         BIT(DRM_COLOR_YCBCR_FULL_RANGE),
7164                         DRM_COLOR_YCBCR_BT709, DRM_COLOR_YCBCR_LIMITED_RANGE);
7165         }
7166
7167         supported_rotations =
7168                 DRM_MODE_ROTATE_0 | DRM_MODE_ROTATE_90 |
7169                 DRM_MODE_ROTATE_180 | DRM_MODE_ROTATE_270;
7170
7171         if (dm->adev->asic_type >= CHIP_BONAIRE &&
7172             plane->type != DRM_PLANE_TYPE_CURSOR)
7173                 drm_plane_create_rotation_property(plane, DRM_MODE_ROTATE_0,
7174                                                    supported_rotations);
7175
7176         drm_plane_helper_add(plane, &dm_plane_helper_funcs);
7177
7178         /* Create (reset) the plane state */
7179         if (plane->funcs->reset)
7180                 plane->funcs->reset(plane);
7181
7182         return 0;
7183 }
7184
7185 static int amdgpu_dm_crtc_init(struct amdgpu_display_manager *dm,
7186                                struct drm_plane *plane,
7187                                uint32_t crtc_index)
7188 {
7189         struct amdgpu_crtc *acrtc = NULL;
7190         struct drm_plane *cursor_plane;
7191
7192         int res = -ENOMEM;
7193
7194         cursor_plane = kzalloc(sizeof(*cursor_plane), GFP_KERNEL);
7195         if (!cursor_plane)
7196                 goto fail;
7197
7198         cursor_plane->type = DRM_PLANE_TYPE_CURSOR;
7199         res = amdgpu_dm_plane_init(dm, cursor_plane, 0, NULL);
        if (res)
                goto fail;

7201         acrtc = kzalloc(sizeof(struct amdgpu_crtc), GFP_KERNEL);
7202         if (!acrtc)
7203                 goto fail;
7204
7205         res = drm_crtc_init_with_planes(
7206                         dm->ddev,
7207                         &acrtc->base,
7208                         plane,
7209                         cursor_plane,
7210                         &amdgpu_dm_crtc_funcs, NULL);
7211
7212         if (res)
7213                 goto fail;
7214
7215         drm_crtc_helper_add(&acrtc->base, &amdgpu_dm_crtc_helper_funcs);
7216
7217         /* Create (reset) the CRTC state */
7218         if (acrtc->base.funcs->reset)
7219                 acrtc->base.funcs->reset(&acrtc->base);
7220
7221         acrtc->max_cursor_width = dm->adev->dm.dc->caps.max_cursor_size;
7222         acrtc->max_cursor_height = dm->adev->dm.dc->caps.max_cursor_size;
7223
7224         acrtc->crtc_id = crtc_index;
7225         acrtc->base.enabled = false;
7226         acrtc->otg_inst = -1;
7227
7228         dm->adev->mode_info.crtcs[crtc_index] = acrtc;
7229         drm_crtc_enable_color_mgmt(&acrtc->base, MAX_COLOR_LUT_ENTRIES,
7230                                    true, MAX_COLOR_LUT_ENTRIES);
7231         drm_mode_crtc_set_gamma_size(&acrtc->base, MAX_COLOR_LEGACY_LUT_ENTRIES);
7232
7233         return 0;
7234
7235 fail:
7236         kfree(acrtc);
7237         kfree(cursor_plane);
7238         return res;
7239 }
7240
7241
7242 static int to_drm_connector_type(enum signal_type st)
7243 {
7244         switch (st) {
7245         case SIGNAL_TYPE_HDMI_TYPE_A:
7246                 return DRM_MODE_CONNECTOR_HDMIA;
7247         case SIGNAL_TYPE_EDP:
7248                 return DRM_MODE_CONNECTOR_eDP;
7249         case SIGNAL_TYPE_LVDS:
7250                 return DRM_MODE_CONNECTOR_LVDS;
7251         case SIGNAL_TYPE_RGB:
7252                 return DRM_MODE_CONNECTOR_VGA;
7253         case SIGNAL_TYPE_DISPLAY_PORT:
7254         case SIGNAL_TYPE_DISPLAY_PORT_MST:
7255                 return DRM_MODE_CONNECTOR_DisplayPort;
7256         case SIGNAL_TYPE_DVI_DUAL_LINK:
7257         case SIGNAL_TYPE_DVI_SINGLE_LINK:
7258                 return DRM_MODE_CONNECTOR_DVID;
7259         case SIGNAL_TYPE_VIRTUAL:
7260                 return DRM_MODE_CONNECTOR_VIRTUAL;
7261
7262         default:
7263                 return DRM_MODE_CONNECTOR_Unknown;
7264         }
7265 }
7266
7267 static struct drm_encoder *amdgpu_dm_connector_to_encoder(struct drm_connector *connector)
7268 {
7269         struct drm_encoder *encoder;
7270
7271         /* There is only one encoder per connector */
7272         drm_connector_for_each_possible_encoder(connector, encoder)
7273                 return encoder;
7274
7275         return NULL;
7276 }
7277
7278 static void amdgpu_dm_get_native_mode(struct drm_connector *connector)
7279 {
7280         struct drm_encoder *encoder;
7281         struct amdgpu_encoder *amdgpu_encoder;
7282
7283         encoder = amdgpu_dm_connector_to_encoder(connector);
7284
7285         if (encoder == NULL)
7286                 return;
7287
7288         amdgpu_encoder = to_amdgpu_encoder(encoder);
7289
7290         amdgpu_encoder->native_mode.clock = 0;
7291
7292         if (!list_empty(&connector->probed_modes)) {
7293                 struct drm_display_mode *preferred_mode = NULL;
7294
7295                 list_for_each_entry(preferred_mode,
7296                                     &connector->probed_modes,
7297                                     head) {
7298                         if (preferred_mode->type & DRM_MODE_TYPE_PREFERRED)
7299                                 amdgpu_encoder->native_mode = *preferred_mode;
7300
7301                         break;
7302                 }
7303
7304         }
7305 }
7306
7307 static struct drm_display_mode *
7308 amdgpu_dm_create_common_mode(struct drm_encoder *encoder,
7309                              char *name,
7310                              int hdisplay, int vdisplay)
7311 {
7312         struct drm_device *dev = encoder->dev;
7313         struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
7314         struct drm_display_mode *mode = NULL;
7315         struct drm_display_mode *native_mode = &amdgpu_encoder->native_mode;
7316
7317         mode = drm_mode_duplicate(dev, native_mode);
7318
7319         if (mode == NULL)
7320                 return NULL;
7321
7322         mode->hdisplay = hdisplay;
7323         mode->vdisplay = vdisplay;
7324         mode->type &= ~DRM_MODE_TYPE_PREFERRED;
7325         strscpy(mode->name, name, DRM_DISPLAY_MODE_LEN);
7326
7327         return mode;
7329 }
7330
7331 static void amdgpu_dm_connector_add_common_modes(struct drm_encoder *encoder,
7332                                                  struct drm_connector *connector)
7333 {
7334         struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
7335         struct drm_display_mode *mode = NULL;
7336         struct drm_display_mode *native_mode = &amdgpu_encoder->native_mode;
7337         struct amdgpu_dm_connector *amdgpu_dm_connector =
7338                                 to_amdgpu_dm_connector(connector);
7339         int i;
7340         int n;
7341         struct mode_size {
7342                 char name[DRM_DISPLAY_MODE_LEN];
7343                 int w;
7344                 int h;
7345         } common_modes[] = {
7346                 {  "640x480",  640,  480},
7347                 {  "800x600",  800,  600},
7348                 { "1024x768", 1024,  768},
7349                 { "1280x720", 1280,  720},
7350                 { "1280x800", 1280,  800},
7351                 {"1280x1024", 1280, 1024},
7352                 { "1440x900", 1440,  900},
7353                 {"1680x1050", 1680, 1050},
7354                 {"1600x1200", 1600, 1200},
7355                 {"1920x1080", 1920, 1080},
7356                 {"1920x1200", 1920, 1200}
7357         };
7358
7359         n = ARRAY_SIZE(common_modes);
7360
7361         for (i = 0; i < n; i++) {
7362                 struct drm_display_mode *curmode = NULL;
7363                 bool mode_existed = false;
7364
7365                 if (common_modes[i].w > native_mode->hdisplay ||
7366                     common_modes[i].h > native_mode->vdisplay ||
7367                    (common_modes[i].w == native_mode->hdisplay &&
7368                     common_modes[i].h == native_mode->vdisplay))
7369                         continue;
7370
7371                 list_for_each_entry(curmode, &connector->probed_modes, head) {
7372                         if (common_modes[i].w == curmode->hdisplay &&
7373                             common_modes[i].h == curmode->vdisplay) {
7374                                 mode_existed = true;
7375                                 break;
7376                         }
7377                 }
7378
7379                 if (mode_existed)
7380                         continue;
7381
7382                 mode = amdgpu_dm_create_common_mode(encoder,
7383                                 common_modes[i].name, common_modes[i].w,
7384                                 common_modes[i].h);
                if (!mode) /* drm_mode_duplicate() may have failed */
                        continue;

7385                 drm_mode_probed_add(connector, mode);
7386                 amdgpu_dm_connector->num_modes++;
7387         }
7388 }
7389
7390 static void amdgpu_dm_connector_ddc_get_modes(struct drm_connector *connector,
7391                                               struct edid *edid)
7392 {
7393         struct amdgpu_dm_connector *amdgpu_dm_connector =
7394                         to_amdgpu_dm_connector(connector);
7395
7396         if (edid) {
7397                 /* empty probed_modes */
7398                 INIT_LIST_HEAD(&connector->probed_modes);
7399                 amdgpu_dm_connector->num_modes =
7400                                 drm_add_edid_modes(connector, edid);
7401
7402                 /* Sort the probed modes before calling
7403                  * amdgpu_dm_get_native_mode(), since an EDID can have
7404                  * more than one preferred mode.  Modes later in the
7405                  * probed list may have a higher preferred resolution:
7406                  * for example, 3840x2160 in the base EDID preferred
7407                  * timing and 4096x2160 in a later DisplayID extension
7408                  * block.
7409                  */
7410                 drm_mode_sort(&connector->probed_modes);
7411                 amdgpu_dm_get_native_mode(connector);
7412
7413                 /* Freesync capabilities are reset by calling
7414                  * drm_add_edid_modes() and need to be
7415                  * restored here.
7416                  */
7417                 amdgpu_dm_update_freesync_caps(connector, edid);
7418         } else {
7419                 amdgpu_dm_connector->num_modes = 0;
7420         }
7421 }
7422
7423 static bool is_duplicate_mode(struct amdgpu_dm_connector *aconnector,
7424                               struct drm_display_mode *mode)
7425 {
7426         struct drm_display_mode *m;
7427
7428         list_for_each_entry (m, &aconnector->base.probed_modes, head) {
7429                 if (drm_mode_equal(m, mode))
7430                         return true;
7431         }
7432
7433         return false;
7434 }
7435
7436 static uint add_fs_modes(struct amdgpu_dm_connector *aconnector)
7437 {
7438         const struct drm_display_mode *m;
7439         struct drm_display_mode *new_mode;
7440         uint i;
7441         uint32_t new_modes_count = 0;
7442
7443         /* Standard FPS values
7444          *
7445          * 23.976   - TV/NTSC
7446          * 24       - Cinema
7447          * 25       - TV/PAL
7448          * 29.97    - TV/NTSC
7449          * 30       - TV/NTSC
7450          * 48       - Cinema HFR
7451          * 50       - TV/PAL
7452          * 60       - Commonly used
7453          * 48,72,96 - Multiples of 24
7454          */
7455         const uint32_t common_rates[] = { 23976, 24000, 25000, 29970, 30000,
7456                                          48000, 50000, 60000, 72000, 96000 };
7457
7458         /*
7459          * Find the mode with the highest refresh rate at the same resolution
7460          * as the preferred mode: some monitors report a preferred mode with a
7461          * lower refresh rate than the highest one they support.
7462          */
7463
7464         m = get_highest_refresh_rate_mode(aconnector, true);
7465         if (!m)
7466                 return 0;
7467
7468         for (i = 0; i < ARRAY_SIZE(common_rates); i++) {
7469                 uint64_t target_vtotal, target_vtotal_diff;
7470                 uint64_t num, den;
7471
7472                 if (drm_mode_vrefresh(m) * 1000 < common_rates[i])
7473                         continue;
7474
7475                 if (common_rates[i] < aconnector->min_vfreq * 1000 ||
7476                     common_rates[i] > aconnector->max_vfreq * 1000)
7477                         continue;
7478
7479                 num = (unsigned long long)m->clock * 1000 * 1000;
7480                 den = common_rates[i] * (unsigned long long)m->htotal;
7481                 target_vtotal = div_u64(num, den);
7482                 target_vtotal_diff = target_vtotal - m->vtotal;
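                /*
                 * Illustrative numbers: from a 1920x1080@60 base mode
                 * (clock = 148500 kHz, htotal = 2200, vtotal = 1125), a 50 Hz
                 * target gives target_vtotal = 148500000000 / (50000 * 2200)
                 * = 1350, i.e. the vertical blank is stretched by 225 lines
                 * while the pixel clock is left untouched.
                 */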
7483
7484                 /* Check for illegal modes */
7485                 if (m->vsync_start + target_vtotal_diff < m->vdisplay ||
7486                     m->vsync_end + target_vtotal_diff < m->vsync_start ||
7487                     m->vtotal + target_vtotal_diff < m->vsync_end)
7488                         continue;
7489
7490                 new_mode = drm_mode_duplicate(aconnector->base.dev, m);
7491                 if (!new_mode)
7492                         goto out;
7493
7494                 new_mode->vtotal += (u16)target_vtotal_diff;
7495                 new_mode->vsync_start += (u16)target_vtotal_diff;
7496                 new_mode->vsync_end += (u16)target_vtotal_diff;
7497                 new_mode->type &= ~DRM_MODE_TYPE_PREFERRED;
7498                 new_mode->type |= DRM_MODE_TYPE_DRIVER;
7499
7500                 if (!is_duplicate_mode(aconnector, new_mode)) {
7501                         drm_mode_probed_add(&aconnector->base, new_mode);
7502                         new_modes_count += 1;
7503                 } else {
7504                         drm_mode_destroy(aconnector->base.dev, new_mode);
                }
7505         }
7506  out:
7507         return new_modes_count;
7508 }
7509
7510 static void amdgpu_dm_connector_add_freesync_modes(struct drm_connector *connector,
7511                                                    struct edid *edid)
7512 {
7513         struct amdgpu_dm_connector *amdgpu_dm_connector =
7514                 to_amdgpu_dm_connector(connector);
7515
7516         if (!(amdgpu_freesync_vid_mode && edid))
7517                 return;
7518
7519         if (amdgpu_dm_connector->max_vfreq - amdgpu_dm_connector->min_vfreq > 10)
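        /*
         * Only synthesize fixed-refresh modes when the panel reports a VRR
         * range wider than 10 Hz; a narrower window leaves little room for
         * the common rates handled in add_fs_modes().
         */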
7520                 amdgpu_dm_connector->num_modes +=
7521                         add_fs_modes(amdgpu_dm_connector);
7522 }
7523
7524 static int amdgpu_dm_connector_get_modes(struct drm_connector *connector)
7525 {
7526         struct amdgpu_dm_connector *amdgpu_dm_connector =
7527                         to_amdgpu_dm_connector(connector);
7528         struct drm_encoder *encoder;
7529         struct edid *edid = amdgpu_dm_connector->edid;
7530
7531         encoder = amdgpu_dm_connector_to_encoder(connector);
7532
7533         if (!drm_edid_is_valid(edid)) {
7534                 amdgpu_dm_connector->num_modes =
7535                                 drm_add_modes_noedid(connector, 640, 480);
7536         } else {
7537                 amdgpu_dm_connector_ddc_get_modes(connector, edid);
7538                 amdgpu_dm_connector_add_common_modes(encoder, connector);
7539                 amdgpu_dm_connector_add_freesync_modes(connector, edid);
7540         }
7541         amdgpu_dm_fbc_init(connector);
7542
7543         return amdgpu_dm_connector->num_modes;
7544 }
7545
7546 void amdgpu_dm_connector_init_helper(struct amdgpu_display_manager *dm,
7547                                      struct amdgpu_dm_connector *aconnector,
7548                                      int connector_type,
7549                                      struct dc_link *link,
7550                                      int link_index)
7551 {
7552         struct amdgpu_device *adev = drm_to_adev(dm->ddev);
7553
7554         /*
7555          * Some of the properties below require access to state, like bpc.
7556          * Allocate some default initial connector state with our reset helper.
7557          */
7558         if (aconnector->base.funcs->reset)
7559                 aconnector->base.funcs->reset(&aconnector->base);
7560
7561         aconnector->connector_id = link_index;
7562         aconnector->dc_link = link;
7563         aconnector->base.interlace_allowed = false;
7564         aconnector->base.doublescan_allowed = false;
7565         aconnector->base.stereo_allowed = false;
7566         aconnector->base.dpms = DRM_MODE_DPMS_OFF;
7567         aconnector->hpd.hpd = AMDGPU_HPD_NONE; /* not used */
7568         aconnector->audio_inst = -1;
7569         mutex_init(&aconnector->hpd_lock);
7570
7571         /*
7572         /*
7573          * Configure HPD hot-plug support.  connector->polled defaults to 0,
7574          * which means HPD hot plug is not supported.
7575          */
7576         case DRM_MODE_CONNECTOR_HDMIA:
7577                 aconnector->base.polled = DRM_CONNECTOR_POLL_HPD;
7578                 aconnector->base.ycbcr_420_allowed =
7579                         link->link_enc->features.hdmi_ycbcr420_supported ? true : false;
7580                 break;
7581         case DRM_MODE_CONNECTOR_DisplayPort:
7582                 aconnector->base.polled = DRM_CONNECTOR_POLL_HPD;
7583                 aconnector->base.ycbcr_420_allowed =
7584                         link->link_enc->features.dp_ycbcr420_supported ? true : false;
7585                 break;
7586         case DRM_MODE_CONNECTOR_DVID:
7587                 aconnector->base.polled = DRM_CONNECTOR_POLL_HPD;
7588                 break;
7589         default:
7590                 break;
7591         }
7592
7593         drm_object_attach_property(&aconnector->base.base,
7594                                 dm->ddev->mode_config.scaling_mode_property,
7595                                 DRM_MODE_SCALE_NONE);
7596
7597         drm_object_attach_property(&aconnector->base.base,
7598                                 adev->mode_info.underscan_property,
7599                                 UNDERSCAN_OFF);
7600         drm_object_attach_property(&aconnector->base.base,
7601                                 adev->mode_info.underscan_hborder_property,
7602                                 0);
7603         drm_object_attach_property(&aconnector->base.base,
7604                                 adev->mode_info.underscan_vborder_property,
7605                                 0);
7606
7607         if (!aconnector->mst_port)
7608                 drm_connector_attach_max_bpc_property(&aconnector->base, 8, 16);
7609
7610         /* This defaults to the max in the range, but we want 8bpc for non-edp. */
7611         aconnector->base.state->max_bpc = (connector_type == DRM_MODE_CONNECTOR_eDP) ? 16 : 8;
7612         aconnector->base.state->max_requested_bpc = aconnector->base.state->max_bpc;
7613
7614         if (connector_type == DRM_MODE_CONNECTOR_eDP &&
7615             (dc_is_dmcu_initialized(adev->dm.dc) || adev->dm.dc->ctx->dmub_srv)) {
7616                 drm_object_attach_property(&aconnector->base.base,
7617                                 adev->mode_info.abm_level_property, 0);
7618         }
7619
7620         if (connector_type == DRM_MODE_CONNECTOR_HDMIA ||
7621             connector_type == DRM_MODE_CONNECTOR_DisplayPort ||
7622             connector_type == DRM_MODE_CONNECTOR_eDP) {
7623                 drm_object_attach_property(
7624                         &aconnector->base.base,
7625                         dm->ddev->mode_config.hdr_output_metadata_property, 0);
7626
7627                 if (!aconnector->mst_port)
7628                         drm_connector_attach_vrr_capable_property(&aconnector->base);
7629
7630 #ifdef CONFIG_DRM_AMD_DC_HDCP
7631                 if (adev->dm.hdcp_workqueue)
7632                         drm_connector_attach_content_protection_property(&aconnector->base, true);
7633 #endif
7634         }
7635 }
7636
7637 static int amdgpu_dm_i2c_xfer(struct i2c_adapter *i2c_adap,
7638                               struct i2c_msg *msgs, int num)
7639 {
7640         struct amdgpu_i2c_adapter *i2c = i2c_get_adapdata(i2c_adap);
7641         struct ddc_service *ddc_service = i2c->ddc_service;
7642         struct i2c_command cmd;
7643         int i;
7644         int result = -EIO;
7645
7646         cmd.payloads = kcalloc(num, sizeof(struct i2c_payload), GFP_KERNEL);
7647
7648         if (!cmd.payloads)
7649                 return result;
7650
7651         cmd.number_of_payloads = num;
7652         cmd.engine = I2C_COMMAND_ENGINE_DEFAULT;
7653         cmd.speed = 100;
7654
7655         for (i = 0; i < num; i++) {
7656                 cmd.payloads[i].write = !(msgs[i].flags & I2C_M_RD);
7657                 cmd.payloads[i].address = msgs[i].addr;
7658                 cmd.payloads[i].length = msgs[i].len;
7659                 cmd.payloads[i].data = msgs[i].buf;
7660         }
7661
7662         if (dc_submit_i2c(
7663                         ddc_service->ctx->dc,
7664                         ddc_service->ddc_pin->hw_info.ddc_channel,
7665                         &cmd))
7666                 result = num;
7667
7668         kfree(cmd.payloads);
7669         return result;
7670 }
7671
7672 static u32 amdgpu_dm_i2c_func(struct i2c_adapter *adap)
7673 {
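        /* Plain I2C transfers, plus SMBus commands emulated on top of them. */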
7674         return I2C_FUNC_I2C | I2C_FUNC_SMBUS_EMUL;
7675 }
7676
7677 static const struct i2c_algorithm amdgpu_dm_i2c_algo = {
7678         .master_xfer = amdgpu_dm_i2c_xfer,
7679         .functionality = amdgpu_dm_i2c_func,
7680 };
7681
7682 static struct amdgpu_i2c_adapter *
7683 create_i2c(struct ddc_service *ddc_service,
7684            int link_index,
7685            int *res)
7686 {
7687         struct amdgpu_device *adev = ddc_service->ctx->driver_context;
7688         struct amdgpu_i2c_adapter *i2c;
7689
7690         i2c = kzalloc(sizeof(struct amdgpu_i2c_adapter), GFP_KERNEL);
7691         if (!i2c)
7692                 return NULL;
7693         i2c->base.owner = THIS_MODULE;
7694         i2c->base.class = I2C_CLASS_DDC;
7695         i2c->base.dev.parent = &adev->pdev->dev;
7696         i2c->base.algo = &amdgpu_dm_i2c_algo;
7697         snprintf(i2c->base.name, sizeof(i2c->base.name), "AMDGPU DM i2c hw bus %d", link_index);
7698         i2c_set_adapdata(&i2c->base, i2c);
7699         i2c->ddc_service = ddc_service;
7700         i2c->ddc_service->ddc_pin->hw_info.ddc_channel = link_index;
7701
7702         return i2c;
7703 }
7704
7705
7706 /*
7707  * Note: this function assumes that dc_link_detect() was called for the
7708  * dc_link which will be represented by this aconnector.
7709  */
7710 static int amdgpu_dm_connector_init(struct amdgpu_display_manager *dm,
7711                                     struct amdgpu_dm_connector *aconnector,
7712                                     uint32_t link_index,
7713                                     struct amdgpu_encoder *aencoder)
7714 {
7715         int res = 0;
7716         int connector_type;
7717         struct dc *dc = dm->dc;
7718         struct dc_link *link = dc_get_link_at_index(dc, link_index);
7719         struct amdgpu_i2c_adapter *i2c;
7720
7721         link->priv = aconnector;
7722
7723         DRM_DEBUG_DRIVER("%s()\n", __func__);
7724
7725         i2c = create_i2c(link->ddc, link->link_index, &res);
7726         if (!i2c) {
7727                 DRM_ERROR("Failed to create i2c adapter data\n");
7728                 return -ENOMEM;
7729         }
7730
7731         aconnector->i2c = i2c;
7732         res = i2c_add_adapter(&i2c->base);
7733
7734         if (res) {
7735                 DRM_ERROR("Failed to register hw i2c %d\n", link->link_index);
7736                 goto out_free;
7737         }
7738
7739         connector_type = to_drm_connector_type(link->connector_signal);
7740
7741         res = drm_connector_init_with_ddc(
7742                         dm->ddev,
7743                         &aconnector->base,
7744                         &amdgpu_dm_connector_funcs,
7745                         connector_type,
7746                         &i2c->base);
7747
7748         if (res) {
7749                 DRM_ERROR("connector_init failed\n");
7750                 aconnector->connector_id = -1;
7751                 goto out_free;
7752         }
7753
7754         drm_connector_helper_add(
7755                         &aconnector->base,
7756                         &amdgpu_dm_connector_helper_funcs);
7757
7758         amdgpu_dm_connector_init_helper(
7759                 dm,
7760                 aconnector,
7761                 connector_type,
7762                 link,
7763                 link_index);
7764
7765         drm_connector_attach_encoder(
7766                 &aconnector->base, &aencoder->base);
7767
7768         if (connector_type == DRM_MODE_CONNECTOR_DisplayPort
7769                 || connector_type == DRM_MODE_CONNECTOR_eDP)
7770                 amdgpu_dm_initialize_dp_connector(dm, aconnector, link->link_index);
7771
7772 out_free:
7773         if (res) {
7774                 kfree(i2c);
7775                 aconnector->i2c = NULL;
7776         }
7777         return res;
7778 }
7779
7780 int amdgpu_dm_get_encoder_crtc_mask(struct amdgpu_device *adev)
7781 {
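        /* For 1-6 CRTCs this is GENMASK(num_crtc - 1, 0); larger counts clamp to 0x3f. */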
7782         switch (adev->mode_info.num_crtc) {
7783         case 1:
7784                 return 0x1;
7785         case 2:
7786                 return 0x3;
7787         case 3:
7788                 return 0x7;
7789         case 4:
7790                 return 0xf;
7791         case 5:
7792                 return 0x1f;
7793         case 6:
7794         default:
7795                 return 0x3f;
7796         }
7797 }
7798
7799 static int amdgpu_dm_encoder_init(struct drm_device *dev,
7800                                   struct amdgpu_encoder *aencoder,
7801                                   uint32_t link_index)
7802 {
7803         struct amdgpu_device *adev = drm_to_adev(dev);
7804
7805         int res = drm_encoder_init(dev,
7806                                    &aencoder->base,
7807                                    &amdgpu_dm_encoder_funcs,
7808                                    DRM_MODE_ENCODER_TMDS,
7809                                    NULL);
7810
7811         aencoder->base.possible_crtcs = amdgpu_dm_get_encoder_crtc_mask(adev);
7812
7813         if (!res)
7814                 aencoder->encoder_id = link_index;
7815         else
7816                 aencoder->encoder_id = -1;
7817
7818         drm_encoder_helper_add(&aencoder->base, &amdgpu_dm_encoder_helper_funcs);
7819
7820         return res;
7821 }
7822
7823 static void manage_dm_interrupts(struct amdgpu_device *adev,
7824                                  struct amdgpu_crtc *acrtc,
7825                                  bool enable)
7826 {
7827         /*
7828          * We have no guarantee that the frontend index maps to the same
7829          * backend index - some even map to more than one.
7830          *
7831          * TODO: Use a different interrupt or check DC itself for the mapping.
7832          */
7833         int irq_type =
7834                 amdgpu_display_crtc_idx_to_irq_type(
7835                         adev,
7836                         acrtc->crtc_id);
7837
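        /*
         * Ordering note: on enable, vblank handling is armed before the
         * pageflip (and vline0) interrupts are taken; disable tears the same
         * pieces down in reverse order.
         */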
7838         if (enable) {
7839                 drm_crtc_vblank_on(&acrtc->base);
7840                 amdgpu_irq_get(
7841                         adev,
7842                         &adev->pageflip_irq,
7843                         irq_type);
7844 #if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
7845                 amdgpu_irq_get(
7846                         adev,
7847                         &adev->vline0_irq,
7848                         irq_type);
7849 #endif
7850         } else {
7851 #if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
7852                 amdgpu_irq_put(
7853                         adev,
7854                         &adev->vline0_irq,
7855                         irq_type);
7856 #endif
7857                 amdgpu_irq_put(
7858                         adev,
7859                         &adev->pageflip_irq,
7860                         irq_type);
7861                 drm_crtc_vblank_off(&acrtc->base);
7862         }
7863 }
7864
7865 static void dm_update_pflip_irq_state(struct amdgpu_device *adev,
7866                                       struct amdgpu_crtc *acrtc)
7867 {
7868         int irq_type =
7869                 amdgpu_display_crtc_idx_to_irq_type(adev, acrtc->crtc_id);
7870
7871         /*
7872          * Read the current state for the IRQ and force a reapply of the
7873          * setting to hardware.
7874          */
7875         amdgpu_irq_update(adev, &adev->pageflip_irq, irq_type);
7876 }
7877
7878 static bool
7879 is_scaling_state_different(const struct dm_connector_state *dm_state,
7880                            const struct dm_connector_state *old_dm_state)
7881 {
7882         if (dm_state->scaling != old_dm_state->scaling)
7883                 return true;
7884         if (!dm_state->underscan_enable && old_dm_state->underscan_enable) {
7885                 if (old_dm_state->underscan_hborder != 0 && old_dm_state->underscan_vborder != 0)
7886                         return true;
7887         } else  if (dm_state->underscan_enable && !old_dm_state->underscan_enable) {
7888                 if (dm_state->underscan_hborder != 0 && dm_state->underscan_vborder != 0)
7889                         return true;
7890         } else if (dm_state->underscan_hborder != old_dm_state->underscan_hborder ||
7891                    dm_state->underscan_vborder != old_dm_state->underscan_vborder)
7892                 return true;
7893         return false;
7894 }
7895
7896 #ifdef CONFIG_DRM_AMD_DC_HDCP
7897 static bool is_content_protection_different(struct drm_connector_state *state,
7898                                             const struct drm_connector_state *old_state,
7899                                             const struct drm_connector *connector, struct hdcp_workqueue *hdcp_w)
7900 {
7901         struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
7902         struct dm_connector_state *dm_con_state = to_dm_connector_state(connector->state);
7903
7904         /* Handle: Type0/1 change */
7905         if (old_state->hdcp_content_type != state->hdcp_content_type &&
7906             state->content_protection != DRM_MODE_CONTENT_PROTECTION_UNDESIRED) {
7907                 state->content_protection = DRM_MODE_CONTENT_PROTECTION_DESIRED;
7908                 return true;
7909         }
7910
7911         /* Content protection is being re-enabled; ignore this.
7912          *
7913          * Handles:     ENABLED -> DESIRED
7914          */
7915         if (old_state->content_protection == DRM_MODE_CONTENT_PROTECTION_ENABLED &&
7916             state->content_protection == DRM_MODE_CONTENT_PROTECTION_DESIRED) {
7917                 state->content_protection = DRM_MODE_CONTENT_PROTECTION_ENABLED;
7918                 return false;
7919         }
7920
7921         /* S3 resume case: the old state will always be 0 (UNDESIRED) and the restored state will be ENABLED.
7922          *
7923          * Handles:     UNDESIRED -> ENABLED
7924          */
7925         if (old_state->content_protection == DRM_MODE_CONTENT_PROTECTION_UNDESIRED &&
7926             state->content_protection == DRM_MODE_CONTENT_PROTECTION_ENABLED)
7927                 state->content_protection = DRM_MODE_CONTENT_PROTECTION_DESIRED;
7928
7929         /* Check that something is actually connected and enabled; otherwise we would
7930          * start HDCP with nothing driving the link (hot-plug, headless S3, DPMS).
7931          *
7932          * Handles:     DESIRED -> DESIRED (Special case)
7933          */
7934         if (dm_con_state->update_hdcp && state->content_protection == DRM_MODE_CONTENT_PROTECTION_DESIRED &&
7935             connector->dpms == DRM_MODE_DPMS_ON && aconnector->dc_sink != NULL) {
7936                 dm_con_state->update_hdcp = false;
7937                 return true;
7938         }
7939
7940         /*
7941          * Handles:     UNDESIRED -> UNDESIRED
7942          *              DESIRED -> DESIRED
7943          *              ENABLED -> ENABLED
7944          */
7945         if (old_state->content_protection == state->content_protection)
7946                 return false;
7947
7948         /*
7949          * Handles:     UNDESIRED -> DESIRED
7950          *              DESIRED -> UNDESIRED
7951          *              ENABLED -> UNDESIRED
7952          */
7953         if (state->content_protection != DRM_MODE_CONTENT_PROTECTION_ENABLED)
7954                 return true;
7955
7956         /*
7957          * Handles:     DESIRED -> ENABLED
7958          */
7959         return false;
7960 }
7961
7962 #endif
7963 static void remove_stream(struct amdgpu_device *adev,
7964                           struct amdgpu_crtc *acrtc,
7965                           struct dc_stream_state *stream)
7966 {
7967         /* this is the update mode case */
7968
7969         acrtc->otg_inst = -1;
7970         acrtc->enabled = false;
7971 }
7972
7973 static int get_cursor_position(struct drm_plane *plane, struct drm_crtc *crtc,
7974                                struct dc_cursor_position *position)
7975 {
7976         struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
7977         int x, y;
7978         int xorigin = 0, yorigin = 0;
7979
7980         if (!crtc || !plane->state->fb)
7981                 return 0;
7982
7983         if ((plane->state->crtc_w > amdgpu_crtc->max_cursor_width) ||
7984             (plane->state->crtc_h > amdgpu_crtc->max_cursor_height)) {
7985                 DRM_ERROR("%s: bad cursor width or height %d x %d\n",
7986                           __func__,
7987                           plane->state->crtc_w,
7988                           plane->state->crtc_h);
7989                 return -EINVAL;
7990         }
7991
7992         x = plane->state->crtc_x;
7993         y = plane->state->crtc_y;
7994
7995         if (x <= -amdgpu_crtc->max_cursor_width ||
7996             y <= -amdgpu_crtc->max_cursor_height)
7997                 return 0;
7998
7999         if (x < 0) {
8000                 xorigin = min(-x, amdgpu_crtc->max_cursor_width - 1);
8001                 x = 0;
8002         }
8003         if (y < 0) {
8004                 yorigin = min(-y, amdgpu_crtc->max_cursor_height - 1);
8005                 y = 0;
8006         }
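        /*
         * Example (illustrative): a 64x64 cursor placed at crtc_x = -10 yields
         * xorigin = 10 and x = 0, so scanout effectively starts 10 pixels into
         * the cursor image instead of drawing off-screen.
         */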
8007         position->enable = true;
8008         position->translate_by_source = true;
8009         position->x = x;
8010         position->y = y;
8011         position->x_hotspot = xorigin;
8012         position->y_hotspot = yorigin;
8013
8014         return 0;
8015 }
8016
8017 static void handle_cursor_update(struct drm_plane *plane,
8018                                  struct drm_plane_state *old_plane_state)
8019 {
8020         struct amdgpu_device *adev = drm_to_adev(plane->dev);
8021         struct amdgpu_framebuffer *afb = to_amdgpu_framebuffer(plane->state->fb);
8022         struct drm_crtc *crtc = afb ? plane->state->crtc : old_plane_state->crtc;
8023         struct dm_crtc_state *crtc_state = crtc ? to_dm_crtc_state(crtc->state) : NULL;
8024         struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
8025         uint64_t address = afb ? afb->address : 0;
8026         struct dc_cursor_position position = {0};
8027         struct dc_cursor_attributes attributes;
8028         int ret;
8029
8030         if (!plane->state->fb && !old_plane_state->fb)
8031                 return;
8032
8033         DC_LOG_CURSOR("%s: crtc_id=%d with size %d to %d\n",
8034                       __func__,
8035                       amdgpu_crtc->crtc_id,
8036                       plane->state->crtc_w,
8037                       plane->state->crtc_h);
8038
8039         ret = get_cursor_position(plane, crtc, &position);
8040         if (ret)
8041                 return;
8042
8043         if (!position.enable) {
8044                 /* turn off cursor */
8045                 if (crtc_state && crtc_state->stream) {
8046                         mutex_lock(&adev->dm.dc_lock);
8047                         dc_stream_set_cursor_position(crtc_state->stream,
8048                                                       &position);
8049                         mutex_unlock(&adev->dm.dc_lock);
8050                 }
8051                 return;
8052         }
8053
8054         amdgpu_crtc->cursor_width = plane->state->crtc_w;
8055         amdgpu_crtc->cursor_height = plane->state->crtc_h;
8056
8057         memset(&attributes, 0, sizeof(attributes));
8058         attributes.address.high_part = upper_32_bits(address);
8059         attributes.address.low_part  = lower_32_bits(address);
8060         attributes.width             = plane->state->crtc_w;
8061         attributes.height            = plane->state->crtc_h;
8062         attributes.color_format      = CURSOR_MODE_COLOR_PRE_MULTIPLIED_ALPHA;
8063         attributes.rotation_angle    = 0;
8064         attributes.attribute_flags.value = 0;
8065
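        /* DC expects the cursor pitch in pixels, not in bytes. */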
8066         attributes.pitch = afb->base.pitches[0] / afb->base.format->cpp[0];
8067
8068         if (crtc_state->stream) {
8069                 mutex_lock(&adev->dm.dc_lock);
8070                 if (!dc_stream_set_cursor_attributes(crtc_state->stream,
8071                                                          &attributes))
8072                         DRM_ERROR("DC failed to set cursor attributes\n");
8073
8074                 if (!dc_stream_set_cursor_position(crtc_state->stream,
8075                                                    &position))
8076                         DRM_ERROR("DC failed to set cursor position\n");
8077                 mutex_unlock(&adev->dm.dc_lock);
8078         }
8079 }
8080
8081 static void prepare_flip_isr(struct amdgpu_crtc *acrtc)
8082 {
8083
8084         assert_spin_locked(&acrtc->base.dev->event_lock);
8085         WARN_ON(acrtc->event);
8086
8087         acrtc->event = acrtc->base.state->event;
8088
8089         /* Set the flip status */
8090         acrtc->pflip_status = AMDGPU_FLIP_SUBMITTED;
8091
8092         /* Mark this event as consumed */
8093         acrtc->base.state->event = NULL;
8094
8095         DC_LOG_PFLIP("crtc:%d, pflip_stat:AMDGPU_FLIP_SUBMITTED\n",
8096                      acrtc->crtc_id);
8097 }
8098
8099 static void update_freesync_state_on_stream(
8100         struct amdgpu_display_manager *dm,
8101         struct dm_crtc_state *new_crtc_state,
8102         struct dc_stream_state *new_stream,
8103         struct dc_plane_state *surface,
8104         u32 flip_timestamp_in_us)
8105 {
8106         struct mod_vrr_params vrr_params;
8107         struct dc_info_packet vrr_infopacket = {0};
8108         struct amdgpu_device *adev = dm->adev;
8109         struct amdgpu_crtc *acrtc = to_amdgpu_crtc(new_crtc_state->base.crtc);
8110         unsigned long flags;
8111         bool pack_sdp_v1_3 = false;
8112
8113         if (!new_stream)
8114                 return;
8115
8116         /*
8117          * TODO: Determine why min/max totals and vrefresh can be 0 here.
8118          * For now it's sufficient to just guard against these conditions.
8119          */
8120
8121         if (!new_stream->timing.h_total || !new_stream->timing.v_total)
8122                 return;
8123
8124         spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);
8125         vrr_params = acrtc->dm_irq_params.vrr_params;
8126
8127         if (surface) {
8128                 mod_freesync_handle_preflip(
8129                         dm->freesync_module,
8130                         surface,
8131                         new_stream,
8132                         flip_timestamp_in_us,
8133                         &vrr_params);
8134
8135                 if (adev->family < AMDGPU_FAMILY_AI &&
8136                     amdgpu_dm_vrr_active(new_crtc_state)) {
8137                         mod_freesync_handle_v_update(dm->freesync_module,
8138                                                      new_stream, &vrr_params);
8139
8140                         /* Need to call this before the frame ends. */
8141                         dc_stream_adjust_vmin_vmax(dm->dc,
8142                                                    new_crtc_state->stream,
8143                                                    &vrr_params.adjust);
8144                 }
8145         }
8146
8147         mod_freesync_build_vrr_infopacket(
8148                 dm->freesync_module,
8149                 new_stream,
8150                 &vrr_params,
8151                 PACKET_TYPE_VRR,
8152                 TRANSFER_FUNC_UNKNOWN,
8153                 &vrr_infopacket,
8154                 pack_sdp_v1_3);
8155
8156         new_crtc_state->freesync_timing_changed |=
8157                 (memcmp(&acrtc->dm_irq_params.vrr_params.adjust,
8158                         &vrr_params.adjust,
8159                         sizeof(vrr_params.adjust)) != 0);
8160
8161         new_crtc_state->freesync_vrr_info_changed |=
8162                 (memcmp(&new_crtc_state->vrr_infopacket,
8163                         &vrr_infopacket,
8164                         sizeof(vrr_infopacket)) != 0);
8165
8166         acrtc->dm_irq_params.vrr_params = vrr_params;
8167         new_crtc_state->vrr_infopacket = vrr_infopacket;
8168
8169         new_stream->adjust = acrtc->dm_irq_params.vrr_params.adjust;
8170         new_stream->vrr_infopacket = vrr_infopacket;
8171
8172         if (new_crtc_state->freesync_vrr_info_changed)
8173                 DRM_DEBUG_KMS("VRR packet update: crtc=%u enabled=%d state=%d",
8174                               new_crtc_state->base.crtc->base.id,
8175                               (int)new_crtc_state->base.vrr_enabled,
8176                               (int)vrr_params.state);
8177
8178         spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
8179 }
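
/*
 * Note: the memcmp()-based checks above only mark the freesync state as
 * changed; the updated VRR infopacket is actually sent later, in
 * amdgpu_dm_commit_planes(), where freesync_vrr_info_changed gates the
 * stream_update.vrr_infopacket pointer.
 */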
8180
8181 static void update_stream_irq_parameters(
8182         struct amdgpu_display_manager *dm,
8183         struct dm_crtc_state *new_crtc_state)
8184 {
8185         struct dc_stream_state *new_stream = new_crtc_state->stream;
8186         struct mod_vrr_params vrr_params;
8187         struct mod_freesync_config config = new_crtc_state->freesync_config;
8188         struct amdgpu_device *adev = dm->adev;
8189         struct amdgpu_crtc *acrtc = to_amdgpu_crtc(new_crtc_state->base.crtc);
8190         unsigned long flags;
8191
8192         if (!new_stream)
8193                 return;
8194
8195         /*
8196          * TODO: Determine why min/max totals and vrefresh can be 0 here.
8197          * For now it's sufficient to just guard against these conditions.
8198          */
8199         if (!new_stream->timing.h_total || !new_stream->timing.v_total)
8200                 return;
8201
8202         spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);
8203         vrr_params = acrtc->dm_irq_params.vrr_params;
8204
8205         if (new_crtc_state->vrr_supported &&
8206             config.min_refresh_in_uhz &&
8207             config.max_refresh_in_uhz) {
8208                 /*
8209                  * if freesync compatible mode was set, config.state will be set
8210                  * in atomic check
8211                  */
8212                 if (config.state == VRR_STATE_ACTIVE_FIXED && config.fixed_refresh_in_uhz &&
8213                     (!drm_atomic_crtc_needs_modeset(&new_crtc_state->base) ||
8214                      new_crtc_state->freesync_config.state == VRR_STATE_ACTIVE_FIXED)) {
8215                         vrr_params.max_refresh_in_uhz = config.max_refresh_in_uhz;
8216                         vrr_params.min_refresh_in_uhz = config.min_refresh_in_uhz;
8217                         vrr_params.fixed_refresh_in_uhz = config.fixed_refresh_in_uhz;
8218                         vrr_params.state = VRR_STATE_ACTIVE_FIXED;
8219                 } else {
8220                         config.state = new_crtc_state->base.vrr_enabled ?
8221                                                      VRR_STATE_ACTIVE_VARIABLE :
8222                                                      VRR_STATE_INACTIVE;
8223                 }
8224         } else {
8225                 config.state = VRR_STATE_UNSUPPORTED;
8226         }
8227
8228         mod_freesync_build_vrr_params(dm->freesync_module,
8229                                       new_stream,
8230                                       &config, &vrr_params);
8231
8232         new_crtc_state->freesync_timing_changed |=
8233                 (memcmp(&acrtc->dm_irq_params.vrr_params.adjust,
8234                         &vrr_params.adjust, sizeof(vrr_params.adjust)) != 0);
8235
8236         new_crtc_state->freesync_config = config;
8237         /* Copy state for access from DM IRQ handler */
8238         acrtc->dm_irq_params.freesync_config = config;
8239         acrtc->dm_irq_params.active_planes = new_crtc_state->active_planes;
8240         acrtc->dm_irq_params.vrr_params = vrr_params;
8241         spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
8242 }
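
/*
 * Summary of the VRR state selection above (a paraphrase of the code, not
 * new policy):
 *
 *   vrr_supported with a min/max refresh range:
 *     - config.state already VRR_STATE_ACTIVE_FIXED, a fixed refresh set,
 *       and no full modeset pending          -> ACTIVE_FIXED
 *     - otherwise, base.vrr_enabled          -> ACTIVE_VARIABLE
 *     - otherwise                            -> INACTIVE
 *   anything else                            -> UNSUPPORTED
 */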
8243
8244 static void amdgpu_dm_handle_vrr_transition(struct dm_crtc_state *old_state,
8245                                             struct dm_crtc_state *new_state)
8246 {
8247         bool old_vrr_active = amdgpu_dm_vrr_active(old_state);
8248         bool new_vrr_active = amdgpu_dm_vrr_active(new_state);
8249
8250         if (!old_vrr_active && new_vrr_active) {
8251                 /* Transition VRR inactive -> active:
8252                  * While VRR is active, we must not disable vblank irq, as a
8253                  * reenable after disable would compute bogus vblank/pflip
8254                  * timestamps if it happened inside the display front-porch.
8255                  *
8256                  * We also need vupdate irq for the actual core vblank handling
8257                  * at end of vblank.
8258                  */
8259                 dm_set_vupdate_irq(new_state->base.crtc, true);
8260                 drm_crtc_vblank_get(new_state->base.crtc);
8261                 DRM_DEBUG_DRIVER("%s: crtc=%u VRR off->on: Get vblank ref\n",
8262                                  __func__, new_state->base.crtc->base.id);
8263         } else if (old_vrr_active && !new_vrr_active) {
8264                 /* Transition VRR active -> inactive:
8265                  * Allow vblank irq disable again for fixed refresh rate.
8266                  */
8267                 dm_set_vupdate_irq(new_state->base.crtc, false);
8268                 drm_crtc_vblank_put(new_state->base.crtc);
8269                 DRM_DEBUG_DRIVER("%s: crtc=%u VRR on->off: Drop vblank ref\n",
8270                                  __func__, new_state->base.crtc->base.id);
8271         }
8272 }
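
/*
 * The vupdate irq and vblank reference taken on the inactive -> active
 * transition above are paired with the disable/put on the active ->
 * inactive transition, so the reference count stays balanced across any
 * sequence of VRR toggles.
 */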
8273
8274 static void amdgpu_dm_commit_cursors(struct drm_atomic_state *state)
8275 {
8276         struct drm_plane *plane;
8277         struct drm_plane_state *old_plane_state;
8278         int i;
8279
8280         /*
8281          * TODO: Make this per-stream so we don't issue redundant updates for
8282          * commits with multiple streams.
8283          */
8284         for_each_old_plane_in_state(state, plane, old_plane_state, i)
8285                 if (plane->type == DRM_PLANE_TYPE_CURSOR)
8286                         handle_cursor_update(plane, old_plane_state);
8287 }
8288
8289 static void amdgpu_dm_commit_planes(struct drm_atomic_state *state,
8290                                     struct dc_state *dc_state,
8291                                     struct drm_device *dev,
8292                                     struct amdgpu_display_manager *dm,
8293                                     struct drm_crtc *pcrtc,
8294                                     bool wait_for_vblank)
8295 {
8296         uint32_t i;
8297         uint64_t timestamp_ns;
8298         struct drm_plane *plane;
8299         struct drm_plane_state *old_plane_state, *new_plane_state;
8300         struct amdgpu_crtc *acrtc_attach = to_amdgpu_crtc(pcrtc);
8301         struct drm_crtc_state *new_pcrtc_state =
8302                         drm_atomic_get_new_crtc_state(state, pcrtc);
8303         struct dm_crtc_state *acrtc_state = to_dm_crtc_state(new_pcrtc_state);
8304         struct dm_crtc_state *dm_old_crtc_state =
8305                         to_dm_crtc_state(drm_atomic_get_old_crtc_state(state, pcrtc));
8306         int planes_count = 0, vpos, hpos;
8307         long r;
8308         unsigned long flags;
8309         struct amdgpu_bo *abo;
8310         uint32_t target_vblank, last_flip_vblank;
8311         bool vrr_active = amdgpu_dm_vrr_active(acrtc_state);
8312         bool pflip_present = false;
8313         struct {
8314                 struct dc_surface_update surface_updates[MAX_SURFACES];
8315                 struct dc_plane_info plane_infos[MAX_SURFACES];
8316                 struct dc_scaling_info scaling_infos[MAX_SURFACES];
8317                 struct dc_flip_addrs flip_addrs[MAX_SURFACES];
8318                 struct dc_stream_update stream_update;
8319         } *bundle;
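        /*
         * The bundle is allocated from the heap rather than the stack,
         * presumably because the MAX_SURFACES-sized arrays above make it
         * too large for a kernel stack frame (an assumption based on the
         * structure size).
         */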
8320
8321         bundle = kzalloc(sizeof(*bundle), GFP_KERNEL);
8322
8323         if (!bundle) {
8324                 dm_error("Failed to allocate update bundle\n");
8325                 goto cleanup;
8326         }
8327
8328         /*
8329          * Disable the cursor first if we're disabling all the planes.
8330          * It'll remain on the screen after the planes are re-enabled
8331          * if we don't.
8332          */
8333         if (acrtc_state->active_planes == 0)
8334                 amdgpu_dm_commit_cursors(state);
8335
8336         /* update planes when needed */
8337         for_each_oldnew_plane_in_state(state, plane, old_plane_state, new_plane_state, i) {
8338                 struct drm_crtc *crtc = new_plane_state->crtc;
8339                 struct drm_crtc_state *new_crtc_state;
8340                 struct drm_framebuffer *fb = new_plane_state->fb;
8341                 struct amdgpu_framebuffer *afb = (struct amdgpu_framebuffer *)fb;
8342                 bool plane_needs_flip;
8343                 struct dc_plane_state *dc_plane;
8344                 struct dm_plane_state *dm_new_plane_state = to_dm_plane_state(new_plane_state);
8345
8346                 /* Cursor plane is handled after stream updates */
8347                 if (plane->type == DRM_PLANE_TYPE_CURSOR)
8348                         continue;
8349
8350                 if (!fb || !crtc || pcrtc != crtc)
8351                         continue;
8352
8353                 new_crtc_state = drm_atomic_get_new_crtc_state(state, crtc);
8354                 if (!new_crtc_state->active)
8355                         continue;
8356
8357                 dc_plane = dm_new_plane_state->dc_state;
8358
8359                 bundle->surface_updates[planes_count].surface = dc_plane;
8360                 if (new_pcrtc_state->color_mgmt_changed) {
8361                         bundle->surface_updates[planes_count].gamma = dc_plane->gamma_correction;
8362                         bundle->surface_updates[planes_count].in_transfer_func = dc_plane->in_transfer_func;
8363                         bundle->surface_updates[planes_count].gamut_remap_matrix = &dc_plane->gamut_remap_matrix;
8364                 }
8365
8366                 fill_dc_scaling_info(new_plane_state,
8367                                      &bundle->scaling_infos[planes_count]);
8368
8369                 bundle->surface_updates[planes_count].scaling_info =
8370                         &bundle->scaling_infos[planes_count];
8371
8372                 plane_needs_flip = old_plane_state->fb && new_plane_state->fb;
8373
8374                 pflip_present = pflip_present || plane_needs_flip;
8375
8376                 if (!plane_needs_flip) {
8377                         planes_count += 1;
8378                         continue;
8379                 }
8380
8381                 abo = gem_to_amdgpu_bo(fb->obj[0]);
8382
8383                 /*
8384                  * Wait for all fences on this FB. Do a limited wait to avoid
8385                  * deadlock during GPU reset, when this fence will not signal
8386                  * but we hold the reservation lock for the BO.
8387                  */
8388                 r = dma_resv_wait_timeout_rcu(abo->tbo.base.resv, true,
8389                                                         false,
8390                                                         msecs_to_jiffies(5000));
8391                 if (unlikely(r <= 0))
8392                         DRM_ERROR("Waiting for fences timed out!");
8393
8394                 fill_dc_plane_info_and_addr(
8395                         dm->adev, new_plane_state,
8396                         afb->tiling_flags,
8397                         &bundle->plane_infos[planes_count],
8398                         &bundle->flip_addrs[planes_count].address,
8399                         afb->tmz_surface, false);
8400
8401                 DRM_DEBUG_ATOMIC("plane: id=%d dcc_en=%d\n",
8402                                  new_plane_state->plane->index,
8403                                  bundle->plane_infos[planes_count].dcc.enable);
8404
8405                 bundle->surface_updates[planes_count].plane_info =
8406                         &bundle->plane_infos[planes_count];
8407
8408                 /*
8409                  * Only allow immediate flips for fast updates that don't
8410                  * change FB pitch, DCC state, rotation or mirroring.
8411                  */
8412                 bundle->flip_addrs[planes_count].flip_immediate =
8413                         crtc->state->async_flip &&
8414                         acrtc_state->update_type == UPDATE_TYPE_FAST;
8415
8416                 timestamp_ns = ktime_get_ns();
8417                 bundle->flip_addrs[planes_count].flip_timestamp_in_us = div_u64(timestamp_ns, 1000);
8418                 bundle->surface_updates[planes_count].flip_addr = &bundle->flip_addrs[planes_count];
8419                 bundle->surface_updates[planes_count].surface = dc_plane;
8420
8421                 if (!bundle->surface_updates[planes_count].surface) {
8422                         DRM_ERROR("No surface for CRTC: id=%d\n",
8423                                         acrtc_attach->crtc_id);
8424                         continue;
8425                 }
8426
8427                 if (plane == pcrtc->primary)
8428                         update_freesync_state_on_stream(
8429                                 dm,
8430                                 acrtc_state,
8431                                 acrtc_state->stream,
8432                                 dc_plane,
8433                                 bundle->flip_addrs[planes_count].flip_timestamp_in_us);
8434
8435                 DRM_DEBUG_ATOMIC("%s: Flipping to hi: 0x%x, low: 0x%x\n",
8436                                  __func__,
8437                                  bundle->flip_addrs[planes_count].address.grph.addr.high_part,
8438                                  bundle->flip_addrs[planes_count].address.grph.addr.low_part);
8439
8440                 planes_count += 1;
8441
8442         }
8443
8444         if (pflip_present) {
8445                 if (!vrr_active) {
8446                         /* Use old throttling in non-vrr fixed refresh rate mode
8447                          * to keep flip scheduling based on target vblank counts
8448                          * working in a backwards compatible way, e.g., for
8449                          * clients using the GLX_OML_sync_control extension or
8450                          * DRI3/Present extension with defined target_msc.
8451                          */
8452                         last_flip_vblank = amdgpu_get_vblank_counter_kms(pcrtc);
8453                 } else {
8455                         /* For variable refresh rate mode only:
8456                          * Get vblank of last completed flip to avoid > 1 vrr
8457                          * flips per video frame by use of throttling, but allow
8458                          * flip programming anywhere in the possibly large
8459                          * variable vrr vblank interval for fine-grained flip
8460                          * timing control and more opportunity to avoid stutter
8461                          * on late submission of flips.
8462                          */
8463                         spin_lock_irqsave(&pcrtc->dev->event_lock, flags);
8464                         last_flip_vblank = acrtc_attach->dm_irq_params.last_flip_vblank;
8465                         spin_unlock_irqrestore(&pcrtc->dev->event_lock, flags);
8466                 }
8467
8468                 target_vblank = last_flip_vblank + wait_for_vblank;
8469
8470                 /*
8471                  * Wait until we're out of the vertical blank period before the one
8472                  * targeted by the flip
8473                  */
8474                 while ((acrtc_attach->enabled &&
8475                         (amdgpu_display_get_crtc_scanoutpos(dm->ddev, acrtc_attach->crtc_id,
8476                                                             0, &vpos, &hpos, NULL,
8477                                                             NULL, &pcrtc->hwmode)
8478                          & (DRM_SCANOUTPOS_VALID | DRM_SCANOUTPOS_IN_VBLANK)) ==
8479                         (DRM_SCANOUTPOS_VALID | DRM_SCANOUTPOS_IN_VBLANK) &&
8480                         (int)(target_vblank -
8481                           amdgpu_get_vblank_counter_kms(pcrtc)) > 0)) {
8482                         usleep_range(1000, 1100);
8483                 }
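
                /*
                 * Worked example of the throttle above, with hypothetical
                 * numbers: last_flip_vblank == 100 and wait_for_vblank set
                 * give target_vblank == 101, so the loop sleeps in ~1 ms
                 * steps while the CRTC is inside a vblank period and the
                 * vblank counter is still below 101. The (int) cast on
                 * (target_vblank - counter) keeps the comparison correct
                 * across counter wraparound.
                 */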
8484
8485                 /*
8486                  * Prepare the flip event for the pageflip interrupt to handle.
8487                  *
8488                  * This only works in the case where we've already turned on the
8489                  * appropriate hardware blocks (e.g. HUBP), so in the transition case
8490                  * from 0 -> n planes we have to skip a hardware generated event
8491                  * and rely on sending it from software.
8492                  */
8493                 if (acrtc_attach->base.state->event &&
8494                     acrtc_state->active_planes > 0) {
8495                         drm_crtc_vblank_get(pcrtc);
8496
8497                         spin_lock_irqsave(&pcrtc->dev->event_lock, flags);
8498
8499                         WARN_ON(acrtc_attach->pflip_status != AMDGPU_FLIP_NONE);
8500                         prepare_flip_isr(acrtc_attach);
8501
8502                         spin_unlock_irqrestore(&pcrtc->dev->event_lock, flags);
8503                 }
8504
8505                 if (acrtc_state->stream) {
8506                         if (acrtc_state->freesync_vrr_info_changed)
8507                                 bundle->stream_update.vrr_infopacket =
8508                                         &acrtc_state->stream->vrr_infopacket;
8509                 }
8510         }
8511
8512         /* Update the planes if changed or disable if we don't have any. */
8513         if ((planes_count || acrtc_state->active_planes == 0) &&
8514                 acrtc_state->stream) {
8515                 bundle->stream_update.stream = acrtc_state->stream;
8516                 if (new_pcrtc_state->mode_changed) {
8517                         bundle->stream_update.src = acrtc_state->stream->src;
8518                         bundle->stream_update.dst = acrtc_state->stream->dst;
8519                 }
8520
8521                 if (new_pcrtc_state->color_mgmt_changed) {
8522                         /*
8523                          * TODO: This isn't fully correct since we've actually
8524                          * already modified the stream in place.
8525                          */
8526                         bundle->stream_update.gamut_remap =
8527                                 &acrtc_state->stream->gamut_remap_matrix;
8528                         bundle->stream_update.output_csc_transform =
8529                                 &acrtc_state->stream->csc_color_matrix;
8530                         bundle->stream_update.out_transfer_func =
8531                                 acrtc_state->stream->out_transfer_func;
8532                 }
8533
8534                 acrtc_state->stream->abm_level = acrtc_state->abm_level;
8535                 if (acrtc_state->abm_level != dm_old_crtc_state->abm_level)
8536                         bundle->stream_update.abm_level = &acrtc_state->abm_level;
8537
8538                 /*
8539                  * If FreeSync state on the stream has changed then we need to
8540                  * re-adjust the min/max bounds now that DC doesn't handle this
8541                  * as part of commit.
8542                  */
8543                 if (is_dc_timing_adjust_needed(dm_old_crtc_state, acrtc_state)) {
8544                         spin_lock_irqsave(&pcrtc->dev->event_lock, flags);
8545                         dc_stream_adjust_vmin_vmax(
8546                                 dm->dc, acrtc_state->stream,
8547                                 &acrtc_attach->dm_irq_params.vrr_params.adjust);
8548                         spin_unlock_irqrestore(&pcrtc->dev->event_lock, flags);
8549                 }
8550                 mutex_lock(&dm->dc_lock);
8551                 if ((acrtc_state->update_type > UPDATE_TYPE_FAST) &&
8552                                 acrtc_state->stream->link->psr_settings.psr_allow_active)
8553                         amdgpu_dm_psr_disable(acrtc_state->stream);
8554
8555                 dc_commit_updates_for_stream(dm->dc,
8556                                                      bundle->surface_updates,
8557                                                      planes_count,
8558                                                      acrtc_state->stream,
8559                                                      &bundle->stream_update,
8560                                                      dc_state);
8561
8562                 /*
8563                  * Enable or disable the interrupts on the backend.
8564                  *
8565                  * Most pipes are put into power gating when unused.
8566                  *
8567                  * When power gating is enabled on a pipe we lose the
8568                  * interrupt enablement state when power gating is disabled.
8569                  *
8570                  * So we need to update the IRQ control state in hardware
8571                  * whenever the pipe turns on (since it could be previously
8572                  * power gated) or off (since some pipes can't be power gated
8573                  * on some ASICs).
8574                  */
8575                 if (dm_old_crtc_state->active_planes != acrtc_state->active_planes)
8576                         dm_update_pflip_irq_state(drm_to_adev(dev),
8577                                                   acrtc_attach);
8578
8579                 if ((acrtc_state->update_type > UPDATE_TYPE_FAST) &&
8580                                 acrtc_state->stream->link->psr_settings.psr_version != DC_PSR_VERSION_UNSUPPORTED &&
8581                                 !acrtc_state->stream->link->psr_settings.psr_feature_enabled)
8582                         amdgpu_dm_link_setup_psr(acrtc_state->stream);
8583                 else if ((acrtc_state->update_type == UPDATE_TYPE_FAST) &&
8584                                 acrtc_state->stream->link->psr_settings.psr_feature_enabled &&
8585                                 !acrtc_state->stream->link->psr_settings.psr_allow_active) {
8586                         amdgpu_dm_psr_enable(acrtc_state->stream);
8587                 }
8588
8589                 mutex_unlock(&dm->dc_lock);
8590         }
8591
8592         /*
8593          * Update cursor state *after* programming all the planes.
8594          * This avoids redundant programming in the case where we're going
8595          * to be disabling a single plane - those pipes are being disabled.
8596          */
8597         if (acrtc_state->active_planes)
8598                 amdgpu_dm_commit_cursors(state);
8599
8600 cleanup:
8601         kfree(bundle);
8602 }
8603
8604 static void amdgpu_dm_commit_audio(struct drm_device *dev,
8605                                    struct drm_atomic_state *state)
8606 {
8607         struct amdgpu_device *adev = drm_to_adev(dev);
8608         struct amdgpu_dm_connector *aconnector;
8609         struct drm_connector *connector;
8610         struct drm_connector_state *old_con_state, *new_con_state;
8611         struct drm_crtc_state *new_crtc_state;
8612         struct dm_crtc_state *new_dm_crtc_state;
8613         const struct dc_stream_status *status;
8614         int i, inst;
8615
8616         /* Notify audio device removals. */
8617         for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) {
8618                 if (old_con_state->crtc != new_con_state->crtc) {
8619                         /* CRTC changes require notification. */
8620                         goto notify;
8621                 }
8622
8623                 if (!new_con_state->crtc)
8624                         continue;
8625
8626                 new_crtc_state = drm_atomic_get_new_crtc_state(
8627                         state, new_con_state->crtc);
8628
8629                 if (!new_crtc_state)
8630                         continue;
8631
8632                 if (!drm_atomic_crtc_needs_modeset(new_crtc_state))
8633                         continue;
8634
8635         notify:
8636                 aconnector = to_amdgpu_dm_connector(connector);
8637
8638                 mutex_lock(&adev->dm.audio_lock);
8639                 inst = aconnector->audio_inst;
8640                 aconnector->audio_inst = -1;
8641                 mutex_unlock(&adev->dm.audio_lock);
8642
8643                 amdgpu_dm_audio_eld_notify(adev, inst);
8644         }
8645
8646         /* Notify audio device additions. */
8647         for_each_new_connector_in_state(state, connector, new_con_state, i) {
8648                 if (!new_con_state->crtc)
8649                         continue;
8650
8651                 new_crtc_state = drm_atomic_get_new_crtc_state(
8652                         state, new_con_state->crtc);
8653
8654                 if (!new_crtc_state)
8655                         continue;
8656
8657                 if (!drm_atomic_crtc_needs_modeset(new_crtc_state))
8658                         continue;
8659
8660                 new_dm_crtc_state = to_dm_crtc_state(new_crtc_state);
8661                 if (!new_dm_crtc_state->stream)
8662                         continue;
8663
8664                 status = dc_stream_get_status(new_dm_crtc_state->stream);
8665                 if (!status)
8666                         continue;
8667
8668                 aconnector = to_amdgpu_dm_connector(connector);
8669
8670                 mutex_lock(&adev->dm.audio_lock);
8671                 inst = status->audio_inst;
8672                 aconnector->audio_inst = inst;
8673                 mutex_unlock(&adev->dm.audio_lock);
8674
8675                 amdgpu_dm_audio_eld_notify(adev, inst);
8676         }
8677 }
8678
8679 /**
8680  * amdgpu_dm_crtc_copy_transient_flags - copy mirrored flags from DRM to DC
8681  * @crtc_state: the DRM CRTC state
8682  * @stream_state: the DC stream state
8683  *
8684  * Copy the mirrored transient state flags from DRM to DC. It is used to bring
8685  * a dc_stream_state's flags in sync with a drm_crtc_state's flags.
8686  */
8687 static void amdgpu_dm_crtc_copy_transient_flags(struct drm_crtc_state *crtc_state,
8688                                                 struct dc_stream_state *stream_state)
8689 {
8690         stream_state->mode_changed = drm_atomic_crtc_needs_modeset(crtc_state);
8691 }
8692
8693 /**
8694  * amdgpu_dm_atomic_commit_tail() - AMDgpu DM's commit tail implementation.
8695  * @state: The atomic state to commit
8696  *
8697  * This will tell DC to commit the constructed DC state from atomic_check,
8698  * programming the hardware. Any failure here implies a hardware failure, since
8699  * atomic check should have filtered anything non-kosher.
8700  */
8701 static void amdgpu_dm_atomic_commit_tail(struct drm_atomic_state *state)
8702 {
8703         struct drm_device *dev = state->dev;
8704         struct amdgpu_device *adev = drm_to_adev(dev);
8705         struct amdgpu_display_manager *dm = &adev->dm;
8706         struct dm_atomic_state *dm_state;
8707         struct dc_state *dc_state = NULL, *dc_state_temp = NULL;
8708         uint32_t i, j;
8709         struct drm_crtc *crtc;
8710         struct drm_crtc_state *old_crtc_state, *new_crtc_state;
8711         unsigned long flags;
8712         bool wait_for_vblank = true;
8713         struct drm_connector *connector;
8714         struct drm_connector_state *old_con_state, *new_con_state;
8715         struct dm_crtc_state *dm_old_crtc_state, *dm_new_crtc_state;
8716         int crtc_disable_count = 0;
8717         bool mode_set_reset_required = false;
8718
8719         trace_amdgpu_dm_atomic_commit_tail_begin(state);
8720
8721         drm_atomic_helper_update_legacy_modeset_state(dev, state);
8722
8723         dm_state = dm_atomic_get_new_state(state);
8724         if (dm_state && dm_state->context) {
8725                 dc_state = dm_state->context;
8726         } else {
8727                 /* No state changes, retain current state. */
8728                 dc_state_temp = dc_create_state(dm->dc);
8729                 ASSERT(dc_state_temp);
8730                 dc_state = dc_state_temp;
8731                 dc_resource_state_copy_construct_current(dm->dc, dc_state);
8732         }
8733
8734         for_each_oldnew_crtc_in_state (state, crtc, old_crtc_state,
8735                                        new_crtc_state, i) {
8736                 struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
8737
8738                 dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
8739
8740                 if (old_crtc_state->active &&
8741                     (!new_crtc_state->active ||
8742                      drm_atomic_crtc_needs_modeset(new_crtc_state))) {
8743                         manage_dm_interrupts(adev, acrtc, false);
8744                         dc_stream_release(dm_old_crtc_state->stream);
8745                 }
8746         }
8747
8748         drm_atomic_helper_calc_timestamping_constants(state);
8749
8750         /* update changed items */
8751         for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
8752                 struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
8753
8754                 dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
8755                 dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
8756
8757                 DRM_DEBUG_ATOMIC(
8758                         "amdgpu_crtc id:%d crtc_state_flags: enable:%d, active:%d, "
8759                         "planes_changed:%d, mode_changed:%d, active_changed:%d, "
8760                         "connectors_changed:%d\n",
8761                         acrtc->crtc_id,
8762                         new_crtc_state->enable,
8763                         new_crtc_state->active,
8764                         new_crtc_state->planes_changed,
8765                         new_crtc_state->mode_changed,
8766                         new_crtc_state->active_changed,
8767                         new_crtc_state->connectors_changed);
8768
8769                 /* Disable cursor if disabling crtc */
8770                 if (old_crtc_state->active && !new_crtc_state->active) {
8771                         struct dc_cursor_position position;
8772
8773                         memset(&position, 0, sizeof(position));
8774                         mutex_lock(&dm->dc_lock);
8775                         dc_stream_set_cursor_position(dm_old_crtc_state->stream, &position);
8776                         mutex_unlock(&dm->dc_lock);
8777                 }
8778
8779                 /* Copy all transient state flags into dc state */
8780                 if (dm_new_crtc_state->stream) {
8781                         amdgpu_dm_crtc_copy_transient_flags(&dm_new_crtc_state->base,
8782                                                             dm_new_crtc_state->stream);
8783                 }
8784
8785                 /* handles headless hotplug case, updating new_state and
8786                  * aconnector as needed
8787                  */
8788
8789                 if (modeset_required(new_crtc_state, dm_new_crtc_state->stream, dm_old_crtc_state->stream)) {
8790
8791                         DRM_DEBUG_ATOMIC("Atomic commit: SET crtc id %d: [%p]\n", acrtc->crtc_id, acrtc);
8792
8793                         if (!dm_new_crtc_state->stream) {
8794                                 /*
8795                                  * This could happen because of issues with
8796                                  * userspace notification delivery.
8797                                  * In that case userspace tries to set a mode
8798                                  * on a display which is in fact disconnected.
8799                                  * dc_sink is NULL on the aconnector here.
8800                                  * We expect a mode reset to come soon.
8801                                  *
8802                                  * This can also happen when an unplug occurs
8803                                  * while the resume sequence is still running.
8804                                  *
8805                                  * In this case, we want to pretend we still
8806                                  * have a sink to keep the pipe running so that
8807                                  * hw state is consistent with the sw state.
8808                                  */
8809                                 DRM_DEBUG_DRIVER("%s: Failed to create new stream for crtc %d\n",
8810                                                 __func__, acrtc->base.base.id);
8811                                 continue;
8812                         }
8813
8814                         if (dm_old_crtc_state->stream)
8815                                 remove_stream(adev, acrtc, dm_old_crtc_state->stream);
8816
8817                         pm_runtime_get_noresume(dev->dev);
8818
8819                         acrtc->enabled = true;
8820                         acrtc->hw_mode = new_crtc_state->mode;
8821                         crtc->hwmode = new_crtc_state->mode;
8822                         mode_set_reset_required = true;
8823                 } else if (modereset_required(new_crtc_state)) {
8824                         DRM_DEBUG_ATOMIC("Atomic commit: RESET. crtc id %d:[%p]\n", acrtc->crtc_id, acrtc);
8825                         /* i.e. reset mode */
8826                         if (dm_old_crtc_state->stream)
8827                                 remove_stream(adev, acrtc, dm_old_crtc_state->stream);
8828
8829                         mode_set_reset_required = true;
8830                 }
8831         } /* for_each_crtc_in_state() */
8832
8833         if (dc_state) {
8834                 /* if there is a mode set or reset, disable eDP PSR */
8835                 if (mode_set_reset_required)
8836                         amdgpu_dm_psr_disable_all(dm);
8837
8838                 dm_enable_per_frame_crtc_master_sync(dc_state);
8839                 mutex_lock(&dm->dc_lock);
8840                 WARN_ON(!dc_commit_state(dm->dc, dc_state));
8841 #if defined(CONFIG_DRM_AMD_DC_DCN)
8842                 /* Allow idle optimization when vblank count is 0 for display off */
8843                 if (dm->active_vblank_irq_count == 0)
8844                         dc_allow_idle_optimizations(dm->dc, true);
8845 #endif
8846                 mutex_unlock(&dm->dc_lock);
8847         }
8848
8849         for_each_new_crtc_in_state(state, crtc, new_crtc_state, i) {
8850                 struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
8851
8852                 dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
8853
8854                 if (dm_new_crtc_state->stream != NULL) {
8855                         const struct dc_stream_status *status =
8856                                         dc_stream_get_status(dm_new_crtc_state->stream);
8857
8858                         if (!status)
8859                                 status = dc_stream_get_status_from_state(dc_state,
8860                                                                          dm_new_crtc_state->stream);
8861                         if (!status)
8862                                 DC_ERR("got no status for stream %p on acrtc %p\n", dm_new_crtc_state->stream, acrtc);
8863                         else
8864                                 acrtc->otg_inst = status->primary_otg_inst;
8865                 }
8866         }
8867 #ifdef CONFIG_DRM_AMD_DC_HDCP
8868         for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) {
8869                 struct dm_connector_state *dm_new_con_state = to_dm_connector_state(new_con_state);
8870                 struct amdgpu_crtc *acrtc = to_amdgpu_crtc(dm_new_con_state->base.crtc);
8871                 struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
8872
8873                 new_crtc_state = NULL;
8874
8875                 if (acrtc)
8876                         new_crtc_state = drm_atomic_get_new_crtc_state(state, &acrtc->base);
8877
8878                 dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
8879
8880                 if (dm_new_crtc_state && dm_new_crtc_state->stream == NULL &&
8881                     connector->state->content_protection == DRM_MODE_CONTENT_PROTECTION_ENABLED) {
8882                         hdcp_reset_display(adev->dm.hdcp_workqueue, aconnector->dc_link->link_index);
8883                         new_con_state->content_protection = DRM_MODE_CONTENT_PROTECTION_DESIRED;
8884                         dm_new_con_state->update_hdcp = true;
8885                         continue;
8886                 }
8887
8888                 if (is_content_protection_different(new_con_state, old_con_state, connector, adev->dm.hdcp_workqueue))
8889                         hdcp_update_display(
8890                                 adev->dm.hdcp_workqueue, aconnector->dc_link->link_index, aconnector,
8891                                 new_con_state->hdcp_content_type,
8892                                 new_con_state->content_protection == DRM_MODE_CONTENT_PROTECTION_DESIRED);
8893         }
8894 #endif
8895
8896         /* Handle connector state changes */
8897         for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) {
8898                 struct dm_connector_state *dm_new_con_state = to_dm_connector_state(new_con_state);
8899                 struct dm_connector_state *dm_old_con_state = to_dm_connector_state(old_con_state);
8900                 struct amdgpu_crtc *acrtc = to_amdgpu_crtc(dm_new_con_state->base.crtc);
8901                 struct dc_surface_update dummy_updates[MAX_SURFACES];
8902                 struct dc_stream_update stream_update;
8903                 struct dc_info_packet hdr_packet;
8904                 struct dc_stream_status *status = NULL;
8905                 bool abm_changed, hdr_changed, scaling_changed;
8906
8907                 memset(&dummy_updates, 0, sizeof(dummy_updates));
8908                 memset(&stream_update, 0, sizeof(stream_update));
8909
8910                 if (acrtc) {
8911                         new_crtc_state = drm_atomic_get_new_crtc_state(state, &acrtc->base);
8912                         old_crtc_state = drm_atomic_get_old_crtc_state(state, &acrtc->base);
8913                 }
8914
8915                 /* Skip any modesets/resets */
8916                 if (!acrtc || drm_atomic_crtc_needs_modeset(new_crtc_state))
8917                         continue;
8918
8919                 dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
8920                 dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
8921
8922                 scaling_changed = is_scaling_state_different(dm_new_con_state,
8923                                                              dm_old_con_state);
8924
8925                 abm_changed = dm_new_crtc_state->abm_level !=
8926                               dm_old_crtc_state->abm_level;
8927
8928                 hdr_changed =
8929                         is_hdr_metadata_different(old_con_state, new_con_state);
8930
8931                 if (!scaling_changed && !abm_changed && !hdr_changed)
8932                         continue;
8933
8934                 stream_update.stream = dm_new_crtc_state->stream;
8935                 if (scaling_changed) {
8936                         update_stream_scaling_settings(&dm_new_con_state->base.crtc->mode,
8937                                         dm_new_con_state, dm_new_crtc_state->stream);
8938
8939                         stream_update.src = dm_new_crtc_state->stream->src;
8940                         stream_update.dst = dm_new_crtc_state->stream->dst;
8941                 }
8942
8943                 if (abm_changed) {
8944                         dm_new_crtc_state->stream->abm_level = dm_new_crtc_state->abm_level;
8945
8946                         stream_update.abm_level = &dm_new_crtc_state->abm_level;
8947                 }
8948
8949                 if (hdr_changed) {
8950                         fill_hdr_info_packet(new_con_state, &hdr_packet);
8951                         stream_update.hdr_static_metadata = &hdr_packet;
8952                 }
8953
8954                 status = dc_stream_get_status(dm_new_crtc_state->stream);
8955                 WARN_ON(!status);
8956                 WARN_ON(!status->plane_count);
8957
8958                 /*
8959                  * TODO: DC refuses to perform stream updates without a dc_surface_update.
8960                  * Here we create an empty update on each plane.
8961                  * To fix this, DC should permit updating only stream properties.
8962                  */
8963                 for (j = 0; j < status->plane_count; j++)
8964                         dummy_updates[j].surface = status->plane_states[0];
8965
8966
8967                 mutex_lock(&dm->dc_lock);
8968                 dc_commit_updates_for_stream(dm->dc,
8969                                                      dummy_updates,
8970                                                      status->plane_count,
8971                                                      dm_new_crtc_state->stream,
8972                                                      &stream_update,
8973                                                      dc_state);
8974                 mutex_unlock(&dm->dc_lock);
8975         }
8976
8977         /* Count number of newly disabled CRTCs for dropping PM refs later. */
8978         for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state,
8979                                       new_crtc_state, i) {
8980                 if (old_crtc_state->active && !new_crtc_state->active)
8981                         crtc_disable_count++;
8982
8983                 dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
8984                 dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
8985
8986                 /* For freesync config update on crtc state and params for irq */
8987                 update_stream_irq_parameters(dm, dm_new_crtc_state);
8988
8989                 /* Handle vrr on->off / off->on transitions */
8990                 amdgpu_dm_handle_vrr_transition(dm_old_crtc_state,
8991                                                 dm_new_crtc_state);
8992         }
8993
8994         /*
8995          * Enable interrupts for CRTCs that are newly enabled or went through
8996          * a modeset. This is intentionally deferred until after the front end
8997          * state has been modified, so that the OTG is already on and the IRQ
8998          * handlers never access stale or invalid state.
8999          */
9000         for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
9001                 struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
9002 #ifdef CONFIG_DEBUG_FS
9003                 bool configure_crc = false;
9004                 enum amdgpu_dm_pipe_crc_source cur_crc_src;
9005 #endif
9006                 dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
9007
9008                 if (new_crtc_state->active &&
9009                     (!old_crtc_state->active ||
9010                      drm_atomic_crtc_needs_modeset(new_crtc_state))) {
9011                         dc_stream_retain(dm_new_crtc_state->stream);
9012                         acrtc->dm_irq_params.stream = dm_new_crtc_state->stream;
9013                         manage_dm_interrupts(adev, acrtc, true);
9014
9015 #ifdef CONFIG_DEBUG_FS
9016                         /*
9017                          * Frontend may have changed, so reapply the CRC capture
9018                          * settings for the stream.
9019                          */
9020                         dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
9021                         spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);
9022                         cur_crc_src = acrtc->dm_irq_params.crc_src;
9023                         spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
9024
9025                         if (amdgpu_dm_is_valid_crc_source(cur_crc_src)) {
9026                                 configure_crc = true;
9027 #if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
9028                                 if (amdgpu_dm_crc_window_is_activated(crtc))
9029                                         configure_crc = false;
9030 #endif
9031                         }
9032
9033                         if (configure_crc)
9034                                 amdgpu_dm_crtc_configure_crc_source(
9035                                         crtc, dm_new_crtc_state, cur_crc_src);
9036 #endif
9037                 }
9038         }
9039
9040         for_each_new_crtc_in_state(state, crtc, new_crtc_state, j)
9041                 if (new_crtc_state->async_flip)
9042                         wait_for_vblank = false;
9043
9044         /* update planes when needed per crtc */
9045         for_each_new_crtc_in_state(state, crtc, new_crtc_state, j) {
9046                 dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
9047
9048                 if (dm_new_crtc_state->stream)
9049                         amdgpu_dm_commit_planes(state, dc_state, dev,
9050                                                 dm, crtc, wait_for_vblank);
9051         }
9052
9053         /* Update audio instances for each connector. */
9054         amdgpu_dm_commit_audio(dev, state);
9055
9056         /*
9057          * Send a vblank event for all events not handled in the flip path and
9058          * mark the events as consumed for drm_atomic_helper_commit_hw_done().
9059          */
9060         spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);
9061         for_each_new_crtc_in_state(state, crtc, new_crtc_state, i) {
9062
9063                 if (new_crtc_state->event)
9064                         drm_send_event_locked(dev, &new_crtc_state->event->base);
9065
9066                 new_crtc_state->event = NULL;
9067         }
9068         spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
9069
9070         /* Signal HW programming completion */
9071         drm_atomic_helper_commit_hw_done(state);
9072
9073         if (wait_for_vblank)
9074                 drm_atomic_helper_wait_for_flip_done(dev, state);
9075
9076         drm_atomic_helper_cleanup_planes(dev, state);
9077
9078         /* return the stolen vga memory back to VRAM */
9079         if (!adev->mman.keep_stolen_vga_memory)
9080                 amdgpu_bo_free_kernel(&adev->mman.stolen_vga_memory, NULL, NULL);
9081         amdgpu_bo_free_kernel(&adev->mman.stolen_extended_memory, NULL, NULL);
9082
9083         /*
9084          * Finally, drop a runtime PM reference for each newly disabled CRTC,
9085          * so we can put the GPU into runtime suspend if we're not driving any
9086          * displays anymore
9087          */
9088         for (i = 0; i < crtc_disable_count; i++)
9089                 pm_runtime_put_autosuspend(dev->dev);
9090         pm_runtime_mark_last_busy(dev->dev);
9091
9092         if (dc_state_temp)
9093                 dc_release_state(dc_state_temp);
9094 }
9095
9096
9097 static int dm_force_atomic_commit(struct drm_connector *connector)
9098 {
9099         int ret = 0;
9100         struct drm_device *ddev = connector->dev;
9101         struct drm_atomic_state *state = drm_atomic_state_alloc(ddev);
9102         struct amdgpu_crtc *disconnected_acrtc = to_amdgpu_crtc(connector->encoder->crtc);
9103         struct drm_plane *plane = disconnected_acrtc->base.primary;
9104         struct drm_connector_state *conn_state;
9105         struct drm_crtc_state *crtc_state;
9106         struct drm_plane_state *plane_state;
9107
9108         if (!state)
9109                 return -ENOMEM;
9110
9111         state->acquire_ctx = ddev->mode_config.acquire_ctx;
9112
9113         /* Construct an atomic state to restore the previous display settings */
9114
9115         /*
9116          * Attach connectors to drm_atomic_state
9117          */
9118         conn_state = drm_atomic_get_connector_state(state, connector);
9119
9120         ret = PTR_ERR_OR_ZERO(conn_state);
9121         if (ret)
9122                 goto out;
9123
9124         /* Attach crtc to drm_atomic_state */
9125         crtc_state = drm_atomic_get_crtc_state(state, &disconnected_acrtc->base);
9126
9127         ret = PTR_ERR_OR_ZERO(crtc_state);
9128         if (ret)
9129                 goto out;
9130
9131         /* force a restore */
9132         crtc_state->mode_changed = true;
9133
9134         /* Attach plane to drm_atomic_state */
9135         plane_state = drm_atomic_get_plane_state(state, plane);
9136
9137         ret = PTR_ERR_OR_ZERO(plane_state);
9138         if (ret)
9139                 goto out;
9140
9141         /* Call commit internally with the state we just constructed */
9142         ret = drm_atomic_commit(state);
9143
9144 out:
9145         drm_atomic_state_put(state);
9146         if (ret)
9147                 DRM_ERROR("Restoring old state failed with %i\n", ret);
9148
9149         return ret;
9150 }
9151
9152 /*
9153  * This function handles all the cases when a set mode does not come upon
9154  * hotplug. This includes when a display is unplugged then plugged back into
9155  * the same port, and when running without usermode desktop manager support.
9156  */
9157 void dm_restore_drm_connector_state(struct drm_device *dev,
9158                                     struct drm_connector *connector)
9159 {
9160         struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
9161         struct amdgpu_crtc *disconnected_acrtc;
9162         struct dm_crtc_state *acrtc_state;
9163
9164         if (!aconnector->dc_sink || !connector->state || !connector->encoder)
9165                 return;
9166
9167         disconnected_acrtc = to_amdgpu_crtc(connector->encoder->crtc);
9168         if (!disconnected_acrtc)
9169                 return;
9170
9171         acrtc_state = to_dm_crtc_state(disconnected_acrtc->base.state);
9172         if (!acrtc_state->stream)
9173                 return;
9174
9175         /*
9176          * If the previous sink is not released and is different from the current
9177          * one, we deduce we are in a state where we cannot rely on a usermode
9178          * call to turn on the display, so we do it here.
9179          */
9180         if (acrtc_state->stream->sink != aconnector->dc_sink)
9181                 dm_force_atomic_commit(&aconnector->base);
9182 }
9183
9184 /*
9185  * Grabs all modesetting locks to serialize against any blocking commits and
9186  * waits for the completion of all non-blocking commits.
9187  */
9188 static int do_aquire_global_lock(struct drm_device *dev,
9189                                  struct drm_atomic_state *state)
9190 {
9191         struct drm_crtc *crtc;
9192         struct drm_crtc_commit *commit;
9193         long ret;
9194
9195         /*
9196          * Adding all modeset locks to acquire_ctx will
9197          * ensure that when the framework releases it, the
9198          * extra locks we are locking here will get released too.
9199          */
9200         ret = drm_modeset_lock_all_ctx(dev, state->acquire_ctx);
9201         if (ret)
9202                 return ret;
9203
9204         list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
9205                 spin_lock(&crtc->commit_lock);
9206                 commit = list_first_entry_or_null(&crtc->commit_list,
9207                                 struct drm_crtc_commit, commit_entry);
9208                 if (commit)
9209                         drm_crtc_commit_get(commit);
9210                 spin_unlock(&crtc->commit_lock);
9211
9212                 if (!commit)
9213                         continue;
9214
9215                 /*
9216                  * Make sure all pending HW programming has completed and
9217                  * all page flips are done.
9218                  */
9219                 ret = wait_for_completion_interruptible_timeout(&commit->hw_done, 10*HZ);
9220
9221                 if (ret > 0)
9222                         ret = wait_for_completion_interruptible_timeout(
9223                                         &commit->flip_done, 10*HZ);
9224
9225                 if (ret == 0)
9226                         DRM_ERROR("[CRTC:%d:%s] hw_done or flip_done "
9227                                   "timed out\n", crtc->base.id, crtc->name);
9228
9229                 drm_crtc_commit_put(commit);
9230         }
9231
9232         return ret < 0 ? ret : 0;
9233 }
9234
9235 static void get_freesync_config_for_crtc(
9236         struct dm_crtc_state *new_crtc_state,
9237         struct dm_connector_state *new_con_state)
9238 {
9239         struct mod_freesync_config config = {0};
9240         struct amdgpu_dm_connector *aconnector =
9241                         to_amdgpu_dm_connector(new_con_state->base.connector);
9242         struct drm_display_mode *mode = &new_crtc_state->base.mode;
9243         int vrefresh = drm_mode_vrefresh(mode);
9244         bool fs_vid_mode = false;
9245
9246         new_crtc_state->vrr_supported = new_con_state->freesync_capable &&
9247                                         vrefresh >= aconnector->min_vfreq &&
9248                                         vrefresh <= aconnector->max_vfreq;
9249
9250         if (new_crtc_state->vrr_supported) {
9251                 new_crtc_state->stream->ignore_msa_timing_param = true;
9252                 fs_vid_mode = new_crtc_state->freesync_config.state == VRR_STATE_ACTIVE_FIXED;
9253
9254                 config.min_refresh_in_uhz = aconnector->min_vfreq * 1000000;
9255                 config.max_refresh_in_uhz = aconnector->max_vfreq * 1000000;
9256                 config.vsif_supported = true;
9257                 config.btr = true;
9258
9259                 if (fs_vid_mode) {
9260                         config.state = VRR_STATE_ACTIVE_FIXED;
9261                         config.fixed_refresh_in_uhz = new_crtc_state->freesync_config.fixed_refresh_in_uhz;
9262                         goto out;
9263                 } else if (new_crtc_state->base.vrr_enabled) {
9264                         config.state = VRR_STATE_ACTIVE_VARIABLE;
9265                 } else {
9266                         config.state = VRR_STATE_INACTIVE;
9267                 }
9268         }
9269 out:
9270         new_crtc_state->freesync_config = config;
9271 }
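
/*
 * Illustrative numbers for the range check above (hypothetical panel): a
 * monitor advertising a 48-144 Hz FreeSync range gives
 * min_refresh_in_uhz == 48,000,000 and max_refresh_in_uhz == 144,000,000,
 * and a 60 Hz mode (vrefresh == 60) falls inside that range, so
 * vrr_supported is set.
 */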
9272
9273 static void reset_freesync_config_for_crtc(
9274         struct dm_crtc_state *new_crtc_state)
9275 {
9276         new_crtc_state->vrr_supported = false;
9277
9278         memset(&new_crtc_state->vrr_infopacket, 0,
9279                sizeof(new_crtc_state->vrr_infopacket));
9280 }
9281
9282 static bool
9283 is_timing_unchanged_for_freesync(struct drm_crtc_state *old_crtc_state,
9284                                  struct drm_crtc_state *new_crtc_state)
9285 {
9286         struct drm_display_mode old_mode, new_mode;
9287
9288         if (!old_crtc_state || !new_crtc_state)
9289                 return false;
9290
9291         old_mode = old_crtc_state->mode;
9292         new_mode = new_crtc_state->mode;
9293
9294         if (old_mode.clock       == new_mode.clock &&
9295             old_mode.hdisplay    == new_mode.hdisplay &&
9296             old_mode.vdisplay    == new_mode.vdisplay &&
9297             old_mode.htotal      == new_mode.htotal &&
9298             old_mode.vtotal      != new_mode.vtotal &&
9299             old_mode.hsync_start == new_mode.hsync_start &&
9300             old_mode.vsync_start != new_mode.vsync_start &&
9301             old_mode.hsync_end   == new_mode.hsync_end &&
9302             old_mode.vsync_end   != new_mode.vsync_end &&
9303             old_mode.hskew       == new_mode.hskew &&
9304             old_mode.vscan       == new_mode.vscan &&
9305             (old_mode.vsync_end - old_mode.vsync_start) ==
9306             (new_mode.vsync_end - new_mode.vsync_start))
9307                 return true;
9308
9309         return false;
9310 }
9311
9312 static void set_freesync_fixed_config(struct dm_crtc_state *dm_new_crtc_state)
{
9313         uint64_t num, den, res;
9314         struct drm_crtc_state *new_crtc_state = &dm_new_crtc_state->base;
9315
9316         dm_new_crtc_state->freesync_config.state = VRR_STATE_ACTIVE_FIXED;
9317
9318         num = (unsigned long long)new_crtc_state->mode.clock * 1000 * 1000000;
9319         den = (unsigned long long)new_crtc_state->mode.htotal *
9320               (unsigned long long)new_crtc_state->mode.vtotal;
9321
9322         res = div_u64(num, den);
9323         dm_new_crtc_state->freesync_config.fixed_refresh_in_uhz = res;
9324 }
9325
9326 static int dm_update_crtc_state(struct amdgpu_display_manager *dm,
9327                                 struct drm_atomic_state *state,
9328                                 struct drm_crtc *crtc,
9329                                 struct drm_crtc_state *old_crtc_state,
9330                                 struct drm_crtc_state *new_crtc_state,
9331                                 bool enable,
9332                                 bool *lock_and_validation_needed)
9333 {
9334         struct dm_atomic_state *dm_state = NULL;
9335         struct dm_crtc_state *dm_old_crtc_state, *dm_new_crtc_state;
9336         struct dc_stream_state *new_stream;
9337         int ret = 0;
9338
9339         /*
9340          * TODO: Move this code into dm_crtc_atomic_check once we get rid of
9341          * dc_validation_set and update only the changed items.
9342          */
9343         struct amdgpu_crtc *acrtc = NULL;
9344         struct amdgpu_dm_connector *aconnector = NULL;
9345         struct drm_connector_state *drm_new_conn_state = NULL, *drm_old_conn_state = NULL;
9346         struct dm_connector_state *dm_new_conn_state = NULL, *dm_old_conn_state = NULL;
9347
9348         new_stream = NULL;
9349
9350         dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
9351         dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
9352         acrtc = to_amdgpu_crtc(crtc);
9353         aconnector = amdgpu_dm_find_first_crtc_matching_connector(state, crtc);
9354
9355         /* TODO This hack should go away */
9356         if (aconnector && enable) {
9357                 /* Make sure fake sink is created in plug-in scenario */
9358                 drm_new_conn_state = drm_atomic_get_new_connector_state(state,
9359                                                             &aconnector->base);
9360                 drm_old_conn_state = drm_atomic_get_old_connector_state(state,
9361                                                             &aconnector->base);
9362
9363                 if (IS_ERR(drm_new_conn_state)) {
9364                         ret = PTR_ERR(drm_new_conn_state);
9365                         goto fail;
9366                 }
9367
9368                 dm_new_conn_state = to_dm_connector_state(drm_new_conn_state);
9369                 dm_old_conn_state = to_dm_connector_state(drm_old_conn_state);
9370
9371                 if (!drm_atomic_crtc_needs_modeset(new_crtc_state))
9372                         goto skip_modeset;
9373
9374                 new_stream = create_validate_stream_for_sink(aconnector,
9375                                                              &new_crtc_state->mode,
9376                                                              dm_new_conn_state,
9377                                                              dm_old_crtc_state->stream);
9378
9379                 /*
9380                  * We can end up with no stream on ACTION_SET if a display
9381                  * was disconnected during S3. In this case it is not an
9382                  * error: the OS will be updated after detection and will
9383                  * do the right thing on the next atomic commit.
9384                  */
9385
9386                 if (!new_stream) {
9387                         DRM_DEBUG_DRIVER("%s: Failed to create new stream for crtc %d\n",
9388                                         __func__, acrtc->base.base.id);
9389                         ret = -ENOMEM;
9390                         goto fail;
9391                 }
9392
9393                 /*
9394                  * TODO: Check VSDB bits to decide whether this should
9395                  * be enabled or not.
9396                  */
9397                 new_stream->triggered_crtc_reset.enabled =
9398                         dm->force_timing_sync;
9399
9400                 dm_new_crtc_state->abm_level = dm_new_conn_state->abm_level;
9401
9402                 ret = fill_hdr_info_packet(drm_new_conn_state,
9403                                            &new_stream->hdr_static_metadata);
9404                 if (ret)
9405                         goto fail;
9406
9407                 /*
9408                  * If we already removed the old stream from the context
9409                  * (and set the new stream to NULL) then we can't reuse
9410                  * the old stream even if the stream and scaling are unchanged.
9411                  * We'll hit the BUG_ON and black screen.
9412                  *
9413                  * TODO: Refactor this function to allow this check to work
9414                  * in all conditions.
9415                  */
9416                 if (amdgpu_freesync_vid_mode &&
9417                     dm_new_crtc_state->stream &&
9418                     is_timing_unchanged_for_freesync(new_crtc_state, old_crtc_state))
9419                         goto skip_modeset;
9420
9421                 if (dm_new_crtc_state->stream &&
9422                     dc_is_stream_unchanged(new_stream, dm_old_crtc_state->stream) &&
9423                     dc_is_stream_scaling_unchanged(new_stream, dm_old_crtc_state->stream)) {
9424                         new_crtc_state->mode_changed = false;
9425                         DRM_DEBUG_DRIVER("Mode change not required, setting mode_changed to %d",
9426                                          new_crtc_state->mode_changed);
9427                 }
9428         }
9429
9430         /* mode_changed flag may get updated above, need to check again */
9431         if (!drm_atomic_crtc_needs_modeset(new_crtc_state))
9432                 goto skip_modeset;
9433
9434         DRM_DEBUG_ATOMIC(
9435                 "amdgpu_crtc id:%d crtc_state_flags: enable:%d, active:%d, "
9436                 "planes_changed:%d, mode_changed:%d,active_changed:%d,"
9437                 "connectors_changed:%d\n",
9438                 acrtc->crtc_id,
9439                 new_crtc_state->enable,
9440                 new_crtc_state->active,
9441                 new_crtc_state->planes_changed,
9442                 new_crtc_state->mode_changed,
9443                 new_crtc_state->active_changed,
9444                 new_crtc_state->connectors_changed);
9445
9446         /* Remove stream for any changed/disabled CRTC */
9447         if (!enable) {
9448
9449                 if (!dm_old_crtc_state->stream)
9450                         goto skip_modeset;
9451
9452                 if (amdgpu_freesync_vid_mode && dm_new_crtc_state->stream &&
9453                     is_timing_unchanged_for_freesync(new_crtc_state,
9454                                                      old_crtc_state)) {
9455                         new_crtc_state->mode_changed = false;
9456                         DRM_DEBUG_DRIVER("Mode change not required for front porch change, "
9457                                          "setting mode_changed to %d\n",
9458                                          new_crtc_state->mode_changed);
9460
9461                         set_freesync_fixed_config(dm_new_crtc_state);
9462
9463                         goto skip_modeset;
9464                 } else if (amdgpu_freesync_vid_mode && aconnector &&
9465                            is_freesync_video_mode(&new_crtc_state->mode,
9466                                                   aconnector)) {
9467                         set_freesync_fixed_config(dm_new_crtc_state);
9468                 }
9469
9470                 ret = dm_atomic_get_state(state, &dm_state);
9471                 if (ret)
9472                         goto fail;
9473
9474                 DRM_DEBUG_DRIVER("Disabling DRM crtc: %d\n",
9475                                 crtc->base.id);
9476
9477                 /* i.e. reset mode */
9478                 if (dc_remove_stream_from_ctx(
9479                                 dm->dc,
9480                                 dm_state->context,
9481                                 dm_old_crtc_state->stream) != DC_OK) {
9482                         ret = -EINVAL;
9483                         goto fail;
9484                 }
9485
9486                 dc_stream_release(dm_old_crtc_state->stream);
9487                 dm_new_crtc_state->stream = NULL;
9488
9489                 reset_freesync_config_for_crtc(dm_new_crtc_state);
9490
9491                 *lock_and_validation_needed = true;
9492
9493         } else {/* Add stream for any updated/enabled CRTC */
9494                 /*
9495                  * Quick fix to prevent a NULL pointer deref on new_stream when
9496                  * newly added MST connectors are not found in the existing
9497                  * crtc_state in daisy-chained mode. TODO: find the root cause.
9498                  */
9499                 if (!aconnector || (!aconnector->dc_sink && aconnector->mst_port))
9500                         goto skip_modeset;
9501
9502                 if (modereset_required(new_crtc_state))
9503                         goto skip_modeset;
9504
9505                 if (modeset_required(new_crtc_state, new_stream,
9506                                      dm_old_crtc_state->stream)) {
9507
9508                         WARN_ON(dm_new_crtc_state->stream);
9509
9510                         ret = dm_atomic_get_state(state, &dm_state);
9511                         if (ret)
9512                                 goto fail;
9513
9514                         dm_new_crtc_state->stream = new_stream;
9515
9516                         dc_stream_retain(new_stream);
9517
9518                         DRM_DEBUG_ATOMIC("Enabling DRM crtc: %d\n",
9519                                          crtc->base.id);
9520
9521                         if (dc_add_stream_to_ctx(
9522                                         dm->dc,
9523                                         dm_state->context,
9524                                         dm_new_crtc_state->stream) != DC_OK) {
9525                                 ret = -EINVAL;
9526                                 goto fail;
9527                         }
9528
9529                         *lock_and_validation_needed = true;
9530                 }
9531         }
9532
9533 skip_modeset:
9534         /* Release extra reference */
9535         if (new_stream)
9536                 dc_stream_release(new_stream);
9537
9538         /*
9539          * We want to do dc stream updates that do not require a
9540          * full modeset below.
9541          */
9542         if (!(enable && aconnector && new_crtc_state->active))
9543                 return 0;
9544         /*
9545          * Given above conditions, the dc state cannot be NULL because:
9546          * 1. We're in the process of enabling CRTCs (just been added
9547          *    to the dc context, or already is on the context)
9548          * 2. Has a valid connector attached, and
9549          * 3. Is currently active and enabled.
9550          * => The dc stream state currently exists.
9551          */
9552         BUG_ON(dm_new_crtc_state->stream == NULL);
9553
9554         /* Scaling or underscan settings */
9555         if (is_scaling_state_different(dm_old_conn_state, dm_new_conn_state))
9556                 update_stream_scaling_settings(
9557                         &new_crtc_state->mode, dm_new_conn_state, dm_new_crtc_state->stream);
9558
9559         /* ABM settings */
9560         dm_new_crtc_state->abm_level = dm_new_conn_state->abm_level;
9561
9562         /*
9563          * Color management settings. We also update color properties
9564          * when a modeset is needed, to ensure it gets reprogrammed.
9565          */
9566         if (dm_new_crtc_state->base.color_mgmt_changed ||
9567             drm_atomic_crtc_needs_modeset(new_crtc_state)) {
9568                 ret = amdgpu_dm_update_crtc_color_mgmt(dm_new_crtc_state);
9569                 if (ret)
9570                         goto fail;
9571         }
9572
9573         /* Update Freesync settings. */
9574         get_freesync_config_for_crtc(dm_new_crtc_state,
9575                                      dm_new_conn_state);
9576
9577         return ret;
9578
9579 fail:
9580         if (new_stream)
9581                 dc_stream_release(new_stream);
9582         return ret;
9583 }
9584
9585 static bool should_reset_plane(struct drm_atomic_state *state,
9586                                struct drm_plane *plane,
9587                                struct drm_plane_state *old_plane_state,
9588                                struct drm_plane_state *new_plane_state)
9589 {
9590         struct drm_plane *other;
9591         struct drm_plane_state *old_other_state, *new_other_state;
9592         struct drm_crtc_state *new_crtc_state;
9593         int i;
9594
9595         /*
9596          * TODO: Remove this hack once the checks below are sufficient
9597          * to determine when we need to reset all the planes on
9598          * the stream.
9599          */
9600         if (state->allow_modeset)
9601                 return true;
9602
9603         /* Exit early if we know that we're adding or removing the plane. */
9604         if (old_plane_state->crtc != new_plane_state->crtc)
9605                 return true;
9606
9607         /* old crtc == new_crtc == NULL, plane not in context. */
9608         if (!new_plane_state->crtc)
9609                 return false;
9610
9611         new_crtc_state =
9612                 drm_atomic_get_new_crtc_state(state, new_plane_state->crtc);
9613
9614         if (!new_crtc_state)
9615                 return true;
9616
9617         /* CRTC Degamma changes currently require us to recreate planes. */
9618         if (new_crtc_state->color_mgmt_changed)
9619                 return true;
9620
9621         if (drm_atomic_crtc_needs_modeset(new_crtc_state))
9622                 return true;
9623
9624         /*
9625          * If there are any new primary or overlay planes being added or
9626          * removed then the z-order can potentially change. To ensure
9627          * correct z-order and pipe acquisition the current DC architecture
9628          * requires us to remove and recreate all existing planes.
9629          *
9630          * TODO: Come up with a more elegant solution for this.
9631          */
9632         for_each_oldnew_plane_in_state(state, other, old_other_state, new_other_state, i) {
9633                 struct amdgpu_framebuffer *old_afb, *new_afb;
9634                 if (other->type == DRM_PLANE_TYPE_CURSOR)
9635                         continue;
9636
9637                 if (old_other_state->crtc != new_plane_state->crtc &&
9638                     new_other_state->crtc != new_plane_state->crtc)
9639                         continue;
9640
9641                 if (old_other_state->crtc != new_other_state->crtc)
9642                         return true;
9643
9644                 /* Src/dst size and scaling updates. */
9645                 if (old_other_state->src_w != new_other_state->src_w ||
9646                     old_other_state->src_h != new_other_state->src_h ||
9647                     old_other_state->crtc_w != new_other_state->crtc_w ||
9648                     old_other_state->crtc_h != new_other_state->crtc_h)
9649                         return true;
9650
9651                 /* Rotation / mirroring updates. */
9652                 if (old_other_state->rotation != new_other_state->rotation)
9653                         return true;
9654
9655                 /* Blending updates. */
9656                 if (old_other_state->pixel_blend_mode !=
9657                     new_other_state->pixel_blend_mode)
9658                         return true;
9659
9660                 /* Alpha updates. */
9661                 if (old_other_state->alpha != new_other_state->alpha)
9662                         return true;
9663
9664                 /* Colorspace changes. */
9665                 if (old_other_state->color_range != new_other_state->color_range ||
9666                     old_other_state->color_encoding != new_other_state->color_encoding)
9667                         return true;
9668
9669                 /* Framebuffer checks fall at the end. */
9670                 if (!old_other_state->fb || !new_other_state->fb)
9671                         continue;
9672
9673                 /* Pixel format changes can require bandwidth updates. */
9674                 if (old_other_state->fb->format != new_other_state->fb->format)
9675                         return true;
9676
9677                 old_afb = (struct amdgpu_framebuffer *)old_other_state->fb;
9678                 new_afb = (struct amdgpu_framebuffer *)new_other_state->fb;
9679
9680                 /* Tiling and DCC changes also require bandwidth updates. */
9681                 if (old_afb->tiling_flags != new_afb->tiling_flags ||
9682                     old_afb->base.modifier != new_afb->base.modifier)
9683                         return true;
9684         }
9685
9686         return false;
9687 }
9688
9689 static int dm_check_cursor_fb(struct amdgpu_crtc *new_acrtc,
9690                               struct drm_plane_state *new_plane_state,
9691                               struct drm_framebuffer *fb)
9692 {
9693         struct amdgpu_device *adev = drm_to_adev(new_acrtc->base.dev);
9694         struct amdgpu_framebuffer *afb = to_amdgpu_framebuffer(fb);
9695         unsigned int pitch;
9696         bool linear;
9697
9698         if (fb->width > new_acrtc->max_cursor_width ||
9699             fb->height > new_acrtc->max_cursor_height) {
9700                 DRM_DEBUG_ATOMIC("Bad cursor FB size %dx%d\n",
9701                                  new_plane_state->fb->width,
9702                                  new_plane_state->fb->height);
9703                 return -EINVAL;
9704         }
9705         if (new_plane_state->src_w != fb->width << 16 ||
9706             new_plane_state->src_h != fb->height << 16) {
9707                 DRM_DEBUG_ATOMIC("Cropping not supported for cursor plane\n");
9708                 return -EINVAL;
9709         }
9710
9711         /* Pitch in pixels */
9712         pitch = fb->pitches[0] / fb->format->cpp[0];
9713
9714         if (fb->width != pitch) {
9715                 DRM_DEBUG_ATOMIC("Cursor FB width %d doesn't match pitch %d",
9716                                  fb->width, pitch);
9717                 return -EINVAL;
9718         }
9719
9720         switch (pitch) {
9721         case 64:
9722         case 128:
9723         case 256:
9724                 /* FB pitch is supported by cursor plane */
9725                 break;
9726         default:
9727                 DRM_DEBUG_ATOMIC("Bad cursor FB pitch %d px\n", pitch);
9728                 return -EINVAL;
9729         }
9730
9731         /* Core DRM takes care of checking FB modifiers, so we only need to
9732          * check tiling flags when the FB doesn't have a modifier. */
9733         if (!(fb->flags & DRM_MODE_FB_MODIFIERS)) {
9734                 if (adev->family < AMDGPU_FAMILY_AI) {
9735                         linear = AMDGPU_TILING_GET(afb->tiling_flags, ARRAY_MODE) != DC_ARRAY_2D_TILED_THIN1 &&
9736                                  AMDGPU_TILING_GET(afb->tiling_flags, ARRAY_MODE) != DC_ARRAY_1D_TILED_THIN1 &&
9737                                  AMDGPU_TILING_GET(afb->tiling_flags, MICRO_TILE_MODE) == 0;
9738                 } else {
9739                         linear = AMDGPU_TILING_GET(afb->tiling_flags, SWIZZLE_MODE) == 0;
9740                 }
9741                 if (!linear) {
9742                         DRM_DEBUG_ATOMIC("Cursor FB not linear");
9743                         return -EINVAL;
9744                 }
9745         }
9746
9747         return 0;
9748 }
9749
9750 static int dm_update_plane_state(struct dc *dc,
9751                                  struct drm_atomic_state *state,
9752                                  struct drm_plane *plane,
9753                                  struct drm_plane_state *old_plane_state,
9754                                  struct drm_plane_state *new_plane_state,
9755                                  bool enable,
9756                                  bool *lock_and_validation_needed)
9757 {
9758
9759         struct dm_atomic_state *dm_state = NULL;
9760         struct drm_crtc *new_plane_crtc, *old_plane_crtc;
9761         struct drm_crtc_state *old_crtc_state, *new_crtc_state;
9762         struct dm_crtc_state *dm_new_crtc_state, *dm_old_crtc_state;
9763         struct dm_plane_state *dm_new_plane_state, *dm_old_plane_state;
9764         struct amdgpu_crtc *new_acrtc;
9765         bool needs_reset;
9766         int ret = 0;
9767
9768
9769         new_plane_crtc = new_plane_state->crtc;
9770         old_plane_crtc = old_plane_state->crtc;
9771         dm_new_plane_state = to_dm_plane_state(new_plane_state);
9772         dm_old_plane_state = to_dm_plane_state(old_plane_state);
9773
9774         if (plane->type == DRM_PLANE_TYPE_CURSOR) {
9775                 if (!enable || !new_plane_crtc ||
9776                         drm_atomic_plane_disabling(plane->state, new_plane_state))
9777                         return 0;
9778
9779                 new_acrtc = to_amdgpu_crtc(new_plane_crtc);
9780
9781                 if (new_plane_state->src_x != 0 || new_plane_state->src_y != 0) {
9782                         DRM_DEBUG_ATOMIC("Cropping not supported for cursor plane\n");
9783                         return -EINVAL;
9784                 }
9785
9786                 if (new_plane_state->fb) {
9787                         ret = dm_check_cursor_fb(new_acrtc, new_plane_state,
9788                                                  new_plane_state->fb);
9789                         if (ret)
9790                                 return ret;
9791                 }
9792
9793                 return 0;
9794         }
9795
9796         needs_reset = should_reset_plane(state, plane, old_plane_state,
9797                                          new_plane_state);
9798
9799         /* Remove any changed/removed planes */
9800         if (!enable) {
9801                 if (!needs_reset)
9802                         return 0;
9803
9804                 if (!old_plane_crtc)
9805                         return 0;
9806
9807                 old_crtc_state = drm_atomic_get_old_crtc_state(
9808                                 state, old_plane_crtc);
9809                 dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
9810
9811                 if (!dm_old_crtc_state->stream)
9812                         return 0;
9813
9814                 DRM_DEBUG_ATOMIC("Disabling DRM plane: %d on DRM crtc %d\n",
9815                                 plane->base.id, old_plane_crtc->base.id);
9816
9817                 ret = dm_atomic_get_state(state, &dm_state);
9818                 if (ret)
9819                         return ret;
9820
9821                 if (!dc_remove_plane_from_context(
9822                                 dc,
9823                                 dm_old_crtc_state->stream,
9824                                 dm_old_plane_state->dc_state,
9825                                 dm_state->context)) {
9826
9827                         return -EINVAL;
9828                 }
9829
9830
9831                 dc_plane_state_release(dm_old_plane_state->dc_state);
9832                 dm_new_plane_state->dc_state = NULL;
9833
9834                 *lock_and_validation_needed = true;
9835
9836         } else { /* Add new planes */
9837                 struct dc_plane_state *dc_new_plane_state;
9838
9839                 if (drm_atomic_plane_disabling(plane->state, new_plane_state))
9840                         return 0;
9841
9842                 if (!new_plane_crtc)
9843                         return 0;
9844
9845                 new_crtc_state = drm_atomic_get_new_crtc_state(state, new_plane_crtc);
9846                 dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
9847
9848                 if (!dm_new_crtc_state->stream)
9849                         return 0;
9850
9851                 if (!needs_reset)
9852                         return 0;
9853
9854                 ret = dm_plane_helper_check_state(new_plane_state, new_crtc_state);
9855                 if (ret)
9856                         return ret;
9857
9858                 WARN_ON(dm_new_plane_state->dc_state);
9859
9860                 dc_new_plane_state = dc_create_plane_state(dc);
9861                 if (!dc_new_plane_state)
9862                         return -ENOMEM;
9863
9864                 DRM_DEBUG_ATOMIC("Enabling DRM plane: %d on DRM crtc %d\n",
9865                                  plane->base.id, new_plane_crtc->base.id);
9866
9867                 ret = fill_dc_plane_attributes(
9868                         drm_to_adev(new_plane_crtc->dev),
9869                         dc_new_plane_state,
9870                         new_plane_state,
9871                         new_crtc_state);
9872                 if (ret) {
9873                         dc_plane_state_release(dc_new_plane_state);
9874                         return ret;
9875                 }
9876
9877                 ret = dm_atomic_get_state(state, &dm_state);
9878                 if (ret) {
9879                         dc_plane_state_release(dc_new_plane_state);
9880                         return ret;
9881                 }
9882
9883                 /*
9884                  * Any atomic check errors that occur after this will
9885                  * not need a release. The plane state will be attached
9886                  * to the stream, and therefore part of the atomic
9887                  * state. It'll be released when the atomic state is
9888                  * cleaned.
9889                  */
9890                 if (!dc_add_plane_to_context(
9891                                 dc,
9892                                 dm_new_crtc_state->stream,
9893                                 dc_new_plane_state,
9894                                 dm_state->context)) {
9895
9896                         dc_plane_state_release(dc_new_plane_state);
9897                         return -EINVAL;
9898                 }
9899
9900                 dm_new_plane_state->dc_state = dc_new_plane_state;
9901
9902                 /* Tell DC to do a full surface update every time there
9903                  * is a plane change. Inefficient, but works for now.
9904                  */
9905                 dm_new_plane_state->dc_state->update_flags.bits.full_update = 1;
9906
9907                 *lock_and_validation_needed = true;
9908         }
9909
9910
9911         return ret;
9912 }
9913
9914 static int dm_check_crtc_cursor(struct drm_atomic_state *state,
9915                                 struct drm_crtc *crtc,
9916                                 struct drm_crtc_state *new_crtc_state)
9917 {
9918         struct drm_plane_state *new_cursor_state, *new_primary_state;
9919         int cursor_scale_w, cursor_scale_h, primary_scale_w, primary_scale_h;
9920
9921         /* On DCE and DCN there is no dedicated hardware cursor plane. We get a
9922          * cursor per pipe but it's going to inherit the scaling and
9923          * positioning from the underlying pipe. Check the cursor plane's
9924          * blending properties match the primary plane's. */
9925
9926         new_cursor_state = drm_atomic_get_new_plane_state(state, crtc->cursor);
9927         new_primary_state = drm_atomic_get_new_plane_state(state, crtc->primary);
9928         if (!new_cursor_state || !new_primary_state ||
9929             !new_cursor_state->fb || !new_primary_state->fb) {
9930                 return 0;
9931         }
9932
9933         cursor_scale_w = new_cursor_state->crtc_w * 1000 /
9934                          (new_cursor_state->src_w >> 16);
9935         cursor_scale_h = new_cursor_state->crtc_h * 1000 /
9936                          (new_cursor_state->src_h >> 16);
9937
9938         primary_scale_w = new_primary_state->crtc_w * 1000 /
9939                          (new_primary_state->src_w >> 16);
9940         primary_scale_h = new_primary_state->crtc_h * 1000 /
9941                          (new_primary_state->src_h >> 16);
9942
9943         if (cursor_scale_w != primary_scale_w ||
9944             cursor_scale_h != primary_scale_h) {
9945                 DRM_DEBUG_ATOMIC("Cursor plane scaling doesn't match primary plane\n");
9946                 return -EINVAL;
9947         }
9948
9949         return 0;
9950 }
9951
9952 #if defined(CONFIG_DRM_AMD_DC_DCN)
9953 static int add_affected_mst_dsc_crtcs(struct drm_atomic_state *state, struct drm_crtc *crtc)
9954 {
9955         struct drm_connector *connector;
9956         struct drm_connector_state *conn_state;
9957         struct amdgpu_dm_connector *aconnector = NULL;
9958         int i;
9959         for_each_new_connector_in_state(state, connector, conn_state, i) {
9960                 if (conn_state->crtc != crtc)
9961                         continue;
9962
9963                 aconnector = to_amdgpu_dm_connector(connector);
9964                 if (!aconnector->port || !aconnector->mst_port)
9965                         aconnector = NULL;
9966                 else
9967                         break;
9968         }
9969
9970         if (!aconnector)
9971                 return 0;
9972
9973         return drm_dp_mst_add_affected_dsc_crtcs(state, &aconnector->mst_port->mst_mgr);
9974 }
9975 #endif
9976
9977 static int validate_overlay(struct drm_atomic_state *state)
9978 {
9979         int i;
9980         struct drm_plane *plane;
9981         struct drm_plane_state *old_plane_state, *new_plane_state;
9982         struct drm_plane_state *primary_state, *overlay_state = NULL;
9983
9984         /* Find the overlay plane's new state; the containment check is below */
9985         for_each_oldnew_plane_in_state_reverse(state, plane, old_plane_state, new_plane_state, i) {
9986                 if (plane->type == DRM_PLANE_TYPE_OVERLAY) {
9987                         if (drm_atomic_plane_disabling(plane->state, new_plane_state))
9988                                 return 0;
9989
9990                         overlay_state = new_plane_state;
9991                         continue;
9992                 }
9993         }
9994
9995         /* check if we're making changes to the overlay plane */
9996         if (!overlay_state)
9997                 return 0;
9998
9999         /* check if overlay plane is enabled */
10000         if (!overlay_state->crtc)
10001                 return 0;
10002
10003         /* find the primary plane for the CRTC that the overlay is enabled on */
10004         primary_state = drm_atomic_get_plane_state(state, overlay_state->crtc->primary);
10005         if (IS_ERR(primary_state))
10006                 return PTR_ERR(primary_state);
10007
10008         /* check if primary plane is enabled */
10009         if (!primary_state->crtc)
10010                 return 0;
10011
10012         /* Perform the bounds check to ensure the overlay plane covers the primary */
10013         if (primary_state->crtc_x < overlay_state->crtc_x ||
10014             primary_state->crtc_y < overlay_state->crtc_y ||
10015             primary_state->crtc_x + primary_state->crtc_w > overlay_state->crtc_x + overlay_state->crtc_w ||
10016             primary_state->crtc_y + primary_state->crtc_h > overlay_state->crtc_y + overlay_state->crtc_h) {
10017                 DRM_DEBUG_ATOMIC("Overlay plane is enabled with hardware cursor but does not fully cover primary plane\n");
10018                 return -EINVAL;
10019         }
10020
10021         return 0;
10022 }
10023
10024 /**
10025  * amdgpu_dm_atomic_check() - Atomic check implementation for AMDgpu DM.
10026  * @dev: The DRM device
10027  * @state: The atomic state to commit
10028  *
10029  * Validate that the given atomic state is programmable by DC into hardware.
10030  * This involves constructing a &struct dc_state reflecting the new hardware
10031  * state we wish to commit, then querying DC to see if it is programmable. It's
10032  * important not to modify the existing DC state. Otherwise, atomic_check
10033  * may unexpectedly commit hardware changes.
10034  *
10035  * When validating the DC state, it's important that the right locks are
10036  * acquired. For full updates case which removes/adds/updates streams on one
10037  * CRTC while flipping on another CRTC, acquiring global lock will guarantee
10038  * that any such full update commit will wait for completion of any outstanding
10039  * flip using DRMs synchronization events.
10040  *
10041  * Note that DM adds the affected connectors for all CRTCs in state, when that
10042  * might not seem necessary. This is because DC stream creation requires the
10043  * DC sink, which is tied to the DRM connector state. Cleaning this up should
10044  * be possible but non-trivial - a possible TODO item.
10045  *
10046  * Return: -Error code if validation failed.
10047  */
10048 static int amdgpu_dm_atomic_check(struct drm_device *dev,
10049                                   struct drm_atomic_state *state)
10050 {
10051         struct amdgpu_device *adev = drm_to_adev(dev);
10052         struct dm_atomic_state *dm_state = NULL;
10053         struct dc *dc = adev->dm.dc;
10054         struct drm_connector *connector;
10055         struct drm_connector_state *old_con_state, *new_con_state;
10056         struct drm_crtc *crtc;
10057         struct drm_crtc_state *old_crtc_state, *new_crtc_state;
10058         struct drm_plane *plane;
10059         struct drm_plane_state *old_plane_state, *new_plane_state;
10060         enum dc_status status;
10061         int ret, i;
10062         bool lock_and_validation_needed = false;
10063         struct dm_crtc_state *dm_old_crtc_state;
10064
10065         trace_amdgpu_dm_atomic_check_begin(state);
10066
10067         ret = drm_atomic_helper_check_modeset(dev, state);
10068         if (ret)
10069                 goto fail;
10070
10071         /* Check connector changes */
10072         for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) {
10073                 struct dm_connector_state *dm_old_con_state = to_dm_connector_state(old_con_state);
10074                 struct dm_connector_state *dm_new_con_state = to_dm_connector_state(new_con_state);
10075
10076                 /* Skip connectors that are disabled or part of modeset already. */
10077                 if (!old_con_state->crtc && !new_con_state->crtc)
10078                         continue;
10079
10080                 if (!new_con_state->crtc)
10081                         continue;
10082
10083                 new_crtc_state = drm_atomic_get_crtc_state(state, new_con_state->crtc);
10084                 if (IS_ERR(new_crtc_state)) {
10085                         ret = PTR_ERR(new_crtc_state);
10086                         goto fail;
10087                 }
10088
10089                 if (dm_old_con_state->abm_level !=
10090                     dm_new_con_state->abm_level)
10091                         new_crtc_state->connectors_changed = true;
10092         }
10093
10094 #if defined(CONFIG_DRM_AMD_DC_DCN)
10095         if (dc_resource_is_dsc_encoding_supported(dc)) {
10096                 for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
10097                         if (drm_atomic_crtc_needs_modeset(new_crtc_state)) {
10098                                 ret = add_affected_mst_dsc_crtcs(state, crtc);
10099                                 if (ret)
10100                                         goto fail;
10101                         }
10102                 }
10103         }
10104 #endif
10105         for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
10106                 dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
10107
10108                 if (!drm_atomic_crtc_needs_modeset(new_crtc_state) &&
10109                     !new_crtc_state->color_mgmt_changed &&
10110                     old_crtc_state->vrr_enabled == new_crtc_state->vrr_enabled &&
10111                     !dm_old_crtc_state->dsc_force_changed)
10112                         continue;
10113
10114                 if (!new_crtc_state->enable)
10115                         continue;
10116
10117                 ret = drm_atomic_add_affected_connectors(state, crtc);
10118                 if (ret)
10119                         goto fail;
10120
10121                 ret = drm_atomic_add_affected_planes(state, crtc);
10122                 if (ret)
10123                         goto fail;
10124
10125                 if (dm_old_crtc_state->dsc_force_changed)
10126                         new_crtc_state->mode_changed = true;
10127         }
10128
10129         /*
10130          * Add all primary and overlay planes on the CRTC to the state
10131          * whenever a plane is enabled to maintain correct z-ordering
10132          * and to enable fast surface updates.
10133          */
10134         drm_for_each_crtc(crtc, dev) {
10135                 bool modified = false;
10136
10137                 for_each_oldnew_plane_in_state(state, plane, old_plane_state, new_plane_state, i) {
10138                         if (plane->type == DRM_PLANE_TYPE_CURSOR)
10139                                 continue;
10140
10141                         if (new_plane_state->crtc == crtc ||
10142                             old_plane_state->crtc == crtc) {
10143                                 modified = true;
10144                                 break;
10145                         }
10146                 }
10147
10148                 if (!modified)
10149                         continue;
10150
10151                 drm_for_each_plane_mask(plane, state->dev, crtc->state->plane_mask) {
10152                         if (plane->type == DRM_PLANE_TYPE_CURSOR)
10153                                 continue;
10154
10155                         new_plane_state =
10156                                 drm_atomic_get_plane_state(state, plane);
10157
10158                         if (IS_ERR(new_plane_state)) {
10159                                 ret = PTR_ERR(new_plane_state);
10160                                 goto fail;
10161                         }
10162                 }
10163         }
10164
10165         /* Remove existing planes if they are modified */
10166         for_each_oldnew_plane_in_state_reverse(state, plane, old_plane_state, new_plane_state, i) {
10167                 ret = dm_update_plane_state(dc, state, plane,
10168                                             old_plane_state,
10169                                             new_plane_state,
10170                                             false,
10171                                             &lock_and_validation_needed);
10172                 if (ret)
10173                         goto fail;
10174         }
10175
10176         /* Disable all crtcs which require disable */
10177         for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
10178                 ret = dm_update_crtc_state(&adev->dm, state, crtc,
10179                                            old_crtc_state,
10180                                            new_crtc_state,
10181                                            false,
10182                                            &lock_and_validation_needed);
10183                 if (ret)
10184                         goto fail;
10185         }
10186
10187         /* Enable all crtcs which require enable */
10188         for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
10189                 ret = dm_update_crtc_state(&adev->dm, state, crtc,
10190                                            old_crtc_state,
10191                                            new_crtc_state,
10192                                            true,
10193                                            &lock_and_validation_needed);
10194                 if (ret)
10195                         goto fail;
10196         }
10197
10198         ret = validate_overlay(state);
10199         if (ret)
10200                 goto fail;
10201
10202         /* Add new/modified planes */
10203         for_each_oldnew_plane_in_state_reverse(state, plane, old_plane_state, new_plane_state, i) {
10204                 ret = dm_update_plane_state(dc, state, plane,
10205                                             old_plane_state,
10206                                             new_plane_state,
10207                                             true,
10208                                             &lock_and_validation_needed);
10209                 if (ret)
10210                         goto fail;
10211         }
10212
10213         /* Run this here since we want to validate the streams we created */
10214         ret = drm_atomic_helper_check_planes(dev, state);
10215         if (ret)
10216                 goto fail;
10217
10218         /* Check cursor planes scaling */
10219         for_each_new_crtc_in_state(state, crtc, new_crtc_state, i) {
10220                 ret = dm_check_crtc_cursor(state, crtc, new_crtc_state);
10221                 if (ret)
10222                         goto fail;
10223         }
10224
10225         if (state->legacy_cursor_update) {
10226                 /*
10227                  * This is a fast cursor update coming from the plane update
10228                  * helper, check if it can be done asynchronously for better
10229                  * performance.
10230                  */
10231                 state->async_update =
10232                         !drm_atomic_helper_async_check(dev, state);
10233
10234                 /*
10235                  * Skip the remaining global validation if this is an async
10236                  * update. Cursor updates can be done without affecting
10237                  * state or bandwidth calcs and this avoids the performance
10238                  * penalty of locking the private state object and
10239                  * allocating a new dc_state.
10240                  */
10241                 if (state->async_update)
10242                         return 0;
10243         }
10244
10245         /* Check scaling and underscan changes */
10246         /* TODO Removed scaling changes validation due to inability to commit
10247          * new stream into context w/o causing full reset. Need to
10248          * decide how to handle.
10249          */
10250         for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) {
10251                 struct dm_connector_state *dm_old_con_state = to_dm_connector_state(old_con_state);
10252                 struct dm_connector_state *dm_new_con_state = to_dm_connector_state(new_con_state);
10253                 struct amdgpu_crtc *acrtc = to_amdgpu_crtc(dm_new_con_state->base.crtc);
10254
10255                 /* Skip any modesets/resets */
10256                 if (!acrtc || drm_atomic_crtc_needs_modeset(
10257                                 drm_atomic_get_new_crtc_state(state, &acrtc->base)))
10258                         continue;
10259
10260                 /* Skip anything that is not a scaling or underscan change */
10261                 if (!is_scaling_state_different(dm_new_con_state, dm_old_con_state))
10262                         continue;
10263
10264                 lock_and_validation_needed = true;
10265         }
10266
10267         /**
10268          * Streams and planes are reset when there are changes that affect
10269          * bandwidth. Anything that affects bandwidth needs to go through
10270          * DC global validation to ensure that the configuration can be applied
10271          * to hardware.
10272          *
10273          * We have to currently stall out here in atomic_check for outstanding
10274          * commits to finish in this case because our IRQ handlers reference
10275          * DRM state directly - we can end up disabling interrupts too early
10276          * if we don't.
10277          *
10278          * TODO: Remove this stall and drop DM state private objects.
10279          */
10280         if (lock_and_validation_needed) {
10281                 ret = dm_atomic_get_state(state, &dm_state);
10282                 if (ret)
10283                         goto fail;
10284
10285                 ret = do_aquire_global_lock(dev, state);
10286                 if (ret)
10287                         goto fail;
10288
10289 #if defined(CONFIG_DRM_AMD_DC_DCN)
10290                 if (!compute_mst_dsc_configs_for_state(state, dm_state->context)) {
10291                         ret = -EINVAL;
                        goto fail;
                }
10292
10293                 ret = dm_update_mst_vcpi_slots_for_dsc(state, dm_state->context);
10294                 if (ret)
10295                         goto fail;
10296 #endif
10297
10298                 /*
10299                  * Perform validation of MST topology in the state:
10300                  * We need to perform MST atomic check before calling
10301                  * dc_validate_global_state(), or there is a chance
10302                  * to get stuck in an infinite loop and hang eventually.
10303                  */
10304                 ret = drm_dp_mst_atomic_check(state);
10305                 if (ret)
10306                         goto fail;
10307                 status = dc_validate_global_state(dc, dm_state->context, false);
10308                 if (status != DC_OK) {
10309                         DC_LOG_WARNING("DC global validation failure: %s (%d)",
10310                                        dc_status_to_str(status), status);
10311                         ret = -EINVAL;
10312                         goto fail;
10313                 }
10314         } else {
10315                 /*
10316                  * The commit is a fast update. Fast updates shouldn't change
10317                  * the DC context, affect global validation, and can have their
10318                  * commit work done in parallel with other commits not touching
10319                  * the same resource. If we have a new DC context as part of
10320                  * the DM atomic state from validation we need to free it and
10321                  * retain the existing one instead.
10322                  *
10323                  * Furthermore, since the DM atomic state only contains the DC
10324                  * context and can safely be annulled, we can free the state
10325                  * and clear the associated private object now to free
10326                  * some memory and avoid a possible use-after-free later.
10327                  */
10328
10329                 for (i = 0; i < state->num_private_objs; i++) {
10330                         struct drm_private_obj *obj = state->private_objs[i].ptr;
10331
10332                         if (obj->funcs == adev->dm.atomic_obj.funcs) {
10333                                 int j = state->num_private_objs-1;
10334
10335                                 dm_atomic_destroy_state(obj,
10336                                                 state->private_objs[i].state);
10337
10338                                 /* If i is not at the end of the array then the
10339                                  * last element needs to be moved to where i was
10340                                  * before the array can safely be truncated.
10341                                  */
10342                                 if (i != j)
10343                                         state->private_objs[i] =
10344                                                 state->private_objs[j];
10345
10346                                 state->private_objs[j].ptr = NULL;
10347                                 state->private_objs[j].state = NULL;
10348                                 state->private_objs[j].old_state = NULL;
10349                                 state->private_objs[j].new_state = NULL;
10350
10351                                 state->num_private_objs = j;
10352                                 break;
10353                         }
10354                 }
10355         }
10356
10357         /* Store the overall update type for use later in atomic check. */
10358         for_each_new_crtc_in_state(state, crtc, new_crtc_state, i) {
10359                 struct dm_crtc_state *dm_new_crtc_state =
10360                         to_dm_crtc_state(new_crtc_state);
10361
10362                 dm_new_crtc_state->update_type = lock_and_validation_needed ?
10363                                                          UPDATE_TYPE_FULL :
10364                                                          UPDATE_TYPE_FAST;
10365         }
10366
10367         /* Must be success */
10368         WARN_ON(ret);
10369
10370         trace_amdgpu_dm_atomic_check_finish(state, ret);
10371
10372         return ret;
10373
10374 fail:
10375         if (ret == -EDEADLK)
10376                 DRM_DEBUG_DRIVER("Atomic check stopped to avoid deadlock.\n");
10377         else if (ret == -EINTR || ret == -EAGAIN || ret == -ERESTARTSYS)
10378                 DRM_DEBUG_DRIVER("Atomic check stopped due to signal.\n");
10379         else
10380                 DRM_DEBUG_DRIVER("Atomic check failed with err: %d \n", ret);
10381
10382         trace_amdgpu_dm_atomic_check_finish(state, ret);
10383
10384         return ret;
10385 }
10386
10387 static bool is_dp_capable_without_timing_msa(struct dc *dc,
10388                                              struct amdgpu_dm_connector *amdgpu_dm_connector)
10389 {
10390         uint8_t dpcd_data;
10391         bool capable = false;
10392
10393         if (amdgpu_dm_connector->dc_link &&
10394                 dm_helpers_dp_read_dpcd(
10395                                 NULL,
10396                                 amdgpu_dm_connector->dc_link,
10397                                 DP_DOWN_STREAM_PORT_COUNT,
10398                                 &dpcd_data,
10399                                 sizeof(dpcd_data))) {
10400                 capable = (dpcd_data & DP_MSA_TIMING_PAR_IGNORED) ? true : false;
10401         }
10402
10403         return capable;
10404 }
10405
10406 static bool parse_edid_cea(struct amdgpu_dm_connector *aconnector,
10407                 uint8_t *edid_ext, int len,
10408                 struct amdgpu_hdmi_vsdb_info *vsdb_info)
10409 {
10410         int i;
10411         struct amdgpu_device *adev = drm_to_adev(aconnector->base.dev);
10412         struct dc *dc = adev->dm.dc;
10413
10414         /* send extension block to DMCU for parsing */
10415         for (i = 0; i < len; i += 8) {
10416                 bool res;
10417                 int offset;
10418
10419                 /* send 8 bytes at a time */
10420                 if (!dc_edid_parser_send_cea(dc, i, len, &edid_ext[i], 8))
10421                         return false;
10422
10423                 if (i + 8 == len) {
10424                         /* entire EDID block sent; expect the parse result */
10425                         int version, min_rate, max_rate;
10426
10427                         res = dc_edid_parser_recv_amd_vsdb(dc, &version, &min_rate, &max_rate);
10428                         if (res) {
10429                                 /* amd vsdb found */
10430                                 vsdb_info->freesync_supported = 1;
10431                                 vsdb_info->amd_vsdb_version = version;
10432                                 vsdb_info->min_refresh_rate_hz = min_rate;
10433                                 vsdb_info->max_refresh_rate_hz = max_rate;
10434                                 return true;
10435                         }
10436                         /* not amd vsdb */
10437                         return false;
10438                 }
10439
10440                 /* check for ack */
10441                 res = dc_edid_parser_recv_cea_ack(dc, &offset);
10442                 if (!res)
10443                         return false;
10444         }
10445
10446         return false;
10447 }
10448
10449 static int parse_hdmi_amd_vsdb(struct amdgpu_dm_connector *aconnector,
10450                 struct edid *edid, struct amdgpu_hdmi_vsdb_info *vsdb_info)
10451 {
10452         uint8_t *edid_ext = NULL;
10453         int i;
10454         bool valid_vsdb_found = false;
10455
10456         /*----- drm_find_cea_extension() -----*/
10457         /* No EDID or EDID extensions */
10458         if (edid == NULL || edid->extensions == 0)
10459                 return -ENODEV;
10460
10461         /* Find CEA extension */
10462         for (i = 0; i < edid->extensions; i++) {
10463                 edid_ext = (uint8_t *)edid + EDID_LENGTH * (i + 1);
10464                 if (edid_ext[0] == CEA_EXT)
10465                         break;
10466         }
10467
10468         if (i == edid->extensions)
10469                 return -ENODEV;
10470
10471         /*----- cea_db_offsets() -----*/
10472         if (edid_ext[0] != CEA_EXT)
10473                 return -ENODEV;
10474
10475         valid_vsdb_found = parse_edid_cea(aconnector, edid_ext, EDID_LENGTH, vsdb_info);
10476
10477         return valid_vsdb_found ? i : -ENODEV;
10478 }
10479
10480 void amdgpu_dm_update_freesync_caps(struct drm_connector *connector,
10481                                         struct edid *edid)
10482 {
10483         int i = 0;
10484         struct detailed_timing *timing;
10485         struct detailed_non_pixel *data;
10486         struct detailed_data_monitor_range *range;
10487         struct amdgpu_dm_connector *amdgpu_dm_connector =
10488                         to_amdgpu_dm_connector(connector);
10489         struct dm_connector_state *dm_con_state = NULL;
10490
10491         struct drm_device *dev = connector->dev;
10492         struct amdgpu_device *adev = drm_to_adev(dev);
10493         bool freesync_capable = false;
10494         struct amdgpu_hdmi_vsdb_info vsdb_info = {0};
10495
10496         if (!connector->state) {
10497                 DRM_ERROR("%s - Connector has no state", __func__);
10498                 goto update;
10499         }
10500
10501         if (!edid) {
10502                 dm_con_state = to_dm_connector_state(connector->state);
10503
10504                 amdgpu_dm_connector->min_vfreq = 0;
10505                 amdgpu_dm_connector->max_vfreq = 0;
10506                 amdgpu_dm_connector->pixel_clock_mhz = 0;
10507
10508                 goto update;
10509         }
10510
10511         dm_con_state = to_dm_connector_state(connector->state);
10512
10513         if (!amdgpu_dm_connector->dc_sink) {
10514                 DRM_ERROR("dc_sink NULL, could not add free_sync module.\n");
10515                 goto update;
10516         }
10517         if (!adev->dm.freesync_module)
10518                 goto update;
10519
10521         if (amdgpu_dm_connector->dc_sink->sink_signal == SIGNAL_TYPE_DISPLAY_PORT ||
10522             amdgpu_dm_connector->dc_sink->sink_signal == SIGNAL_TYPE_EDP) {
10523                 bool edid_check_required = false;
10524
10525                 if (edid) {
10526                         edid_check_required = is_dp_capable_without_timing_msa(
10527                                                 adev->dm.dc,
10528                                                 amdgpu_dm_connector);
10529                 }
10530
10531                 if (edid_check_required == true && (edid->version > 1 ||
10532                    (edid->version == 1 && edid->revision > 1))) {
10533                         for (i = 0; i < 4; i++) {
10534
10535                                 timing  = &edid->detailed_timings[i];
10536                                 data    = &timing->data.other_data;
10537                                 range   = &data->data.range;
10538                                 /*
10539                                  * Check if monitor has continuous frequency mode
10540                                  */
10541                                 if (data->type != EDID_DETAIL_MONITOR_RANGE)
10542                                         continue;
10543                                 /*
10544                                  * Check for flag range limits only. If flag == 1 then
10545                                  * no additional timing information provided.
10546                                  * Default GTF, GTF Secondary curve and CVT are not
10547                                  * supported
10548                                  */
10549                                 if (range->flags != 1)
10550                                         continue;
10551
10552                                 amdgpu_dm_connector->min_vfreq = range->min_vfreq;
10553                                 amdgpu_dm_connector->max_vfreq = range->max_vfreq;
10554                                 amdgpu_dm_connector->pixel_clock_mhz =
10555                                         range->pixel_clock_mhz * 10;
10556
10557                                 connector->display_info.monitor_range.min_vfreq = range->min_vfreq;
10558                                 connector->display_info.monitor_range.max_vfreq = range->max_vfreq;
10559
10560                                 break;
10561                         }
10562
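			/*
			 * Only advertise FreeSync when the usable range is
			 * wider than 10 Hz; e.g. a 40-75 Hz monitor range
			 * qualifies, while an effectively fixed 59-61 Hz
			 * range does not.
			 */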
			if (amdgpu_dm_connector->max_vfreq -
			    amdgpu_dm_connector->min_vfreq > 10)
				freesync_capable = true;
		}
	} else if (edid && amdgpu_dm_connector->dc_sink->sink_signal == SIGNAL_TYPE_HDMI_TYPE_A) {
		i = parse_hdmi_amd_vsdb(amdgpu_dm_connector, edid, &vsdb_info);
		if (i >= 0 && vsdb_info.freesync_supported) {
			amdgpu_dm_connector->min_vfreq = vsdb_info.min_refresh_rate_hz;
			amdgpu_dm_connector->max_vfreq = vsdb_info.max_refresh_rate_hz;
			if (amdgpu_dm_connector->max_vfreq - amdgpu_dm_connector->min_vfreq > 10)
				freesync_capable = true;

			connector->display_info.monitor_range.min_vfreq = vsdb_info.min_refresh_rate_hz;
			connector->display_info.monitor_range.max_vfreq = vsdb_info.max_refresh_rate_hz;
		}
	}

update:
	if (dm_con_state)
		dm_con_state->freesync_capable = freesync_capable;

	if (connector->vrr_capable_property)
		drm_connector_set_vrr_capable_property(connector,
						       freesync_capable);
}

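/*
 * amdgpu_dm_set_psr_caps() - read the sink's PSR capability over DPCD
 * @link: link to query; only eDP links are considered
 *
 * Reads DP_PSR_SUPPORT from the sink and records the reported PSR version;
 * a version of 0 marks PSR as unsupported on this link.
 */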
static void amdgpu_dm_set_psr_caps(struct dc_link *link)
{
	uint8_t dpcd_data[EDP_PSR_RECEIVER_CAP_SIZE];

	if (!(link->connector_signal & SIGNAL_TYPE_EDP))
		return;
	if (link->type == dc_connection_none)
		return;
	if (dm_helpers_dp_read_dpcd(NULL, link, DP_PSR_SUPPORT,
					dpcd_data, sizeof(dpcd_data))) {
		link->dpcd_caps.psr_caps.psr_version = dpcd_data[0];

		if (dpcd_data[0] == 0) {
			link->psr_settings.psr_version = DC_PSR_VERSION_UNSUPPORTED;
			link->psr_settings.psr_feature_enabled = false;
		} else {
			link->psr_settings.psr_version = DC_PSR_VERSION_1;
			link->psr_settings.psr_feature_enabled = true;
		}

		DRM_INFO("PSR support:%d\n", link->psr_settings.psr_feature_enabled);
	}
}

/*
 * amdgpu_dm_link_setup_psr() - configure psr link
 * @stream: stream state
 *
 * Return: true if success
 */
static bool amdgpu_dm_link_setup_psr(struct dc_stream_state *stream)
{
	struct dc_link *link = NULL;
	struct psr_config psr_config = {0};
	struct psr_context psr_context = {0};
	bool ret = false;

	if (stream == NULL)
		return false;

	link = stream->link;

	psr_config.psr_version = link->dpcd_caps.psr_caps.psr_version;

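	/*
	 * A psr_version of 0 (set in amdgpu_dm_set_psr_caps() when the sink
	 * reports no DPCD PSR support) means there is nothing to configure.
	 */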
	if (psr_config.psr_version > 0) {
		psr_config.psr_exit_link_training_required = 0x1;
		psr_config.psr_frame_capture_indication_req = 0;
		psr_config.psr_rfb_setup_time = 0x37;
		psr_config.psr_sdp_transmit_line_num_deadline = 0x20;
		psr_config.allow_smu_optimizations = 0x0;

		ret = dc_link_setup_psr(link, stream, &psr_config, &psr_context);
	}
	DRM_DEBUG_DRIVER("PSR link: %d\n", link->psr_settings.psr_feature_enabled);

	return ret;
}

/*
 * amdgpu_dm_psr_enable() - enable psr f/w
 * @stream: stream state
 *
 * Return: true if success
 */
bool amdgpu_dm_psr_enable(struct dc_stream_state *stream)
{
	struct dc_link *link = stream->link;
	unsigned int vsync_rate_hz = 0;
	struct dc_static_screen_params params = {0};
	/*
	 * Calculate the number of static frames before generating an
	 * interrupt to enter PSR; start from a fail-safe default of
	 * 2 static frames.
	 */
	unsigned int num_frames_static = 2;

	DRM_DEBUG_DRIVER("Enabling psr...\n");

	vsync_rate_hz = div64_u64(div64_u64(((u64)stream->timing.pix_clk_100hz * 100),
					    stream->timing.v_total),
				  stream->timing.h_total);

	/*
	 * Round up: calculate the number of frames such that at least
	 * 30 ms of time has passed.
	 */
	if (vsync_rate_hz != 0) {
		unsigned int frame_time_microsec = 1000000 / vsync_rate_hz;

		num_frames_static = (30000 / frame_time_microsec) + 1;
	}
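	/*
	 * Example: a 60 Hz mode yields frame_time_microsec = 16666, so
	 * num_frames_static = (30000 / 16666) + 1 = 2 frames (~33 ms).
	 */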

	params.triggers.cursor_update = true;
	params.triggers.overlay_update = true;
	params.triggers.surface_update = true;
	params.num_frames = num_frames_static;

	dc_stream_set_static_screen_params(link->ctx->dc,
					   &stream, 1,
					   &params);

	return dc_link_set_psr_allow_active(link, true, false, false);
}

/*
 * amdgpu_dm_psr_disable() - disable psr f/w
 * @stream: stream state
 *
 * Return: true if success
 */
static bool amdgpu_dm_psr_disable(struct dc_stream_state *stream)
{
	DRM_DEBUG_DRIVER("Disabling psr...\n");

	return dc_link_set_psr_allow_active(stream->link, false, true, false);
}

/*
 * amdgpu_dm_psr_disable_all() - disable psr f/w
 * if psr is enabled on any stream
 *
 * Return: true if success
 */
static bool amdgpu_dm_psr_disable_all(struct amdgpu_display_manager *dm)
{
	DRM_DEBUG_DRIVER("Disabling psr if psr is enabled on any stream\n");
	return dc_set_psr_allow_active(dm->dc, false);
}

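/*
 * amdgpu_dm_trigger_timing_sync() - propagate the force_timing_sync debug
 * setting to every active stream and retrigger CRTC synchronization, all
 * under the DC lock.
 */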
void amdgpu_dm_trigger_timing_sync(struct drm_device *dev)
{
	struct amdgpu_device *adev = drm_to_adev(dev);
	struct dc *dc = adev->dm.dc;
	int i;

	mutex_lock(&adev->dm.dc_lock);
	if (dc->current_state) {
		for (i = 0; i < dc->current_state->stream_count; ++i)
			dc->current_state->streams[i]
				->triggered_crtc_reset.enabled =
				adev->dm.force_timing_sync;

		dm_enable_per_frame_crtc_master_sync(dc->current_state);
		dc_trigger_sync(dc, dc->current_state);
	}
	mutex_unlock(&adev->dm.dc_lock);
}

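/*
 * Register access helpers: reads and writes go through CGS and are mirrored
 * to the amdgpu_dc_rreg/amdgpu_dc_wreg tracepoints. When DM_CHECK_ADDR_0 is
 * defined, accesses to register offset 0 are rejected as likely bugs.
 */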
void dm_write_reg_func(const struct dc_context *ctx, uint32_t address,
		       uint32_t value, const char *func_name)
{
#ifdef DM_CHECK_ADDR_0
	if (address == 0) {
		DC_ERR("invalid register write. address = 0\n");
		return;
	}
#endif
	cgs_write_register(ctx->cgs_device, address, value);
	trace_amdgpu_dc_wreg(&ctx->perf_trace->write_count, address, value);
}

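/*
 * Reads are additionally rejected (with an assert) while the DMUB
 * register-offload helper is gathering a burst-write sequence, since a
 * direct read cannot be satisfied by the deferred sequence.
 */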
uint32_t dm_read_reg_func(const struct dc_context *ctx, uint32_t address,
			  const char *func_name)
{
	uint32_t value;

#ifdef DM_CHECK_ADDR_0
	if (address == 0) {
		DC_ERR("invalid register read. address = 0\n");
		return 0;
	}
#endif

	if (ctx->dmub_srv &&
	    ctx->dmub_srv->reg_helper_offload.gather_in_progress &&
	    !ctx->dmub_srv->reg_helper_offload.should_burst_write) {
		ASSERT(false);
		return 0;
	}

	value = cgs_read_register(ctx->cgs_device, address);

	trace_amdgpu_dc_rreg(&ctx->perf_trace->read_count, address, value);

	return value;
}

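/*
 * amdgpu_dm_process_dmub_aux_transfer_sync() - synchronous AUX via DMUB
 *
 * Starts an asynchronous AUX transfer through the DMUB firmware and waits
 * up to 10 seconds for the completion signalled by the DMUB notification
 * handler. Returns the reply length, or -1 with *operation_result set to
 * AUX_RET_ERROR_TIMEOUT if the wait times out.
 */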
int amdgpu_dm_process_dmub_aux_transfer_sync(struct dc_context *ctx, unsigned int linkIndex,
				struct aux_payload *payload, enum aux_return_code_type *operation_result)
{
	struct amdgpu_device *adev = ctx->driver_context;
	int ret = 0;

	dc_process_dmub_aux_transfer_async(ctx->dc, linkIndex, payload);
	ret = wait_for_completion_interruptible_timeout(&adev->dm.dmub_aux_transfer_done, 10 * HZ);
	if (ret == 0) {
		*operation_result = AUX_RET_ERROR_TIMEOUT;
		return -1;
	}
	*operation_result = (enum aux_return_code_type)adev->dm.dmub_notify->result;

	if (adev->dm.dmub_notify->result == AUX_RET_SUCCESS) {
		(*payload->reply) = adev->dm.dmub_notify->aux_reply.command;

		/* For the read case, copy the reply data into the payload */
		if (!payload->write && adev->dm.dmub_notify->aux_reply.length &&
		    (*payload->reply == AUX_TRANSACTION_REPLY_AUX_ACK))
			memcpy(payload->data, adev->dm.dmub_notify->aux_reply.data,
			       adev->dm.dmub_notify->aux_reply.length);
	}

	return adev->dm.dmub_notify->aux_reply.length;
}