/* drm/connector: Create a helper to attach the hdr_output_metadata property */
/* Source: platform/kernel/linux-starfive.git — drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c */
1 /*
2  * Copyright 2015 Advanced Micro Devices, Inc.
3  *
4  * Permission is hereby granted, free of charge, to any person obtaining a
5  * copy of this software and associated documentation files (the "Software"),
6  * to deal in the Software without restriction, including without limitation
7  * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8  * and/or sell copies of the Software, and to permit persons to whom the
9  * Software is furnished to do so, subject to the following conditions:
10  *
11  * The above copyright notice and this permission notice shall be included in
12  * all copies or substantial portions of the Software.
13  *
14  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
17  * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18  * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19  * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20  * OTHER DEALINGS IN THE SOFTWARE.
21  *
22  * Authors: AMD
23  *
24  */
25
26 /* The caprices of the preprocessor require that this be declared right here */
27 #define CREATE_TRACE_POINTS
28
29 #include "dm_services_types.h"
30 #include "dc.h"
31 #include "dc/inc/core_types.h"
32 #include "dal_asic_id.h"
33 #include "dmub/dmub_srv.h"
34 #include "dc/inc/hw/dmcu.h"
35 #include "dc/inc/hw/abm.h"
36 #include "dc/dc_dmub_srv.h"
37 #include "dc/dc_edid_parser.h"
38 #include "amdgpu_dm_trace.h"
39
40 #include "vid.h"
41 #include "amdgpu.h"
42 #include "amdgpu_display.h"
43 #include "amdgpu_ucode.h"
44 #include "atom.h"
45 #include "amdgpu_dm.h"
46 #ifdef CONFIG_DRM_AMD_DC_HDCP
47 #include "amdgpu_dm_hdcp.h"
48 #include <drm/drm_hdcp.h>
49 #endif
50 #include "amdgpu_pm.h"
51
52 #include "amd_shared.h"
53 #include "amdgpu_dm_irq.h"
54 #include "dm_helpers.h"
55 #include "amdgpu_dm_mst_types.h"
56 #if defined(CONFIG_DEBUG_FS)
57 #include "amdgpu_dm_debugfs.h"
58 #endif
59
60 #include "ivsrcid/ivsrcid_vislands30.h"
61
62 #include <linux/module.h>
63 #include <linux/moduleparam.h>
64 #include <linux/types.h>
65 #include <linux/pm_runtime.h>
66 #include <linux/pci.h>
67 #include <linux/firmware.h>
68 #include <linux/component.h>
69
70 #include <drm/drm_atomic.h>
71 #include <drm/drm_atomic_uapi.h>
72 #include <drm/drm_atomic_helper.h>
73 #include <drm/drm_dp_mst_helper.h>
74 #include <drm/drm_fb_helper.h>
75 #include <drm/drm_fourcc.h>
76 #include <drm/drm_edid.h>
77 #include <drm/drm_vblank.h>
78 #include <drm/drm_audio_component.h>
79
80 #if defined(CONFIG_DRM_AMD_DC_DCN)
81 #include "ivsrcid/dcn/irqsrcs_dcn_1_0.h"
82
83 #include "dcn/dcn_1_0_offset.h"
84 #include "dcn/dcn_1_0_sh_mask.h"
85 #include "soc15_hw_ip.h"
86 #include "vega10_ip_offset.h"
87
88 #include "soc15_common.h"
89 #endif
90
91 #include "modules/inc/mod_freesync.h"
92 #include "modules/power/power_helpers.h"
93 #include "modules/inc/mod_info_packet.h"
94
95 #define FIRMWARE_RENOIR_DMUB "amdgpu/renoir_dmcub.bin"
96 MODULE_FIRMWARE(FIRMWARE_RENOIR_DMUB);
97 #define FIRMWARE_SIENNA_CICHLID_DMUB "amdgpu/sienna_cichlid_dmcub.bin"
98 MODULE_FIRMWARE(FIRMWARE_SIENNA_CICHLID_DMUB);
99 #define FIRMWARE_NAVY_FLOUNDER_DMUB "amdgpu/navy_flounder_dmcub.bin"
100 MODULE_FIRMWARE(FIRMWARE_NAVY_FLOUNDER_DMUB);
101 #define FIRMWARE_GREEN_SARDINE_DMUB "amdgpu/green_sardine_dmcub.bin"
102 MODULE_FIRMWARE(FIRMWARE_GREEN_SARDINE_DMUB);
103 #define FIRMWARE_VANGOGH_DMUB "amdgpu/vangogh_dmcub.bin"
104 MODULE_FIRMWARE(FIRMWARE_VANGOGH_DMUB);
105 #define FIRMWARE_DIMGREY_CAVEFISH_DMUB "amdgpu/dimgrey_cavefish_dmcub.bin"
106 MODULE_FIRMWARE(FIRMWARE_DIMGREY_CAVEFISH_DMUB);
107
108 #define FIRMWARE_RAVEN_DMCU             "amdgpu/raven_dmcu.bin"
109 MODULE_FIRMWARE(FIRMWARE_RAVEN_DMCU);
110
111 #define FIRMWARE_NAVI12_DMCU            "amdgpu/navi12_dmcu.bin"
112 MODULE_FIRMWARE(FIRMWARE_NAVI12_DMCU);
113
114 /* Number of bytes in PSP header for firmware. */
115 #define PSP_HEADER_BYTES 0x100
116
117 /* Number of bytes in PSP footer for firmware. */
118 #define PSP_FOOTER_BYTES 0x100
119
120 /**
121  * DOC: overview
122  *
123  * The AMDgpu display manager, **amdgpu_dm** (or even simpler,
124  * **dm**) sits between DRM and DC. It acts as a liaison, converting DRM
125  * requests into DC requests, and DC responses into DRM responses.
126  *
127  * The root control structure is &struct amdgpu_display_manager.
128  */
129
130 /* basic init/fini API */
131 static int amdgpu_dm_init(struct amdgpu_device *adev);
132 static void amdgpu_dm_fini(struct amdgpu_device *adev);
133 static bool is_freesync_video_mode(const struct drm_display_mode *mode, struct amdgpu_dm_connector *aconnector);
134
135 static enum drm_mode_subconnector get_subconnector_type(struct dc_link *link)
136 {
137         switch (link->dpcd_caps.dongle_type) {
138         case DISPLAY_DONGLE_NONE:
139                 return DRM_MODE_SUBCONNECTOR_Native;
140         case DISPLAY_DONGLE_DP_VGA_CONVERTER:
141                 return DRM_MODE_SUBCONNECTOR_VGA;
142         case DISPLAY_DONGLE_DP_DVI_CONVERTER:
143         case DISPLAY_DONGLE_DP_DVI_DONGLE:
144                 return DRM_MODE_SUBCONNECTOR_DVID;
145         case DISPLAY_DONGLE_DP_HDMI_CONVERTER:
146         case DISPLAY_DONGLE_DP_HDMI_DONGLE:
147                 return DRM_MODE_SUBCONNECTOR_HDMIA;
148         case DISPLAY_DONGLE_DP_HDMI_MISMATCHED_DONGLE:
149         default:
150                 return DRM_MODE_SUBCONNECTOR_Unknown;
151         }
152 }
153
154 static void update_subconnector_property(struct amdgpu_dm_connector *aconnector)
155 {
156         struct dc_link *link = aconnector->dc_link;
157         struct drm_connector *connector = &aconnector->base;
158         enum drm_mode_subconnector subconnector = DRM_MODE_SUBCONNECTOR_Unknown;
159
160         if (connector->connector_type != DRM_MODE_CONNECTOR_DisplayPort)
161                 return;
162
163         if (aconnector->dc_sink)
164                 subconnector = get_subconnector_type(link);
165
166         drm_object_property_set_value(&connector->base,
167                         connector->dev->mode_config.dp_subconnector_property,
168                         subconnector);
169 }
170
171 /*
172  * initializes drm_device display related structures, based on the information
173  * provided by DAL. The drm strcutures are: drm_crtc, drm_connector,
174  * drm_encoder, drm_mode_config
175  *
176  * Returns 0 on success
177  */
178 static int amdgpu_dm_initialize_drm_device(struct amdgpu_device *adev);
179 /* removes and deallocates the drm structures, created by the above function */
180 static void amdgpu_dm_destroy_drm_device(struct amdgpu_display_manager *dm);
181
182 static int amdgpu_dm_plane_init(struct amdgpu_display_manager *dm,
183                                 struct drm_plane *plane,
184                                 unsigned long possible_crtcs,
185                                 const struct dc_plane_cap *plane_cap);
186 static int amdgpu_dm_crtc_init(struct amdgpu_display_manager *dm,
187                                struct drm_plane *plane,
188                                uint32_t link_index);
189 static int amdgpu_dm_connector_init(struct amdgpu_display_manager *dm,
190                                     struct amdgpu_dm_connector *amdgpu_dm_connector,
191                                     uint32_t link_index,
192                                     struct amdgpu_encoder *amdgpu_encoder);
193 static int amdgpu_dm_encoder_init(struct drm_device *dev,
194                                   struct amdgpu_encoder *aencoder,
195                                   uint32_t link_index);
196
197 static int amdgpu_dm_connector_get_modes(struct drm_connector *connector);
198
199 static void amdgpu_dm_atomic_commit_tail(struct drm_atomic_state *state);
200
201 static int amdgpu_dm_atomic_check(struct drm_device *dev,
202                                   struct drm_atomic_state *state);
203
204 static void handle_cursor_update(struct drm_plane *plane,
205                                  struct drm_plane_state *old_plane_state);
206
207 static void amdgpu_dm_set_psr_caps(struct dc_link *link);
208 static bool amdgpu_dm_psr_enable(struct dc_stream_state *stream);
209 static bool amdgpu_dm_link_setup_psr(struct dc_stream_state *stream);
210 static bool amdgpu_dm_psr_disable(struct dc_stream_state *stream);
211 static bool amdgpu_dm_psr_disable_all(struct amdgpu_display_manager *dm);
212
213 static const struct drm_format_info *
214 amd_get_format_info(const struct drm_mode_fb_cmd2 *cmd);
215
216 static bool
217 is_timing_unchanged_for_freesync(struct drm_crtc_state *old_crtc_state,
218                                  struct drm_crtc_state *new_crtc_state);
219 /*
220  * dm_vblank_get_counter
221  *
222  * @brief
223  * Get counter for number of vertical blanks
224  *
225  * @param
226  * struct amdgpu_device *adev - [in] desired amdgpu device
227  * int disp_idx - [in] which CRTC to get the counter from
228  *
229  * @return
230  * Counter for vertical blanks
231  */
232 static u32 dm_vblank_get_counter(struct amdgpu_device *adev, int crtc)
233 {
234         if (crtc >= adev->mode_info.num_crtc)
235                 return 0;
236         else {
237                 struct amdgpu_crtc *acrtc = adev->mode_info.crtcs[crtc];
238
239                 if (acrtc->dm_irq_params.stream == NULL) {
240                         DRM_ERROR("dc_stream_state is NULL for crtc '%d'!\n",
241                                   crtc);
242                         return 0;
243                 }
244
245                 return dc_stream_get_vblank_counter(acrtc->dm_irq_params.stream);
246         }
247 }
248
249 static int dm_crtc_get_scanoutpos(struct amdgpu_device *adev, int crtc,
250                                   u32 *vbl, u32 *position)
251 {
252         uint32_t v_blank_start, v_blank_end, h_position, v_position;
253
254         if ((crtc < 0) || (crtc >= adev->mode_info.num_crtc))
255                 return -EINVAL;
256         else {
257                 struct amdgpu_crtc *acrtc = adev->mode_info.crtcs[crtc];
258
259                 if (acrtc->dm_irq_params.stream ==  NULL) {
260                         DRM_ERROR("dc_stream_state is NULL for crtc '%d'!\n",
261                                   crtc);
262                         return 0;
263                 }
264
265                 /*
266                  * TODO rework base driver to use values directly.
267                  * for now parse it back into reg-format
268                  */
269                 dc_stream_get_scanoutpos(acrtc->dm_irq_params.stream,
270                                          &v_blank_start,
271                                          &v_blank_end,
272                                          &h_position,
273                                          &v_position);
274
275                 *position = v_position | (h_position << 16);
276                 *vbl = v_blank_start | (v_blank_end << 16);
277         }
278
279         return 0;
280 }
281
/* IP-block .is_idle hook: DM has no idle state to check yet, always idle. */
static bool dm_is_idle(void *handle)
{
	/* XXX todo */
	return true;
}
287
/* IP-block .wait_for_idle hook: nothing to wait on yet, report success. */
static int dm_wait_for_idle(void *handle)
{
	/* XXX todo */
	return 0;
}
293
/* IP-block .check_soft_reset hook: DM never requests a soft reset. */
static bool dm_check_soft_reset(void *handle)
{
	return false;
}
298
/* IP-block .soft_reset hook: no reset procedure implemented yet. */
static int dm_soft_reset(void *handle)
{
	/* XXX todo */
	return 0;
}
304
305 static struct amdgpu_crtc *
306 get_crtc_by_otg_inst(struct amdgpu_device *adev,
307                      int otg_inst)
308 {
309         struct drm_device *dev = adev_to_drm(adev);
310         struct drm_crtc *crtc;
311         struct amdgpu_crtc *amdgpu_crtc;
312
313         if (otg_inst == -1) {
314                 WARN_ON(1);
315                 return adev->mode_info.crtcs[0];
316         }
317
318         list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
319                 amdgpu_crtc = to_amdgpu_crtc(crtc);
320
321                 if (amdgpu_crtc->otg_inst == otg_inst)
322                         return amdgpu_crtc;
323         }
324
325         return NULL;
326 }
327
328 static inline bool amdgpu_dm_vrr_active_irq(struct amdgpu_crtc *acrtc)
329 {
330         return acrtc->dm_irq_params.freesync_config.state ==
331                        VRR_STATE_ACTIVE_VARIABLE ||
332                acrtc->dm_irq_params.freesync_config.state ==
333                        VRR_STATE_ACTIVE_FIXED;
334 }
335
336 static inline bool amdgpu_dm_vrr_active(struct dm_crtc_state *dm_state)
337 {
338         return dm_state->freesync_config.state == VRR_STATE_ACTIVE_VARIABLE ||
339                dm_state->freesync_config.state == VRR_STATE_ACTIVE_FIXED;
340 }
341
342 static inline bool is_dc_timing_adjust_needed(struct dm_crtc_state *old_state,
343                                               struct dm_crtc_state *new_state)
344 {
345         if (new_state->freesync_config.state ==  VRR_STATE_ACTIVE_FIXED)
346                 return true;
347         else if (amdgpu_dm_vrr_active(old_state) != amdgpu_dm_vrr_active(new_state))
348                 return true;
349         else
350                 return false;
351 }
352
/**
 * dm_pflip_high_irq() - Handle pageflip interrupt
 * @interrupt_params: common_irq_params carrying the amdgpu device and the
 *                    pageflip IRQ source used to look up the CRTC.
 *
 * Handles the pageflip interrupt by notifying all interested parties
 * that the pageflip has been completed.
 */
static void dm_pflip_high_irq(void *interrupt_params)
{
	struct amdgpu_crtc *amdgpu_crtc;
	struct common_irq_params *irq_params = interrupt_params;
	struct amdgpu_device *adev = irq_params->adev;
	unsigned long flags;
	struct drm_pending_vblank_event *e;
	uint32_t vpos, hpos, v_blank_start, v_blank_end;
	bool vrr_active;

	amdgpu_crtc = get_crtc_by_otg_inst(adev, irq_params->irq_src - IRQ_TYPE_PFLIP);

	/* IRQ could occur when in initial stage */
	/* TODO work and BO cleanup */
	if (amdgpu_crtc == NULL) {
		DC_LOG_PFLIP("CRTC is null, returning.\n");
		return;
	}

	/* event_lock protects pflip_status, the pending event and the
	 * vblank_event_list against concurrent commit/vblank paths. */
	spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);

	/* Only a flip in SUBMITTED state may be completed here. */
	if (amdgpu_crtc->pflip_status != AMDGPU_FLIP_SUBMITTED){
		DC_LOG_PFLIP("amdgpu_crtc->pflip_status = %d !=AMDGPU_FLIP_SUBMITTED(%d) on crtc:%d[%p] \n",
						 amdgpu_crtc->pflip_status,
						 AMDGPU_FLIP_SUBMITTED,
						 amdgpu_crtc->crtc_id,
						 amdgpu_crtc);
		spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
		return;
	}

	/* page flip completed. */
	e = amdgpu_crtc->event;
	amdgpu_crtc->event = NULL;

	/* A submitted flip should always carry a pending event. */
	if (!e)
		WARN_ON(1);

	vrr_active = amdgpu_dm_vrr_active_irq(amdgpu_crtc);

	/* Fixed refresh rate, or VRR scanout position outside front-porch? */
	if (!vrr_active ||
	    !dc_stream_get_scanoutpos(amdgpu_crtc->dm_irq_params.stream, &v_blank_start,
				      &v_blank_end, &hpos, &vpos) ||
	    (vpos < v_blank_start)) {
		/* Update to correct count and vblank timestamp if racing with
		 * vblank irq. This also updates to the correct vblank timestamp
		 * even in VRR mode, as scanout is past the front-porch atm.
		 */
		drm_crtc_accurate_vblank_count(&amdgpu_crtc->base);

		/* Wake up userspace by sending the pageflip event with proper
		 * count and timestamp of vblank of flip completion.
		 */
		if (e) {
			drm_crtc_send_vblank_event(&amdgpu_crtc->base, e);

			/* Event sent, so done with vblank for this flip */
			drm_crtc_vblank_put(&amdgpu_crtc->base);
		}
	} else if (e) {
		/* VRR active and inside front-porch: vblank count and
		 * timestamp for pageflip event will only be up to date after
		 * drm_crtc_handle_vblank() has been executed from late vblank
		 * irq handler after start of back-porch (vline 0). We queue the
		 * pageflip event for send-out by drm_crtc_handle_vblank() with
		 * updated timestamp and count, once it runs after us.
		 *
		 * We need to open-code this instead of using the helper
		 * drm_crtc_arm_vblank_event(), as that helper would
		 * call drm_crtc_accurate_vblank_count(), which we must
		 * not call in VRR mode while we are in front-porch!
		 */

		/* sequence will be replaced by real count during send-out. */
		e->sequence = drm_crtc_vblank_count(&amdgpu_crtc->base);
		e->pipe = amdgpu_crtc->crtc_id;

		list_add_tail(&e->base.link, &adev_to_drm(adev)->vblank_event_list);
		e = NULL;
	}

	/* Keep track of vblank of this flip for flip throttling. We use the
	 * cooked hw counter, as that one incremented at start of this vblank
	 * of pageflip completion, so last_flip_vblank is the forbidden count
	 * for queueing new pageflips if vsync + VRR is enabled.
	 */
	amdgpu_crtc->dm_irq_params.last_flip_vblank =
		amdgpu_get_vblank_counter_kms(&amdgpu_crtc->base);

	amdgpu_crtc->pflip_status = AMDGPU_FLIP_NONE;
	spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);

	DC_LOG_PFLIP("crtc:%d[%p], pflip_stat:AMDGPU_FLIP_NONE, vrr[%d]-fp %d\n",
		     amdgpu_crtc->crtc_id, amdgpu_crtc,
		     vrr_active, (int) !e);
}
457
/**
 * dm_vupdate_high_irq() - Handle VUPDATE interrupt (after end of front-porch)
 * @interrupt_params: common_irq_params carrying the amdgpu device and the
 *                    vupdate IRQ source used to look up the CRTC.
 *
 * Tracks the measured per-frame refresh rate and, when VRR is active,
 * performs the core vblank handling here (timestamps are only valid after
 * front-porch in VRR mode) plus BTR processing for pre-DCE12 ASICs.
 */
static void dm_vupdate_high_irq(void *interrupt_params)
{
	struct common_irq_params *irq_params = interrupt_params;
	struct amdgpu_device *adev = irq_params->adev;
	struct amdgpu_crtc *acrtc;
	struct drm_device *drm_dev;
	struct drm_vblank_crtc *vblank;
	ktime_t frame_duration_ns, previous_timestamp;
	unsigned long flags;
	int vrr_active;

	acrtc = get_crtc_by_otg_inst(adev, irq_params->irq_src - IRQ_TYPE_VUPDATE);

	if (acrtc) {
		vrr_active = amdgpu_dm_vrr_active_irq(acrtc);
		drm_dev = acrtc->base.dev;
		vblank = &drm_dev->vblank[acrtc->base.index];
		previous_timestamp = atomic64_read(&irq_params->previous_timestamp);
		frame_duration_ns = vblank->time - previous_timestamp;

		/* Trace the observed refresh rate derived from the time
		 * between the last two vblank timestamps. */
		if (frame_duration_ns > 0) {
			trace_amdgpu_refresh_rate_track(acrtc->base.index,
						frame_duration_ns,
						ktime_divns(NSEC_PER_SEC, frame_duration_ns));
			atomic64_set(&irq_params->previous_timestamp, vblank->time);
		}

		DC_LOG_VBLANK("crtc:%d, vupdate-vrr:%d\n",
			      acrtc->crtc_id,
			      vrr_active);

		/* Core vblank handling is done here after end of front-porch in
		 * vrr mode, as vblank timestamping will give valid results
		 * while now done after front-porch. This will also deliver
		 * page-flip completion events that have been queued to us
		 * if a pageflip happened inside front-porch.
		 */
		if (vrr_active) {
			drm_crtc_handle_vblank(&acrtc->base);

			/* BTR processing for pre-DCE12 ASICs */
			if (acrtc->dm_irq_params.stream &&
			    adev->family < AMDGPU_FAMILY_AI) {
				spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);
				mod_freesync_handle_v_update(
				    adev->dm.freesync_module,
				    acrtc->dm_irq_params.stream,
				    &acrtc->dm_irq_params.vrr_params);

				dc_stream_adjust_vmin_vmax(
				    adev->dm.dc,
				    acrtc->dm_irq_params.stream,
				    &acrtc->dm_irq_params.vrr_params.adjust);
				spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
			}
		}
	}
}
516
/**
 * dm_crtc_high_irq() - Handles CRTC interrupt
 * @interrupt_params: used for determining the CRTC instance
 *
 * Handles the CRTC/VSYNC interrupt by notifying DRM's VBLANK
 * event handler.
 */
static void dm_crtc_high_irq(void *interrupt_params)
{
	struct common_irq_params *irq_params = interrupt_params;
	struct amdgpu_device *adev = irq_params->adev;
	struct amdgpu_crtc *acrtc;
	unsigned long flags;
	int vrr_active;

	acrtc = get_crtc_by_otg_inst(adev, irq_params->irq_src - IRQ_TYPE_VBLANK);
	if (!acrtc)
		return;

	vrr_active = amdgpu_dm_vrr_active_irq(acrtc);

	DC_LOG_VBLANK("crtc:%d, vupdate-vrr:%d, planes:%d\n", acrtc->crtc_id,
		      vrr_active, acrtc->dm_irq_params.active_planes);

	/**
	 * Core vblank handling at start of front-porch is only possible
	 * in non-vrr mode, as only there vblank timestamping will give
	 * valid results while done in front-porch. Otherwise defer it
	 * to dm_vupdate_high_irq after end of front-porch.
	 */
	if (!vrr_active)
		drm_crtc_handle_vblank(&acrtc->base);

	/**
	 * Following stuff must happen at start of vblank, for crc
	 * computation and below-the-range btr support in vrr mode.
	 */
	amdgpu_dm_crtc_handle_crc_irq(&acrtc->base);

	/* BTR updates need to happen before VUPDATE on Vega and above. */
	if (adev->family < AMDGPU_FAMILY_AI)
		return;

	/* event_lock protects VRR params and the pending pageflip event. */
	spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);

	/* Run BTR/freesync handling here (instead of in VUPDATE) for
	 * Vega and newer, but only while VRR is actively variable. */
	if (acrtc->dm_irq_params.stream &&
	    acrtc->dm_irq_params.vrr_params.supported &&
	    acrtc->dm_irq_params.freesync_config.state ==
		    VRR_STATE_ACTIVE_VARIABLE) {
		mod_freesync_handle_v_update(adev->dm.freesync_module,
					     acrtc->dm_irq_params.stream,
					     &acrtc->dm_irq_params.vrr_params);

		dc_stream_adjust_vmin_vmax(adev->dm.dc, acrtc->dm_irq_params.stream,
					   &acrtc->dm_irq_params.vrr_params.adjust);
	}

	/*
	 * If there aren't any active_planes then DCH HUBP may be clock-gated.
	 * In that case, pageflip completion interrupts won't fire and pageflip
	 * completion events won't get delivered. Prevent this by sending
	 * pending pageflip events from here if a flip is still pending.
	 *
	 * If any planes are enabled, use dm_pflip_high_irq() instead, to
	 * avoid race conditions between flip programming and completion,
	 * which could cause too early flip completion events.
	 */
	if (adev->family >= AMDGPU_FAMILY_RV &&
	    acrtc->pflip_status == AMDGPU_FLIP_SUBMITTED &&
	    acrtc->dm_irq_params.active_planes == 0) {
		if (acrtc->event) {
			drm_crtc_send_vblank_event(&acrtc->base, acrtc->event);
			acrtc->event = NULL;
			drm_crtc_vblank_put(&acrtc->base);
		}
		acrtc->pflip_status = AMDGPU_FLIP_NONE;
	}

	spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
}
597
598 #if defined(CONFIG_DRM_AMD_DC_DCN)
/**
 * dm_dcn_vertical_interrupt0_high_irq() - Handles OTG Vertical interrupt0 for
 * DCN generation ASICs
 * @interrupt_params: interrupt parameters
 *
 * Used to set crc window/read out crc value at vertical line 0 position
 */
#if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
static void dm_dcn_vertical_interrupt0_high_irq(void *interrupt_params)
{
	struct common_irq_params *irq_params = interrupt_params;
	struct amdgpu_device *adev = irq_params->adev;
	struct amdgpu_crtc *acrtc;

	acrtc = get_crtc_by_otg_inst(adev, irq_params->irq_src - IRQ_TYPE_VLINE0);

	if (!acrtc)
		return;

	/* Forward to the secure-display CRC window handler. */
	amdgpu_dm_crtc_handle_crc_window_irq(&acrtc->base);
}
#endif
621 #endif
622
/* IP-block hook: DM exposes no clockgating control; accept and ignore. */
static int dm_set_clockgating_state(void *handle,
		  enum amd_clockgating_state state)
{
	return 0;
}
628
/* IP-block hook: DM exposes no powergating control; accept and ignore. */
static int dm_set_powergating_state(void *handle,
		  enum amd_powergating_state state)
{
	return 0;
}
634
635 /* Prototypes of private functions */
636 static int dm_early_init(void* handle);
637
638 /* Allocate memory for FBC compressed data  */
639 static void amdgpu_dm_fbc_init(struct drm_connector *connector)
640 {
641         struct drm_device *dev = connector->dev;
642         struct amdgpu_device *adev = drm_to_adev(dev);
643         struct dm_compressor_info *compressor = &adev->dm.compressor;
644         struct amdgpu_dm_connector *aconn = to_amdgpu_dm_connector(connector);
645         struct drm_display_mode *mode;
646         unsigned long max_size = 0;
647
648         if (adev->dm.dc->fbc_compressor == NULL)
649                 return;
650
651         if (aconn->dc_link->connector_signal != SIGNAL_TYPE_EDP)
652                 return;
653
654         if (compressor->bo_ptr)
655                 return;
656
657
658         list_for_each_entry(mode, &connector->modes, head) {
659                 if (max_size < mode->htotal * mode->vtotal)
660                         max_size = mode->htotal * mode->vtotal;
661         }
662
663         if (max_size) {
664                 int r = amdgpu_bo_create_kernel(adev, max_size * 4, PAGE_SIZE,
665                             AMDGPU_GEM_DOMAIN_GTT, &compressor->bo_ptr,
666                             &compressor->gpu_addr, &compressor->cpu_addr);
667
668                 if (r)
669                         DRM_ERROR("DM: Failed to initialize FBC\n");
670                 else {
671                         adev->dm.dc->ctx->fbc_gpu_addr = compressor->gpu_addr;
672                         DRM_INFO("DM: FBC alloc %lu\n", max_size*4);
673                 }
674
675         }
676
677 }
678
/*
 * Audio component callback: copy the ELD of the connector whose audio
 * instance matches @port into @buf (at most @max_bytes bytes).
 *
 * Returns the full ELD size in bytes (which may exceed what was copied
 * when @max_bytes is smaller), or 0 if no matching connector was found.
 * *@enabled is set true only when a matching connector exists.
 */
static int amdgpu_dm_audio_component_get_eld(struct device *kdev, int port,
					  int pipe, bool *enabled,
					  unsigned char *buf, int max_bytes)
{
	struct drm_device *dev = dev_get_drvdata(kdev);
	struct amdgpu_device *adev = drm_to_adev(dev);
	struct drm_connector *connector;
	struct drm_connector_list_iter conn_iter;
	struct amdgpu_dm_connector *aconnector;
	int ret = 0;

	*enabled = false;

	/* audio_lock serializes ELD access against hotplug updates. */
	mutex_lock(&adev->dm.audio_lock);

	drm_connector_list_iter_begin(dev, &conn_iter);
	drm_for_each_connector_iter(connector, &conn_iter) {
		aconnector = to_amdgpu_dm_connector(connector);
		if (aconnector->audio_inst != port)
			continue;

		*enabled = true;
		ret = drm_eld_size(connector->eld);
		memcpy(buf, connector->eld, min(max_bytes, ret));

		break;
	}
	drm_connector_list_iter_end(&conn_iter);

	mutex_unlock(&adev->dm.audio_lock);

	DRM_DEBUG_KMS("Get ELD : idx=%d ret=%d en=%d\n", port, ret, *enabled);

	return ret;
}
714
/* Callbacks exposed to the HDA driver through the audio component. */
static const struct drm_audio_component_ops amdgpu_dm_audio_component_ops = {
	.get_eld = amdgpu_dm_audio_component_get_eld,
};
718
719 static int amdgpu_dm_audio_component_bind(struct device *kdev,
720                                        struct device *hda_kdev, void *data)
721 {
722         struct drm_device *dev = dev_get_drvdata(kdev);
723         struct amdgpu_device *adev = drm_to_adev(dev);
724         struct drm_audio_component *acomp = data;
725
726         acomp->ops = &amdgpu_dm_audio_component_ops;
727         acomp->dev = kdev;
728         adev->dm.audio_component = acomp;
729
730         return 0;
731 }
732
733 static void amdgpu_dm_audio_component_unbind(struct device *kdev,
734                                           struct device *hda_kdev, void *data)
735 {
736         struct drm_device *dev = dev_get_drvdata(kdev);
737         struct amdgpu_device *adev = drm_to_adev(dev);
738         struct drm_audio_component *acomp = data;
739
740         acomp->ops = NULL;
741         acomp->dev = NULL;
742         adev->dm.audio_component = NULL;
743 }
744
/* Bind/unbind hooks registered with the component framework (HDA pairing). */
static const struct component_ops amdgpu_dm_audio_component_bind_ops = {
	.bind	= amdgpu_dm_audio_component_bind,
	.unbind = amdgpu_dm_audio_component_unbind,
};
749
750 static int amdgpu_dm_audio_init(struct amdgpu_device *adev)
751 {
752         int i, ret;
753
754         if (!amdgpu_audio)
755                 return 0;
756
757         adev->mode_info.audio.enabled = true;
758
759         adev->mode_info.audio.num_pins = adev->dm.dc->res_pool->audio_count;
760
761         for (i = 0; i < adev->mode_info.audio.num_pins; i++) {
762                 adev->mode_info.audio.pin[i].channels = -1;
763                 adev->mode_info.audio.pin[i].rate = -1;
764                 adev->mode_info.audio.pin[i].bits_per_sample = -1;
765                 adev->mode_info.audio.pin[i].status_bits = 0;
766                 adev->mode_info.audio.pin[i].category_code = 0;
767                 adev->mode_info.audio.pin[i].connected = false;
768                 adev->mode_info.audio.pin[i].id =
769                         adev->dm.dc->res_pool->audios[i]->inst;
770                 adev->mode_info.audio.pin[i].offset = 0;
771         }
772
773         ret = component_add(adev->dev, &amdgpu_dm_audio_component_bind_ops);
774         if (ret < 0)
775                 return ret;
776
777         adev->dm.audio_registered = true;
778
779         return 0;
780 }
781
/*
 * amdgpu_dm_audio_fini - tear down DM audio: unregister the component (if
 * it was registered) and mark audio disabled. Safe to call if audio was
 * never enabled.
 */
782 static void amdgpu_dm_audio_fini(struct amdgpu_device *adev)
783 {
784         if (!amdgpu_audio)
785                 return;
786
787         if (!adev->mode_info.audio.enabled)
788                 return;
789
790         if (adev->dm.audio_registered) {
791                 component_del(adev->dev, &amdgpu_dm_audio_component_bind_ops);
792                 adev->dm.audio_registered = false;
793         }
794
795         /* TODO: Disable audio? */
796
797         adev->mode_info.audio.enabled = false;
798 }
799
/*
 * amdgpu_dm_audio_eld_notify - notify the bound HDA component that the ELD
 * (EDID-Like Data) for @pin changed. No-op if no component is bound or it
 * has no pin_eld_notify hook. The -1 port argument mirrors other users of
 * this callback; exact semantics are defined by the audio side.
 */
800 static  void amdgpu_dm_audio_eld_notify(struct amdgpu_device *adev, int pin)
801 {
802         struct drm_audio_component *acomp = adev->dm.audio_component;
803
804         if (acomp && acomp->audio_ops && acomp->audio_ops->pin_eld_notify) {
805                 DRM_DEBUG_KMS("Notify ELD: %d\n", pin);
806
807                 acomp->audio_ops->pin_eld_notify(acomp->audio_ops->audio_ptr,
808                                                  pin, -1);
809         }
810 }
811
/*
 * dm_dmub_hw_init - bring up the DMUB (Display Micro-controller Unit B)
 * hardware: copy firmware/BIOS sections into the pre-allocated framebuffer
 * regions, program hardware parameters, start the service, wait for the
 * firmware auto-load, and create the DC-side DMUB server.
 *
 * Returns 0 on success or when the ASIC has no DMUB support; -EINVAL on
 * missing prerequisites or DMUB service errors; -ENOMEM if the DC DMUB
 * server allocation fails. The statement order here mirrors the required
 * hardware bring-up sequence — do not reorder.
 */
812 static int dm_dmub_hw_init(struct amdgpu_device *adev)
813 {
814         const struct dmcub_firmware_header_v1_0 *hdr;
815         struct dmub_srv *dmub_srv = adev->dm.dmub_srv;
816         struct dmub_srv_fb_info *fb_info = adev->dm.dmub_fb_info;
817         const struct firmware *dmub_fw = adev->dm.dmub_fw;
818         struct dmcu *dmcu = adev->dm.dc->res_pool->dmcu;
819         struct abm *abm = adev->dm.dc->res_pool->abm;
820         struct dmub_srv_hw_params hw_params;
821         enum dmub_status status;
822         const unsigned char *fw_inst_const, *fw_bss_data;
823         uint32_t i, fw_inst_const_size, fw_bss_data_size;
824         bool has_hw_support;
825
826         if (!dmub_srv)
827                 /* DMUB isn't supported on the ASIC. */
828                 return 0;
829
830         if (!fb_info) {
831                 DRM_ERROR("No framebuffer info for DMUB service.\n");
832                 return -EINVAL;
833         }
834
835         if (!dmub_fw) {
836                 /* Firmware required for DMUB support. */
837                 DRM_ERROR("No firmware provided for DMUB.\n");
838                 return -EINVAL;
839         }
840
841         status = dmub_srv_has_hw_support(dmub_srv, &has_hw_support);
842         if (status != DMUB_STATUS_OK) {
843                 DRM_ERROR("Error checking HW support for DMUB: %d\n", status);
844                 return -EINVAL;
845         }
846
847         if (!has_hw_support) {
848                 DRM_INFO("DMUB unsupported on ASIC\n");
849                 return 0;
850         }
851
852         hdr = (const struct dmcub_firmware_header_v1_0 *)dmub_fw->data;
853
        /* Instruction/constant section follows the ucode array and PSP header. */
854         fw_inst_const = dmub_fw->data +
855                         le32_to_cpu(hdr->header.ucode_array_offset_bytes) +
856                         PSP_HEADER_BYTES;
857
858         fw_bss_data = dmub_fw->data +
859                       le32_to_cpu(hdr->header.ucode_array_offset_bytes) +
860                       le32_to_cpu(hdr->inst_const_bytes);
861
862         /* Copy firmware and bios info into FB memory. */
863         fw_inst_const_size = le32_to_cpu(hdr->inst_const_bytes) -
864                              PSP_HEADER_BYTES - PSP_FOOTER_BYTES;
865
866         fw_bss_data_size = le32_to_cpu(hdr->bss_data_bytes);
867
868         /* if adev->firmware.load_type == AMDGPU_FW_LOAD_PSP,
869          * amdgpu_ucode_init_single_fw will load dmub firmware
870          * fw_inst_const part to cw0; otherwise, the firmware back door load
871          * will be done by dm_dmub_hw_init
872          */
873         if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP) {
874                 memcpy(fb_info->fb[DMUB_WINDOW_0_INST_CONST].cpu_addr, fw_inst_const,
875                                 fw_inst_const_size);
876         }
877
878         if (fw_bss_data_size)
879                 memcpy(fb_info->fb[DMUB_WINDOW_2_BSS_DATA].cpu_addr,
880                        fw_bss_data, fw_bss_data_size);
881
882         /* Copy firmware bios info into FB memory. */
883         memcpy(fb_info->fb[DMUB_WINDOW_3_VBIOS].cpu_addr, adev->bios,
884                adev->bios_size);
885
886         /* Reset regions that need to be reset. */
887         memset(fb_info->fb[DMUB_WINDOW_4_MAILBOX].cpu_addr, 0,
888         fb_info->fb[DMUB_WINDOW_4_MAILBOX].size);
889
890         memset(fb_info->fb[DMUB_WINDOW_5_TRACEBUFF].cpu_addr, 0,
891                fb_info->fb[DMUB_WINDOW_5_TRACEBUFF].size);
892
893         memset(fb_info->fb[DMUB_WINDOW_6_FW_STATE].cpu_addr, 0,
894                fb_info->fb[DMUB_WINDOW_6_FW_STATE].size);
895
896         /* Initialize hardware. */
897         memset(&hw_params, 0, sizeof(hw_params));
898         hw_params.fb_base = adev->gmc.fb_start;
899         hw_params.fb_offset = adev->gmc.aper_base;
900
901         /* backdoor load firmware and trigger dmub running */
902         if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP)
903                 hw_params.load_inst_const = true;
904
905         if (dmcu)
906                 hw_params.psp_version = dmcu->psp_version;
907
908         for (i = 0; i < fb_info->num_fb; ++i)
909                 hw_params.fb[i] = &fb_info->fb[i];
910
911         status = dmub_srv_hw_init(dmub_srv, &hw_params);
912         if (status != DMUB_STATUS_OK) {
913                 DRM_ERROR("Error initializing DMUB HW: %d\n", status);
914                 return -EINVAL;
915         }
916
        /* Auto-load failure is only a warning: init continues regardless. */
917         /* Wait for firmware load to finish. */
918         status = dmub_srv_wait_for_auto_load(dmub_srv, 100000);
919         if (status != DMUB_STATUS_OK)
920                 DRM_WARN("Wait for DMUB auto-load failed: %d\n", status);
921
922         /* Init DMCU and ABM if available. */
923         if (dmcu && abm) {
924                 dmcu->funcs->dmcu_init(dmcu);
925                 abm->dmcu_is_running = dmcu->funcs->is_dmcu_initialized(dmcu);
926         }
927
928         adev->dm.dc->ctx->dmub_srv = dc_dmub_srv_create(adev->dm.dc, dmub_srv);
929         if (!adev->dm.dc->ctx->dmub_srv) {
930                 DRM_ERROR("Couldn't allocate DC DMUB server!\n");
931                 return -ENOMEM;
932         }
933
934         DRM_INFO("DMUB hardware initialized: version=0x%08X\n",
935                  adev->dm.dmcub_fw_version);
936
937         return 0;
938 }
939
940 #if defined(CONFIG_DRM_AMD_DC_DCN)
941 #define DMUB_TRACE_MAX_READ 64
/*
 * dm_dmub_trace_high_irq - DMUB tracebuffer high-priority IRQ handler.
 *
 * Drains up to DMUB_TRACE_MAX_READ entries from the DMUB outbox0 ring,
 * emitting each as a tracepoint and a debug log line. Stops early when
 * the ring is empty.
 *
 * Fix: the loop previously continued while (count <= DMUB_TRACE_MAX_READ),
 * which allowed one extra (65th) successful read and then tripped the
 * ASSERT(count <= DMUB_TRACE_MAX_READ) below. Using '<' caps the drain at
 * exactly DMUB_TRACE_MAX_READ entries.
 */
942 static void dm_dmub_trace_high_irq(void *interrupt_params)
943 {
944         struct common_irq_params *irq_params = interrupt_params;
945         struct amdgpu_device *adev = irq_params->adev;
946         struct amdgpu_display_manager *dm = &adev->dm;
947         struct dmcub_trace_buf_entry entry = { 0 };
948         uint32_t count = 0;
949
950         do {
951                 if (dc_dmub_srv_get_dmub_outbox0_msg(dm->dc, &entry)) {
952                         trace_amdgpu_dmub_trace_high_irq(entry.trace_code, entry.tick_count,
953                                                         entry.param0, entry.param1);
954
955                         DRM_DEBUG_DRIVER("trace_code:%u, tick_count:%u, param0:%u, param1:%u\n",
956                                  entry.trace_code, entry.tick_count, entry.param0, entry.param1);
957                 } else
958                         break;
959
960                 count++;
961
        } while (count < DMUB_TRACE_MAX_READ);
963
964         ASSERT(count <= DMUB_TRACE_MAX_READ);
965 }
966
/*
 * mmhub_read_system_context - populate @pa_config (the DC physical address
 * space configuration) from the GMC view of FB/AGP apertures and the GART
 * page table.
 *
 * System-aperture bounds are computed in 256 KiB units (>> 18) and AGP
 * bounds in 16 MiB units (>> 24), then shifted back to byte addresses when
 * stored — matching the granularity the values are programmed at.
 */
967 static void mmhub_read_system_context(struct amdgpu_device *adev, struct dc_phy_addr_space_config *pa_config)
968 {
969         uint64_t pt_base;
970         uint32_t logical_addr_low;
971         uint32_t logical_addr_high;
972         uint32_t agp_base, agp_bot, agp_top;
973         PHYSICAL_ADDRESS_LOC page_table_start, page_table_end, page_table_base;
974
975         logical_addr_low  = min(adev->gmc.fb_start, adev->gmc.agp_start) >> 18;
976         pt_base = amdgpu_gmc_pd_addr(adev->gart.bo);
977
978         if (adev->apu_flags & AMD_APU_IS_RAVEN2)
979                 /*
980                  * Raven2 has a HW issue that it is unable to use the vram which
981                  * is out of MC_VM_SYSTEM_APERTURE_HIGH_ADDR. So here is the
982                  * workaround that increase system aperture high address (add 1)
983                  * to get rid of the VM fault and hardware hang.
984                  */
985                 logical_addr_high = max((adev->gmc.fb_end >> 18) + 0x1, adev->gmc.agp_end >> 18);
986         else
987                 logical_addr_high = max(adev->gmc.fb_end, adev->gmc.agp_end) >> 18;
988
989         agp_base = 0;
990         agp_bot = adev->gmc.agp_start >> 24;
991         agp_top = adev->gmc.agp_end >> 24;
992
993
        /* GART page table addresses: split 4 KiB-page-aligned address into
         * low 32 bits and the 4-bit high part (48-bit address space). */
994         page_table_start.high_part = (u32)(adev->gmc.gart_start >> 44) & 0xF;
995         page_table_start.low_part = (u32)(adev->gmc.gart_start >> 12);
996         page_table_end.high_part = (u32)(adev->gmc.gart_end >> 44) & 0xF;
997         page_table_end.low_part = (u32)(adev->gmc.gart_end >> 12);
998         page_table_base.high_part = upper_32_bits(pt_base) & 0xF;
999         page_table_base.low_part = lower_32_bits(pt_base);
1000
1001         pa_config->system_aperture.start_addr = (uint64_t)logical_addr_low << 18;
1002         pa_config->system_aperture.end_addr = (uint64_t)logical_addr_high << 18;
1003
1004         pa_config->system_aperture.agp_base = (uint64_t)agp_base << 24 ;
1005         pa_config->system_aperture.agp_bot = (uint64_t)agp_bot << 24;
1006         pa_config->system_aperture.agp_top = (uint64_t)agp_top << 24;
1007
1008         pa_config->system_aperture.fb_base = adev->gmc.fb_start;
1009         pa_config->system_aperture.fb_offset = adev->gmc.aper_base;
1010         pa_config->system_aperture.fb_top = adev->gmc.fb_end;
1011
1012         pa_config->gart_config.page_table_start_addr = page_table_start.quad_part << 12;
1013         pa_config->gart_config.page_table_end_addr = page_table_end.quad_part << 12;
1014         pa_config->gart_config.page_table_base_addr = page_table_base.quad_part;
1015
1016         pa_config->is_hvm_enabled = 0;
1017
1018 }
1019 #endif
1020 #if defined(CONFIG_DRM_AMD_DC_DCN)
/*
 * event_mall_stutter - deferred work that tracks how many CRTCs currently
 * have vblank IRQs enabled and toggles DC idle optimizations (MALL)
 * accordingly: optimizations are only allowed when no vblank IRQ is active.
 * Runs under dc_lock to serialize with other DC state changes.
 */
1021 static void event_mall_stutter(struct work_struct *work)
1022 {
1023
1024         struct vblank_workqueue *vblank_work = container_of(work, struct vblank_workqueue, mall_work);
1025         struct amdgpu_display_manager *dm = vblank_work->dm;
1026
1027         mutex_lock(&dm->dc_lock);
1028
        /* Guard against underflow when disable events outnumber enables. */
1029         if (vblank_work->enable)
1030                 dm->active_vblank_irq_count++;
1031         else if(dm->active_vblank_irq_count)
1032                 dm->active_vblank_irq_count--;
1033
1034         dc_allow_idle_optimizations(dm->dc, dm->active_vblank_irq_count == 0);
1035
1036         DRM_DEBUG_KMS("Allow idle optimizations (MALL): %d\n", dm->active_vblank_irq_count == 0);
1037
1038         mutex_unlock(&dm->dc_lock);
1039 }
1040
/*
 * vblank_create_workqueue - allocate one vblank work item per DC link and
 * initialize each with the MALL stutter handler.
 *
 * Returns the array (ownership transfers to the caller, freed in
 * amdgpu_dm_fini()), or NULL on allocation failure or when max_links is 0
 * (kcalloc(0, ...) returns the ZERO_SIZE_PTR sentinel, caught by
 * ZERO_OR_NULL_PTR; kfree() is a no-op for both NULL and that sentinel).
 */
1041 static struct vblank_workqueue *vblank_create_workqueue(struct amdgpu_device *adev, struct dc *dc)
1042 {
1043
1044         int max_caps = dc->caps.max_links;
1045         struct vblank_workqueue *vblank_work;
1046         int i = 0;
1047
1048         vblank_work = kcalloc(max_caps, sizeof(*vblank_work), GFP_KERNEL);
1049         if (ZERO_OR_NULL_PTR(vblank_work)) {
1050                 kfree(vblank_work);
1051                 return NULL;
1052         }
1053
1054         for (i = 0; i < max_caps; i++)
1055                 INIT_WORK(&vblank_work[i].mall_work, event_mall_stutter);
1056
1057         return vblank_work;
1058 }
1059 #endif
/*
 * amdgpu_dm_init - top-level Display Manager initialization.
 *
 * Initializes locks and IRQ support, builds the DC init data from the ASIC
 * description, creates the Display Core, brings up DMUB hardware, then
 * creates the freesync module, optional vblank/HDCP work queues, the DRM
 * device state, fake MST encoders, and vblank support — in that order.
 *
 * Returns 0 on success. NOTE(review): every failure path funnels through
 * the 'error' label and returns -EINVAL regardless of the underlying cause
 * (e.g. the -ENOMEM from dm_dmub_hw_init is discarded) — confirm callers
 * don't need the real error code before changing.
 */
1060 static int amdgpu_dm_init(struct amdgpu_device *adev)
1061 {
1062         struct dc_init_data init_data;
1063 #ifdef CONFIG_DRM_AMD_DC_HDCP
1064         struct dc_callback_init init_params;
1065 #endif
1066         int r;
1067
1068         adev->dm.ddev = adev_to_drm(adev);
1069         adev->dm.adev = adev;
1070
1071         /* Zero all the fields */
1072         memset(&init_data, 0, sizeof(init_data));
1073 #ifdef CONFIG_DRM_AMD_DC_HDCP
1074         memset(&init_params, 0, sizeof(init_params));
1075 #endif
1076
1077         mutex_init(&adev->dm.dc_lock);
1078         mutex_init(&adev->dm.audio_lock);
1079 #if defined(CONFIG_DRM_AMD_DC_DCN)
1080         spin_lock_init(&adev->dm.vblank_lock);
1081 #endif
1082
1083         if(amdgpu_dm_irq_init(adev)) {
1084                 DRM_ERROR("amdgpu: failed to initialize DM IRQ support.\n");
1085                 goto error;
1086         }
1087
        /* Describe the ASIC to DC. */
1088         init_data.asic_id.chip_family = adev->family;
1089
1090         init_data.asic_id.pci_revision_id = adev->pdev->revision;
1091         init_data.asic_id.hw_internal_rev = adev->external_rev_id;
1092
1093         init_data.asic_id.vram_width = adev->gmc.vram_width;
1094         /* TODO: initialize init_data.asic_id.vram_type here!!!! */
1095         init_data.asic_id.atombios_base_address =
1096                 adev->mode_info.atom_context->bios;
1097
1098         init_data.driver = adev;
1099
1100         adev->dm.cgs_device = amdgpu_cgs_create_device(adev);
1101
1102         if (!adev->dm.cgs_device) {
1103                 DRM_ERROR("amdgpu: failed to create cgs device.\n");
1104                 goto error;
1105         }
1106
1107         init_data.cgs_device = adev->dm.cgs_device;
1108
1109         init_data.dce_environment = DCE_ENV_PRODUCTION_DRV;
1110
        /* Per-ASIC DC feature flags. */
1111         switch (adev->asic_type) {
1112         case CHIP_CARRIZO:
1113         case CHIP_STONEY:
1114         case CHIP_RAVEN:
1115         case CHIP_RENOIR:
1116                 init_data.flags.gpu_vm_support = true;
1117                 if (ASICREV_IS_GREEN_SARDINE(adev->external_rev_id))
1118                         init_data.flags.disable_dmcu = true;
1119                 break;
1120 #if defined(CONFIG_DRM_AMD_DC_DCN)
1121         case CHIP_VANGOGH:
1122                 init_data.flags.gpu_vm_support = true;
1123                 break;
1124 #endif
1125         default:
1126                 break;
1127         }
1128
        /* Apply user-requested feature-mask overrides. */
1129         if (amdgpu_dc_feature_mask & DC_FBC_MASK)
1130                 init_data.flags.fbc_support = true;
1131
1132         if (amdgpu_dc_feature_mask & DC_MULTI_MON_PP_MCLK_SWITCH_MASK)
1133                 init_data.flags.multi_mon_pp_mclk_switch = true;
1134
1135         if (amdgpu_dc_feature_mask & DC_DISABLE_FRACTIONAL_PWM_MASK)
1136                 init_data.flags.disable_fractional_pwm = true;
1137
1138         init_data.flags.power_down_display_on_boot = true;
1139
1140         INIT_LIST_HEAD(&adev->dm.da_list);
1141         /* Display Core create. */
1142         adev->dm.dc = dc_create(&init_data);
1143
1144         if (adev->dm.dc) {
1145                 DRM_INFO("Display Core initialized with v%s!\n", DC_VER);
1146         } else {
1147                 DRM_INFO("Display Core failed to initialize with v%s!\n", DC_VER);
1148                 goto error;
1149         }
1150
        /* Apply user-requested debug-mask overrides. */
1151         if (amdgpu_dc_debug_mask & DC_DISABLE_PIPE_SPLIT) {
1152                 adev->dm.dc->debug.force_single_disp_pipe_split = false;
1153                 adev->dm.dc->debug.pipe_split_policy = MPC_SPLIT_AVOID;
1154         }
1155
1156         if (adev->asic_type != CHIP_CARRIZO && adev->asic_type != CHIP_STONEY)
1157                 adev->dm.dc->debug.disable_stutter = amdgpu_pp_feature_mask & PP_STUTTER_MODE ? false : true;
1158
1159         if (amdgpu_dc_debug_mask & DC_DISABLE_STUTTER)
1160                 adev->dm.dc->debug.disable_stutter = true;
1161
1162         if (amdgpu_dc_debug_mask & DC_DISABLE_DSC)
1163                 adev->dm.dc->debug.disable_dsc = true;
1164
1165         if (amdgpu_dc_debug_mask & DC_DISABLE_CLOCK_GATING)
1166                 adev->dm.dc->debug.disable_clock_gate = true;
1167
1168         r = dm_dmub_hw_init(adev);
1169         if (r) {
1170                 DRM_ERROR("DMUB interface failed to initialize: status=%d\n", r);
1171                 goto error;
1172         }
1173
1174         dc_hardware_init(adev->dm.dc);
1175
1176 #if defined(CONFIG_DRM_AMD_DC_DCN)
        /* APUs share system memory; hand DC the physical address layout. */
1177         if (adev->apu_flags) {
1178                 struct dc_phy_addr_space_config pa_config;
1179
1180                 mmhub_read_system_context(adev, &pa_config);
1181
1182                 // Call the DC init_memory func
1183                 dc_setup_system_context(adev->dm.dc, &pa_config);
1184         }
1185 #endif
1186
        /* Freesync/HDCP/vblank helpers are optional: failures log but don't abort. */
1187         adev->dm.freesync_module = mod_freesync_create(adev->dm.dc);
1188         if (!adev->dm.freesync_module) {
1189                 DRM_ERROR(
1190                 "amdgpu: failed to initialize freesync_module.\n");
1191         } else
1192                 DRM_DEBUG_DRIVER("amdgpu: freesync_module init done %p.\n",
1193                                 adev->dm.freesync_module);
1194
1195         amdgpu_dm_init_color_mod();
1196
1197 #if defined(CONFIG_DRM_AMD_DC_DCN)
1198         if (adev->dm.dc->caps.max_links > 0) {
1199                 adev->dm.vblank_workqueue = vblank_create_workqueue(adev, adev->dm.dc);
1200
1201                 if (!adev->dm.vblank_workqueue)
1202                         DRM_ERROR("amdgpu: failed to initialize vblank_workqueue.\n");
1203                 else
1204                         DRM_DEBUG_DRIVER("amdgpu: vblank_workqueue init done %p.\n", adev->dm.vblank_workqueue);
1205         }
1206 #endif
1207
1208 #ifdef CONFIG_DRM_AMD_DC_HDCP
1209         if (adev->dm.dc->caps.max_links > 0 && adev->asic_type >= CHIP_RAVEN) {
1210                 adev->dm.hdcp_workqueue = hdcp_create_workqueue(adev, &init_params.cp_psp, adev->dm.dc);
1211
1212                 if (!adev->dm.hdcp_workqueue)
1213                         DRM_ERROR("amdgpu: failed to initialize hdcp_workqueue.\n");
1214                 else
1215                         DRM_DEBUG_DRIVER("amdgpu: hdcp_workqueue init done %p.\n", adev->dm.hdcp_workqueue);
1216
1217                 dc_init_callbacks(adev->dm.dc, &init_params);
1218         }
1219 #endif
1220 #if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
1221         adev->dm.crc_rd_wrk = amdgpu_dm_crtc_secure_display_create_work();
1222 #endif
1223         if (amdgpu_dm_initialize_drm_device(adev)) {
1224                 DRM_ERROR(
1225                 "amdgpu: failed to initialize sw for display support.\n");
1226                 goto error;
1227         }
1228
1229         /* create fake encoders for MST */
1230         dm_dp_create_fake_mst_encoders(adev);
1231
1232         /* TODO: Add_display_info? */
1233
1234         /* TODO use dynamic cursor width */
1235         adev_to_drm(adev)->mode_config.cursor_width = adev->dm.dc->caps.max_cursor_size;
1236         adev_to_drm(adev)->mode_config.cursor_height = adev->dm.dc->caps.max_cursor_size;
1237
1238         if (drm_vblank_init(adev_to_drm(adev), adev->dm.display_indexes_num)) {
1239                 DRM_ERROR(
1240                 "amdgpu: failed to initialize sw for display support.\n");
1241                 goto error;
1242         }
1243
1244
1245         DRM_DEBUG_DRIVER("KMS initialized.\n");
1246
1247         return 0;
1248 error:
        /* amdgpu_dm_fini() tolerates partially-initialized state. */
1249         amdgpu_dm_fini(adev);
1250
1251         return -EINVAL;
1252 }
1253
/*
 * amdgpu_dm_fini - tear down everything amdgpu_dm_init() set up, in
 * roughly reverse order: MST encoders, audio, the DRM device state,
 * optional secure-display/HDCP/vblank workers, the DMUB server and BO,
 * the Display Core itself, the cgs device, and the freesync module.
 * Also invoked on amdgpu_dm_init()'s error path, so most teardown steps
 * are guarded against never-initialized state.
 */
1254 static void amdgpu_dm_fini(struct amdgpu_device *adev)
1255 {
1256         int i;
1257
1258         for (i = 0; i < adev->dm.display_indexes_num; i++) {
1259                 drm_encoder_cleanup(&adev->dm.mst_encoders[i].base);
1260         }
1261
1262         amdgpu_dm_audio_fini(adev);
1263
1264         amdgpu_dm_destroy_drm_device(&adev->dm);
1265
1266 #if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
1267         if (adev->dm.crc_rd_wrk) {
1268                 flush_work(&adev->dm.crc_rd_wrk->notify_ta_work);
1269                 kfree(adev->dm.crc_rd_wrk);
1270                 adev->dm.crc_rd_wrk = NULL;
1271         }
1272 #endif
1273 #ifdef CONFIG_DRM_AMD_DC_HDCP
1274         if (adev->dm.hdcp_workqueue) {
1275                 hdcp_destroy(&adev->dev->kobj, adev->dm.hdcp_workqueue);
1276                 adev->dm.hdcp_workqueue = NULL;
1277         }
1278
1279         if (adev->dm.dc)
1280                 dc_deinit_callbacks(adev->dm.dc);
1281 #endif
1282
1283 #if defined(CONFIG_DRM_AMD_DC_DCN)
1284         if (adev->dm.vblank_workqueue) {
1285                 adev->dm.vblank_workqueue->dm = NULL;
1286                 kfree(adev->dm.vblank_workqueue);
1287                 adev->dm.vblank_workqueue = NULL;
1288         }
1289 #endif
1290
        /*
         * NOTE(review): adev->dm.dc is dereferenced here without a NULL
         * check, although the dc_destroy() call below does guard it. If
         * this function runs on an error path before dc_create() succeeded,
         * this looks like a NULL dereference — confirm and add a guard.
         */
1291         if (adev->dm.dc->ctx->dmub_srv) {
1292                 dc_dmub_srv_destroy(&adev->dm.dc->ctx->dmub_srv);
1293                 adev->dm.dc->ctx->dmub_srv = NULL;
1294         }
1295
1296         if (adev->dm.dmub_bo)
1297                 amdgpu_bo_free_kernel(&adev->dm.dmub_bo,
1298                                       &adev->dm.dmub_bo_gpu_addr,
1299                                       &adev->dm.dmub_bo_cpu_addr);
1300
1301         /* DC Destroy TODO: Replace destroy DAL */
1302         if (adev->dm.dc)
1303                 dc_destroy(&adev->dm.dc);
1304         /*
1305          * TODO: pageflip, vlank interrupt
1306          *
1307          * amdgpu_dm_irq_fini(adev);
1308          */
1309
1310         if (adev->dm.cgs_device) {
1311                 amdgpu_cgs_destroy_device(adev->dm.cgs_device);
1312                 adev->dm.cgs_device = NULL;
1313         }
1314         if (adev->dm.freesync_module) {
1315                 mod_freesync_destroy(adev->dm.freesync_module);
1316                 adev->dm.freesync_module = NULL;
1317         }
1318
1319         mutex_destroy(&adev->dm.audio_lock);
1320         mutex_destroy(&adev->dm.dc_lock);
1321
1322         return;
1323 }
1324
/*
 * load_dmcu_fw - request and register DMCU (Display Micro-Controller Unit)
 * firmware for ASICs that need it, and account its ERAM/INTV sections in
 * the PSP ucode table.
 *
 * Returns 0 when the ASIC needs no DMCU firmware, when PSP loading is not
 * in use, or when the (optional) firmware file is simply absent; -EINVAL
 * for unknown ASICs; otherwise the error from firmware load/validation.
 */
1325 static int load_dmcu_fw(struct amdgpu_device *adev)
1326 {
1327         const char *fw_name_dmcu = NULL;
1328         int r;
1329         const struct dmcu_firmware_header_v1_0 *hdr;
1330
        /* Most ASICs either predate DMCU or embed it elsewhere: return 0. */
1331         switch(adev->asic_type) {
1332 #if defined(CONFIG_DRM_AMD_DC_SI)
1333         case CHIP_TAHITI:
1334         case CHIP_PITCAIRN:
1335         case CHIP_VERDE:
1336         case CHIP_OLAND:
1337 #endif
1338         case CHIP_BONAIRE:
1339         case CHIP_HAWAII:
1340         case CHIP_KAVERI:
1341         case CHIP_KABINI:
1342         case CHIP_MULLINS:
1343         case CHIP_TONGA:
1344         case CHIP_FIJI:
1345         case CHIP_CARRIZO:
1346         case CHIP_STONEY:
1347         case CHIP_POLARIS11:
1348         case CHIP_POLARIS10:
1349         case CHIP_POLARIS12:
1350         case CHIP_VEGAM:
1351         case CHIP_VEGA10:
1352         case CHIP_VEGA12:
1353         case CHIP_VEGA20:
1354         case CHIP_NAVI10:
1355         case CHIP_NAVI14:
1356         case CHIP_RENOIR:
1357         case CHIP_SIENNA_CICHLID:
1358         case CHIP_NAVY_FLOUNDER:
1359         case CHIP_DIMGREY_CAVEFISH:
1360         case CHIP_VANGOGH:
1361                 return 0;
1362         case CHIP_NAVI12:
1363                 fw_name_dmcu = FIRMWARE_NAVI12_DMCU;
1364                 break;
1365         case CHIP_RAVEN:
        /* Picasso and Raven2 share the Raven DMCU image; original Raven
         * revisions don't use one. */
1366                 if (ASICREV_IS_PICASSO(adev->external_rev_id))
1367                         fw_name_dmcu = FIRMWARE_RAVEN_DMCU;
1368                 else if (ASICREV_IS_RAVEN2(adev->external_rev_id))
1369                         fw_name_dmcu = FIRMWARE_RAVEN_DMCU;
1370                 else
1371                         return 0;
1372                 break;
1373         default:
1374                 DRM_ERROR("Unsupported ASIC type: 0x%X\n", adev->asic_type);
1375                 return -EINVAL;
1376         }
1377
1378         if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP) {
1379                 DRM_DEBUG_KMS("dm: DMCU firmware not supported on direct or SMU loading\n");
1380                 return 0;
1381         }
1382
1383         r = request_firmware_direct(&adev->dm.fw_dmcu, fw_name_dmcu, adev->dev);
1384         if (r == -ENOENT) {
1385                 /* DMCU firmware is not necessary, so don't raise a fuss if it's missing */
1386                 DRM_DEBUG_KMS("dm: DMCU firmware not found\n");
1387                 adev->dm.fw_dmcu = NULL;
1388                 return 0;
1389         }
1390         if (r) {
1391                 dev_err(adev->dev, "amdgpu_dm: Can't load firmware \"%s\"\n",
1392                         fw_name_dmcu);
1393                 return r;
1394         }
1395
1396         r = amdgpu_ucode_validate(adev->dm.fw_dmcu);
1397         if (r) {
1398                 dev_err(adev->dev, "amdgpu_dm: Can't validate firmware \"%s\"\n",
1399                         fw_name_dmcu);
1400                 release_firmware(adev->dm.fw_dmcu);
1401                 adev->dm.fw_dmcu = NULL;
1402                 return r;
1403         }
1404
        /* Register the ERAM and INTV sections for PSP to load; fw_size is
         * the cumulative, page-aligned total across all registered ucode. */
1405         hdr = (const struct dmcu_firmware_header_v1_0 *)adev->dm.fw_dmcu->data;
1406         adev->firmware.ucode[AMDGPU_UCODE_ID_DMCU_ERAM].ucode_id = AMDGPU_UCODE_ID_DMCU_ERAM;
1407         adev->firmware.ucode[AMDGPU_UCODE_ID_DMCU_ERAM].fw = adev->dm.fw_dmcu;
1408         adev->firmware.fw_size +=
1409                 ALIGN(le32_to_cpu(hdr->header.ucode_size_bytes) - le32_to_cpu(hdr->intv_size_bytes), PAGE_SIZE);
1410
1411         adev->firmware.ucode[AMDGPU_UCODE_ID_DMCU_INTV].ucode_id = AMDGPU_UCODE_ID_DMCU_INTV;
1412         adev->firmware.ucode[AMDGPU_UCODE_ID_DMCU_INTV].fw = adev->dm.fw_dmcu;
1413         adev->firmware.fw_size +=
1414                 ALIGN(le32_to_cpu(hdr->intv_size_bytes), PAGE_SIZE);
1415
1416         adev->dm.dmcu_fw_version = le32_to_cpu(hdr->header.ucode_version);
1417
1418         DRM_DEBUG_KMS("PSP loading DMCU firmware\n");
1419
1420         return 0;
1421 }
1422
/* Register-read trampoline handed to the DMUB service (ctx is amdgpu_device). */
1423 static uint32_t amdgpu_dm_dmub_reg_read(void *ctx, uint32_t address)
1424 {
1425         struct amdgpu_device *adev = ctx;
1426
1427         return dm_read_reg(adev->dm.dc->ctx, address);
1428 }
1429
/* Register-write trampoline handed to the DMUB service (ctx is amdgpu_device). */
1430 static void amdgpu_dm_dmub_reg_write(void *ctx, uint32_t address,
1431                                      uint32_t value)
1432 {
1433         struct amdgpu_device *adev = ctx;
1434
1435         return dm_write_reg(adev->dm.dc->ctx, address, value);
1436 }
1437
/*
 * dm_dmub_sw_init - software-side DMUB setup: select and load the per-ASIC
 * DMUB firmware, create the DMUB service, size its memory regions, allocate
 * a VRAM buffer for them, and compute the per-region framebuffer info that
 * dm_dmub_hw_init() later consumes.
 *
 * Returns 0 when the ASIC has no DMUB, or when firmware load/validation
 * fails (DMUB is treated as optional at this stage); -ENOMEM / -EINVAL /
 * the amdgpu_bo_create_kernel() error on hard failures.
 *
 * Fix: adev->dm.dmcub_fw_version was previously assigned only *after* the
 * PSP branch that logs "Loading DMUB firmware via PSP: version=0x%08X", so
 * that message printed a stale/zero version. The assignment is now done
 * right after the header is parsed, before any use.
 */
1438 static int dm_dmub_sw_init(struct amdgpu_device *adev)
1439 {
1440         struct dmub_srv_create_params create_params;
1441         struct dmub_srv_region_params region_params;
1442         struct dmub_srv_region_info region_info;
1443         struct dmub_srv_fb_params fb_params;
1444         struct dmub_srv_fb_info *fb_info;
1445         struct dmub_srv *dmub_srv;
1446         const struct dmcub_firmware_header_v1_0 *hdr;
1447         const char *fw_name_dmub;
1448         enum dmub_asic dmub_asic;
1449         enum dmub_status status;
1450         int r;
1451
1452         switch (adev->asic_type) {
1453         case CHIP_RENOIR:
1454                 dmub_asic = DMUB_ASIC_DCN21;
1455                 fw_name_dmub = FIRMWARE_RENOIR_DMUB;
1456                 if (ASICREV_IS_GREEN_SARDINE(adev->external_rev_id))
1457                         fw_name_dmub = FIRMWARE_GREEN_SARDINE_DMUB;
1458                 break;
1459         case CHIP_SIENNA_CICHLID:
1460                 dmub_asic = DMUB_ASIC_DCN30;
1461                 fw_name_dmub = FIRMWARE_SIENNA_CICHLID_DMUB;
1462                 break;
1463         case CHIP_NAVY_FLOUNDER:
1464                 dmub_asic = DMUB_ASIC_DCN30;
1465                 fw_name_dmub = FIRMWARE_NAVY_FLOUNDER_DMUB;
1466                 break;
1467         case CHIP_VANGOGH:
1468                 dmub_asic = DMUB_ASIC_DCN301;
1469                 fw_name_dmub = FIRMWARE_VANGOGH_DMUB;
1470                 break;
1471         case CHIP_DIMGREY_CAVEFISH:
1472                 dmub_asic = DMUB_ASIC_DCN302;
1473                 fw_name_dmub = FIRMWARE_DIMGREY_CAVEFISH_DMUB;
1474                 break;
1475
1476         default:
1477                 /* ASIC doesn't support DMUB. */
1478                 return 0;
1479         }
1480
1481         r = request_firmware_direct(&adev->dm.dmub_fw, fw_name_dmub, adev->dev);
1482         if (r) {
1483                 DRM_ERROR("DMUB firmware loading failed: %d\n", r);
1484                 return 0;
1485         }
1486
1487         r = amdgpu_ucode_validate(adev->dm.dmub_fw);
1488         if (r) {
1489                 DRM_ERROR("Couldn't validate DMUB firmware: %d\n", r);
1490                 return 0;
1491         }
1492
1493         hdr = (const struct dmcub_firmware_header_v1_0 *)adev->dm.dmub_fw->data;
        /* Record the firmware version before it is logged/used below. */
        adev->dm.dmcub_fw_version = le32_to_cpu(hdr->header.ucode_version);
1494
1495         if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) {
1496                 adev->firmware.ucode[AMDGPU_UCODE_ID_DMCUB].ucode_id =
1497                         AMDGPU_UCODE_ID_DMCUB;
1498                 adev->firmware.ucode[AMDGPU_UCODE_ID_DMCUB].fw =
1499                         adev->dm.dmub_fw;
1500                 adev->firmware.fw_size +=
1501                         ALIGN(le32_to_cpu(hdr->inst_const_bytes), PAGE_SIZE);
1502
1503                 DRM_INFO("Loading DMUB firmware via PSP: version=0x%08X\n",
1504                          adev->dm.dmcub_fw_version);
1505         }
1506
1508
1509         adev->dm.dmub_srv = kzalloc(sizeof(*adev->dm.dmub_srv), GFP_KERNEL);
1510         dmub_srv = adev->dm.dmub_srv;
1511
1512         if (!dmub_srv) {
1513                 DRM_ERROR("Failed to allocate DMUB service!\n");
1514                 return -ENOMEM;
1515         }
1516
1517         memset(&create_params, 0, sizeof(create_params));
1518         create_params.user_ctx = adev;
1519         create_params.funcs.reg_read = amdgpu_dm_dmub_reg_read;
1520         create_params.funcs.reg_write = amdgpu_dm_dmub_reg_write;
1521         create_params.asic = dmub_asic;
1522
1523         /* Create the DMUB service. */
1524         status = dmub_srv_create(dmub_srv, &create_params);
1525         if (status != DMUB_STATUS_OK) {
1526                 DRM_ERROR("Error creating DMUB service: %d\n", status);
1527                 return -EINVAL;
1528         }
1529
1530         /* Calculate the size of all the regions for the DMUB service. */
1531         memset(&region_params, 0, sizeof(region_params));
1532
1533         region_params.inst_const_size = le32_to_cpu(hdr->inst_const_bytes) -
1534                                         PSP_HEADER_BYTES - PSP_FOOTER_BYTES;
1535         region_params.bss_data_size = le32_to_cpu(hdr->bss_data_bytes);
1536         region_params.vbios_size = adev->bios_size;
1537         region_params.fw_bss_data = region_params.bss_data_size ?
1538                 adev->dm.dmub_fw->data +
1539                 le32_to_cpu(hdr->header.ucode_array_offset_bytes) +
1540                 le32_to_cpu(hdr->inst_const_bytes) : NULL;
1541         region_params.fw_inst_const =
1542                 adev->dm.dmub_fw->data +
1543                 le32_to_cpu(hdr->header.ucode_array_offset_bytes) +
1544                 PSP_HEADER_BYTES;
1545
1546         status = dmub_srv_calc_region_info(dmub_srv, &region_params,
1547                                            &region_info);
1548
1549         if (status != DMUB_STATUS_OK) {
1550                 DRM_ERROR("Error calculating DMUB region info: %d\n", status);
1551                 return -EINVAL;
1552         }
1553
1554         /*
1555          * Allocate a framebuffer based on the total size of all the regions.
1556          * TODO: Move this into GART.
1557          */
1558         r = amdgpu_bo_create_kernel(adev, region_info.fb_size, PAGE_SIZE,
1559                                     AMDGPU_GEM_DOMAIN_VRAM, &adev->dm.dmub_bo,
1560                                     &adev->dm.dmub_bo_gpu_addr,
1561                                     &adev->dm.dmub_bo_cpu_addr);
1562         if (r)
1563                 return r;
1564
1565         /* Rebase the regions on the framebuffer address. */
1566         memset(&fb_params, 0, sizeof(fb_params));
1567         fb_params.cpu_addr = adev->dm.dmub_bo_cpu_addr;
1568         fb_params.gpu_addr = adev->dm.dmub_bo_gpu_addr;
1569         fb_params.region_info = &region_info;
1570
1571         adev->dm.dmub_fb_info =
1572                 kzalloc(sizeof(*adev->dm.dmub_fb_info), GFP_KERNEL);
1573         fb_info = adev->dm.dmub_fb_info;
1574
1575         if (!fb_info) {
1576                 DRM_ERROR(
1577                         "Failed to allocate framebuffer info for DMUB service!\n");
1578                 return -ENOMEM;
1579         }
1580
1581         status = dmub_srv_calc_fb_info(dmub_srv, &fb_params, fb_info);
1582         if (status != DMUB_STATUS_OK) {
1583                 DRM_ERROR("Error calculating DMUB FB info: %d\n", status);
1584                 return -EINVAL;
1585         }
1586
1587         return 0;
1588 }
1589
/*
 * dm_sw_init() - IP block sw_init hook: set up the DMUB service and load
 * the DMCU firmware.
 * @handle: struct amdgpu_device pointer cast to void.
 *
 * Return: 0 on success, negative errno on failure.
 */
static int dm_sw_init(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	int ret;

	ret = dm_dmub_sw_init(adev);
	if (ret)
		return ret;

	return load_dmcu_fw(adev);
}
1601
1602 static int dm_sw_fini(void *handle)
1603 {
1604         struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1605
1606         kfree(adev->dm.dmub_fb_info);
1607         adev->dm.dmub_fb_info = NULL;
1608
1609         if (adev->dm.dmub_srv) {
1610                 dmub_srv_destroy(adev->dm.dmub_srv);
1611                 adev->dm.dmub_srv = NULL;
1612         }
1613
1614         release_firmware(adev->dm.dmub_fw);
1615         adev->dm.dmub_fw = NULL;
1616
1617         release_firmware(adev->dm.fw_dmcu);
1618         adev->dm.fw_dmcu = NULL;
1619
1620         return 0;
1621 }
1622
1623 static int detect_mst_link_for_all_connectors(struct drm_device *dev)
1624 {
1625         struct amdgpu_dm_connector *aconnector;
1626         struct drm_connector *connector;
1627         struct drm_connector_list_iter iter;
1628         int ret = 0;
1629
1630         drm_connector_list_iter_begin(dev, &iter);
1631         drm_for_each_connector_iter(connector, &iter) {
1632                 aconnector = to_amdgpu_dm_connector(connector);
1633                 if (aconnector->dc_link->type == dc_connection_mst_branch &&
1634                     aconnector->mst_mgr.aux) {
1635                         DRM_DEBUG_DRIVER("DM_MST: starting TM on aconnector: %p [id: %d]\n",
1636                                          aconnector,
1637                                          aconnector->base.base.id);
1638
1639                         ret = drm_dp_mst_topology_mgr_set_mst(&aconnector->mst_mgr, true);
1640                         if (ret < 0) {
1641                                 DRM_ERROR("DM_MST: Failed to start MST\n");
1642                                 aconnector->dc_link->type =
1643                                         dc_connection_single;
1644                                 break;
1645                         }
1646                 }
1647         }
1648         drm_connector_list_iter_end(&iter);
1649
1650         return ret;
1651 }
1652
/*
 * dm_late_init() - IP block late_init hook.
 * @handle: struct amdgpu_device pointer cast to void.
 *
 * Programs the ABM (Adaptive Backlight Management) configuration — either
 * into the DMCU IRAM or, when ABM lives on DMCUB, via the DMUB service —
 * and then kicks off MST topology detection for all connectors.
 *
 * Return: 0 on success, -EINVAL if the ABM configuration could not be
 * loaded, or the error from detect_mst_link_for_all_connectors().
 */
static int dm_late_init(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	struct dmcu_iram_parameters params;
	unsigned int linear_lut[16];
	int i;
	struct dmcu *dmcu = NULL;
	bool ret = true;

	dmcu = adev->dm.dc->res_pool->dmcu;

	/* Identity (linear) backlight transfer LUT over 16 points. */
	for (i = 0; i < 16; i++)
		linear_lut[i] = 0xFFFF * i / 15;

	params.set = 0;
	params.backlight_ramping_start = 0xCCCC;
	params.backlight_ramping_reduction = 0xCCCCCCCC;
	params.backlight_lut_array_size = 16;
	params.backlight_lut_array = linear_lut;

	/* Min backlight level after ABM reduction,  Don't allow below 1%
	 * 0xFFFF x 0.01 = 0x28F
	 */
	params.min_abm_backlight = 0x28F;

	/* In the case where abm is implemented on dmcub,
	 * dmcu object will be null.
	 * ABM 2.4 and up are implemented on dmcub.
	 */
	if (dmcu)
		ret = dmcu_load_iram(dmcu, params);
	else if (adev->dm.dc->ctx->dmub_srv)
		ret = dmub_init_abm_config(adev->dm.dc->res_pool, params);

	if (!ret)
		return -EINVAL;

	return detect_mst_link_for_all_connectors(adev_to_drm(adev));
}
1693
1694 static void s3_handle_mst(struct drm_device *dev, bool suspend)
1695 {
1696         struct amdgpu_dm_connector *aconnector;
1697         struct drm_connector *connector;
1698         struct drm_connector_list_iter iter;
1699         struct drm_dp_mst_topology_mgr *mgr;
1700         int ret;
1701         bool need_hotplug = false;
1702
1703         drm_connector_list_iter_begin(dev, &iter);
1704         drm_for_each_connector_iter(connector, &iter) {
1705                 aconnector = to_amdgpu_dm_connector(connector);
1706                 if (aconnector->dc_link->type != dc_connection_mst_branch ||
1707                     aconnector->mst_port)
1708                         continue;
1709
1710                 mgr = &aconnector->mst_mgr;
1711
1712                 if (suspend) {
1713                         drm_dp_mst_topology_mgr_suspend(mgr);
1714                 } else {
1715                         ret = drm_dp_mst_topology_mgr_resume(mgr, true);
1716                         if (ret < 0) {
1717                                 drm_dp_mst_topology_mgr_set_mst(mgr, false);
1718                                 need_hotplug = true;
1719                         }
1720                 }
1721         }
1722         drm_connector_list_iter_end(&iter);
1723
1724         if (need_hotplug)
1725                 drm_kms_helper_hotplug_event(dev);
1726 }
1727
1728 static int amdgpu_dm_smu_write_watermarks_table(struct amdgpu_device *adev)
1729 {
1730         struct smu_context *smu = &adev->smu;
1731         int ret = 0;
1732
1733         if (!is_support_sw_smu(adev))
1734                 return 0;
1735
1736         /* This interface is for dGPU Navi1x.Linux dc-pplib interface depends
1737          * on window driver dc implementation.
1738          * For Navi1x, clock settings of dcn watermarks are fixed. the settings
1739          * should be passed to smu during boot up and resume from s3.
1740          * boot up: dc calculate dcn watermark clock settings within dc_create,
1741          * dcn20_resource_construct
1742          * then call pplib functions below to pass the settings to smu:
1743          * smu_set_watermarks_for_clock_ranges
1744          * smu_set_watermarks_table
1745          * navi10_set_watermarks_table
1746          * smu_write_watermarks_table
1747          *
1748          * For Renoir, clock settings of dcn watermark are also fixed values.
1749          * dc has implemented different flow for window driver:
1750          * dc_hardware_init / dc_set_power_state
1751          * dcn10_init_hw
1752          * notify_wm_ranges
1753          * set_wm_ranges
1754          * -- Linux
1755          * smu_set_watermarks_for_clock_ranges
1756          * renoir_set_watermarks_table
1757          * smu_write_watermarks_table
1758          *
1759          * For Linux,
1760          * dc_hardware_init -> amdgpu_dm_init
1761          * dc_set_power_state --> dm_resume
1762          *
1763          * therefore, this function apply to navi10/12/14 but not Renoir
1764          * *
1765          */
1766         switch(adev->asic_type) {
1767         case CHIP_NAVI10:
1768         case CHIP_NAVI14:
1769         case CHIP_NAVI12:
1770                 break;
1771         default:
1772                 return 0;
1773         }
1774
1775         ret = smu_write_watermarks_table(smu);
1776         if (ret) {
1777                 DRM_ERROR("Failed to update WMTABLE!\n");
1778                 return ret;
1779         }
1780
1781         return 0;
1782 }
1783
1784 /**
1785  * dm_hw_init() - Initialize DC device
1786  * @handle: The base driver device containing the amdgpu_dm device.
1787  *
1788  * Initialize the &struct amdgpu_display_manager device. This involves calling
1789  * the initializers of each DM component, then populating the struct with them.
1790  *
1791  * Although the function implies hardware initialization, both hardware and
1792  * software are initialized here. Splitting them out to their relevant init
1793  * hooks is a future TODO item.
1794  *
1795  * Some notable things that are initialized here:
1796  *
1797  * - Display Core, both software and hardware
1798  * - DC modules that we need (freesync and color management)
1799  * - DRM software states
1800  * - Interrupt sources and handlers
1801  * - Vblank support
1802  * - Debug FS entries, if enabled
1803  */
static int dm_hw_init(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	/* Create DAL display manager */
	amdgpu_dm_init(adev);
	/* NOTE(review): the result of amdgpu_dm_init() is discarded, so an
	 * init failure is not propagated to the IP-block framework — confirm
	 * whether it should be returned here. */
	amdgpu_dm_hpd_init(adev);

	return 0;
}
1813
1814 /**
1815  * dm_hw_fini() - Teardown DC device
1816  * @handle: The base driver device containing the amdgpu_dm device.
1817  *
1818  * Teardown components within &struct amdgpu_display_manager that require
1819  * cleanup. This involves cleaning up the DRM device, DC, and any modules that
1820  * were loaded. Also flush IRQ workqueues and disable them.
1821  */
static int dm_hw_fini(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	/* Stop hotplug handling first so no new detection work is queued. */
	amdgpu_dm_hpd_fini(adev);

	/* Then tear down IRQ management and the display manager itself. */
	amdgpu_dm_irq_fini(adev);
	amdgpu_dm_fini(adev);
	return 0;
}
1832
1833
1834 static int dm_enable_vblank(struct drm_crtc *crtc);
1835 static void dm_disable_vblank(struct drm_crtc *crtc);
1836
1837 static void dm_gpureset_toggle_interrupts(struct amdgpu_device *adev,
1838                                  struct dc_state *state, bool enable)
1839 {
1840         enum dc_irq_source irq_source;
1841         struct amdgpu_crtc *acrtc;
1842         int rc = -EBUSY;
1843         int i = 0;
1844
1845         for (i = 0; i < state->stream_count; i++) {
1846                 acrtc = get_crtc_by_otg_inst(
1847                                 adev, state->stream_status[i].primary_otg_inst);
1848
1849                 if (acrtc && state->stream_status[i].plane_count != 0) {
1850                         irq_source = IRQ_TYPE_PFLIP + acrtc->otg_inst;
1851                         rc = dc_interrupt_set(adev->dm.dc, irq_source, enable) ? 0 : -EBUSY;
1852                         DRM_DEBUG_VBL("crtc %d - vupdate irq %sabling: r=%d\n",
1853                                       acrtc->crtc_id, enable ? "en" : "dis", rc);
1854                         if (rc)
1855                                 DRM_WARN("Failed to %s pflip interrupts\n",
1856                                          enable ? "enable" : "disable");
1857
1858                         if (enable) {
1859                                 rc = dm_enable_vblank(&acrtc->base);
1860                                 if (rc)
1861                                         DRM_WARN("Failed to enable vblank interrupts\n");
1862                         } else {
1863                                 dm_disable_vblank(&acrtc->base);
1864                         }
1865
1866                 }
1867         }
1868
1869 }
1870
/*
 * amdgpu_dm_commit_zero_streams() - Commit a DC state with all streams
 * removed.
 * @dc: DC instance.
 *
 * Copies the current DC state, detaches every plane from every stream,
 * removes the streams themselves, validates the now-empty state and
 * commits it. Used during GPU reset to quiesce the display pipes.
 *
 * Return: DC_OK on success, or the dc_status of the step that failed
 * (DC_ERROR_UNEXPECTED if state allocation failed).
 */
static enum dc_status amdgpu_dm_commit_zero_streams(struct dc *dc)
{
	struct dc_state *context = NULL;
	enum dc_status res = DC_ERROR_UNEXPECTED;
	int i;
	struct dc_stream_state *del_streams[MAX_PIPES];
	int del_streams_count = 0;

	memset(del_streams, 0, sizeof(del_streams));

	context = dc_create_state(dc);
	if (context == NULL)
		goto context_alloc_fail;

	dc_resource_state_copy_construct_current(dc, context);

	/* First remove from context all streams */
	for (i = 0; i < context->stream_count; i++) {
		struct dc_stream_state *stream = context->streams[i];

		del_streams[del_streams_count++] = stream;
	}

	/* Remove all planes for removed streams and then remove the streams */
	for (i = 0; i < del_streams_count; i++) {
		if (!dc_rem_all_planes_for_stream(dc, del_streams[i], context)) {
			res = DC_FAIL_DETACH_SURFACES;
			goto fail;
		}

		res = dc_remove_stream_from_ctx(dc, context, del_streams[i]);
		if (res != DC_OK)
			goto fail;
	}


	res = dc_validate_global_state(dc, context, false);

	if (res != DC_OK) {
		DRM_ERROR("%s:resource validation failed, dc_status:%d\n", __func__, res);
		goto fail;
	}

	res = dc_commit_state(dc, context);

fail:
	dc_release_state(context);

context_alloc_fail:
	return res;
}
1922
/*
 * dm_suspend() - IP block suspend hook.
 * @handle: struct amdgpu_device pointer cast to void.
 *
 * Two distinct paths:
 *  - GPU reset: snapshot the current DC state, disable interrupts, commit
 *    an empty state and return *with dm->dc_lock still held*. The matching
 *    mutex_unlock() is in dm_resume()'s reset path; the lock is
 *    intentionally held across the whole reset.
 *  - Regular S3: cache the atomic state, suspend MST and IRQs, then put DC
 *    into D3.
 *
 * Return: always 0.
 */
static int dm_suspend(void *handle)
{
	struct amdgpu_device *adev = handle;
	struct amdgpu_display_manager *dm = &adev->dm;
	int ret = 0;

	if (amdgpu_in_reset(adev)) {
		/* Deliberately not unlocked here — see function comment. */
		mutex_lock(&dm->dc_lock);

#if defined(CONFIG_DRM_AMD_DC_DCN)
		dc_allow_idle_optimizations(adev->dm.dc, false);
#endif

		/* Snapshot the live DC state so dm_resume() can replay it. */
		dm->cached_dc_state = dc_copy_state(dm->dc->current_state);

		dm_gpureset_toggle_interrupts(adev, dm->cached_dc_state, false);

		amdgpu_dm_commit_zero_streams(dm->dc);

		amdgpu_dm_irq_suspend(adev);

		return ret;
	}

#ifdef CONFIG_DRM_AMD_SECURE_DISPLAY
	amdgpu_dm_crtc_secure_display_suspend(adev);
#endif
	WARN_ON(adev->dm.cached_state);
	adev->dm.cached_state = drm_atomic_helper_suspend(adev_to_drm(adev));

	s3_handle_mst(adev_to_drm(adev), true);

	amdgpu_dm_irq_suspend(adev);


	dc_set_power_state(dm->dc, DC_ACPI_CM_POWER_STATE_D3);

	return 0;
}
1962
1963 static struct amdgpu_dm_connector *
1964 amdgpu_dm_find_first_crtc_matching_connector(struct drm_atomic_state *state,
1965                                              struct drm_crtc *crtc)
1966 {
1967         uint32_t i;
1968         struct drm_connector_state *new_con_state;
1969         struct drm_connector *connector;
1970         struct drm_crtc *crtc_from_state;
1971
1972         for_each_new_connector_in_state(state, connector, new_con_state, i) {
1973                 crtc_from_state = new_con_state->crtc;
1974
1975                 if (crtc_from_state == crtc)
1976                         return to_amdgpu_dm_connector(connector);
1977         }
1978
1979         return NULL;
1980 }
1981
/*
 * emulated_link_detect() - Fake sink detection for a forced connector.
 * @link: DC link with no physically detected sink.
 *
 * Used when userspace forces a connector on but link detection reported
 * dc_connection_none: create a dc_sink that matches the connector's signal
 * type, attach it to @link and attempt to read a (forced/override) EDID so
 * modes can still be exposed.
 */
static void emulated_link_detect(struct dc_link *link)
{
	struct dc_sink_init_data sink_init_data = { 0 };
	struct display_sink_capability sink_caps = { 0 };
	enum dc_edid_status edid_status;
	struct dc_context *dc_ctx = link->ctx;
	struct dc_sink *sink = NULL;
	struct dc_sink *prev_sink = NULL;

	link->type = dc_connection_none;
	prev_sink = link->local_sink;

	/* Drop the reference on any previously attached sink; local_sink is
	 * replaced below. */
	if (prev_sink)
		dc_sink_release(prev_sink);

	/* Map the connector signal to a DDC transaction type and the signal
	 * the emulated sink should report. */
	switch (link->connector_signal) {
	case SIGNAL_TYPE_HDMI_TYPE_A: {
		sink_caps.transaction_type = DDC_TRANSACTION_TYPE_I2C;
		sink_caps.signal = SIGNAL_TYPE_HDMI_TYPE_A;
		break;
	}

	case SIGNAL_TYPE_DVI_SINGLE_LINK: {
		sink_caps.transaction_type = DDC_TRANSACTION_TYPE_I2C;
		sink_caps.signal = SIGNAL_TYPE_DVI_SINGLE_LINK;
		break;
	}

	case SIGNAL_TYPE_DVI_DUAL_LINK: {
		sink_caps.transaction_type = DDC_TRANSACTION_TYPE_I2C;
		sink_caps.signal = SIGNAL_TYPE_DVI_DUAL_LINK;
		break;
	}

	case SIGNAL_TYPE_LVDS: {
		sink_caps.transaction_type = DDC_TRANSACTION_TYPE_I2C;
		sink_caps.signal = SIGNAL_TYPE_LVDS;
		break;
	}

	case SIGNAL_TYPE_EDP: {
		sink_caps.transaction_type =
			DDC_TRANSACTION_TYPE_I2C_OVER_AUX;
		sink_caps.signal = SIGNAL_TYPE_EDP;
		break;
	}

	case SIGNAL_TYPE_DISPLAY_PORT: {
		/* NOTE(review): DP is deliberately mapped to a VIRTUAL sink
		 * signal here (no real link training possible on an emulated
		 * connection) — confirm this is still the intended behavior. */
		sink_caps.transaction_type =
			DDC_TRANSACTION_TYPE_I2C_OVER_AUX;
		sink_caps.signal = SIGNAL_TYPE_VIRTUAL;
		break;
	}

	default:
		DC_ERROR("Invalid connector type! signal:%d\n",
			link->connector_signal);
		return;
	}

	sink_init_data.link = link;
	sink_init_data.sink_signal = sink_caps.signal;

	sink = dc_sink_create(&sink_init_data);
	if (!sink) {
		DC_ERROR("Failed to create sink!\n");
		return;
	}

	/* dc_sink_create returns a new reference */
	link->local_sink = sink;

	edid_status = dm_helpers_read_local_edid(
			link->ctx,
			link,
			sink);

	if (edid_status != EDID_OK)
		DC_ERROR("Failed to read EDID");

}
2063
/*
 * dm_gpureset_commit_state() - Re-push cached surfaces to DC after a reset.
 * @dc_state: DC state snapshot taken in dm_suspend()'s reset path.
 * @dm: display manager.
 *
 * For each stream in @dc_state, mark every plane for a full update and
 * commit the surface updates so the hardware is reprogrammed from the
 * cached state.
 */
static void dm_gpureset_commit_state(struct dc_state *dc_state,
				     struct amdgpu_display_manager *dm)
{
	/* Scratch bundle is heap-allocated: the arrays are far too large for
	 * the kernel stack. */
	struct {
		struct dc_surface_update surface_updates[MAX_SURFACES];
		struct dc_plane_info plane_infos[MAX_SURFACES];
		struct dc_scaling_info scaling_infos[MAX_SURFACES];
		struct dc_flip_addrs flip_addrs[MAX_SURFACES];
		struct dc_stream_update stream_update;
	} * bundle;
	int k, m;

	bundle = kzalloc(sizeof(*bundle), GFP_KERNEL);

	if (!bundle) {
		dm_error("Failed to allocate update bundle\n");
		goto cleanup;
	}

	for (k = 0; k < dc_state->stream_count; k++) {
		bundle->stream_update.stream = dc_state->streams[k];

		/* NOTE(review): stream_status is dereferenced at index [0]
		 * for every stream k here and below; it looks like this
		 * should be stream_status[k] — confirm against the dc_state
		 * layout before changing. */
		for (m = 0; m < dc_state->stream_status->plane_count; m++) {
			bundle->surface_updates[m].surface =
				dc_state->stream_status->plane_states[m];
			bundle->surface_updates[m].surface->force_full_update =
				true;
		}
		dc_commit_updates_for_stream(
			dm->dc, bundle->surface_updates,
			dc_state->stream_status->plane_count,
			dc_state->streams[k], &bundle->stream_update, dc_state);
	}

cleanup:
	kfree(bundle);

	return;
}
2103
/*
 * dm_set_dpms_off() - Commit a dpms-off stream update for @link.
 * @link: DC link whose active stream should be turned off.
 *
 * Looks up the stream currently driving @link and commits a minimal stream
 * update with dpms_off set. Takes dm->dc_lock around the lookup and commit;
 * silently returns (with a debug message) if the link has no stream.
 */
static void dm_set_dpms_off(struct dc_link *link)
{
	struct dc_stream_state *stream_state;
	struct amdgpu_dm_connector *aconnector = link->priv;
	struct amdgpu_device *adev = drm_to_adev(aconnector->base.dev);
	struct dc_stream_update stream_update;
	bool dpms_off = true;

	memset(&stream_update, 0, sizeof(stream_update));
	stream_update.dpms_off = &dpms_off;

	mutex_lock(&adev->dm.dc_lock);
	stream_state = dc_stream_find_from_link(link);

	if (stream_state == NULL) {
		DRM_DEBUG_DRIVER("Error finding stream state associated with link!\n");
		mutex_unlock(&adev->dm.dc_lock);
		return;
	}

	stream_update.stream = stream_state;
	dc_commit_updates_for_stream(stream_state->ctx->dc, NULL, 0,
				     stream_state, &stream_update,
				     stream_state->ctx->dc->current_state);
	mutex_unlock(&adev->dm.dc_lock);
}
2130
/*
 * dm_resume() - IP block resume hook.
 * @handle: struct amdgpu_device pointer cast to void.
 *
 * Mirror of dm_suspend(). In the GPU-reset path, re-initializes DMUB,
 * powers DC back to D0, replays the DC state cached by dm_suspend() and
 * finally drops dm->dc_lock (taken in dm_suspend()'s reset path). In the
 * regular S3 path, rebuilds the DC state from scratch, re-runs link
 * detection on every connector and restores the cached atomic state.
 *
 * Return: always 0 (DMUB init failures are logged but not fatal).
 */
static int dm_resume(void *handle)
{
	struct amdgpu_device *adev = handle;
	struct drm_device *ddev = adev_to_drm(adev);
	struct amdgpu_display_manager *dm = &adev->dm;
	struct amdgpu_dm_connector *aconnector;
	struct drm_connector *connector;
	struct drm_connector_list_iter iter;
	struct drm_crtc *crtc;
	struct drm_crtc_state *new_crtc_state;
	struct dm_crtc_state *dm_new_crtc_state;
	struct drm_plane *plane;
	struct drm_plane_state *new_plane_state;
	struct dm_plane_state *dm_new_plane_state;
	struct dm_atomic_state *dm_state = to_dm_atomic_state(dm->atomic_obj.state);
	enum dc_connection_type new_connection_type = dc_connection_none;
	struct dc_state *dc_state;
	int i, r, j;

	if (amdgpu_in_reset(adev)) {
		dc_state = dm->cached_dc_state;

		r = dm_dmub_hw_init(adev);
		if (r)
			DRM_ERROR("DMUB interface failed to initialize: status=%d\n", r);

		dc_set_power_state(dm->dc, DC_ACPI_CM_POWER_STATE_D0);
		dc_resume(dm->dc);

		amdgpu_dm_irq_resume_early(adev);

		/* Force a full update on every cached stream/plane. */
		for (i = 0; i < dc_state->stream_count; i++) {
			dc_state->streams[i]->mode_changed = true;
			/* NOTE(review): stream_status here is implicitly
			 * [0] for every stream i — looks like it should be
			 * stream_status[i]; confirm before changing. */
			for (j = 0; j < dc_state->stream_status->plane_count; j++) {
				dc_state->stream_status->plane_states[j]->update_flags.raw
					= 0xffffffff;
			}
		}

		WARN_ON(!dc_commit_state(dm->dc, dc_state));

		dm_gpureset_commit_state(dm->cached_dc_state, dm);

		dm_gpureset_toggle_interrupts(adev, dm->cached_dc_state, true);

		dc_release_state(dm->cached_dc_state);
		dm->cached_dc_state = NULL;

		amdgpu_dm_irq_resume_late(adev);

		/* Pairs with the mutex_lock() in dm_suspend()'s reset path. */
		mutex_unlock(&dm->dc_lock);

		return 0;
	}
	/* Recreate dc_state - DC invalidates it when setting power state to S3. */
	dc_release_state(dm_state->context);
	dm_state->context = dc_create_state(dm->dc);
	/* TODO: Remove dc_state->dccg, use dc->dccg directly. */
	dc_resource_state_construct(dm->dc, dm_state->context);

	/* Before powering on DC we need to re-initialize DMUB. */
	r = dm_dmub_hw_init(adev);
	if (r)
		DRM_ERROR("DMUB interface failed to initialize: status=%d\n", r);

	/* power on hardware */
	dc_set_power_state(dm->dc, DC_ACPI_CM_POWER_STATE_D0);

	/* program HPD filter */
	dc_resume(dm->dc);

	/*
	 * early enable HPD Rx IRQ, should be done before set mode as short
	 * pulse interrupts are used for MST
	 */
	amdgpu_dm_irq_resume_early(adev);

	/* On resume we need to rewrite the MSTM control bits to enable MST */
	s3_handle_mst(ddev, false);

	/* Do detection */
	drm_connector_list_iter_begin(ddev, &iter);
	drm_for_each_connector_iter(connector, &iter) {
		aconnector = to_amdgpu_dm_connector(connector);

		/*
		 * this is the case when traversing through already created
		 * MST connectors, should be skipped
		 */
		if (aconnector->mst_port)
			continue;

		mutex_lock(&aconnector->hpd_lock);
		if (!dc_link_detect_sink(aconnector->dc_link, &new_connection_type))
			DRM_ERROR("KMS: Failed to detect connector\n");

		if (aconnector->base.force && new_connection_type == dc_connection_none)
			emulated_link_detect(aconnector->dc_link);
		else
			dc_link_detect(aconnector->dc_link, DETECT_REASON_HPD);

		if (aconnector->fake_enable && aconnector->dc_link->local_sink)
			aconnector->fake_enable = false;

		if (aconnector->dc_sink)
			dc_sink_release(aconnector->dc_sink);
		aconnector->dc_sink = NULL;
		amdgpu_dm_update_connector_after_detect(aconnector);
		mutex_unlock(&aconnector->hpd_lock);
	}
	drm_connector_list_iter_end(&iter);

	/* Force mode set in atomic commit */
	for_each_new_crtc_in_state(dm->cached_state, crtc, new_crtc_state, i)
		new_crtc_state->active_changed = true;

	/*
	 * atomic_check is expected to create the dc states. We need to release
	 * them here, since they were duplicated as part of the suspend
	 * procedure.
	 */
	for_each_new_crtc_in_state(dm->cached_state, crtc, new_crtc_state, i) {
		dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
		if (dm_new_crtc_state->stream) {
			WARN_ON(kref_read(&dm_new_crtc_state->stream->refcount) > 1);
			dc_stream_release(dm_new_crtc_state->stream);
			dm_new_crtc_state->stream = NULL;
		}
	}

	for_each_new_plane_in_state(dm->cached_state, plane, new_plane_state, i) {
		dm_new_plane_state = to_dm_plane_state(new_plane_state);
		if (dm_new_plane_state->dc_state) {
			WARN_ON(kref_read(&dm_new_plane_state->dc_state->refcount) > 1);
			dc_plane_state_release(dm_new_plane_state->dc_state);
			dm_new_plane_state->dc_state = NULL;
		}
	}

	drm_atomic_helper_resume(ddev, dm->cached_state);

	dm->cached_state = NULL;

#ifdef CONFIG_DRM_AMD_SECURE_DISPLAY
	amdgpu_dm_crtc_secure_display_resume(adev);
#endif

	amdgpu_dm_irq_resume_late(adev);

	amdgpu_dm_smu_write_watermarks_table(adev);

	return 0;
}
2284
2285 /**
2286  * DOC: DM Lifecycle
2287  *
 * DM (and consequently DC) is registered in the amdgpu base driver as an IP
2289  * block. When CONFIG_DRM_AMD_DC is enabled, the DM device IP block is added to
2290  * the base driver's device list to be initialized and torn down accordingly.
2291  *
2292  * The functions to do so are provided as hooks in &struct amd_ip_funcs.
2293  */
2294
/* IP-block dispatch table registered with the amdgpu base driver; these
 * hooks drive DM through device init/fini, suspend/resume and the
 * power-management transitions described in the DOC: DM Lifecycle above
 * this definition. */
static const struct amd_ip_funcs amdgpu_dm_funcs = {
	.name = "dm",
	.early_init = dm_early_init,
	.late_init = dm_late_init,
	.sw_init = dm_sw_init,
	.sw_fini = dm_sw_fini,
	.hw_init = dm_hw_init,
	.hw_fini = dm_hw_fini,
	.suspend = dm_suspend,
	.resume = dm_resume,
	.is_idle = dm_is_idle,
	.wait_for_idle = dm_wait_for_idle,
	.check_soft_reset = dm_check_soft_reset,
	.soft_reset = dm_soft_reset,
	.set_clockgating_state = dm_set_clockgating_state,
	.set_powergating_state = dm_set_powergating_state,
};
2312
/* Exported version descriptor that registers DM as the DCE IP block
 * (version 1.0.0) with the amdgpu base driver. */
const struct amdgpu_ip_block_version dm_ip_block =
{
	.type = AMD_IP_BLOCK_TYPE_DCE,
	.major = 1,
	.minor = 0,
	.rev = 0,
	.funcs = &amdgpu_dm_funcs,
};
2321
2322
2323 /**
2324  * DOC: atomic
2325  *
2326  * *WIP*
2327  */
2328
/* Top-level KMS mode-config hooks: framebuffer creation / format lookup
 * plus the DM atomic check and the generic helper commit entry point. */
static const struct drm_mode_config_funcs amdgpu_dm_mode_funcs = {
	.fb_create = amdgpu_display_user_framebuffer_create,
	.get_format_info = amd_get_format_info,
	.output_poll_changed = drm_fb_helper_output_poll_changed,
	.atomic_check = amdgpu_dm_atomic_check,
	.atomic_commit = drm_atomic_helper_commit,
};
2336
2337 static struct drm_mode_config_helper_funcs amdgpu_dm_mode_config_helperfuncs = {
2338         .atomic_commit_tail = amdgpu_dm_atomic_commit_tail
2339 };
2340
/*
 * update_connector_ext_caps() - Derive eDP backlight capabilities.
 * @aconnector: connector to inspect; only eDP links are considered.
 *
 * Reads the sink's DPCD extended capabilities and the connector's HDR sink
 * metadata to decide whether AUX-based backlight control is supported and
 * to convert the sink's MaxCLL/MinCLL values into the aux input-signal
 * range stored in dm->backlight_caps. The amdgpu_backlight module
 * parameter overrides the detected AUX support (0 = off, 1 = on).
 */
static void update_connector_ext_caps(struct amdgpu_dm_connector *aconnector)
{
	u32 max_cll, min_cll, max, min, q, r;
	struct amdgpu_dm_backlight_caps *caps;
	struct amdgpu_display_manager *dm;
	struct drm_connector *conn_base;
	struct amdgpu_device *adev;
	struct dc_link *link = NULL;
	/* Rounded values of 50*2**(r/32) for r = 0..31; see derivation below. */
	static const u8 pre_computed_values[] = {
		50, 51, 52, 53, 55, 56, 57, 58, 59, 61, 62, 63, 65, 66, 68, 69,
		71, 72, 74, 75, 77, 79, 81, 82, 84, 86, 88, 90, 92, 94, 96, 98};

	if (!aconnector || !aconnector->dc_link)
		return;

	link = aconnector->dc_link;
	if (link->connector_signal != SIGNAL_TYPE_EDP)
		return;

	conn_base = &aconnector->base;
	adev = drm_to_adev(conn_base->dev);
	dm = &adev->dm;
	caps = &dm->backlight_caps;
	caps->ext_caps = &aconnector->dc_link->dpcd_sink_ext_caps;
	caps->aux_support = false;
	max_cll = conn_base->hdr_sink_metadata.hdmi_type1.max_cll;
	min_cll = conn_base->hdr_sink_metadata.hdmi_type1.min_cll;

	if (caps->ext_caps->bits.oled == 1 ||
	    caps->ext_caps->bits.sdr_aux_backlight_control == 1 ||
	    caps->ext_caps->bits.hdr_aux_backlight_control == 1)
		caps->aux_support = true;

	/* Module parameter override: 0 forces PWM, 1 forces AUX. */
	if (amdgpu_backlight == 0)
		caps->aux_support = false;
	else if (amdgpu_backlight == 1)
		caps->aux_support = true;

	/* From the specification (CTA-861-G), for calculating the maximum
	 * luminance we need to use:
	 *	Luminance = 50*2**(CV/32)
	 * Where CV is a one-byte value.
	 * For calculating this expression we may need float point precision;
	 * to avoid this complexity level, we take advantage that CV is divided
	 * by a constant. From the Euclids division algorithm, we know that CV
	 * can be written as: CV = 32*q + r. Next, we replace CV in the
	 * Luminance expression and get 50*(2**q)*(2**(r/32)), hence we just
	 * need to pre-compute the value of r/32. For pre-computing the values
	 * We just used the following Ruby line:
	 *	(0...32).each {|cv| puts (50*2**(cv/32.0)).round}
	 * The results of the above expressions can be verified at
	 * pre_computed_values.
	 */
	q = max_cll >> 5;
	r = max_cll % 32;
	max = (1 << q) * pre_computed_values[r];

	// min luminance: maxLum * (CV/255)^2 / 100
	/* NOTE(review): DIV_ROUND_CLOSEST(min_cll, 255) is 0 for any
	 * min_cll < 128, so min ends up 0 in most cases — confirm whether
	 * this integer-precision loss is intended. */
	q = DIV_ROUND_CLOSEST(min_cll, 255);
	min = max * DIV_ROUND_CLOSEST((q * q), 100);

	caps->aux_max_input_signal = max;
	caps->aux_min_input_signal = min;
}
2405
/*
 * amdgpu_dm_update_connector_after_detect() - sync DRM connector state with
 * the sink just (re)detected on the connector's DC link.
 * @aconnector: the amdgpu/DM connector whose link detection completed
 *
 * Swaps aconnector->dc_sink for the link's current local sink (or the
 * emulated sink for forced/headless connectors), updates the EDID property,
 * CEC state and FreeSync caps to match, and drops everything belonging to a
 * departed sink on disconnect.  Reference counting on dc_sink objects is
 * deliberate throughout: every pointer stored in aconnector->dc_sink holds
 * its own reference.
 */
void amdgpu_dm_update_connector_after_detect(
		struct amdgpu_dm_connector *aconnector)
{
	struct drm_connector *connector = &aconnector->base;
	struct drm_device *dev = connector->dev;
	struct dc_sink *sink;

	/* MST handled by drm_mst framework */
	if (aconnector->mst_mgr.mst_state == true)
		return;

	sink = aconnector->dc_link->local_sink;
	/*
	 * Pin the link's sink for the duration of this function; the matching
	 * dc_sink_release() is on every exit path below.
	 */
	if (sink)
		dc_sink_retain(sink);

	/*
	 * Edid mgmt connector gets first update only in mode_valid hook and then
	 * the connector sink is set to either fake or physical sink depends on link status.
	 * Skip if already done during boot.
	 */
	if (aconnector->base.force != DRM_FORCE_UNSPECIFIED
			&& aconnector->dc_em_sink) {

		/*
		 * For S3 resume with headless use eml_sink to fake stream
		 * because on resume connector->sink is set to NULL
		 */
		mutex_lock(&dev->mode_config.mutex);

		if (sink) {
			if (aconnector->dc_sink) {
				amdgpu_dm_update_freesync_caps(connector, NULL);
				/*
				 * retain and release below are used to
				 * bump up refcount for sink because the link doesn't point
				 * to it anymore after disconnect, so on next crtc to connector
				 * reshuffle by UMD we will get into unwanted dc_sink release
				 */
				dc_sink_release(aconnector->dc_sink);
			}
			aconnector->dc_sink = sink;
			dc_sink_retain(aconnector->dc_sink);
			amdgpu_dm_update_freesync_caps(connector,
					aconnector->edid);
		} else {
			/* No physical sink: fall back to the emulated one. */
			amdgpu_dm_update_freesync_caps(connector, NULL);
			if (!aconnector->dc_sink) {
				aconnector->dc_sink = aconnector->dc_em_sink;
				dc_sink_retain(aconnector->dc_sink);
			}
		}

		mutex_unlock(&dev->mode_config.mutex);

		if (sink)
			dc_sink_release(sink);
		return;
	}

	/*
	 * TODO: temporary guard to look for proper fix
	 * if this sink is MST sink, we should not do anything
	 */
	if (sink && sink->sink_signal == SIGNAL_TYPE_DISPLAY_PORT_MST) {
		dc_sink_release(sink);
		return;
	}

	if (aconnector->dc_sink == sink) {
		/*
		 * We got a DP short pulse (Link Loss, DP CTS, etc...).
		 * Do nothing!!
		 */
		DRM_DEBUG_DRIVER("DCHPD: connector_id=%d: dc_sink didn't change.\n",
				aconnector->connector_id);
		if (sink)
			dc_sink_release(sink);
		return;
	}

	DRM_DEBUG_DRIVER("DCHPD: connector_id=%d: Old sink=%p New sink=%p\n",
		aconnector->connector_id, aconnector->dc_sink, sink);

	mutex_lock(&dev->mode_config.mutex);

	/*
	 * 1. Update status of the drm connector
	 * 2. Send an event and let userspace tell us what to do
	 */
	if (sink) {
		/*
		 * TODO: check if we still need the S3 mode update workaround.
		 * If yes, put it here.
		 */
		if (aconnector->dc_sink) {
			amdgpu_dm_update_freesync_caps(connector, NULL);
			dc_sink_release(aconnector->dc_sink);
		}

		aconnector->dc_sink = sink;
		dc_sink_retain(aconnector->dc_sink);
		if (sink->dc_edid.length == 0) {
			/* Sink provided no EDID; clear cached EDID/CEC state. */
			aconnector->edid = NULL;
			if (aconnector->dc_link->aux_mode) {
				drm_dp_cec_unset_edid(
					&aconnector->dm_dp_aux.aux);
			}
		} else {
			/* aconnector->edid aliases the sink's raw EDID buffer. */
			aconnector->edid =
				(struct edid *)sink->dc_edid.raw_edid;

			drm_connector_update_edid_property(connector,
							   aconnector->edid);
			if (aconnector->dc_link->aux_mode)
				drm_dp_cec_set_edid(&aconnector->dm_dp_aux.aux,
						    aconnector->edid);
		}

		amdgpu_dm_update_freesync_caps(connector, aconnector->edid);
		update_connector_ext_caps(aconnector);
	} else {
		/* Disconnect: tear down CEC, EDID, FreeSync and sink state. */
		drm_dp_cec_unset_edid(&aconnector->dm_dp_aux.aux);
		amdgpu_dm_update_freesync_caps(connector, NULL);
		drm_connector_update_edid_property(connector, NULL);
		aconnector->num_modes = 0;
		dc_sink_release(aconnector->dc_sink);
		aconnector->dc_sink = NULL;
		aconnector->edid = NULL;
#ifdef CONFIG_DRM_AMD_DC_HDCP
		/* Set CP to DESIRED if it was ENABLED, so we can re-enable it again on hotplug */
		if (connector->state->content_protection == DRM_MODE_CONTENT_PROTECTION_ENABLED)
			connector->state->content_protection = DRM_MODE_CONTENT_PROTECTION_DESIRED;
#endif
	}

	mutex_unlock(&dev->mode_config.mutex);

	update_subconnector_property(aconnector);

	if (sink)
		dc_sink_release(sink);
}
2548
/*
 * handle_hpd_irq() - low-context handler for long-pulse HPD interrupts.
 * @param: the struct amdgpu_dm_connector registered for this irq source
 *
 * Runs full link detection (or emulated detection for forced connectors),
 * refreshes the connector state, and sends a hotplug uevent to userspace.
 * Serialized against other HPD work on this connector via hpd_lock.
 */
static void handle_hpd_irq(void *param)
{
	struct amdgpu_dm_connector *aconnector = (struct amdgpu_dm_connector *)param;
	struct drm_connector *connector = &aconnector->base;
	struct drm_device *dev = connector->dev;
	enum dc_connection_type new_connection_type = dc_connection_none;
#ifdef CONFIG_DRM_AMD_DC_HDCP
	struct amdgpu_device *adev = drm_to_adev(dev);
	struct dm_connector_state *dm_con_state = to_dm_connector_state(connector->state);
#endif

	/*
	 * In case of failure or MST no need to update connector status or notify the OS
	 * since (for MST case) MST does this in its own context.
	 */
	mutex_lock(&aconnector->hpd_lock);

#ifdef CONFIG_DRM_AMD_DC_HDCP
	/* Any hotplug invalidates the current HDCP session on this display. */
	if (adev->dm.hdcp_workqueue) {
		hdcp_reset_display(adev->dm.hdcp_workqueue, aconnector->dc_link->link_index);
		dm_con_state->update_hdcp = true;
	}
#endif
	if (aconnector->fake_enable)
		aconnector->fake_enable = false;

	if (!dc_link_detect_sink(aconnector->dc_link, &new_connection_type))
		DRM_ERROR("KMS: Failed to detect connector\n");

	/*
	 * Forced connector with nothing physically attached: emulate a link
	 * so userspace-forced modes keep working.
	 */
	if (aconnector->base.force && new_connection_type == dc_connection_none) {
		emulated_link_detect(aconnector->dc_link);


		drm_modeset_lock_all(dev);
		dm_restore_drm_connector_state(dev, connector);
		drm_modeset_unlock_all(dev);

		if (aconnector->base.force == DRM_FORCE_UNSPECIFIED)
			drm_kms_helper_hotplug_event(dev);

	} else if (dc_link_detect(aconnector->dc_link, DETECT_REASON_HPD)) {
		/* Both sides report nothing connected: power the link down. */
		if (new_connection_type == dc_connection_none &&
		    aconnector->dc_link->type == dc_connection_none)
			dm_set_dpms_off(aconnector->dc_link);

		amdgpu_dm_update_connector_after_detect(aconnector);

		drm_modeset_lock_all(dev);
		dm_restore_drm_connector_state(dev, connector);
		drm_modeset_unlock_all(dev);

		if (aconnector->base.force == DRM_FORCE_UNSPECIFIED)
			drm_kms_helper_hotplug_event(dev);
	}
	mutex_unlock(&aconnector->hpd_lock);

}
2606
2607 static void dm_handle_hpd_rx_irq(struct amdgpu_dm_connector *aconnector)
2608 {
2609         uint8_t esi[DP_PSR_ERROR_STATUS - DP_SINK_COUNT_ESI] = { 0 };
2610         uint8_t dret;
2611         bool new_irq_handled = false;
2612         int dpcd_addr;
2613         int dpcd_bytes_to_read;
2614
2615         const int max_process_count = 30;
2616         int process_count = 0;
2617
2618         const struct dc_link_status *link_status = dc_link_get_status(aconnector->dc_link);
2619
2620         if (link_status->dpcd_caps->dpcd_rev.raw < 0x12) {
2621                 dpcd_bytes_to_read = DP_LANE0_1_STATUS - DP_SINK_COUNT;
2622                 /* DPCD 0x200 - 0x201 for downstream IRQ */
2623                 dpcd_addr = DP_SINK_COUNT;
2624         } else {
2625                 dpcd_bytes_to_read = DP_PSR_ERROR_STATUS - DP_SINK_COUNT_ESI;
2626                 /* DPCD 0x2002 - 0x2005 for downstream IRQ */
2627                 dpcd_addr = DP_SINK_COUNT_ESI;
2628         }
2629
2630         dret = drm_dp_dpcd_read(
2631                 &aconnector->dm_dp_aux.aux,
2632                 dpcd_addr,
2633                 esi,
2634                 dpcd_bytes_to_read);
2635
2636         while (dret == dpcd_bytes_to_read &&
2637                 process_count < max_process_count) {
2638                 uint8_t retry;
2639                 dret = 0;
2640
2641                 process_count++;
2642
2643                 DRM_DEBUG_DRIVER("ESI %02x %02x %02x\n", esi[0], esi[1], esi[2]);
2644                 /* handle HPD short pulse irq */
2645                 if (aconnector->mst_mgr.mst_state)
2646                         drm_dp_mst_hpd_irq(
2647                                 &aconnector->mst_mgr,
2648                                 esi,
2649                                 &new_irq_handled);
2650
2651                 if (new_irq_handled) {
2652                         /* ACK at DPCD to notify down stream */
2653                         const int ack_dpcd_bytes_to_write =
2654                                 dpcd_bytes_to_read - 1;
2655
2656                         for (retry = 0; retry < 3; retry++) {
2657                                 uint8_t wret;
2658
2659                                 wret = drm_dp_dpcd_write(
2660                                         &aconnector->dm_dp_aux.aux,
2661                                         dpcd_addr + 1,
2662                                         &esi[1],
2663                                         ack_dpcd_bytes_to_write);
2664                                 if (wret == ack_dpcd_bytes_to_write)
2665                                         break;
2666                         }
2667
2668                         /* check if there is new irq to be handled */
2669                         dret = drm_dp_dpcd_read(
2670                                 &aconnector->dm_dp_aux.aux,
2671                                 dpcd_addr,
2672                                 esi,
2673                                 dpcd_bytes_to_read);
2674
2675                         new_irq_handled = false;
2676                 } else {
2677                         break;
2678                 }
2679         }
2680
2681         if (process_count == max_process_count)
2682                 DRM_DEBUG_DRIVER("Loop exceeded max iterations\n");
2683 }
2684
/*
 * handle_hpd_rx_irq() - low-context handler for short-pulse (hpd_rx) IRQs.
 * @param: the struct amdgpu_dm_connector registered for this irq source
 *
 * Short pulses carry DP sideband traffic: MST up/down replies, link-status
 * changes on downstream ports, and HDCP CP_IRQs.  MST message handling is
 * delegated to dm_handle_hpd_rx_irq(); everything else goes through DC's
 * dc_link_handle_hpd_rx_irq(), possibly followed by a full re-detect.
 *
 * Locking is intentionally asymmetric: hpd_lock is taken only when the link
 * is NOT an MST branch (MST runs in its own context), and the matching
 * unlock at the end uses the same condition.
 */
static void handle_hpd_rx_irq(void *param)
{
	struct amdgpu_dm_connector *aconnector = (struct amdgpu_dm_connector *)param;
	struct drm_connector *connector = &aconnector->base;
	struct drm_device *dev = connector->dev;
	struct dc_link *dc_link = aconnector->dc_link;
	bool is_mst_root_connector = aconnector->mst_mgr.mst_state;
	bool result = false;
	enum dc_connection_type new_connection_type = dc_connection_none;
	struct amdgpu_device *adev = drm_to_adev(dev);
	union hpd_irq_data hpd_irq_data;

	memset(&hpd_irq_data, 0, sizeof(hpd_irq_data));

	/*
	 * TODO:Temporary add mutex to protect hpd interrupt not have a gpio
	 * conflict, after implement i2c helper, this mutex should be
	 * retired.
	 */
	if (dc_link->type != dc_connection_mst_branch)
		mutex_lock(&aconnector->hpd_lock);

	read_hpd_rx_irq_data(dc_link, &hpd_irq_data);

	/*
	 * With a trained link (or an MST branch) a pending sideband message
	 * short-circuits to the MST handler; UP_REQ marks the irq as handled
	 * (result = true) so a downstream re-detect can run below, DOWN_REP
	 * does not.  Both paths skip dc_link_handle_hpd_rx_irq() via 'out'.
	 */
	if ((dc_link->cur_link_settings.lane_count != LANE_COUNT_UNKNOWN) ||
		(dc_link->type == dc_connection_mst_branch)) {
		if (hpd_irq_data.bytes.device_service_irq.bits.UP_REQ_MSG_RDY) {
			result = true;
			dm_handle_hpd_rx_irq(aconnector);
			goto out;
		} else if (hpd_irq_data.bytes.device_service_irq.bits.DOWN_REP_MSG_RDY) {
			result = false;
			dm_handle_hpd_rx_irq(aconnector);
			goto out;
		}
	}

	mutex_lock(&adev->dm.dc_lock);
#ifdef CONFIG_DRM_AMD_DC_HDCP
	/* Pass the pre-read irq data so DC does not re-read (and clear) CP_IRQ. */
	result = dc_link_handle_hpd_rx_irq(dc_link, &hpd_irq_data, NULL);
#else
	result = dc_link_handle_hpd_rx_irq(dc_link, NULL, NULL);
#endif
	mutex_unlock(&adev->dm.dc_lock);

out:
	if (result && !is_mst_root_connector) {
		/* Downstream Port status changed. */
		if (!dc_link_detect_sink(dc_link, &new_connection_type))
			DRM_ERROR("KMS: Failed to detect connector\n");

		if (aconnector->base.force && new_connection_type == dc_connection_none) {
			/* Forced connector with nothing attached: emulate the link. */
			emulated_link_detect(dc_link);

			if (aconnector->fake_enable)
				aconnector->fake_enable = false;

			amdgpu_dm_update_connector_after_detect(aconnector);


			drm_modeset_lock_all(dev);
			dm_restore_drm_connector_state(dev, connector);
			drm_modeset_unlock_all(dev);

			drm_kms_helper_hotplug_event(dev);
		} else if (dc_link_detect(dc_link, DETECT_REASON_HPDRX)) {

			if (aconnector->fake_enable)
				aconnector->fake_enable = false;

			amdgpu_dm_update_connector_after_detect(aconnector);


			drm_modeset_lock_all(dev);
			dm_restore_drm_connector_state(dev, connector);
			drm_modeset_unlock_all(dev);

			drm_kms_helper_hotplug_event(dev);
		}
	}
#ifdef CONFIG_DRM_AMD_DC_HDCP
	/* Content-protection IRQ from the sink: let the HDCP worker service it. */
	if (hpd_irq_data.bytes.device_service_irq.bits.CP_IRQ) {
		if (adev->dm.hdcp_workqueue)
			hdcp_handle_cpirq(adev->dm.hdcp_workqueue,  aconnector->base.index);
	}
#endif

	if (dc_link->type != dc_connection_mst_branch) {
		drm_dp_cec_irq(&aconnector->dm_dp_aux.aux);
		mutex_unlock(&aconnector->hpd_lock);
	}
}
2777
2778 static void register_hpd_handlers(struct amdgpu_device *adev)
2779 {
2780         struct drm_device *dev = adev_to_drm(adev);
2781         struct drm_connector *connector;
2782         struct amdgpu_dm_connector *aconnector;
2783         const struct dc_link *dc_link;
2784         struct dc_interrupt_params int_params = {0};
2785
2786         int_params.requested_polarity = INTERRUPT_POLARITY_DEFAULT;
2787         int_params.current_polarity = INTERRUPT_POLARITY_DEFAULT;
2788
2789         list_for_each_entry(connector,
2790                         &dev->mode_config.connector_list, head) {
2791
2792                 aconnector = to_amdgpu_dm_connector(connector);
2793                 dc_link = aconnector->dc_link;
2794
2795                 if (DC_IRQ_SOURCE_INVALID != dc_link->irq_source_hpd) {
2796                         int_params.int_context = INTERRUPT_LOW_IRQ_CONTEXT;
2797                         int_params.irq_source = dc_link->irq_source_hpd;
2798
2799                         amdgpu_dm_irq_register_interrupt(adev, &int_params,
2800                                         handle_hpd_irq,
2801                                         (void *) aconnector);
2802                 }
2803
2804                 if (DC_IRQ_SOURCE_INVALID != dc_link->irq_source_hpd_rx) {
2805
2806                         /* Also register for DP short pulse (hpd_rx). */
2807                         int_params.int_context = INTERRUPT_LOW_IRQ_CONTEXT;
2808                         int_params.irq_source = dc_link->irq_source_hpd_rx;
2809
2810                         amdgpu_dm_irq_register_interrupt(adev, &int_params,
2811                                         handle_hpd_rx_irq,
2812                                         (void *) aconnector);
2813                 }
2814         }
2815 }
2816
#if defined(CONFIG_DRM_AMD_DC_SI)
/* Register IRQ sources and initialize IRQ callbacks */
/*
 * dce60_register_irq_handlers() - hook up display interrupts for DCE6 (SI).
 * @adev: amdgpu device
 *
 * For each CRTC registers a VBLANK source, for each display pipe a
 * GRPH_PFLIP source, and finally the shared HPD source; each DC irq source
 * is mapped to its common_irq_params slot and a high-context DM handler.
 *
 * Returns 0 on success or the error from amdgpu_irq_add_id().
 */
static int dce60_register_irq_handlers(struct amdgpu_device *adev)
{
	struct dc *dc = adev->dm.dc;
	struct common_irq_params *c_irq_params;
	struct dc_interrupt_params int_params = {0};
	int r;
	int i;
	unsigned client_id = AMDGPU_IRQ_CLIENTID_LEGACY;

	int_params.requested_polarity = INTERRUPT_POLARITY_DEFAULT;
	int_params.current_polarity = INTERRUPT_POLARITY_DEFAULT;

	/*
	 * Actions of amdgpu_irq_add_id():
	 * 1. Register a set() function with base driver.
	 *    Base driver will call set() function to enable/disable an
	 *    interrupt in DC hardware.
	 * 2. Register amdgpu_dm_irq_handler().
	 *    Base driver will call amdgpu_dm_irq_handler() for ALL interrupts
	 *    coming from DC hardware.
	 *    amdgpu_dm_irq_handler() will re-direct the interrupt to DC
	 *    for acknowledging and handling. */

	/* Use VBLANK interrupt */
	for (i = 0; i < adev->mode_info.num_crtc; i++) {
		/* NOTE(review): DCE6 vblank src ids appear to be 1-based (i+1)
		 * — confirm against the SI interrupt source list. */
		r = amdgpu_irq_add_id(adev, client_id, i+1 , &adev->crtc_irq);
		if (r) {
			DRM_ERROR("Failed to add crtc irq id!\n");
			return r;
		}

		int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
		int_params.irq_source =
			dc_interrupt_to_irq_source(dc, i+1 , 0);

		/* Index the per-CRTC params slot by offset from VBLANK1. */
		c_irq_params = &adev->dm.vblank_params[int_params.irq_source - DC_IRQ_SOURCE_VBLANK1];

		c_irq_params->adev = adev;
		c_irq_params->irq_src = int_params.irq_source;

		amdgpu_dm_irq_register_interrupt(adev, &int_params,
				dm_crtc_high_irq, c_irq_params);
	}

	/* Use GRPH_PFLIP interrupt */
	for (i = VISLANDS30_IV_SRCID_D1_GRPH_PFLIP;
			i <= VISLANDS30_IV_SRCID_D6_GRPH_PFLIP; i += 2) {
		r = amdgpu_irq_add_id(adev, client_id, i, &adev->pageflip_irq);
		if (r) {
			DRM_ERROR("Failed to add page flip irq id!\n");
			return r;
		}

		int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
		int_params.irq_source =
			dc_interrupt_to_irq_source(dc, i, 0);

		c_irq_params = &adev->dm.pflip_params[int_params.irq_source - DC_IRQ_SOURCE_PFLIP_FIRST];

		c_irq_params->adev = adev;
		c_irq_params->irq_src = int_params.irq_source;

		amdgpu_dm_irq_register_interrupt(adev, &int_params,
				dm_pflip_high_irq, c_irq_params);

	}

	/* HPD */
	r = amdgpu_irq_add_id(adev, client_id,
			VISLANDS30_IV_SRCID_HOTPLUG_DETECT_A, &adev->hpd_irq);
	if (r) {
		DRM_ERROR("Failed to add hpd irq id!\n");
		return r;
	}

	register_hpd_handlers(adev);

	return 0;
}
#endif
2899
/* Register IRQ sources and initialize IRQ callbacks */
/*
 * dce110_register_irq_handlers() - hook up display interrupts for DCE8+.
 * @adev: amdgpu device
 *
 * Registers VBLANK, VUPDATE and GRPH_PFLIP sources per display, plus the
 * shared HPD source.  On Vega and newer the DCE interrupt client id differs
 * from the legacy one, hence the asic_type check.
 *
 * Returns 0 on success or the error from amdgpu_irq_add_id().
 */
static int dce110_register_irq_handlers(struct amdgpu_device *adev)
{
	struct dc *dc = adev->dm.dc;
	struct common_irq_params *c_irq_params;
	struct dc_interrupt_params int_params = {0};
	int r;
	int i;
	unsigned client_id = AMDGPU_IRQ_CLIENTID_LEGACY;

	/* Vega10 and up route DCE interrupts through the SOC15 client id. */
	if (adev->asic_type >= CHIP_VEGA10)
		client_id = SOC15_IH_CLIENTID_DCE;

	int_params.requested_polarity = INTERRUPT_POLARITY_DEFAULT;
	int_params.current_polarity = INTERRUPT_POLARITY_DEFAULT;

	/*
	 * Actions of amdgpu_irq_add_id():
	 * 1. Register a set() function with base driver.
	 *    Base driver will call set() function to enable/disable an
	 *    interrupt in DC hardware.
	 * 2. Register amdgpu_dm_irq_handler().
	 *    Base driver will call amdgpu_dm_irq_handler() for ALL interrupts
	 *    coming from DC hardware.
	 *    amdgpu_dm_irq_handler() will re-direct the interrupt to DC
	 *    for acknowledging and handling. */

	/* Use VBLANK interrupt */
	for (i = VISLANDS30_IV_SRCID_D1_VERTICAL_INTERRUPT0; i <= VISLANDS30_IV_SRCID_D6_VERTICAL_INTERRUPT0; i++) {
		r = amdgpu_irq_add_id(adev, client_id, i, &adev->crtc_irq);
		if (r) {
			DRM_ERROR("Failed to add crtc irq id!\n");
			return r;
		}

		int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
		int_params.irq_source =
			dc_interrupt_to_irq_source(dc, i, 0);

		/* Index the per-CRTC params slot by offset from VBLANK1. */
		c_irq_params = &adev->dm.vblank_params[int_params.irq_source - DC_IRQ_SOURCE_VBLANK1];

		c_irq_params->adev = adev;
		c_irq_params->irq_src = int_params.irq_source;

		amdgpu_dm_irq_register_interrupt(adev, &int_params,
				dm_crtc_high_irq, c_irq_params);
	}

	/* Use VUPDATE interrupt */
	for (i = VISLANDS30_IV_SRCID_D1_V_UPDATE_INT; i <= VISLANDS30_IV_SRCID_D6_V_UPDATE_INT; i += 2) {
		r = amdgpu_irq_add_id(adev, client_id, i, &adev->vupdate_irq);
		if (r) {
			DRM_ERROR("Failed to add vupdate irq id!\n");
			return r;
		}

		int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
		int_params.irq_source =
			dc_interrupt_to_irq_source(dc, i, 0);

		c_irq_params = &adev->dm.vupdate_params[int_params.irq_source - DC_IRQ_SOURCE_VUPDATE1];

		c_irq_params->adev = adev;
		c_irq_params->irq_src = int_params.irq_source;

		amdgpu_dm_irq_register_interrupt(adev, &int_params,
				dm_vupdate_high_irq, c_irq_params);
	}

	/* Use GRPH_PFLIP interrupt */
	for (i = VISLANDS30_IV_SRCID_D1_GRPH_PFLIP;
			i <= VISLANDS30_IV_SRCID_D6_GRPH_PFLIP; i += 2) {
		r = amdgpu_irq_add_id(adev, client_id, i, &adev->pageflip_irq);
		if (r) {
			DRM_ERROR("Failed to add page flip irq id!\n");
			return r;
		}

		int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
		int_params.irq_source =
			dc_interrupt_to_irq_source(dc, i, 0);

		c_irq_params = &adev->dm.pflip_params[int_params.irq_source - DC_IRQ_SOURCE_PFLIP_FIRST];

		c_irq_params->adev = adev;
		c_irq_params->irq_src = int_params.irq_source;

		amdgpu_dm_irq_register_interrupt(adev, &int_params,
				dm_pflip_high_irq, c_irq_params);

	}

	/* HPD */
	r = amdgpu_irq_add_id(adev, client_id,
			VISLANDS30_IV_SRCID_HOTPLUG_DETECT_A, &adev->hpd_irq);
	if (r) {
		DRM_ERROR("Failed to add hpd irq id!\n");
		return r;
	}

	register_hpd_handlers(adev);

	return 0;
}
3004
3005 #if defined(CONFIG_DRM_AMD_DC_DCN)
3006 /* Register IRQ sources and initialize IRQ callbacks */
3007 static int dcn10_register_irq_handlers(struct amdgpu_device *adev)
3008 {
3009         struct dc *dc = adev->dm.dc;
3010         struct common_irq_params *c_irq_params;
3011         struct dc_interrupt_params int_params = {0};
3012         int r;
3013         int i;
3014 #if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
3015         static const unsigned int vrtl_int_srcid[] = {
3016                 DCN_1_0__SRCID__OTG1_VERTICAL_INTERRUPT0_CONTROL,
3017                 DCN_1_0__SRCID__OTG2_VERTICAL_INTERRUPT0_CONTROL,
3018                 DCN_1_0__SRCID__OTG3_VERTICAL_INTERRUPT0_CONTROL,
3019                 DCN_1_0__SRCID__OTG4_VERTICAL_INTERRUPT0_CONTROL,
3020                 DCN_1_0__SRCID__OTG5_VERTICAL_INTERRUPT0_CONTROL,
3021                 DCN_1_0__SRCID__OTG6_VERTICAL_INTERRUPT0_CONTROL
3022         };
3023 #endif
3024
3025         int_params.requested_polarity = INTERRUPT_POLARITY_DEFAULT;
3026         int_params.current_polarity = INTERRUPT_POLARITY_DEFAULT;
3027
3028         /*
3029          * Actions of amdgpu_irq_add_id():
3030          * 1. Register a set() function with base driver.
3031          *    Base driver will call set() function to enable/disable an
3032          *    interrupt in DC hardware.
3033          * 2. Register amdgpu_dm_irq_handler().
3034          *    Base driver will call amdgpu_dm_irq_handler() for ALL interrupts
3035          *    coming from DC hardware.
3036          *    amdgpu_dm_irq_handler() will re-direct the interrupt to DC
3037          *    for acknowledging and handling.
3038          */
3039
3040         /* Use VSTARTUP interrupt */
3041         for (i = DCN_1_0__SRCID__DC_D1_OTG_VSTARTUP;
3042                         i <= DCN_1_0__SRCID__DC_D1_OTG_VSTARTUP + adev->mode_info.num_crtc - 1;
3043                         i++) {
3044                 r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_DCE, i, &adev->crtc_irq);
3045
3046                 if (r) {
3047                         DRM_ERROR("Failed to add crtc irq id!\n");
3048                         return r;
3049                 }
3050
3051                 int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
3052                 int_params.irq_source =
3053                         dc_interrupt_to_irq_source(dc, i, 0);
3054
3055                 c_irq_params = &adev->dm.vblank_params[int_params.irq_source - DC_IRQ_SOURCE_VBLANK1];
3056
3057                 c_irq_params->adev = adev;
3058                 c_irq_params->irq_src = int_params.irq_source;
3059
3060                 amdgpu_dm_irq_register_interrupt(
3061                         adev, &int_params, dm_crtc_high_irq, c_irq_params);
3062         }
3063
3064         /* Use otg vertical line interrupt */
3065 #if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
3066         for (i = 0; i <= adev->mode_info.num_crtc - 1; i++) {
3067                 r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_DCE,
3068                                 vrtl_int_srcid[i], &adev->vline0_irq);
3069
3070                 if (r) {
3071                         DRM_ERROR("Failed to add vline0 irq id!\n");
3072                         return r;
3073                 }
3074
3075                 int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
3076                 int_params.irq_source =
3077                         dc_interrupt_to_irq_source(dc, vrtl_int_srcid[i], 0);
3078
3079                 if (int_params.irq_source == DC_IRQ_SOURCE_INVALID) {
3080                         DRM_ERROR("Failed to register vline0 irq %d!\n", vrtl_int_srcid[i]);
3081                         break;
3082                 }
3083
3084                 c_irq_params = &adev->dm.vline0_params[int_params.irq_source
3085                                         - DC_IRQ_SOURCE_DC1_VLINE0];
3086
3087                 c_irq_params->adev = adev;
3088                 c_irq_params->irq_src = int_params.irq_source;
3089
3090                 amdgpu_dm_irq_register_interrupt(adev, &int_params,
3091                                 dm_dcn_vertical_interrupt0_high_irq, c_irq_params);
3092         }
3093 #endif
3094
3095         /* Use VUPDATE_NO_LOCK interrupt on DCN, which seems to correspond to
3096          * the regular VUPDATE interrupt on DCE. We want DC_IRQ_SOURCE_VUPDATEx
3097          * to trigger at end of each vblank, regardless of state of the lock,
3098          * matching DCE behaviour.
3099          */
3100         for (i = DCN_1_0__SRCID__OTG0_IHC_V_UPDATE_NO_LOCK_INTERRUPT;
3101              i <= DCN_1_0__SRCID__OTG0_IHC_V_UPDATE_NO_LOCK_INTERRUPT + adev->mode_info.num_crtc - 1;
3102              i++) {
3103                 r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_DCE, i, &adev->vupdate_irq);
3104
3105                 if (r) {
3106                         DRM_ERROR("Failed to add vupdate irq id!\n");
3107                         return r;
3108                 }
3109
3110                 int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
3111                 int_params.irq_source =
3112                         dc_interrupt_to_irq_source(dc, i, 0);
3113
3114                 c_irq_params = &adev->dm.vupdate_params[int_params.irq_source - DC_IRQ_SOURCE_VUPDATE1];
3115
3116                 c_irq_params->adev = adev;
3117                 c_irq_params->irq_src = int_params.irq_source;
3118
3119                 amdgpu_dm_irq_register_interrupt(adev, &int_params,
3120                                 dm_vupdate_high_irq, c_irq_params);
3121         }
3122
3123         /* Use GRPH_PFLIP interrupt */
3124         for (i = DCN_1_0__SRCID__HUBP0_FLIP_INTERRUPT;
3125                         i <= DCN_1_0__SRCID__HUBP0_FLIP_INTERRUPT + adev->mode_info.num_crtc - 1;
3126                         i++) {
3127                 r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_DCE, i, &adev->pageflip_irq);
3128                 if (r) {
3129                         DRM_ERROR("Failed to add page flip irq id!\n");
3130                         return r;
3131                 }
3132
3133                 int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
3134                 int_params.irq_source =
3135                         dc_interrupt_to_irq_source(dc, i, 0);
3136
3137                 c_irq_params = &adev->dm.pflip_params[int_params.irq_source - DC_IRQ_SOURCE_PFLIP_FIRST];
3138
3139                 c_irq_params->adev = adev;
3140                 c_irq_params->irq_src = int_params.irq_source;
3141
3142                 amdgpu_dm_irq_register_interrupt(adev, &int_params,
3143                                 dm_pflip_high_irq, c_irq_params);
3144
3145         }
3146
3147         if (dc->ctx->dmub_srv) {
3148                 i = DCN_1_0__SRCID__DMCUB_OUTBOX_HIGH_PRIORITY_READY_INT;
3149                 r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_DCE, i, &adev->dmub_trace_irq);
3150
3151                 if (r) {
3152                         DRM_ERROR("Failed to add dmub trace irq id!\n");
3153                         return r;
3154                 }
3155
3156                 int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
3157                 int_params.irq_source =
3158                         dc_interrupt_to_irq_source(dc, i, 0);
3159
3160                 c_irq_params = &adev->dm.dmub_trace_params[0];
3161
3162                 c_irq_params->adev = adev;
3163                 c_irq_params->irq_src = int_params.irq_source;
3164
3165                 amdgpu_dm_irq_register_interrupt(adev, &int_params,
3166                                 dm_dmub_trace_high_irq, c_irq_params);
3167         }
3168
3169         /* HPD */
3170         r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_DCE, DCN_1_0__SRCID__DC_HPD1_INT,
3171                         &adev->hpd_irq);
3172         if (r) {
3173                 DRM_ERROR("Failed to add hpd irq id!\n");
3174                 return r;
3175         }
3176
3177         register_hpd_handlers(adev);
3178
3179         return 0;
3180 }
3181 #endif
3182
3183 /*
3184  * Acquires the lock for the atomic state object and returns
3185  * the new atomic state.
3186  *
3187  * This should only be called during atomic check.
3188  */
3189 static int dm_atomic_get_state(struct drm_atomic_state *state,
3190                                struct dm_atomic_state **dm_state)
3191 {
3192         struct drm_device *dev = state->dev;
3193         struct amdgpu_device *adev = drm_to_adev(dev);
3194         struct amdgpu_display_manager *dm = &adev->dm;
3195         struct drm_private_state *priv_state;
3196
3197         if (*dm_state)
3198                 return 0;
3199
3200         priv_state = drm_atomic_get_private_obj_state(state, &dm->atomic_obj);
3201         if (IS_ERR(priv_state))
3202                 return PTR_ERR(priv_state);
3203
3204         *dm_state = to_dm_atomic_state(priv_state);
3205
3206         return 0;
3207 }
3208
3209 static struct dm_atomic_state *
3210 dm_atomic_get_new_state(struct drm_atomic_state *state)
3211 {
3212         struct drm_device *dev = state->dev;
3213         struct amdgpu_device *adev = drm_to_adev(dev);
3214         struct amdgpu_display_manager *dm = &adev->dm;
3215         struct drm_private_obj *obj;
3216         struct drm_private_state *new_obj_state;
3217         int i;
3218
3219         for_each_new_private_obj_in_state(state, obj, new_obj_state, i) {
3220                 if (obj->funcs == dm->atomic_obj.funcs)
3221                         return to_dm_atomic_state(new_obj_state);
3222         }
3223
3224         return NULL;
3225 }
3226
3227 static struct drm_private_state *
3228 dm_atomic_duplicate_state(struct drm_private_obj *obj)
3229 {
3230         struct dm_atomic_state *old_state, *new_state;
3231
3232         new_state = kzalloc(sizeof(*new_state), GFP_KERNEL);
3233         if (!new_state)
3234                 return NULL;
3235
3236         __drm_atomic_helper_private_obj_duplicate_state(obj, &new_state->base);
3237
3238         old_state = to_dm_atomic_state(obj->state);
3239
3240         if (old_state && old_state->context)
3241                 new_state->context = dc_copy_state(old_state->context);
3242
3243         if (!new_state->context) {
3244                 kfree(new_state);
3245                 return NULL;
3246         }
3247
3248         return &new_state->base;
3249 }
3250
3251 static void dm_atomic_destroy_state(struct drm_private_obj *obj,
3252                                     struct drm_private_state *state)
3253 {
3254         struct dm_atomic_state *dm_state = to_dm_atomic_state(state);
3255
3256         if (dm_state && dm_state->context)
3257                 dc_release_state(dm_state->context);
3258
3259         kfree(dm_state);
3260 }
3261
3262 static struct drm_private_state_funcs dm_atomic_state_funcs = {
3263         .atomic_duplicate_state = dm_atomic_duplicate_state,
3264         .atomic_destroy_state = dm_atomic_destroy_state,
3265 };
3266
3267 static int amdgpu_dm_mode_config_init(struct amdgpu_device *adev)
3268 {
3269         struct dm_atomic_state *state;
3270         int r;
3271
3272         adev->mode_info.mode_config_initialized = true;
3273
3274         adev_to_drm(adev)->mode_config.funcs = (void *)&amdgpu_dm_mode_funcs;
3275         adev_to_drm(adev)->mode_config.helper_private = &amdgpu_dm_mode_config_helperfuncs;
3276
3277         adev_to_drm(adev)->mode_config.max_width = 16384;
3278         adev_to_drm(adev)->mode_config.max_height = 16384;
3279
3280         adev_to_drm(adev)->mode_config.preferred_depth = 24;
3281         adev_to_drm(adev)->mode_config.prefer_shadow = 1;
3282         /* indicates support for immediate flip */
3283         adev_to_drm(adev)->mode_config.async_page_flip = true;
3284
3285         adev_to_drm(adev)->mode_config.fb_base = adev->gmc.aper_base;
3286
3287         state = kzalloc(sizeof(*state), GFP_KERNEL);
3288         if (!state)
3289                 return -ENOMEM;
3290
3291         state->context = dc_create_state(adev->dm.dc);
3292         if (!state->context) {
3293                 kfree(state);
3294                 return -ENOMEM;
3295         }
3296
3297         dc_resource_state_copy_construct_current(adev->dm.dc, state->context);
3298
3299         drm_atomic_private_obj_init(adev_to_drm(adev),
3300                                     &adev->dm.atomic_obj,
3301                                     &state->base,
3302                                     &dm_atomic_state_funcs);
3303
3304         r = amdgpu_display_modeset_create_props(adev);
3305         if (r) {
3306                 dc_release_state(state->context);
3307                 kfree(state);
3308                 return r;
3309         }
3310
3311         r = amdgpu_dm_audio_init(adev);
3312         if (r) {
3313                 dc_release_state(state->context);
3314                 kfree(state);
3315                 return r;
3316         }
3317
3318         return 0;
3319 }
3320
3321 #define AMDGPU_DM_DEFAULT_MIN_BACKLIGHT 12
3322 #define AMDGPU_DM_DEFAULT_MAX_BACKLIGHT 255
3323 #define AUX_BL_DEFAULT_TRANSITION_TIME_MS 50
3324
3325 #if defined(CONFIG_BACKLIGHT_CLASS_DEVICE) ||\
3326         defined(CONFIG_BACKLIGHT_CLASS_DEVICE_MODULE)
3327
/*
 * Populate dm->backlight_caps with the panel's min/max input-signal range.
 *
 * With ACPI, the range is queried via amdgpu_acpi_get_backlight_caps();
 * panels reporting AUX backlight support keep their existing range here.
 * Without ACPI, or when the ACPI query yields nothing valid, driver
 * defaults are used.  Safe to call repeatedly: once caps_valid is set the
 * ACPI path is a no-op.
 */
static void amdgpu_dm_update_backlight_caps(struct amdgpu_display_manager *dm)
{
#if defined(CONFIG_ACPI)
        struct amdgpu_dm_backlight_caps caps;

        memset(&caps, 0, sizeof(caps));

        /* Caps were already fetched on an earlier call - nothing to do. */
        if (dm->backlight_caps.caps_valid)
                return;

        amdgpu_acpi_get_backlight_caps(dm->adev, &caps);
        if (caps.caps_valid) {
                dm->backlight_caps.caps_valid = true;
                /* AUX panels don't take their range from ACPI; leave as-is. */
                if (caps.aux_support)
                        return;
                dm->backlight_caps.min_input_signal = caps.min_input_signal;
                dm->backlight_caps.max_input_signal = caps.max_input_signal;
        } else {
                /* No valid ACPI data - fall back to driver defaults. */
                dm->backlight_caps.min_input_signal =
                                AMDGPU_DM_DEFAULT_MIN_BACKLIGHT;
                dm->backlight_caps.max_input_signal =
                                AMDGPU_DM_DEFAULT_MAX_BACKLIGHT;
        }
#else
        if (dm->backlight_caps.aux_support)
                return;

        dm->backlight_caps.min_input_signal = AMDGPU_DM_DEFAULT_MIN_BACKLIGHT;
        dm->backlight_caps.max_input_signal = AMDGPU_DM_DEFAULT_MAX_BACKLIGHT;
#endif
}
3359
3360 static int get_brightness_range(const struct amdgpu_dm_backlight_caps *caps,
3361                                 unsigned *min, unsigned *max)
3362 {
3363         if (!caps)
3364                 return 0;
3365
3366         if (caps->aux_support) {
3367                 // Firmware limits are in nits, DC API wants millinits.
3368                 *max = 1000 * caps->aux_max_input_signal;
3369                 *min = 1000 * caps->aux_min_input_signal;
3370         } else {
3371                 // Firmware limits are 8-bit, PWM control is 16-bit.
3372                 *max = 0x101 * caps->max_input_signal;
3373                 *min = 0x101 * caps->min_input_signal;
3374         }
3375         return 1;
3376 }
3377
3378 static u32 convert_brightness_from_user(const struct amdgpu_dm_backlight_caps *caps,
3379                                         uint32_t brightness)
3380 {
3381         unsigned min, max;
3382
3383         if (!get_brightness_range(caps, &min, &max))
3384                 return brightness;
3385
3386         // Rescale 0..255 to min..max
3387         return min + DIV_ROUND_CLOSEST((max - min) * brightness,
3388                                        AMDGPU_MAX_BL_LEVEL);
3389 }
3390
3391 static u32 convert_brightness_to_user(const struct amdgpu_dm_backlight_caps *caps,
3392                                       uint32_t brightness)
3393 {
3394         unsigned min, max;
3395
3396         if (!get_brightness_range(caps, &min, &max))
3397                 return brightness;
3398
3399         if (brightness < min)
3400                 return 0;
3401         // Rescale min..max to 0..255
3402         return DIV_ROUND_CLOSEST(AMDGPU_MAX_BL_LEVEL * (brightness - min),
3403                                  max - min);
3404 }
3405
3406 static int amdgpu_dm_backlight_update_status(struct backlight_device *bd)
3407 {
3408         struct amdgpu_display_manager *dm = bl_get_data(bd);
3409         struct amdgpu_dm_backlight_caps caps;
3410         struct dc_link *link = NULL;
3411         u32 brightness;
3412         bool rc;
3413
3414         amdgpu_dm_update_backlight_caps(dm);
3415         caps = dm->backlight_caps;
3416
3417         link = (struct dc_link *)dm->backlight_link;
3418
3419         brightness = convert_brightness_from_user(&caps, bd->props.brightness);
3420         // Change brightness based on AUX property
3421         if (caps.aux_support)
3422                 rc = dc_link_set_backlight_level_nits(link, true, brightness,
3423                                                       AUX_BL_DEFAULT_TRANSITION_TIME_MS);
3424         else
3425                 rc = dc_link_set_backlight_level(dm->backlight_link, brightness, 0);
3426
3427         return rc ? 0 : 1;
3428 }
3429
3430 static int amdgpu_dm_backlight_get_brightness(struct backlight_device *bd)
3431 {
3432         struct amdgpu_display_manager *dm = bl_get_data(bd);
3433         struct amdgpu_dm_backlight_caps caps;
3434
3435         amdgpu_dm_update_backlight_caps(dm);
3436         caps = dm->backlight_caps;
3437
3438         if (caps.aux_support) {
3439                 struct dc_link *link = (struct dc_link *)dm->backlight_link;
3440                 u32 avg, peak;
3441                 bool rc;
3442
3443                 rc = dc_link_get_backlight_level_nits(link, &avg, &peak);
3444                 if (!rc)
3445                         return bd->props.brightness;
3446                 return convert_brightness_to_user(&caps, avg);
3447         } else {
3448                 int ret = dc_link_get_backlight_level(dm->backlight_link);
3449
3450                 if (ret == DC_ERROR_UNEXPECTED)
3451                         return bd->props.brightness;
3452                 return convert_brightness_to_user(&caps, ret);
3453         }
3454 }
3455
/* Hooks the backlight class core uses to read and write the panel level. */
static const struct backlight_ops amdgpu_dm_backlight_ops = {
        .options = BL_CORE_SUSPENDRESUME,
        .get_brightness = amdgpu_dm_backlight_get_brightness,
        .update_status  = amdgpu_dm_backlight_update_status,
};
3461
3462 static void
3463 amdgpu_dm_register_backlight_device(struct amdgpu_display_manager *dm)
3464 {
3465         char bl_name[16];
3466         struct backlight_properties props = { 0 };
3467
3468         amdgpu_dm_update_backlight_caps(dm);
3469
3470         props.max_brightness = AMDGPU_MAX_BL_LEVEL;
3471         props.brightness = AMDGPU_MAX_BL_LEVEL;
3472         props.type = BACKLIGHT_RAW;
3473
3474         snprintf(bl_name, sizeof(bl_name), "amdgpu_bl%d",
3475                  adev_to_drm(dm->adev)->primary->index);
3476
3477         dm->backlight_dev = backlight_device_register(bl_name,
3478                                                       adev_to_drm(dm->adev)->dev,
3479                                                       dm,
3480                                                       &amdgpu_dm_backlight_ops,
3481                                                       &props);
3482
3483         if (IS_ERR(dm->backlight_dev))
3484                 DRM_ERROR("DM: Backlight registration failed!\n");
3485         else
3486                 DRM_DEBUG_DRIVER("DM: Registered Backlight device: %s\n", bl_name);
3487 }
3488
3489 #endif
3490
3491 static int initialize_plane(struct amdgpu_display_manager *dm,
3492                             struct amdgpu_mode_info *mode_info, int plane_id,
3493                             enum drm_plane_type plane_type,
3494                             const struct dc_plane_cap *plane_cap)
3495 {
3496         struct drm_plane *plane;
3497         unsigned long possible_crtcs;
3498         int ret = 0;
3499
3500         plane = kzalloc(sizeof(struct drm_plane), GFP_KERNEL);
3501         if (!plane) {
3502                 DRM_ERROR("KMS: Failed to allocate plane\n");
3503                 return -ENOMEM;
3504         }
3505         plane->type = plane_type;
3506
3507         /*
3508          * HACK: IGT tests expect that the primary plane for a CRTC
3509          * can only have one possible CRTC. Only expose support for
3510          * any CRTC if they're not going to be used as a primary plane
3511          * for a CRTC - like overlay or underlay planes.
3512          */
3513         possible_crtcs = 1 << plane_id;
3514         if (plane_id >= dm->dc->caps.max_streams)
3515                 possible_crtcs = 0xff;
3516
3517         ret = amdgpu_dm_plane_init(dm, plane, possible_crtcs, plane_cap);
3518
3519         if (ret) {
3520                 DRM_ERROR("KMS: Failed to initialize plane\n");
3521                 kfree(plane);
3522                 return ret;
3523         }
3524
3525         if (mode_info)
3526                 mode_info->planes[plane_id] = plane;
3527
3528         return ret;
3529 }
3530
3531
/*
 * Bind the DM backlight device to @link when the link drives a connected
 * eDP or LVDS panel.  Compiles to a no-op when backlight class support is
 * not enabled.
 */
static void register_backlight_device(struct amdgpu_display_manager *dm,
                                      struct dc_link *link)
{
#if defined(CONFIG_BACKLIGHT_CLASS_DEVICE) ||\
        defined(CONFIG_BACKLIGHT_CLASS_DEVICE_MODULE)

        if ((link->connector_signal & (SIGNAL_TYPE_EDP | SIGNAL_TYPE_LVDS)) &&
            link->type != dc_connection_none) {
                /*
                 * Even if registration failed, we should continue with
                 * DM initialization because not having a backlight control
                 * is better than a black screen.
                 */
                amdgpu_dm_register_backlight_device(dm);

                if (dm->backlight_dev)
                        dm->backlight_link = link;
        }
#endif
}
3552
3553
3554 /*
3555  * In this architecture, the association
3556  * connector -> encoder -> crtc
3557  * id not really requried. The crtc and connector will hold the
3558  * display_index as an abstraction to use with DAL component
3559  *
3560  * Returns 0 on success
3561  */
3562 static int amdgpu_dm_initialize_drm_device(struct amdgpu_device *adev)
3563 {
3564         struct amdgpu_display_manager *dm = &adev->dm;
3565         int32_t i;
3566         struct amdgpu_dm_connector *aconnector = NULL;
3567         struct amdgpu_encoder *aencoder = NULL;
3568         struct amdgpu_mode_info *mode_info = &adev->mode_info;
3569         uint32_t link_cnt;
3570         int32_t primary_planes;
3571         enum dc_connection_type new_connection_type = dc_connection_none;
3572         const struct dc_plane_cap *plane;
3573
3574         dm->display_indexes_num = dm->dc->caps.max_streams;
3575         /* Update the actual used number of crtc */
3576         adev->mode_info.num_crtc = adev->dm.display_indexes_num;
3577
3578         link_cnt = dm->dc->caps.max_links;
3579         if (amdgpu_dm_mode_config_init(dm->adev)) {
3580                 DRM_ERROR("DM: Failed to initialize mode config\n");
3581                 return -EINVAL;
3582         }
3583
3584         /* There is one primary plane per CRTC */
3585         primary_planes = dm->dc->caps.max_streams;
3586         ASSERT(primary_planes <= AMDGPU_MAX_PLANES);
3587
3588         /*
3589          * Initialize primary planes, implicit planes for legacy IOCTLS.
3590          * Order is reversed to match iteration order in atomic check.
3591          */
3592         for (i = (primary_planes - 1); i >= 0; i--) {
3593                 plane = &dm->dc->caps.planes[i];
3594
3595                 if (initialize_plane(dm, mode_info, i,
3596                                      DRM_PLANE_TYPE_PRIMARY, plane)) {
3597                         DRM_ERROR("KMS: Failed to initialize primary plane\n");
3598                         goto fail;
3599                 }
3600         }
3601
3602         /*
3603          * Initialize overlay planes, index starting after primary planes.
3604          * These planes have a higher DRM index than the primary planes since
3605          * they should be considered as having a higher z-order.
3606          * Order is reversed to match iteration order in atomic check.
3607          *
3608          * Only support DCN for now, and only expose one so we don't encourage
3609          * userspace to use up all the pipes.
3610          */
3611         for (i = 0; i < dm->dc->caps.max_planes; ++i) {
3612                 struct dc_plane_cap *plane = &dm->dc->caps.planes[i];
3613
3614                 if (plane->type != DC_PLANE_TYPE_DCN_UNIVERSAL)
3615                         continue;
3616
3617                 if (!plane->blends_with_above || !plane->blends_with_below)
3618                         continue;
3619
3620                 if (!plane->pixel_format_support.argb8888)
3621                         continue;
3622
3623                 if (initialize_plane(dm, NULL, primary_planes + i,
3624                                      DRM_PLANE_TYPE_OVERLAY, plane)) {
3625                         DRM_ERROR("KMS: Failed to initialize overlay plane\n");
3626                         goto fail;
3627                 }
3628
3629                 /* Only create one overlay plane. */
3630                 break;
3631         }
3632
3633         for (i = 0; i < dm->dc->caps.max_streams; i++)
3634                 if (amdgpu_dm_crtc_init(dm, mode_info->planes[i], i)) {
3635                         DRM_ERROR("KMS: Failed to initialize crtc\n");
3636                         goto fail;
3637                 }
3638
3639         /* loops over all connectors on the board */
3640         for (i = 0; i < link_cnt; i++) {
3641                 struct dc_link *link = NULL;
3642
3643                 if (i > AMDGPU_DM_MAX_DISPLAY_INDEX) {
3644                         DRM_ERROR(
3645                                 "KMS: Cannot support more than %d display indexes\n",
3646                                         AMDGPU_DM_MAX_DISPLAY_INDEX);
3647                         continue;
3648                 }
3649
3650                 aconnector = kzalloc(sizeof(*aconnector), GFP_KERNEL);
3651                 if (!aconnector)
3652                         goto fail;
3653
3654                 aencoder = kzalloc(sizeof(*aencoder), GFP_KERNEL);
3655                 if (!aencoder)
3656                         goto fail;
3657
3658                 if (amdgpu_dm_encoder_init(dm->ddev, aencoder, i)) {
3659                         DRM_ERROR("KMS: Failed to initialize encoder\n");
3660                         goto fail;
3661                 }
3662
3663                 if (amdgpu_dm_connector_init(dm, aconnector, i, aencoder)) {
3664                         DRM_ERROR("KMS: Failed to initialize connector\n");
3665                         goto fail;
3666                 }
3667
3668                 link = dc_get_link_at_index(dm->dc, i);
3669
3670                 if (!dc_link_detect_sink(link, &new_connection_type))
3671                         DRM_ERROR("KMS: Failed to detect connector\n");
3672
3673                 if (aconnector->base.force && new_connection_type == dc_connection_none) {
3674                         emulated_link_detect(link);
3675                         amdgpu_dm_update_connector_after_detect(aconnector);
3676
3677                 } else if (dc_link_detect(link, DETECT_REASON_BOOT)) {
3678                         amdgpu_dm_update_connector_after_detect(aconnector);
3679                         register_backlight_device(dm, link);
3680                         if (amdgpu_dc_feature_mask & DC_PSR_MASK)
3681                                 amdgpu_dm_set_psr_caps(link);
3682                 }
3683
3684
3685         }
3686
3687         /* Software is initialized. Now we can register interrupt handlers. */
3688         switch (adev->asic_type) {
3689 #if defined(CONFIG_DRM_AMD_DC_SI)
3690         case CHIP_TAHITI:
3691         case CHIP_PITCAIRN:
3692         case CHIP_VERDE:
3693         case CHIP_OLAND:
3694                 if (dce60_register_irq_handlers(dm->adev)) {
3695                         DRM_ERROR("DM: Failed to initialize IRQ\n");
3696                         goto fail;
3697                 }
3698                 break;
3699 #endif
3700         case CHIP_BONAIRE:
3701         case CHIP_HAWAII:
3702         case CHIP_KAVERI:
3703         case CHIP_KABINI:
3704         case CHIP_MULLINS:
3705         case CHIP_TONGA:
3706         case CHIP_FIJI:
3707         case CHIP_CARRIZO:
3708         case CHIP_STONEY:
3709         case CHIP_POLARIS11:
3710         case CHIP_POLARIS10:
3711         case CHIP_POLARIS12:
3712         case CHIP_VEGAM:
3713         case CHIP_VEGA10:
3714         case CHIP_VEGA12:
3715         case CHIP_VEGA20:
3716                 if (dce110_register_irq_handlers(dm->adev)) {
3717                         DRM_ERROR("DM: Failed to initialize IRQ\n");
3718                         goto fail;
3719                 }
3720                 break;
3721 #if defined(CONFIG_DRM_AMD_DC_DCN)
3722         case CHIP_RAVEN:
3723         case CHIP_NAVI12:
3724         case CHIP_NAVI10:
3725         case CHIP_NAVI14:
3726         case CHIP_RENOIR:
3727         case CHIP_SIENNA_CICHLID:
3728         case CHIP_NAVY_FLOUNDER:
3729         case CHIP_DIMGREY_CAVEFISH:
3730         case CHIP_VANGOGH:
3731                 if (dcn10_register_irq_handlers(dm->adev)) {
3732                         DRM_ERROR("DM: Failed to initialize IRQ\n");
3733                         goto fail;
3734                 }
3735                 break;
3736 #endif
3737         default:
3738                 DRM_ERROR("Unsupported ASIC type: 0x%X\n", adev->asic_type);
3739                 goto fail;
3740         }
3741
3742         return 0;
3743 fail:
3744         kfree(aencoder);
3745         kfree(aconnector);
3746
3747         return -EINVAL;
3748 }
3749
3750 static void amdgpu_dm_destroy_drm_device(struct amdgpu_display_manager *dm)
3751 {
3752         drm_mode_config_cleanup(dm->ddev);
3753         drm_atomic_private_obj_fini(&dm->atomic_obj);
3754         return;
3755 }
3756
3757 /******************************************************************************
3758  * amdgpu_display_funcs functions
3759  *****************************************************************************/
3760
3761 /*
3762  * dm_bandwidth_update - program display watermarks
3763  *
3764  * @adev: amdgpu_device pointer
3765  *
3766  * Calculate and program the display watermarks and line buffer allocation.
3767  */
3768 static void dm_bandwidth_update(struct amdgpu_device *adev)
3769 {
3770         /* TODO: implement later */
3771 }
3772
/*
 * amdgpu display callbacks for the DC path.  Entries left NULL are either
 * never invoked with DC or are handled inside DAL/DC itself.
 */
static const struct amdgpu_display_funcs dm_display_funcs = {
        .bandwidth_update = dm_bandwidth_update, /* called unconditionally */
        .vblank_get_counter = dm_vblank_get_counter,/* called unconditionally */
        .backlight_set_level = NULL, /* never called for DC */
        .backlight_get_level = NULL, /* never called for DC */
        .hpd_sense = NULL,/* called unconditionally */
        .hpd_set_polarity = NULL, /* called unconditionally */
        .hpd_get_gpio_reg = NULL, /* VBIOS parsing. DAL does it. */
        .page_flip_get_scanoutpos =
                dm_crtc_get_scanoutpos,/* called unconditionally */
        .add_encoder = NULL, /* VBIOS parsing. DAL does it. */
        .add_connector = NULL, /* VBIOS parsing. DAL does it. */
};
3786
3787 #if defined(CONFIG_DEBUG_KERNEL_DC)
3788
3789 static ssize_t s3_debug_store(struct device *device,
3790                               struct device_attribute *attr,
3791                               const char *buf,
3792                               size_t count)
3793 {
3794         int ret;
3795         int s3_state;
3796         struct drm_device *drm_dev = dev_get_drvdata(device);
3797         struct amdgpu_device *adev = drm_to_adev(drm_dev);
3798
3799         ret = kstrtoint(buf, 0, &s3_state);
3800
3801         if (ret == 0) {
3802                 if (s3_state) {
3803                         dm_resume(adev);
3804                         drm_kms_helper_hotplug_event(adev_to_drm(adev));
3805                 } else
3806                         dm_suspend(adev);
3807         }
3808
3809         return ret == 0 ? count : 0;
3810 }
3811
3812 DEVICE_ATTR_WO(s3_debug);
3813
3814 #endif
3815
/*
 * dm_early_init - set per-ASIC display limits before DM initialization
 *
 * Fills in adev->mode_info.num_crtc / num_hpd / num_dig for the detected
 * ASIC, installs the DM IRQ funcs and the default display funcs table,
 * and (in debug builds) creates the s3_debug sysfs file.
 *
 * Returns 0 on success, -EINVAL for an unsupported ASIC.
 */
static int dm_early_init(void *handle)
{
        struct amdgpu_device *adev = (struct amdgpu_device *)handle;

        switch (adev->asic_type) {
#if defined(CONFIG_DRM_AMD_DC_SI)
        case CHIP_TAHITI:
        case CHIP_PITCAIRN:
        case CHIP_VERDE:
                adev->mode_info.num_crtc = 6;
                adev->mode_info.num_hpd = 6;
                adev->mode_info.num_dig = 6;
                break;
        case CHIP_OLAND:
                adev->mode_info.num_crtc = 2;
                adev->mode_info.num_hpd = 2;
                adev->mode_info.num_dig = 2;
                break;
#endif
        case CHIP_BONAIRE:
        case CHIP_HAWAII:
                adev->mode_info.num_crtc = 6;
                adev->mode_info.num_hpd = 6;
                adev->mode_info.num_dig = 6;
                break;
        case CHIP_KAVERI:
                adev->mode_info.num_crtc = 4;
                adev->mode_info.num_hpd = 6;
                adev->mode_info.num_dig = 7;
                break;
        case CHIP_KABINI:
        case CHIP_MULLINS:
                adev->mode_info.num_crtc = 2;
                adev->mode_info.num_hpd = 6;
                adev->mode_info.num_dig = 6;
                break;
        case CHIP_FIJI:
        case CHIP_TONGA:
                adev->mode_info.num_crtc = 6;
                adev->mode_info.num_hpd = 6;
                adev->mode_info.num_dig = 7;
                break;
        case CHIP_CARRIZO:
                adev->mode_info.num_crtc = 3;
                adev->mode_info.num_hpd = 6;
                adev->mode_info.num_dig = 9;
                break;
        case CHIP_STONEY:
                adev->mode_info.num_crtc = 2;
                adev->mode_info.num_hpd = 6;
                adev->mode_info.num_dig = 9;
                break;
        case CHIP_POLARIS11:
        case CHIP_POLARIS12:
                adev->mode_info.num_crtc = 5;
                adev->mode_info.num_hpd = 5;
                adev->mode_info.num_dig = 5;
                break;
        case CHIP_POLARIS10:
        case CHIP_VEGAM:
                adev->mode_info.num_crtc = 6;
                adev->mode_info.num_hpd = 6;
                adev->mode_info.num_dig = 6;
                break;
        case CHIP_VEGA10:
        case CHIP_VEGA12:
        case CHIP_VEGA20:
                adev->mode_info.num_crtc = 6;
                adev->mode_info.num_hpd = 6;
                adev->mode_info.num_dig = 6;
                break;
#if defined(CONFIG_DRM_AMD_DC_DCN)
        case CHIP_RAVEN:
        case CHIP_RENOIR:
        case CHIP_VANGOGH:
                adev->mode_info.num_crtc = 4;
                adev->mode_info.num_hpd = 4;
                adev->mode_info.num_dig = 4;
                break;
        case CHIP_NAVI10:
        case CHIP_NAVI12:
        case CHIP_SIENNA_CICHLID:
        case CHIP_NAVY_FLOUNDER:
                adev->mode_info.num_crtc = 6;
                adev->mode_info.num_hpd = 6;
                adev->mode_info.num_dig = 6;
                break;
        case CHIP_NAVI14:
        case CHIP_DIMGREY_CAVEFISH:
                adev->mode_info.num_crtc = 5;
                adev->mode_info.num_hpd = 5;
                adev->mode_info.num_dig = 5;
                break;
#endif
        default:
                DRM_ERROR("Unsupported ASIC type: 0x%X\n", adev->asic_type);
                return -EINVAL;
        }

        amdgpu_dm_set_irq_funcs(adev);

        if (adev->mode_info.funcs == NULL)
                adev->mode_info.funcs = &dm_display_funcs;

        /*
         * Note: Do NOT change adev->audio_endpt_rreg and
         * adev->audio_endpt_wreg because they are initialised in
         * amdgpu_device_init()
         */
#if defined(CONFIG_DEBUG_KERNEL_DC)
        device_create_file(
                adev_to_drm(adev)->dev,
                &dev_attr_s3_debug);
#endif

        return 0;
}
3933
3934 static bool modeset_required(struct drm_crtc_state *crtc_state,
3935                              struct dc_stream_state *new_stream,
3936                              struct dc_stream_state *old_stream)
3937 {
3938         return crtc_state->active && drm_atomic_crtc_needs_modeset(crtc_state);
3939 }
3940
3941 static bool modereset_required(struct drm_crtc_state *crtc_state)
3942 {
3943         return !crtc_state->active && drm_atomic_crtc_needs_modeset(crtc_state);
3944 }
3945
/*
 * DRM encoder ->destroy hook: release core encoder state, then free the
 * encoder allocation itself. Order matters: drm_encoder_cleanup() must run
 * before the memory backing the encoder is freed.
 */
static void amdgpu_dm_encoder_destroy(struct drm_encoder *encoder)
{
	drm_encoder_cleanup(encoder);
	kfree(encoder);
}
3951
/* Encoders here are dynamically allocated, so ->destroy also frees them. */
static const struct drm_encoder_funcs amdgpu_dm_encoder_funcs = {
	.destroy = amdgpu_dm_encoder_destroy,
};
3955
3956
3957 static void get_min_max_dc_plane_scaling(struct drm_device *dev,
3958                                          struct drm_framebuffer *fb,
3959                                          int *min_downscale, int *max_upscale)
3960 {
3961         struct amdgpu_device *adev = drm_to_adev(dev);
3962         struct dc *dc = adev->dm.dc;
3963         /* Caps for all supported planes are the same on DCE and DCN 1 - 3 */
3964         struct dc_plane_cap *plane_cap = &dc->caps.planes[0];
3965
3966         switch (fb->format->format) {
3967         case DRM_FORMAT_P010:
3968         case DRM_FORMAT_NV12:
3969         case DRM_FORMAT_NV21:
3970                 *max_upscale = plane_cap->max_upscale_factor.nv12;
3971                 *min_downscale = plane_cap->max_downscale_factor.nv12;
3972                 break;
3973
3974         case DRM_FORMAT_XRGB16161616F:
3975         case DRM_FORMAT_ARGB16161616F:
3976         case DRM_FORMAT_XBGR16161616F:
3977         case DRM_FORMAT_ABGR16161616F:
3978                 *max_upscale = plane_cap->max_upscale_factor.fp16;
3979                 *min_downscale = plane_cap->max_downscale_factor.fp16;
3980                 break;
3981
3982         default:
3983                 *max_upscale = plane_cap->max_upscale_factor.argb8888;
3984                 *min_downscale = plane_cap->max_downscale_factor.argb8888;
3985                 break;
3986         }
3987
3988         /*
3989          * A factor of 1 in the plane_cap means to not allow scaling, ie. use a
3990          * scaling factor of 1.0 == 1000 units.
3991          */
3992         if (*max_upscale == 1)
3993                 *max_upscale = 1000;
3994
3995         if (*min_downscale == 1)
3996                 *min_downscale = 1000;
3997 }
3998
3999
4000 static int fill_dc_scaling_info(const struct drm_plane_state *state,
4001                                 struct dc_scaling_info *scaling_info)
4002 {
4003         int scale_w, scale_h, min_downscale, max_upscale;
4004
4005         memset(scaling_info, 0, sizeof(*scaling_info));
4006
4007         /* Source is fixed 16.16 but we ignore mantissa for now... */
4008         scaling_info->src_rect.x = state->src_x >> 16;
4009         scaling_info->src_rect.y = state->src_y >> 16;
4010
4011         scaling_info->src_rect.width = state->src_w >> 16;
4012         if (scaling_info->src_rect.width == 0)
4013                 return -EINVAL;
4014
4015         scaling_info->src_rect.height = state->src_h >> 16;
4016         if (scaling_info->src_rect.height == 0)
4017                 return -EINVAL;
4018
4019         scaling_info->dst_rect.x = state->crtc_x;
4020         scaling_info->dst_rect.y = state->crtc_y;
4021
4022         if (state->crtc_w == 0)
4023                 return -EINVAL;
4024
4025         scaling_info->dst_rect.width = state->crtc_w;
4026
4027         if (state->crtc_h == 0)
4028                 return -EINVAL;
4029
4030         scaling_info->dst_rect.height = state->crtc_h;
4031
4032         /* DRM doesn't specify clipping on destination output. */
4033         scaling_info->clip_rect = scaling_info->dst_rect;
4034
4035         /* Validate scaling per-format with DC plane caps */
4036         if (state->plane && state->plane->dev && state->fb) {
4037                 get_min_max_dc_plane_scaling(state->plane->dev, state->fb,
4038                                              &min_downscale, &max_upscale);
4039         } else {
4040                 min_downscale = 250;
4041                 max_upscale = 16000;
4042         }
4043
4044         scale_w = scaling_info->dst_rect.width * 1000 /
4045                   scaling_info->src_rect.width;
4046
4047         if (scale_w < min_downscale || scale_w > max_upscale)
4048                 return -EINVAL;
4049
4050         scale_h = scaling_info->dst_rect.height * 1000 /
4051                   scaling_info->src_rect.height;
4052
4053         if (scale_h < min_downscale || scale_h > max_upscale)
4054                 return -EINVAL;
4055
4056         /*
4057          * The "scaling_quality" can be ignored for now, quality = 0 has DC
4058          * assume reasonable defaults based on the format.
4059          */
4060
4061         return 0;
4062 }
4063
4064 static void
4065 fill_gfx8_tiling_info_from_flags(union dc_tiling_info *tiling_info,
4066                                  uint64_t tiling_flags)
4067 {
4068         /* Fill GFX8 params */
4069         if (AMDGPU_TILING_GET(tiling_flags, ARRAY_MODE) == DC_ARRAY_2D_TILED_THIN1) {
4070                 unsigned int bankw, bankh, mtaspect, tile_split, num_banks;
4071
4072                 bankw = AMDGPU_TILING_GET(tiling_flags, BANK_WIDTH);
4073                 bankh = AMDGPU_TILING_GET(tiling_flags, BANK_HEIGHT);
4074                 mtaspect = AMDGPU_TILING_GET(tiling_flags, MACRO_TILE_ASPECT);
4075                 tile_split = AMDGPU_TILING_GET(tiling_flags, TILE_SPLIT);
4076                 num_banks = AMDGPU_TILING_GET(tiling_flags, NUM_BANKS);
4077
4078                 /* XXX fix me for VI */
4079                 tiling_info->gfx8.num_banks = num_banks;
4080                 tiling_info->gfx8.array_mode =
4081                                 DC_ARRAY_2D_TILED_THIN1;
4082                 tiling_info->gfx8.tile_split = tile_split;
4083                 tiling_info->gfx8.bank_width = bankw;
4084                 tiling_info->gfx8.bank_height = bankh;
4085                 tiling_info->gfx8.tile_aspect = mtaspect;
4086                 tiling_info->gfx8.tile_mode =
4087                                 DC_ADDR_SURF_MICRO_TILING_DISPLAY;
4088         } else if (AMDGPU_TILING_GET(tiling_flags, ARRAY_MODE)
4089                         == DC_ARRAY_1D_TILED_THIN1) {
4090                 tiling_info->gfx8.array_mode = DC_ARRAY_1D_TILED_THIN1;
4091         }
4092
4093         tiling_info->gfx8.pipe_config =
4094                         AMDGPU_TILING_GET(tiling_flags, PIPE_CONFIG);
4095 }
4096
4097 static void
4098 fill_gfx9_tiling_info_from_device(const struct amdgpu_device *adev,
4099                                   union dc_tiling_info *tiling_info)
4100 {
4101         tiling_info->gfx9.num_pipes =
4102                 adev->gfx.config.gb_addr_config_fields.num_pipes;
4103         tiling_info->gfx9.num_banks =
4104                 adev->gfx.config.gb_addr_config_fields.num_banks;
4105         tiling_info->gfx9.pipe_interleave =
4106                 adev->gfx.config.gb_addr_config_fields.pipe_interleave_size;
4107         tiling_info->gfx9.num_shader_engines =
4108                 adev->gfx.config.gb_addr_config_fields.num_se;
4109         tiling_info->gfx9.max_compressed_frags =
4110                 adev->gfx.config.gb_addr_config_fields.max_compress_frags;
4111         tiling_info->gfx9.num_rb_per_se =
4112                 adev->gfx.config.gb_addr_config_fields.num_rb_per_se;
4113         tiling_info->gfx9.shaderEnable = 1;
4114         if (adev->asic_type == CHIP_SIENNA_CICHLID ||
4115             adev->asic_type == CHIP_NAVY_FLOUNDER ||
4116             adev->asic_type == CHIP_DIMGREY_CAVEFISH ||
4117             adev->asic_type == CHIP_VANGOGH)
4118                 tiling_info->gfx9.num_pkrs = adev->gfx.config.gb_addr_config_fields.num_pkrs;
4119 }
4120
4121 static int
4122 validate_dcc(struct amdgpu_device *adev,
4123              const enum surface_pixel_format format,
4124              const enum dc_rotation_angle rotation,
4125              const union dc_tiling_info *tiling_info,
4126              const struct dc_plane_dcc_param *dcc,
4127              const struct dc_plane_address *address,
4128              const struct plane_size *plane_size)
4129 {
4130         struct dc *dc = adev->dm.dc;
4131         struct dc_dcc_surface_param input;
4132         struct dc_surface_dcc_cap output;
4133
4134         memset(&input, 0, sizeof(input));
4135         memset(&output, 0, sizeof(output));
4136
4137         if (!dcc->enable)
4138                 return 0;
4139
4140         if (format >= SURFACE_PIXEL_FORMAT_VIDEO_BEGIN ||
4141             !dc->cap_funcs.get_dcc_compression_cap)
4142                 return -EINVAL;
4143
4144         input.format = format;
4145         input.surface_size.width = plane_size->surface_size.width;
4146         input.surface_size.height = plane_size->surface_size.height;
4147         input.swizzle_mode = tiling_info->gfx9.swizzle;
4148
4149         if (rotation == ROTATION_ANGLE_0 || rotation == ROTATION_ANGLE_180)
4150                 input.scan = SCAN_DIRECTION_HORIZONTAL;
4151         else if (rotation == ROTATION_ANGLE_90 || rotation == ROTATION_ANGLE_270)
4152                 input.scan = SCAN_DIRECTION_VERTICAL;
4153
4154         if (!dc->cap_funcs.get_dcc_compression_cap(dc, &input, &output))
4155                 return -EINVAL;
4156
4157         if (!output.capable)
4158                 return -EINVAL;
4159
4160         if (dcc->independent_64b_blks == 0 &&
4161             output.grph.rgb.independent_64b_blks != 0)
4162                 return -EINVAL;
4163
4164         return 0;
4165 }
4166
4167 static bool
4168 modifier_has_dcc(uint64_t modifier)
4169 {
4170         return IS_AMD_FMT_MOD(modifier) && AMD_FMT_MOD_GET(DCC, modifier);
4171 }
4172
4173 static unsigned
4174 modifier_gfx9_swizzle_mode(uint64_t modifier)
4175 {
4176         if (modifier == DRM_FORMAT_MOD_LINEAR)
4177                 return 0;
4178
4179         return AMD_FMT_MOD_GET(TILE, modifier);
4180 }
4181
/*
 * Resolve the format description for a framebuffer creation request,
 * taking the (possibly AMD-specific) modifier into account.
 */
static const struct drm_format_info *
amd_get_format_info(const struct drm_mode_fb_cmd2 *cmd)
{
	return amdgpu_lookup_format_info(cmd->pixel_format, cmd->modifier[0]);
}
4187
4188 static void
4189 fill_gfx9_tiling_info_from_modifier(const struct amdgpu_device *adev,
4190                                     union dc_tiling_info *tiling_info,
4191                                     uint64_t modifier)
4192 {
4193         unsigned int mod_bank_xor_bits = AMD_FMT_MOD_GET(BANK_XOR_BITS, modifier);
4194         unsigned int mod_pipe_xor_bits = AMD_FMT_MOD_GET(PIPE_XOR_BITS, modifier);
4195         unsigned int pkrs_log2 = AMD_FMT_MOD_GET(PACKERS, modifier);
4196         unsigned int pipes_log2 = min(4u, mod_pipe_xor_bits);
4197
4198         fill_gfx9_tiling_info_from_device(adev, tiling_info);
4199
4200         if (!IS_AMD_FMT_MOD(modifier))
4201                 return;
4202
4203         tiling_info->gfx9.num_pipes = 1u << pipes_log2;
4204         tiling_info->gfx9.num_shader_engines = 1u << (mod_pipe_xor_bits - pipes_log2);
4205
4206         if (adev->family >= AMDGPU_FAMILY_NV) {
4207                 tiling_info->gfx9.num_pkrs = 1u << pkrs_log2;
4208         } else {
4209                 tiling_info->gfx9.num_banks = 1u << mod_bank_xor_bits;
4210
4211                 /* for DCC we know it isn't rb aligned, so rb_per_se doesn't matter. */
4212         }
4213 }
4214
/*
 * Micro-tile flavor encoded in the low two bits of a GFX9+ swizzle mode
 * (see dm_plane_format_mod_supported(), which masks the swizzle with 3).
 */
enum dm_micro_swizzle {
	MICRO_SWIZZLE_Z = 0,
	MICRO_SWIZZLE_S = 1,
	MICRO_SWIZZLE_D = 2,
	MICRO_SWIZZLE_R = 3
};
4221
4222 static bool dm_plane_format_mod_supported(struct drm_plane *plane,
4223                                           uint32_t format,
4224                                           uint64_t modifier)
4225 {
4226         struct amdgpu_device *adev = drm_to_adev(plane->dev);
4227         const struct drm_format_info *info = drm_format_info(format);
4228
4229         enum dm_micro_swizzle microtile = modifier_gfx9_swizzle_mode(modifier) & 3;
4230
4231         if (!info)
4232                 return false;
4233
4234         /*
4235          * We always have to allow this modifier, because core DRM still
4236          * checks LINEAR support if userspace does not provide modifers.
4237          */
4238         if (modifier == DRM_FORMAT_MOD_LINEAR)
4239                 return true;
4240
4241         /*
4242          * The arbitrary tiling support for multiplane formats has not been hooked
4243          * up.
4244          */
4245         if (info->num_planes > 1)
4246                 return false;
4247
4248         /*
4249          * For D swizzle the canonical modifier depends on the bpp, so check
4250          * it here.
4251          */
4252         if (AMD_FMT_MOD_GET(TILE_VERSION, modifier) == AMD_FMT_MOD_TILE_VER_GFX9 &&
4253             adev->family >= AMDGPU_FAMILY_NV) {
4254                 if (microtile == MICRO_SWIZZLE_D && info->cpp[0] == 4)
4255                         return false;
4256         }
4257
4258         if (adev->family >= AMDGPU_FAMILY_RV && microtile == MICRO_SWIZZLE_D &&
4259             info->cpp[0] < 8)
4260                 return false;
4261
4262         if (modifier_has_dcc(modifier)) {
4263                 /* Per radeonsi comments 16/64 bpp are more complicated. */
4264                 if (info->cpp[0] != 4)
4265                         return false;
4266         }
4267
4268         return true;
4269 }
4270
4271 static void
4272 add_modifier(uint64_t **mods, uint64_t *size, uint64_t *cap, uint64_t mod)
4273 {
4274         if (!*mods)
4275                 return;
4276
4277         if (*cap - *size < 1) {
4278                 uint64_t new_cap = *cap * 2;
4279                 uint64_t *new_mods = kmalloc(new_cap * sizeof(uint64_t), GFP_KERNEL);
4280
4281                 if (!new_mods) {
4282                         kfree(*mods);
4283                         *mods = NULL;
4284                         return;
4285                 }
4286
4287                 memcpy(new_mods, *mods, sizeof(uint64_t) * *size);
4288                 kfree(*mods);
4289                 *mods = new_mods;
4290                 *cap = new_cap;
4291         }
4292
4293         (*mods)[*size] = mod;
4294         *size += 1;
4295 }
4296
/*
 * Populate the supported-modifier list for GFX9 (Vega/Raven) parts.
 * DCC variants are only advertised for the Raven family; plain tiled
 * modifiers are advertised for everyone. The XOR-bit counts are derived
 * from the device's pipe/SE/bank configuration, capped at the 8 bits
 * the modifier encoding can carry.
 */
static void
add_gfx9_modifiers(const struct amdgpu_device *adev,
		   uint64_t **mods, uint64_t *size, uint64_t *capacity)
{
	int pipes = ilog2(adev->gfx.config.gb_addr_config_fields.num_pipes);
	int pipe_xor_bits = min(8, pipes +
				ilog2(adev->gfx.config.gb_addr_config_fields.num_se));
	int bank_xor_bits = min(8 - pipe_xor_bits,
				ilog2(adev->gfx.config.gb_addr_config_fields.num_banks));
	int rb = ilog2(adev->gfx.config.gb_addr_config_fields.num_se) +
		 ilog2(adev->gfx.config.gb_addr_config_fields.num_rb_per_se);


	if (adev->family == AMDGPU_FAMILY_RV) {
		/* Raven2 and later */
		bool has_constant_encode = adev->asic_type > CHIP_RAVEN || adev->external_rev_id >= 0x81;

		/*
		 * No _D DCC swizzles yet because we only allow 32bpp, which
		 * doesn't support _D on DCN
		 */

		/* 64K_S_X with DCC, constant-encode variant first. */
		if (has_constant_encode) {
			add_modifier(mods, size, capacity, AMD_FMT_MOD |
				    AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S_X) |
				    AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9) |
				    AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
				    AMD_FMT_MOD_SET(BANK_XOR_BITS, bank_xor_bits) |
				    AMD_FMT_MOD_SET(DCC, 1) |
				    AMD_FMT_MOD_SET(DCC_INDEPENDENT_64B, 1) |
				    AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_64B) |
				    AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 1));
		}

		/* Same DCC layout without constant encoding. */
		add_modifier(mods, size, capacity, AMD_FMT_MOD |
			    AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S_X) |
			    AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9) |
			    AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
			    AMD_FMT_MOD_SET(BANK_XOR_BITS, bank_xor_bits) |
			    AMD_FMT_MOD_SET(DCC, 1) |
			    AMD_FMT_MOD_SET(DCC_INDEPENDENT_64B, 1) |
			    AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_64B) |
			    AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 0));

		/*
		 * Retiled DCC variants additionally carry the RB and PIPE
		 * counts the DCC plane layout depends on.
		 */
		if (has_constant_encode) {
			add_modifier(mods, size, capacity, AMD_FMT_MOD |
				    AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S_X) |
				    AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9) |
				    AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
				    AMD_FMT_MOD_SET(BANK_XOR_BITS, bank_xor_bits) |
				    AMD_FMT_MOD_SET(DCC, 1) |
				    AMD_FMT_MOD_SET(DCC_RETILE, 1) |
				    AMD_FMT_MOD_SET(DCC_INDEPENDENT_64B, 1) |
				    AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_64B) |

				    AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 1) |
				    AMD_FMT_MOD_SET(RB, rb) |
				    AMD_FMT_MOD_SET(PIPE, pipes));
		}

		add_modifier(mods, size, capacity, AMD_FMT_MOD |
			    AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S_X) |
			    AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9) |
			    AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
			    AMD_FMT_MOD_SET(BANK_XOR_BITS, bank_xor_bits) |
			    AMD_FMT_MOD_SET(DCC, 1) |
			    AMD_FMT_MOD_SET(DCC_RETILE, 1) |
			    AMD_FMT_MOD_SET(DCC_INDEPENDENT_64B, 1) |
			    AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_64B) |
			    AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 0) |
			    AMD_FMT_MOD_SET(RB, rb) |
			    AMD_FMT_MOD_SET(PIPE, pipes));
	}

	/*
	 * Only supported for 64bpp on Raven, will be filtered on format in
	 * dm_plane_format_mod_supported.
	 */
	add_modifier(mods, size, capacity, AMD_FMT_MOD |
		    AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_D_X) |
		    AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9) |
		    AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
		    AMD_FMT_MOD_SET(BANK_XOR_BITS, bank_xor_bits));

	/* Plain (non-DCC) 64K_S_X tiling, Raven family only. */
	if (adev->family == AMDGPU_FAMILY_RV) {
		add_modifier(mods, size, capacity, AMD_FMT_MOD |
			    AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S_X) |
			    AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9) |
			    AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
			    AMD_FMT_MOD_SET(BANK_XOR_BITS, bank_xor_bits));
	}

	/*
	 * Only supported for 64bpp on Raven, will be filtered on format in
	 * dm_plane_format_mod_supported.
	 */
	add_modifier(mods, size, capacity, AMD_FMT_MOD |
		    AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_D) |
		    AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9));

	/* Non-XOR 64K_S fallback, Raven family only. */
	if (adev->family == AMDGPU_FAMILY_RV) {
		add_modifier(mods, size, capacity, AMD_FMT_MOD |
			    AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S) |
			    AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9));
	}
}
4403
/*
 * Populate the supported-modifier list for GFX10.1 (Navi10/12/14)
 * parts: 64K_R_X with DCC (with and without a retile plane), plain
 * 64K_R_X / 64K_S_X, and the non-XOR GFX9 fallbacks.
 */
static void
add_gfx10_1_modifiers(const struct amdgpu_device *adev,
		      uint64_t **mods, uint64_t *size, uint64_t *capacity)
{
	int pipe_xor_bits = ilog2(adev->gfx.config.gb_addr_config_fields.num_pipes);

	/* 64K_R_X with DCC (constant encode, independent 64B blocks). */
	add_modifier(mods, size, capacity, AMD_FMT_MOD |
		    AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_R_X) |
		    AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10) |
		    AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
		    AMD_FMT_MOD_SET(DCC, 1) |
		    AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 1) |
		    AMD_FMT_MOD_SET(DCC_INDEPENDENT_64B, 1) |
		    AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_64B));

	/* Same DCC layout plus a retile plane. */
	add_modifier(mods, size, capacity, AMD_FMT_MOD |
		    AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_R_X) |
		    AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10) |
		    AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
		    AMD_FMT_MOD_SET(DCC, 1) |
		    AMD_FMT_MOD_SET(DCC_RETILE, 1) |
		    AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 1) |
		    AMD_FMT_MOD_SET(DCC_INDEPENDENT_64B, 1) |
		    AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_64B));

	/* Plain (non-DCC) 64K_R_X and 64K_S_X tiling. */
	add_modifier(mods, size, capacity, AMD_FMT_MOD |
		    AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_R_X) |
		    AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10) |
		    AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits));

	add_modifier(mods, size, capacity, AMD_FMT_MOD |
		    AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S_X) |
		    AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10) |
		    AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits));


	/* Only supported for 64bpp, will be filtered in dm_plane_format_mod_supported */
	add_modifier(mods, size, capacity, AMD_FMT_MOD |
		    AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_D) |
		    AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9));

	add_modifier(mods, size, capacity, AMD_FMT_MOD |
		    AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S) |
		    AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9));
}
4449
/*
 * Populate the supported-modifier list for GFX10.3 (Sienna Cichlid and
 * friends): like GFX10.1 but with the RBPLUS tile version, a PACKERS
 * field, and 128B max compressed blocks with independent 64B+128B.
 */
static void
add_gfx10_3_modifiers(const struct amdgpu_device *adev,
		      uint64_t **mods, uint64_t *size, uint64_t *capacity)
{
	int pipe_xor_bits = ilog2(adev->gfx.config.gb_addr_config_fields.num_pipes);
	int pkrs = ilog2(adev->gfx.config.gb_addr_config_fields.num_pkrs);

	/* 64K_R_X with DCC (constant encode, independent 64B and 128B). */
	add_modifier(mods, size, capacity, AMD_FMT_MOD |
		    AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_R_X) |
		    AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10_RBPLUS) |
		    AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
		    AMD_FMT_MOD_SET(PACKERS, pkrs) |
		    AMD_FMT_MOD_SET(DCC, 1) |
		    AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 1) |
		    AMD_FMT_MOD_SET(DCC_INDEPENDENT_64B, 1) |
		    AMD_FMT_MOD_SET(DCC_INDEPENDENT_128B, 1) |
		    AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_128B));

	/* Same DCC layout plus a retile plane. */
	add_modifier(mods, size, capacity, AMD_FMT_MOD |
		    AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_R_X) |
		    AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10_RBPLUS) |
		    AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
		    AMD_FMT_MOD_SET(PACKERS, pkrs) |
		    AMD_FMT_MOD_SET(DCC, 1) |
		    AMD_FMT_MOD_SET(DCC_RETILE, 1) |
		    AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 1) |
		    AMD_FMT_MOD_SET(DCC_INDEPENDENT_64B, 1) |
		    AMD_FMT_MOD_SET(DCC_INDEPENDENT_128B, 1) |
		    AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_128B));

	/* Plain (non-DCC) 64K_R_X and 64K_S_X tiling. */
	add_modifier(mods, size, capacity, AMD_FMT_MOD |
		    AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_R_X) |
		    AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10_RBPLUS) |
		    AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
		    AMD_FMT_MOD_SET(PACKERS, pkrs));

	add_modifier(mods, size, capacity, AMD_FMT_MOD |
		    AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S_X) |
		    AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10_RBPLUS) |
		    AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
		    AMD_FMT_MOD_SET(PACKERS, pkrs));

	/* Only supported for 64bpp, will be filtered in dm_plane_format_mod_supported */
	add_modifier(mods, size, capacity, AMD_FMT_MOD |
		    AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_D) |
		    AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9));

	add_modifier(mods, size, capacity, AMD_FMT_MOD |
		    AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S) |
		    AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9));
}
4501
4502 static int
4503 get_plane_modifiers(const struct amdgpu_device *adev, unsigned int plane_type, uint64_t **mods)
4504 {
4505         uint64_t size = 0, capacity = 128;
4506         *mods = NULL;
4507
4508         /* We have not hooked up any pre-GFX9 modifiers. */
4509         if (adev->family < AMDGPU_FAMILY_AI)
4510                 return 0;
4511
4512         *mods = kmalloc(capacity * sizeof(uint64_t), GFP_KERNEL);
4513
4514         if (plane_type == DRM_PLANE_TYPE_CURSOR) {
4515                 add_modifier(mods, &size, &capacity, DRM_FORMAT_MOD_LINEAR);
4516                 add_modifier(mods, &size, &capacity, DRM_FORMAT_MOD_INVALID);
4517                 return *mods ? 0 : -ENOMEM;
4518         }
4519
4520         switch (adev->family) {
4521         case AMDGPU_FAMILY_AI:
4522         case AMDGPU_FAMILY_RV:
4523                 add_gfx9_modifiers(adev, mods, &size, &capacity);
4524                 break;
4525         case AMDGPU_FAMILY_NV:
4526         case AMDGPU_FAMILY_VGH:
4527                 if (adev->asic_type >= CHIP_SIENNA_CICHLID)
4528                         add_gfx10_3_modifiers(adev, mods, &size, &capacity);
4529                 else
4530                         add_gfx10_1_modifiers(adev, mods, &size, &capacity);
4531                 break;
4532         }
4533
4534         add_modifier(mods, &size, &capacity, DRM_FORMAT_MOD_LINEAR);
4535
4536         /* INVALID marks the end of the list. */
4537         add_modifier(mods, &size, &capacity, DRM_FORMAT_MOD_INVALID);
4538
4539         if (!*mods)
4540                 return -ENOMEM;
4541
4542         return 0;
4543 }
4544
4545 static int
4546 fill_gfx9_plane_attributes_from_modifiers(struct amdgpu_device *adev,
4547                                           const struct amdgpu_framebuffer *afb,
4548                                           const enum surface_pixel_format format,
4549                                           const enum dc_rotation_angle rotation,
4550                                           const struct plane_size *plane_size,
4551                                           union dc_tiling_info *tiling_info,
4552                                           struct dc_plane_dcc_param *dcc,
4553                                           struct dc_plane_address *address,
4554                                           const bool force_disable_dcc)
4555 {
4556         const uint64_t modifier = afb->base.modifier;
4557         int ret;
4558
4559         fill_gfx9_tiling_info_from_modifier(adev, tiling_info, modifier);
4560         tiling_info->gfx9.swizzle = modifier_gfx9_swizzle_mode(modifier);
4561
4562         if (modifier_has_dcc(modifier) && !force_disable_dcc) {
4563                 uint64_t dcc_address = afb->address + afb->base.offsets[1];
4564
4565                 dcc->enable = 1;
4566                 dcc->meta_pitch = afb->base.pitches[1];
4567                 dcc->independent_64b_blks = AMD_FMT_MOD_GET(DCC_INDEPENDENT_64B, modifier);
4568
4569                 address->grph.meta_addr.low_part = lower_32_bits(dcc_address);
4570                 address->grph.meta_addr.high_part = upper_32_bits(dcc_address);
4571         }
4572
4573         ret = validate_dcc(adev, format, rotation, tiling_info, dcc, address, plane_size);
4574         if (ret)
4575                 return ret;
4576
4577         return 0;
4578 }
4579
/*
 * Translate an amdgpu framebuffer into DC's buffer description: tiling
 * info, plane sizes, DCC parameters and the addresses programmed into
 * hardware. Single-plane (GRPH) and two-plane video (luma/chroma)
 * surfaces are handled separately; tiling comes from the modifier on
 * GFX9+ and from the legacy tiling flags on older ASICs.
 *
 * Returns 0 on success or a negative errno if the modifier-derived
 * attributes fail validation.
 */
static int
fill_plane_buffer_attributes(struct amdgpu_device *adev,
			     const struct amdgpu_framebuffer *afb,
			     const enum surface_pixel_format format,
			     const enum dc_rotation_angle rotation,
			     const uint64_t tiling_flags,
			     union dc_tiling_info *tiling_info,
			     struct plane_size *plane_size,
			     struct dc_plane_dcc_param *dcc,
			     struct dc_plane_address *address,
			     bool tmz_surface,
			     bool force_disable_dcc)
{
	const struct drm_framebuffer *fb = &afb->base;
	int ret;

	/* Start from a clean slate; only relevant fields get filled below. */
	memset(tiling_info, 0, sizeof(*tiling_info));
	memset(plane_size, 0, sizeof(*plane_size));
	memset(dcc, 0, sizeof(*dcc));
	memset(address, 0, sizeof(*address));

	address->tmz_surface = tmz_surface;

	if (format < SURFACE_PIXEL_FORMAT_VIDEO_BEGIN) {
		/* Non-video formats: a single GRPH surface. */
		uint64_t addr = afb->address + fb->offsets[0];

		plane_size->surface_size.x = 0;
		plane_size->surface_size.y = 0;
		plane_size->surface_size.width = fb->width;
		plane_size->surface_size.height = fb->height;
		/* Pitch is stored in pixels, DRM keeps it in bytes. */
		plane_size->surface_pitch =
			fb->pitches[0] / fb->format->cpp[0];

		address->type = PLN_ADDR_TYPE_GRAPHICS;
		address->grph.addr.low_part = lower_32_bits(addr);
		address->grph.addr.high_part = upper_32_bits(addr);
	} else if (format < SURFACE_PIXEL_FORMAT_INVALID) {
		/* Video formats: separate luma (plane 0) and chroma (plane 1). */
		uint64_t luma_addr = afb->address + fb->offsets[0];
		uint64_t chroma_addr = afb->address + fb->offsets[1];

		plane_size->surface_size.x = 0;
		plane_size->surface_size.y = 0;
		plane_size->surface_size.width = fb->width;
		plane_size->surface_size.height = fb->height;
		plane_size->surface_pitch =
			fb->pitches[0] / fb->format->cpp[0];

		plane_size->chroma_size.x = 0;
		plane_size->chroma_size.y = 0;
		/* TODO: set these based on surface format */
		plane_size->chroma_size.width = fb->width / 2;
		plane_size->chroma_size.height = fb->height / 2;

		plane_size->chroma_pitch =
			fb->pitches[1] / fb->format->cpp[1];

		address->type = PLN_ADDR_TYPE_VIDEO_PROGRESSIVE;
		address->video_progressive.luma_addr.low_part =
			lower_32_bits(luma_addr);
		address->video_progressive.luma_addr.high_part =
			upper_32_bits(luma_addr);
		address->video_progressive.chroma_addr.low_part =
			lower_32_bits(chroma_addr);
		address->video_progressive.chroma_addr.high_part =
			upper_32_bits(chroma_addr);
	}

	if (adev->family >= AMDGPU_FAMILY_AI) {
		/* GFX9+: tiling and DCC are fully described by the modifier. */
		ret = fill_gfx9_plane_attributes_from_modifiers(adev, afb, format,
								rotation, plane_size,
								tiling_info, dcc,
								address,
								force_disable_dcc);
		if (ret)
			return ret;
	} else {
		/* Pre-GFX9: decode the legacy tiling flags instead. */
		fill_gfx8_tiling_info_from_flags(tiling_info, tiling_flags);
	}

	return 0;
}
4661
4662 static void
4663 fill_blending_from_plane_state(const struct drm_plane_state *plane_state,
4664                                bool *per_pixel_alpha, bool *global_alpha,
4665                                int *global_alpha_value)
4666 {
4667         *per_pixel_alpha = false;
4668         *global_alpha = false;
4669         *global_alpha_value = 0xff;
4670
4671         if (plane_state->plane->type != DRM_PLANE_TYPE_OVERLAY)
4672                 return;
4673
4674         if (plane_state->pixel_blend_mode == DRM_MODE_BLEND_PREMULTI) {
4675                 static const uint32_t alpha_formats[] = {
4676                         DRM_FORMAT_ARGB8888,
4677                         DRM_FORMAT_RGBA8888,
4678                         DRM_FORMAT_ABGR8888,
4679                 };
4680                 uint32_t format = plane_state->fb->format->format;
4681                 unsigned int i;
4682
4683                 for (i = 0; i < ARRAY_SIZE(alpha_formats); ++i) {
4684                         if (format == alpha_formats[i]) {
4685                                 *per_pixel_alpha = true;
4686                                 break;
4687                         }
4688                 }
4689         }
4690
4691         if (plane_state->alpha < 0xffff) {
4692                 *global_alpha = true;
4693                 *global_alpha_value = plane_state->alpha >> 8;
4694         }
4695 }
4696
4697 static int
4698 fill_plane_color_attributes(const struct drm_plane_state *plane_state,
4699                             const enum surface_pixel_format format,
4700                             enum dc_color_space *color_space)
4701 {
4702         bool full_range;
4703
4704         *color_space = COLOR_SPACE_SRGB;
4705
4706         /* DRM color properties only affect non-RGB formats. */
4707         if (format < SURFACE_PIXEL_FORMAT_VIDEO_BEGIN)
4708                 return 0;
4709
4710         full_range = (plane_state->color_range == DRM_COLOR_YCBCR_FULL_RANGE);
4711
4712         switch (plane_state->color_encoding) {
4713         case DRM_COLOR_YCBCR_BT601:
4714                 if (full_range)
4715                         *color_space = COLOR_SPACE_YCBCR601;
4716                 else
4717                         *color_space = COLOR_SPACE_YCBCR601_LIMITED;
4718                 break;
4719
4720         case DRM_COLOR_YCBCR_BT709:
4721                 if (full_range)
4722                         *color_space = COLOR_SPACE_YCBCR709;
4723                 else
4724                         *color_space = COLOR_SPACE_YCBCR709_LIMITED;
4725                 break;
4726
4727         case DRM_COLOR_YCBCR_BT2020:
4728                 if (full_range)
4729                         *color_space = COLOR_SPACE_2020_YCBCR;
4730                 else
4731                         return -EINVAL;
4732                 break;
4733
4734         default:
4735                 return -EINVAL;
4736         }
4737
4738         return 0;
4739 }
4740
4741 static int
4742 fill_dc_plane_info_and_addr(struct amdgpu_device *adev,
4743                             const struct drm_plane_state *plane_state,
4744                             const uint64_t tiling_flags,
4745                             struct dc_plane_info *plane_info,
4746                             struct dc_plane_address *address,
4747                             bool tmz_surface,
4748                             bool force_disable_dcc)
4749 {
4750         const struct drm_framebuffer *fb = plane_state->fb;
4751         const struct amdgpu_framebuffer *afb =
4752                 to_amdgpu_framebuffer(plane_state->fb);
4753         int ret;
4754
4755         memset(plane_info, 0, sizeof(*plane_info));
4756
4757         switch (fb->format->format) {
4758         case DRM_FORMAT_C8:
4759                 plane_info->format =
4760                         SURFACE_PIXEL_FORMAT_GRPH_PALETA_256_COLORS;
4761                 break;
4762         case DRM_FORMAT_RGB565:
4763                 plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_RGB565;
4764                 break;
4765         case DRM_FORMAT_XRGB8888:
4766         case DRM_FORMAT_ARGB8888:
4767                 plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ARGB8888;
4768                 break;
4769         case DRM_FORMAT_XRGB2101010:
4770         case DRM_FORMAT_ARGB2101010:
4771                 plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ARGB2101010;
4772                 break;
4773         case DRM_FORMAT_XBGR2101010:
4774         case DRM_FORMAT_ABGR2101010:
4775                 plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ABGR2101010;
4776                 break;
4777         case DRM_FORMAT_XBGR8888:
4778         case DRM_FORMAT_ABGR8888:
4779                 plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ABGR8888;
4780                 break;
4781         case DRM_FORMAT_NV21:
4782                 plane_info->format = SURFACE_PIXEL_FORMAT_VIDEO_420_YCbCr;
4783                 break;
4784         case DRM_FORMAT_NV12:
4785                 plane_info->format = SURFACE_PIXEL_FORMAT_VIDEO_420_YCrCb;
4786                 break;
4787         case DRM_FORMAT_P010:
4788                 plane_info->format = SURFACE_PIXEL_FORMAT_VIDEO_420_10bpc_YCrCb;
4789                 break;
4790         case DRM_FORMAT_XRGB16161616F:
4791         case DRM_FORMAT_ARGB16161616F:
4792                 plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ARGB16161616F;
4793                 break;
4794         case DRM_FORMAT_XBGR16161616F:
4795         case DRM_FORMAT_ABGR16161616F:
4796                 plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ABGR16161616F;
4797                 break;
4798         default:
4799                 DRM_ERROR(
4800                         "Unsupported screen format %p4cc\n",
4801                         &fb->format->format);
4802                 return -EINVAL;
4803         }
4804
4805         switch (plane_state->rotation & DRM_MODE_ROTATE_MASK) {
4806         case DRM_MODE_ROTATE_0:
4807                 plane_info->rotation = ROTATION_ANGLE_0;
4808                 break;
4809         case DRM_MODE_ROTATE_90:
4810                 plane_info->rotation = ROTATION_ANGLE_90;
4811                 break;
4812         case DRM_MODE_ROTATE_180:
4813                 plane_info->rotation = ROTATION_ANGLE_180;
4814                 break;
4815         case DRM_MODE_ROTATE_270:
4816                 plane_info->rotation = ROTATION_ANGLE_270;
4817                 break;
4818         default:
4819                 plane_info->rotation = ROTATION_ANGLE_0;
4820                 break;
4821         }
4822
4823         plane_info->visible = true;
4824         plane_info->stereo_format = PLANE_STEREO_FORMAT_NONE;
4825
4826         plane_info->layer_index = 0;
4827
4828         ret = fill_plane_color_attributes(plane_state, plane_info->format,
4829                                           &plane_info->color_space);
4830         if (ret)
4831                 return ret;
4832
4833         ret = fill_plane_buffer_attributes(adev, afb, plane_info->format,
4834                                            plane_info->rotation, tiling_flags,
4835                                            &plane_info->tiling_info,
4836                                            &plane_info->plane_size,
4837                                            &plane_info->dcc, address, tmz_surface,
4838                                            force_disable_dcc);
4839         if (ret)
4840                 return ret;
4841
4842         fill_blending_from_plane_state(
4843                 plane_state, &plane_info->per_pixel_alpha,
4844                 &plane_info->global_alpha, &plane_info->global_alpha_value);
4845
4846         return 0;
4847 }
4848
4849 static int fill_dc_plane_attributes(struct amdgpu_device *adev,
4850                                     struct dc_plane_state *dc_plane_state,
4851                                     struct drm_plane_state *plane_state,
4852                                     struct drm_crtc_state *crtc_state)
4853 {
4854         struct dm_crtc_state *dm_crtc_state = to_dm_crtc_state(crtc_state);
4855         struct amdgpu_framebuffer *afb = (struct amdgpu_framebuffer *)plane_state->fb;
4856         struct dc_scaling_info scaling_info;
4857         struct dc_plane_info plane_info;
4858         int ret;
4859         bool force_disable_dcc = false;
4860
4861         ret = fill_dc_scaling_info(plane_state, &scaling_info);
4862         if (ret)
4863                 return ret;
4864
4865         dc_plane_state->src_rect = scaling_info.src_rect;
4866         dc_plane_state->dst_rect = scaling_info.dst_rect;
4867         dc_plane_state->clip_rect = scaling_info.clip_rect;
4868         dc_plane_state->scaling_quality = scaling_info.scaling_quality;
4869
4870         force_disable_dcc = adev->asic_type == CHIP_RAVEN && adev->in_suspend;
4871         ret = fill_dc_plane_info_and_addr(adev, plane_state,
4872                                           afb->tiling_flags,
4873                                           &plane_info,
4874                                           &dc_plane_state->address,
4875                                           afb->tmz_surface,
4876                                           force_disable_dcc);
4877         if (ret)
4878                 return ret;
4879
4880         dc_plane_state->format = plane_info.format;
4881         dc_plane_state->color_space = plane_info.color_space;
4882         dc_plane_state->format = plane_info.format;
4883         dc_plane_state->plane_size = plane_info.plane_size;
4884         dc_plane_state->rotation = plane_info.rotation;
4885         dc_plane_state->horizontal_mirror = plane_info.horizontal_mirror;
4886         dc_plane_state->stereo_format = plane_info.stereo_format;
4887         dc_plane_state->tiling_info = plane_info.tiling_info;
4888         dc_plane_state->visible = plane_info.visible;
4889         dc_plane_state->per_pixel_alpha = plane_info.per_pixel_alpha;
4890         dc_plane_state->global_alpha = plane_info.global_alpha;
4891         dc_plane_state->global_alpha_value = plane_info.global_alpha_value;
4892         dc_plane_state->dcc = plane_info.dcc;
4893         dc_plane_state->layer_index = plane_info.layer_index; // Always returns 0
4894         dc_plane_state->flip_int_enabled = true;
4895
4896         /*
4897          * Always set input transfer function, since plane state is refreshed
4898          * every time.
4899          */
4900         ret = amdgpu_dm_update_plane_color_mgmt(dm_crtc_state, dc_plane_state);
4901         if (ret)
4902                 return ret;
4903
4904         return 0;
4905 }
4906
4907 static void update_stream_scaling_settings(const struct drm_display_mode *mode,
4908                                            const struct dm_connector_state *dm_state,
4909                                            struct dc_stream_state *stream)
4910 {
4911         enum amdgpu_rmx_type rmx_type;
4912
4913         struct rect src = { 0 }; /* viewport in composition space*/
4914         struct rect dst = { 0 }; /* stream addressable area */
4915
4916         /* no mode. nothing to be done */
4917         if (!mode)
4918                 return;
4919
4920         /* Full screen scaling by default */
4921         src.width = mode->hdisplay;
4922         src.height = mode->vdisplay;
4923         dst.width = stream->timing.h_addressable;
4924         dst.height = stream->timing.v_addressable;
4925
4926         if (dm_state) {
4927                 rmx_type = dm_state->scaling;
4928                 if (rmx_type == RMX_ASPECT || rmx_type == RMX_OFF) {
4929                         if (src.width * dst.height <
4930                                         src.height * dst.width) {
4931                                 /* height needs less upscaling/more downscaling */
4932                                 dst.width = src.width *
4933                                                 dst.height / src.height;
4934                         } else {
4935                                 /* width needs less upscaling/more downscaling */
4936                                 dst.height = src.height *
4937                                                 dst.width / src.width;
4938                         }
4939                 } else if (rmx_type == RMX_CENTER) {
4940                         dst = src;
4941                 }
4942
4943                 dst.x = (stream->timing.h_addressable - dst.width) / 2;
4944                 dst.y = (stream->timing.v_addressable - dst.height) / 2;
4945
4946                 if (dm_state->underscan_enable) {
4947                         dst.x += dm_state->underscan_hborder / 2;
4948                         dst.y += dm_state->underscan_vborder / 2;
4949                         dst.width -= dm_state->underscan_hborder;
4950                         dst.height -= dm_state->underscan_vborder;
4951                 }
4952         }
4953
4954         stream->src = src;
4955         stream->dst = dst;
4956
4957         DRM_DEBUG_KMS("Destination Rectangle x:%d  y:%d  width:%d  height:%d\n",
4958                       dst.x, dst.y, dst.width, dst.height);
4959
4960 }
4961
4962 static enum dc_color_depth
4963 convert_color_depth_from_display_info(const struct drm_connector *connector,
4964                                       bool is_y420, int requested_bpc)
4965 {
4966         uint8_t bpc;
4967
4968         if (is_y420) {
4969                 bpc = 8;
4970
4971                 /* Cap display bpc based on HDMI 2.0 HF-VSDB */
4972                 if (connector->display_info.hdmi.y420_dc_modes & DRM_EDID_YCBCR420_DC_48)
4973                         bpc = 16;
4974                 else if (connector->display_info.hdmi.y420_dc_modes & DRM_EDID_YCBCR420_DC_36)
4975                         bpc = 12;
4976                 else if (connector->display_info.hdmi.y420_dc_modes & DRM_EDID_YCBCR420_DC_30)
4977                         bpc = 10;
4978         } else {
4979                 bpc = (uint8_t)connector->display_info.bpc;
4980                 /* Assume 8 bpc by default if no bpc is specified. */
4981                 bpc = bpc ? bpc : 8;
4982         }
4983
4984         if (requested_bpc > 0) {
4985                 /*
4986                  * Cap display bpc based on the user requested value.
4987                  *
4988                  * The value for state->max_bpc may not correctly updated
4989                  * depending on when the connector gets added to the state
4990                  * or if this was called outside of atomic check, so it
4991                  * can't be used directly.
4992                  */
4993                 bpc = min_t(u8, bpc, requested_bpc);
4994
4995                 /* Round down to the nearest even number. */
4996                 bpc = bpc - (bpc & 1);
4997         }
4998
4999         switch (bpc) {
5000         case 0:
5001                 /*
5002                  * Temporary Work around, DRM doesn't parse color depth for
5003                  * EDID revision before 1.4
5004                  * TODO: Fix edid parsing
5005                  */
5006                 return COLOR_DEPTH_888;
5007         case 6:
5008                 return COLOR_DEPTH_666;
5009         case 8:
5010                 return COLOR_DEPTH_888;
5011         case 10:
5012                 return COLOR_DEPTH_101010;
5013         case 12:
5014                 return COLOR_DEPTH_121212;
5015         case 14:
5016                 return COLOR_DEPTH_141414;
5017         case 16:
5018                 return COLOR_DEPTH_161616;
5019         default:
5020                 return COLOR_DEPTH_UNDEFINED;
5021         }
5022 }
5023
5024 static enum dc_aspect_ratio
5025 get_aspect_ratio(const struct drm_display_mode *mode_in)
5026 {
5027         /* 1-1 mapping, since both enums follow the HDMI spec. */
5028         return (enum dc_aspect_ratio) mode_in->picture_aspect_ratio;
5029 }
5030
5031 static enum dc_color_space
5032 get_output_color_space(const struct dc_crtc_timing *dc_crtc_timing)
5033 {
5034         enum dc_color_space color_space = COLOR_SPACE_SRGB;
5035
5036         switch (dc_crtc_timing->pixel_encoding) {
5037         case PIXEL_ENCODING_YCBCR422:
5038         case PIXEL_ENCODING_YCBCR444:
5039         case PIXEL_ENCODING_YCBCR420:
5040         {
5041                 /*
5042                  * 27030khz is the separation point between HDTV and SDTV
5043                  * according to HDMI spec, we use YCbCr709 and YCbCr601
5044                  * respectively
5045                  */
5046                 if (dc_crtc_timing->pix_clk_100hz > 270300) {
5047                         if (dc_crtc_timing->flags.Y_ONLY)
5048                                 color_space =
5049                                         COLOR_SPACE_YCBCR709_LIMITED;
5050                         else
5051                                 color_space = COLOR_SPACE_YCBCR709;
5052                 } else {
5053                         if (dc_crtc_timing->flags.Y_ONLY)
5054                                 color_space =
5055                                         COLOR_SPACE_YCBCR601_LIMITED;
5056                         else
5057                                 color_space = COLOR_SPACE_YCBCR601;
5058                 }
5059
5060         }
5061         break;
5062         case PIXEL_ENCODING_RGB:
5063                 color_space = COLOR_SPACE_SRGB;
5064                 break;
5065
5066         default:
5067                 WARN_ON(1);
5068                 break;
5069         }
5070
5071         return color_space;
5072 }
5073
5074 static bool adjust_colour_depth_from_display_info(
5075         struct dc_crtc_timing *timing_out,
5076         const struct drm_display_info *info)
5077 {
5078         enum dc_color_depth depth = timing_out->display_color_depth;
5079         int normalized_clk;
5080         do {
5081                 normalized_clk = timing_out->pix_clk_100hz / 10;
5082                 /* YCbCr 4:2:0 requires additional adjustment of 1/2 */
5083                 if (timing_out->pixel_encoding == PIXEL_ENCODING_YCBCR420)
5084                         normalized_clk /= 2;
5085                 /* Adjusting pix clock following on HDMI spec based on colour depth */
5086                 switch (depth) {
5087                 case COLOR_DEPTH_888:
5088                         break;
5089                 case COLOR_DEPTH_101010:
5090                         normalized_clk = (normalized_clk * 30) / 24;
5091                         break;
5092                 case COLOR_DEPTH_121212:
5093                         normalized_clk = (normalized_clk * 36) / 24;
5094                         break;
5095                 case COLOR_DEPTH_161616:
5096                         normalized_clk = (normalized_clk * 48) / 24;
5097                         break;
5098                 default:
5099                         /* The above depths are the only ones valid for HDMI. */
5100                         return false;
5101                 }
5102                 if (normalized_clk <= info->max_tmds_clock) {
5103                         timing_out->display_color_depth = depth;
5104                         return true;
5105                 }
5106         } while (--depth > COLOR_DEPTH_666);
5107         return false;
5108 }
5109
/*
 * Translate a DRM display mode plus connector capabilities into the DC
 * stream timing: pixel encoding, colour depth, VIC codes, h/v timing and
 * the output colour space / transfer function.
 *
 * @old_stream: when non-NULL, its VIC and sync polarities are reused so the
 *              stream keeps matching the previously committed timing.
 * @requested_bpc: user bpc cap forwarded to
 *                 convert_color_depth_from_display_info(); <= 0 means no cap.
 */
static void fill_stream_properties_from_drm_display_mode(
        struct dc_stream_state *stream,
        const struct drm_display_mode *mode_in,
        const struct drm_connector *connector,
        const struct drm_connector_state *connector_state,
        const struct dc_stream_state *old_stream,
        int requested_bpc)
{
        struct dc_crtc_timing *timing_out = &stream->timing;
        const struct drm_display_info *info = &connector->display_info;
        struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
        struct hdmi_vendor_infoframe hv_frame;
        struct hdmi_avi_infoframe avi_frame;

        memset(&hv_frame, 0, sizeof(hv_frame));
        memset(&avi_frame, 0, sizeof(avi_frame));

        timing_out->h_border_left = 0;
        timing_out->h_border_right = 0;
        timing_out->v_border_top = 0;
        timing_out->v_border_bottom = 0;
        /* TODO: un-hardcode */
        /*
         * Pixel encoding selection: 4:2:0 when the mode requires it (or the
         * connector forces it and the mode allows it), else 4:4:4 on HDMI
         * sinks that advertise it, else RGB.
         */
        if (drm_mode_is_420_only(info, mode_in)
                        && stream->signal == SIGNAL_TYPE_HDMI_TYPE_A)
                timing_out->pixel_encoding = PIXEL_ENCODING_YCBCR420;
        else if (drm_mode_is_420_also(info, mode_in)
                        && aconnector->force_yuv420_output)
                timing_out->pixel_encoding = PIXEL_ENCODING_YCBCR420;
        else if ((connector->display_info.color_formats & DRM_COLOR_FORMAT_YCRCB444)
                        && stream->signal == SIGNAL_TYPE_HDMI_TYPE_A)
                timing_out->pixel_encoding = PIXEL_ENCODING_YCBCR444;
        else
                timing_out->pixel_encoding = PIXEL_ENCODING_RGB;

        timing_out->timing_3d_format = TIMING_3D_FORMAT_NONE;
        timing_out->display_color_depth = convert_color_depth_from_display_info(
                connector,
                (timing_out->pixel_encoding == PIXEL_ENCODING_YCBCR420),
                requested_bpc);
        timing_out->scan_type = SCANNING_TYPE_NODATA;
        timing_out->hdmi_vic = 0;

        /* Reuse VIC and sync polarity from the old stream when provided. */
        if(old_stream) {
                timing_out->vic = old_stream->timing.vic;
                timing_out->flags.HSYNC_POSITIVE_POLARITY = old_stream->timing.flags.HSYNC_POSITIVE_POLARITY;
                timing_out->flags.VSYNC_POSITIVE_POLARITY = old_stream->timing.flags.VSYNC_POSITIVE_POLARITY;
        } else {
                timing_out->vic = drm_match_cea_mode(mode_in);
                if (mode_in->flags & DRM_MODE_FLAG_PHSYNC)
                        timing_out->flags.HSYNC_POSITIVE_POLARITY = 1;
                if (mode_in->flags & DRM_MODE_FLAG_PVSYNC)
                        timing_out->flags.VSYNC_POSITIVE_POLARITY = 1;
        }

        /* On HDMI, take the VICs from the AVI/vendor infoframe helpers. */
        if (stream->signal == SIGNAL_TYPE_HDMI_TYPE_A) {
                drm_hdmi_avi_infoframe_from_display_mode(&avi_frame, (struct drm_connector *)connector, mode_in);
                timing_out->vic = avi_frame.video_code;
                drm_hdmi_vendor_infoframe_from_display_mode(&hv_frame, (struct drm_connector *)connector, mode_in);
                timing_out->hdmi_vic = hv_frame.vic;
        }

        /*
         * Freesync video modes use the requested (logical) timing; everything
         * else uses the hardware crtc_* timing of the mode.
         */
        if (is_freesync_video_mode(mode_in, aconnector)) {
                timing_out->h_addressable = mode_in->hdisplay;
                timing_out->h_total = mode_in->htotal;
                timing_out->h_sync_width = mode_in->hsync_end - mode_in->hsync_start;
                timing_out->h_front_porch = mode_in->hsync_start - mode_in->hdisplay;
                timing_out->v_total = mode_in->vtotal;
                timing_out->v_addressable = mode_in->vdisplay;
                timing_out->v_front_porch = mode_in->vsync_start - mode_in->vdisplay;
                timing_out->v_sync_width = mode_in->vsync_end - mode_in->vsync_start;
                timing_out->pix_clk_100hz = mode_in->clock * 10;
        } else {
                timing_out->h_addressable = mode_in->crtc_hdisplay;
                timing_out->h_total = mode_in->crtc_htotal;
                timing_out->h_sync_width = mode_in->crtc_hsync_end - mode_in->crtc_hsync_start;
                timing_out->h_front_porch = mode_in->crtc_hsync_start - mode_in->crtc_hdisplay;
                timing_out->v_total = mode_in->crtc_vtotal;
                timing_out->v_addressable = mode_in->crtc_vdisplay;
                timing_out->v_front_porch = mode_in->crtc_vsync_start - mode_in->crtc_vdisplay;
                timing_out->v_sync_width = mode_in->crtc_vsync_end - mode_in->crtc_vsync_start;
                timing_out->pix_clk_100hz = mode_in->crtc_clock * 10;
        }

        timing_out->aspect_ratio = get_aspect_ratio(mode_in);

        stream->output_color_space = get_output_color_space(timing_out);

        stream->out_transfer_func->type = TF_TYPE_PREDEFINED;
        stream->out_transfer_func->tf = TRANSFER_FUNCTION_SRGB;
        /*
         * If the chosen depth exceeds the sink's TMDS clock limit, retry with
         * 4:2:0 (halved clock) when the mode supports it.
         */
        if (stream->signal == SIGNAL_TYPE_HDMI_TYPE_A) {
                if (!adjust_colour_depth_from_display_info(timing_out, info) &&
                    drm_mode_is_420_also(info, mode_in) &&
                    timing_out->pixel_encoding != PIXEL_ENCODING_YCBCR420) {
                        timing_out->pixel_encoding = PIXEL_ENCODING_YCBCR420;
                        adjust_colour_depth_from_display_info(timing_out, info);
                }
        }
}
5208
5209 static void fill_audio_info(struct audio_info *audio_info,
5210                             const struct drm_connector *drm_connector,
5211                             const struct dc_sink *dc_sink)
5212 {
5213         int i = 0;
5214         int cea_revision = 0;
5215         const struct dc_edid_caps *edid_caps = &dc_sink->edid_caps;
5216
5217         audio_info->manufacture_id = edid_caps->manufacturer_id;
5218         audio_info->product_id = edid_caps->product_id;
5219
5220         cea_revision = drm_connector->display_info.cea_rev;
5221
5222         strscpy(audio_info->display_name,
5223                 edid_caps->display_name,
5224                 AUDIO_INFO_DISPLAY_NAME_SIZE_IN_CHARS);
5225
5226         if (cea_revision >= 3) {
5227                 audio_info->mode_count = edid_caps->audio_mode_count;
5228
5229                 for (i = 0; i < audio_info->mode_count; ++i) {
5230                         audio_info->modes[i].format_code =
5231                                         (enum audio_format_code)
5232                                         (edid_caps->audio_modes[i].format_code);
5233                         audio_info->modes[i].channel_count =
5234                                         edid_caps->audio_modes[i].channel_count;
5235                         audio_info->modes[i].sample_rates.all =
5236                                         edid_caps->audio_modes[i].sample_rate;
5237                         audio_info->modes[i].sample_size =
5238                                         edid_caps->audio_modes[i].sample_size;
5239                 }
5240         }
5241
5242         audio_info->flags.all = edid_caps->speaker_flags;
5243
5244         /* TODO: We only check for the progressive mode, check for interlace mode too */
5245         if (drm_connector->latency_present[0]) {
5246                 audio_info->video_latency = drm_connector->video_latency[0];
5247                 audio_info->audio_latency = drm_connector->audio_latency[0];
5248         }
5249
5250         /* TODO: For DP, video and audio latency should be calculated from DPCD caps */
5251
5252 }
5253
5254 static void
5255 copy_crtc_timing_for_drm_display_mode(const struct drm_display_mode *src_mode,
5256                                       struct drm_display_mode *dst_mode)
5257 {
5258         dst_mode->crtc_hdisplay = src_mode->crtc_hdisplay;
5259         dst_mode->crtc_vdisplay = src_mode->crtc_vdisplay;
5260         dst_mode->crtc_clock = src_mode->crtc_clock;
5261         dst_mode->crtc_hblank_start = src_mode->crtc_hblank_start;
5262         dst_mode->crtc_hblank_end = src_mode->crtc_hblank_end;
5263         dst_mode->crtc_hsync_start =  src_mode->crtc_hsync_start;
5264         dst_mode->crtc_hsync_end = src_mode->crtc_hsync_end;
5265         dst_mode->crtc_htotal = src_mode->crtc_htotal;
5266         dst_mode->crtc_hskew = src_mode->crtc_hskew;
5267         dst_mode->crtc_vblank_start = src_mode->crtc_vblank_start;
5268         dst_mode->crtc_vblank_end = src_mode->crtc_vblank_end;
5269         dst_mode->crtc_vsync_start = src_mode->crtc_vsync_start;
5270         dst_mode->crtc_vsync_end = src_mode->crtc_vsync_end;
5271         dst_mode->crtc_vtotal = src_mode->crtc_vtotal;
5272 }
5273
5274 static void
5275 decide_crtc_timing_for_drm_display_mode(struct drm_display_mode *drm_mode,
5276                                         const struct drm_display_mode *native_mode,
5277                                         bool scale_enabled)
5278 {
5279         if (scale_enabled) {
5280                 copy_crtc_timing_for_drm_display_mode(native_mode, drm_mode);
5281         } else if (native_mode->clock == drm_mode->clock &&
5282                         native_mode->htotal == drm_mode->htotal &&
5283                         native_mode->vtotal == drm_mode->vtotal) {
5284                 copy_crtc_timing_for_drm_display_mode(native_mode, drm_mode);
5285         } else {
5286                 /* no scaling nor amdgpu inserted, no need to patch */
5287         }
5288 }
5289
5290 static struct dc_sink *
5291 create_fake_sink(struct amdgpu_dm_connector *aconnector)
5292 {
5293         struct dc_sink_init_data sink_init_data = { 0 };
5294         struct dc_sink *sink = NULL;
5295         sink_init_data.link = aconnector->dc_link;
5296         sink_init_data.sink_signal = aconnector->dc_link->connector_signal;
5297
5298         sink = dc_sink_create(&sink_init_data);
5299         if (!sink) {
5300                 DRM_ERROR("Failed to create sink!\n");
5301                 return NULL;
5302         }
5303         sink->sink_signal = SIGNAL_TYPE_VIRTUAL;
5304
5305         return sink;
5306 }
5307
5308 static void set_multisync_trigger_params(
5309                 struct dc_stream_state *stream)
5310 {
5311         struct dc_stream_state *master = NULL;
5312
5313         if (stream->triggered_crtc_reset.enabled) {
5314                 master = stream->triggered_crtc_reset.event_source;
5315                 stream->triggered_crtc_reset.event =
5316                         master->timing.flags.VSYNC_POSITIVE_POLARITY ?
5317                         CRTC_EVENT_VSYNC_RISING : CRTC_EVENT_VSYNC_FALLING;
5318                 stream->triggered_crtc_reset.delay = TRIGGER_DELAY_NEXT_PIXEL;
5319         }
5320 }
5321
5322 static void set_master_stream(struct dc_stream_state *stream_set[],
5323                               int stream_count)
5324 {
5325         int j, highest_rfr = 0, master_stream = 0;
5326
5327         for (j = 0;  j < stream_count; j++) {
5328                 if (stream_set[j] && stream_set[j]->triggered_crtc_reset.enabled) {
5329                         int refresh_rate = 0;
5330
5331                         refresh_rate = (stream_set[j]->timing.pix_clk_100hz*100)/
5332                                 (stream_set[j]->timing.h_total*stream_set[j]->timing.v_total);
5333                         if (refresh_rate > highest_rfr) {
5334                                 highest_rfr = refresh_rate;
5335                                 master_stream = j;
5336                         }
5337                 }
5338         }
5339         for (j = 0;  j < stream_count; j++) {
5340                 if (stream_set[j])
5341                         stream_set[j]->triggered_crtc_reset.event_source = stream_set[master_stream];
5342         }
5343 }
5344
5345 static void dm_enable_per_frame_crtc_master_sync(struct dc_state *context)
5346 {
5347         int i = 0;
5348         struct dc_stream_state *stream;
5349
5350         if (context->stream_count < 2)
5351                 return;
5352         for (i = 0; i < context->stream_count ; i++) {
5353                 if (!context->streams[i])
5354                         continue;
5355                 /*
5356                  * TODO: add a function to read AMD VSDB bits and set
5357                  * crtc_sync_master.multi_sync_enabled flag
5358                  * For now it's set to false
5359                  */
5360         }
5361
5362         set_master_stream(context->streams, context->stream_count);
5363
5364         for (i = 0; i < context->stream_count ; i++) {
5365                 stream = context->streams[i];
5366
5367                 if (!stream)
5368                         continue;
5369
5370                 set_multisync_trigger_params(stream);
5371         }
5372 }
5373
5374 static struct drm_display_mode *
5375 get_highest_refresh_rate_mode(struct amdgpu_dm_connector *aconnector,
5376                           bool use_probed_modes)
5377 {
5378         struct drm_display_mode *m, *m_pref = NULL;
5379         u16 current_refresh, highest_refresh;
5380         struct list_head *list_head = use_probed_modes ?
5381                                                     &aconnector->base.probed_modes :
5382                                                     &aconnector->base.modes;
5383
5384         if (aconnector->freesync_vid_base.clock != 0)
5385                 return &aconnector->freesync_vid_base;
5386
5387         /* Find the preferred mode */
5388         list_for_each_entry (m, list_head, head) {
5389                 if (m->type & DRM_MODE_TYPE_PREFERRED) {
5390                         m_pref = m;
5391                         break;
5392                 }
5393         }
5394
5395         if (!m_pref) {
5396                 /* Probably an EDID with no preferred mode. Fallback to first entry */
5397                 m_pref = list_first_entry_or_null(
5398                         &aconnector->base.modes, struct drm_display_mode, head);
5399                 if (!m_pref) {
5400                         DRM_DEBUG_DRIVER("No preferred mode found in EDID\n");
5401                         return NULL;
5402                 }
5403         }
5404
5405         highest_refresh = drm_mode_vrefresh(m_pref);
5406
5407         /*
5408          * Find the mode with highest refresh rate with same resolution.
5409          * For some monitors, preferred mode is not the mode with highest
5410          * supported refresh rate.
5411          */
5412         list_for_each_entry (m, list_head, head) {
5413                 current_refresh  = drm_mode_vrefresh(m);
5414
5415                 if (m->hdisplay == m_pref->hdisplay &&
5416                     m->vdisplay == m_pref->vdisplay &&
5417                     highest_refresh < current_refresh) {
5418                         highest_refresh = current_refresh;
5419                         m_pref = m;
5420                 }
5421         }
5422
5423         aconnector->freesync_vid_base = *m_pref;
5424         return m_pref;
5425 }
5426
5427 static bool is_freesync_video_mode(const struct drm_display_mode *mode,
5428                                    struct amdgpu_dm_connector *aconnector)
5429 {
5430         struct drm_display_mode *high_mode;
5431         int timing_diff;
5432
5433         high_mode = get_highest_refresh_rate_mode(aconnector, false);
5434         if (!high_mode || !mode)
5435                 return false;
5436
5437         timing_diff = high_mode->vtotal - mode->vtotal;
5438
5439         if (high_mode->clock == 0 || high_mode->clock != mode->clock ||
5440             high_mode->hdisplay != mode->hdisplay ||
5441             high_mode->vdisplay != mode->vdisplay ||
5442             high_mode->hsync_start != mode->hsync_start ||
5443             high_mode->hsync_end != mode->hsync_end ||
5444             high_mode->htotal != mode->htotal ||
5445             high_mode->hskew != mode->hskew ||
5446             high_mode->vscan != mode->vscan ||
5447             high_mode->vsync_start - mode->vsync_start != timing_diff ||
5448             high_mode->vsync_end - mode->vsync_end != timing_diff)
5449                 return false;
5450         else
5451                 return true;
5452 }
5453
/*
 * Build a dc_stream_state for @aconnector from @drm_mode.
 *
 * @aconnector:    connector to create the stream for; a fake (virtual) sink
 *                 is used when it has no dc_sink attached
 * @drm_mode:      requested display mode
 * @dm_state:      connector state providing scaling/underscan; may be NULL
 * @old_stream:    previous stream whose vic/polarities are reused when only
 *                 scaling changed; may be NULL
 * @requested_bpc: bpc to use when filling the stream timing
 *
 * Returns the new stream (caller owns a reference), or NULL on failure.
 */
static struct dc_stream_state *
create_stream_for_sink(struct amdgpu_dm_connector *aconnector,
		       const struct drm_display_mode *drm_mode,
		       const struct dm_connector_state *dm_state,
		       const struct dc_stream_state *old_stream,
		       int requested_bpc)
{
	struct drm_display_mode *preferred_mode = NULL;
	struct drm_connector *drm_connector;
	const struct drm_connector_state *con_state =
		dm_state ? &dm_state->base : NULL;
	struct dc_stream_state *stream = NULL;
	struct drm_display_mode mode = *drm_mode;
	struct drm_display_mode saved_mode;
	struct drm_display_mode *freesync_mode = NULL;
	bool native_mode_found = false;
	/* Scaling (RMX) forces the timing to be recomputed below. */
	bool recalculate_timing = dm_state ? (dm_state->scaling != RMX_OFF) : false;
	int mode_refresh;
	int preferred_refresh = 0;
#if defined(CONFIG_DRM_AMD_DC_DCN)
	struct dsc_dec_dpcd_caps dsc_caps;
	uint32_t link_bandwidth_kbps;
#endif
	struct dc_sink *sink = NULL;

	memset(&saved_mode, 0, sizeof(saved_mode));

	if (aconnector == NULL) {
		DRM_ERROR("aconnector is NULL!\n");
		return stream;
	}

	drm_connector = &aconnector->base;

	/* Headless/forced connectors get a virtual sink instead. */
	if (!aconnector->dc_sink) {
		sink = create_fake_sink(aconnector);
		if (!sink)
			return stream;
	} else {
		sink = aconnector->dc_sink;
		dc_sink_retain(sink);
	}

	stream = dc_create_stream_for_sink(sink);

	if (stream == NULL) {
		DRM_ERROR("Failed to create stream for sink!\n");
		goto finish;
	}

	stream->dm_stream_context = aconnector;

	stream->timing.flags.LTE_340MCSC_SCRAMBLE =
		drm_connector->display_info.hdmi.scdc.scrambling.low_rates;

	list_for_each_entry(preferred_mode, &aconnector->base.modes, head) {
		/* Search for preferred mode */
		if (preferred_mode->type & DRM_MODE_TYPE_PREFERRED) {
			native_mode_found = true;
			break;
		}
	}
	if (!native_mode_found)
		preferred_mode = list_first_entry_or_null(
				&aconnector->base.modes,
				struct drm_display_mode,
				head);

	mode_refresh = drm_mode_vrefresh(&mode);

	if (preferred_mode == NULL) {
		/*
		 * This may not be an error, the use case is when we have no
		 * usermode calls to reset and set mode upon hotplug. In this
		 * case, we call set mode ourselves to restore the previous mode
		 * and the modelist may not be filled in in time.
		 */
		DRM_DEBUG_DRIVER("No preferred mode found\n");
	} else {
		/* Freesync video modes also need their timing recomputed. */
		recalculate_timing |= amdgpu_freesync_vid_mode &&
				 is_freesync_video_mode(&mode, aconnector);
		if (recalculate_timing) {
			freesync_mode = get_highest_refresh_rate_mode(aconnector, false);
			saved_mode = mode;
			mode = *freesync_mode;
		} else {
			decide_crtc_timing_for_drm_display_mode(
				&mode, preferred_mode,
				dm_state ? (dm_state->scaling != RMX_OFF) : false);
		}

		preferred_refresh = drm_mode_vrefresh(preferred_mode);
	}

	if (recalculate_timing)
		drm_mode_set_crtcinfo(&saved_mode, 0);
	else if (!dm_state)
		drm_mode_set_crtcinfo(&mode, 0);

	/*
	 * If scaling is enabled and refresh rate didn't change
	 * we copy the vic and polarities of the old timings
	 */
	if (!recalculate_timing || mode_refresh != preferred_refresh)
		fill_stream_properties_from_drm_display_mode(
			stream, &mode, &aconnector->base, con_state, NULL,
			requested_bpc);
	else
		fill_stream_properties_from_drm_display_mode(
			stream, &mode, &aconnector->base, con_state, old_stream,
			requested_bpc);

	stream->timing.flags.DSC = 0;

	/* Configure DSC (DP only) from the sink's DPCD caps and debugfs overrides. */
	if (aconnector->dc_link && sink->sink_signal == SIGNAL_TYPE_DISPLAY_PORT) {
#if defined(CONFIG_DRM_AMD_DC_DCN)
		dc_dsc_parse_dsc_dpcd(aconnector->dc_link->ctx->dc,
				      aconnector->dc_link->dpcd_caps.dsc_caps.dsc_basic_caps.raw,
				      aconnector->dc_link->dpcd_caps.dsc_caps.dsc_branch_decoder_caps.raw,
				      &dsc_caps);
		link_bandwidth_kbps = dc_link_bandwidth_kbps(aconnector->dc_link,
							     dc_link_get_link_cap(aconnector->dc_link));

		if (aconnector->dsc_settings.dsc_force_enable != DSC_CLK_FORCE_DISABLE && dsc_caps.is_dsc_supported) {
			/* Set DSC policy according to dsc_clock_en */
			dc_dsc_policy_set_enable_dsc_when_not_needed(
				aconnector->dsc_settings.dsc_force_enable == DSC_CLK_FORCE_ENABLE);

			if (dc_dsc_compute_config(aconnector->dc_link->ctx->dc->res_pool->dscs[0],
						  &dsc_caps,
						  aconnector->dc_link->ctx->dc->debug.dsc_min_slice_height_override,
						  0,
						  link_bandwidth_kbps,
						  &stream->timing,
						  &stream->timing.dsc_cfg))
				stream->timing.flags.DSC = 1;
			/* Overwrite the stream flag if DSC is enabled through debugfs */
			if (aconnector->dsc_settings.dsc_force_enable == DSC_CLK_FORCE_ENABLE)
				stream->timing.flags.DSC = 1;

			if (stream->timing.flags.DSC && aconnector->dsc_settings.dsc_num_slices_h)
				stream->timing.dsc_cfg.num_slices_h = aconnector->dsc_settings.dsc_num_slices_h;

			if (stream->timing.flags.DSC && aconnector->dsc_settings.dsc_num_slices_v)
				stream->timing.dsc_cfg.num_slices_v = aconnector->dsc_settings.dsc_num_slices_v;

			if (stream->timing.flags.DSC && aconnector->dsc_settings.dsc_bits_per_pixel)
				stream->timing.dsc_cfg.bits_per_pixel = aconnector->dsc_settings.dsc_bits_per_pixel;
		}
#endif
	}

	update_stream_scaling_settings(&mode, dm_state, stream);

	fill_audio_info(
		&stream->audio_info,
		drm_connector,
		sink);

	update_stream_signal(stream, sink);

	if (stream->signal == SIGNAL_TYPE_HDMI_TYPE_A)
		mod_build_hf_vsif_infopacket(stream, &stream->vsp_infopacket);

	if (stream->link->psr_settings.psr_feature_enabled) {
		//
		// should decide stream support vsc sdp colorimetry capability
		// before building vsc info packet
		//
		stream->use_vsc_sdp_for_colorimetry = false;
		if (aconnector->dc_sink->sink_signal == SIGNAL_TYPE_DISPLAY_PORT_MST) {
			stream->use_vsc_sdp_for_colorimetry =
				aconnector->dc_sink->is_vsc_sdp_colorimetry_supported;
		} else {
			if (stream->link->dpcd_caps.dprx_feature.bits.VSC_SDP_COLORIMETRY_SUPPORTED)
				stream->use_vsc_sdp_for_colorimetry = true;
		}
		mod_build_vsc_infopacket(stream, &stream->vsc_infopacket);
	}
finish:
	dc_sink_release(sink);

	return stream;
}
5638
/* drm_crtc_funcs.destroy: tear down the DRM core state and free the CRTC. */
static void amdgpu_dm_crtc_destroy(struct drm_crtc *crtc)
{
	drm_crtc_cleanup(crtc);
	kfree(crtc);
}
5644
5645 static void dm_crtc_destroy_state(struct drm_crtc *crtc,
5646                                   struct drm_crtc_state *state)
5647 {
5648         struct dm_crtc_state *cur = to_dm_crtc_state(state);
5649
5650         /* TODO Destroy dc_stream objects are stream object is flattened */
5651         if (cur->stream)
5652                 dc_stream_release(cur->stream);
5653
5654
5655         __drm_atomic_helper_crtc_destroy_state(state);
5656
5657
5658         kfree(state);
5659 }
5660
5661 static void dm_crtc_reset_state(struct drm_crtc *crtc)
5662 {
5663         struct dm_crtc_state *state;
5664
5665         if (crtc->state)
5666                 dm_crtc_destroy_state(crtc, crtc->state);
5667
5668         state = kzalloc(sizeof(*state), GFP_KERNEL);
5669         if (WARN_ON(!state))
5670                 return;
5671
5672         __drm_atomic_helper_crtc_reset(crtc, &state->base);
5673 }
5674
5675 static struct drm_crtc_state *
5676 dm_crtc_duplicate_state(struct drm_crtc *crtc)
5677 {
5678         struct dm_crtc_state *state, *cur;
5679
5680         cur = to_dm_crtc_state(crtc->state);
5681
5682         if (WARN_ON(!crtc->state))
5683                 return NULL;
5684
5685         state = kzalloc(sizeof(*state), GFP_KERNEL);
5686         if (!state)
5687                 return NULL;
5688
5689         __drm_atomic_helper_crtc_duplicate_state(crtc, &state->base);
5690
5691         if (cur->stream) {
5692                 state->stream = cur->stream;
5693                 dc_stream_retain(state->stream);
5694         }
5695
5696         state->active_planes = cur->active_planes;
5697         state->vrr_infopacket = cur->vrr_infopacket;
5698         state->abm_level = cur->abm_level;
5699         state->vrr_supported = cur->vrr_supported;
5700         state->freesync_config = cur->freesync_config;
5701         state->cm_has_degamma = cur->cm_has_degamma;
5702         state->cm_is_degamma_srgb = cur->cm_is_degamma_srgb;
5703         /* TODO Duplicate dc_stream after objects are stream object is flattened */
5704
5705         return &state->base;
5706 }
5707
#ifdef CONFIG_DRM_AMD_SECURE_DISPLAY
/* drm_crtc_funcs.late_register: create the CRTC's debugfs entries. */
static int amdgpu_dm_crtc_late_register(struct drm_crtc *crtc)
{
	crtc_debugfs_init(crtc);

	return 0;
}
#endif
5716
5717 static inline int dm_set_vupdate_irq(struct drm_crtc *crtc, bool enable)
5718 {
5719         enum dc_irq_source irq_source;
5720         struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
5721         struct amdgpu_device *adev = drm_to_adev(crtc->dev);
5722         int rc;
5723
5724         irq_source = IRQ_TYPE_VUPDATE + acrtc->otg_inst;
5725
5726         rc = dc_interrupt_set(adev->dm.dc, irq_source, enable) ? 0 : -EBUSY;
5727
5728         DRM_DEBUG_VBL("crtc %d - vupdate irq %sabling: r=%d\n",
5729                       acrtc->crtc_id, enable ? "en" : "dis", rc);
5730         return rc;
5731 }
5732
/*
 * Toggle the VBLANK interrupt for @crtc.  The VUPDATE interrupt is only
 * kept on while VRR is active.  On DCN hardware this also hands the
 * request to the vblank workqueue so MALL/idle optimizations can be
 * re-evaluated — skipped during GPU reset.
 *
 * Returns 0 on success, -EBUSY when DC rejects the interrupt change.
 */
static inline int dm_set_vblank(struct drm_crtc *crtc, bool enable)
{
	enum dc_irq_source irq_source;
	struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
	struct amdgpu_device *adev = drm_to_adev(crtc->dev);
	struct dm_crtc_state *acrtc_state = to_dm_crtc_state(crtc->state);
#if defined(CONFIG_DRM_AMD_DC_DCN)
	struct amdgpu_display_manager *dm = &adev->dm;
	unsigned long flags;
#endif
	int rc = 0;

	if (enable) {
		/* vblank irq on -> Only need vupdate irq in vrr mode */
		if (amdgpu_dm_vrr_active(acrtc_state))
			rc = dm_set_vupdate_irq(crtc, true);
	} else {
		/* vblank irq off -> vupdate irq off */
		rc = dm_set_vupdate_irq(crtc, false);
	}

	if (rc)
		return rc;

	irq_source = IRQ_TYPE_VBLANK + acrtc->otg_inst;

	if (!dc_interrupt_set(adev->dm.dc, irq_source, enable))
		return -EBUSY;

	/* Do not touch the workqueue while a GPU reset is in flight. */
	if (amdgpu_in_reset(adev))
		return 0;

#if defined(CONFIG_DRM_AMD_DC_DCN)
	/* Publish the request under the vblank lock, then kick the worker. */
	spin_lock_irqsave(&dm->vblank_lock, flags);
	dm->vblank_workqueue->dm = dm;
	dm->vblank_workqueue->otg_inst = acrtc->otg_inst;
	dm->vblank_workqueue->enable = enable;
	spin_unlock_irqrestore(&dm->vblank_lock, flags);
	schedule_work(&dm->vblank_workqueue->mall_work);
#endif

	return 0;
}
5776
/* drm_crtc_funcs.enable_vblank: turn the VBLANK interrupt on for @crtc. */
static int dm_enable_vblank(struct drm_crtc *crtc)
{
	return dm_set_vblank(crtc, true);
}
5781
/* drm_crtc_funcs.disable_vblank: turn the VBLANK interrupt off for @crtc. */
static void dm_disable_vblank(struct drm_crtc *crtc)
{
	dm_set_vblank(crtc, false);
}
5786
/* Implemented only the options currently available for the driver */
static const struct drm_crtc_funcs amdgpu_dm_crtc_funcs = {
	.reset = dm_crtc_reset_state,
	.destroy = amdgpu_dm_crtc_destroy,
	.set_config = drm_atomic_helper_set_config,
	.page_flip = drm_atomic_helper_page_flip,
	.atomic_duplicate_state = dm_crtc_duplicate_state,
	.atomic_destroy_state = dm_crtc_destroy_state,
	.set_crc_source = amdgpu_dm_crtc_set_crc_source,
	.verify_crc_source = amdgpu_dm_crtc_verify_crc_source,
	.get_crc_sources = amdgpu_dm_crtc_get_crc_sources,
	.get_vblank_counter = amdgpu_get_vblank_counter_kms,
	.enable_vblank = dm_enable_vblank,
	.disable_vblank = dm_disable_vblank,
	.get_vblank_timestamp = drm_crtc_vblank_helper_get_vblank_timestamp,
#if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
	.late_register = amdgpu_dm_crtc_late_register,
#endif
};
5806
5807 static enum drm_connector_status
5808 amdgpu_dm_connector_detect(struct drm_connector *connector, bool force)
5809 {
5810         bool connected;
5811         struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
5812
5813         /*
5814          * Notes:
5815          * 1. This interface is NOT called in context of HPD irq.
5816          * 2. This interface *is called* in context of user-mode ioctl. Which
5817          * makes it a bad place for *any* MST-related activity.
5818          */
5819
5820         if (aconnector->base.force == DRM_FORCE_UNSPECIFIED &&
5821             !aconnector->fake_enable)
5822                 connected = (aconnector->dc_sink != NULL);
5823         else
5824                 connected = (aconnector->base.force == DRM_FORCE_ON);
5825
5826         update_subconnector_property(aconnector);
5827
5828         return (connected ? connector_status_connected :
5829                         connector_status_disconnected);
5830 }
5831
5832 int amdgpu_dm_connector_atomic_set_property(struct drm_connector *connector,
5833                                             struct drm_connector_state *connector_state,
5834                                             struct drm_property *property,
5835                                             uint64_t val)
5836 {
5837         struct drm_device *dev = connector->dev;
5838         struct amdgpu_device *adev = drm_to_adev(dev);
5839         struct dm_connector_state *dm_old_state =
5840                 to_dm_connector_state(connector->state);
5841         struct dm_connector_state *dm_new_state =
5842                 to_dm_connector_state(connector_state);
5843
5844         int ret = -EINVAL;
5845
5846         if (property == dev->mode_config.scaling_mode_property) {
5847                 enum amdgpu_rmx_type rmx_type;
5848
5849                 switch (val) {
5850                 case DRM_MODE_SCALE_CENTER:
5851                         rmx_type = RMX_CENTER;
5852                         break;
5853                 case DRM_MODE_SCALE_ASPECT:
5854                         rmx_type = RMX_ASPECT;
5855                         break;
5856                 case DRM_MODE_SCALE_FULLSCREEN:
5857                         rmx_type = RMX_FULL;
5858                         break;
5859                 case DRM_MODE_SCALE_NONE:
5860                 default:
5861                         rmx_type = RMX_OFF;
5862                         break;
5863                 }
5864
5865                 if (dm_old_state->scaling == rmx_type)
5866                         return 0;
5867
5868                 dm_new_state->scaling = rmx_type;
5869                 ret = 0;
5870         } else if (property == adev->mode_info.underscan_hborder_property) {
5871                 dm_new_state->underscan_hborder = val;
5872                 ret = 0;
5873         } else if (property == adev->mode_info.underscan_vborder_property) {
5874                 dm_new_state->underscan_vborder = val;
5875                 ret = 0;
5876         } else if (property == adev->mode_info.underscan_property) {
5877                 dm_new_state->underscan_enable = val;
5878                 ret = 0;
5879         } else if (property == adev->mode_info.abm_level_property) {
5880                 dm_new_state->abm_level = val;
5881                 ret = 0;
5882         }
5883
5884         return ret;
5885 }
5886
5887 int amdgpu_dm_connector_atomic_get_property(struct drm_connector *connector,
5888                                             const struct drm_connector_state *state,
5889                                             struct drm_property *property,
5890                                             uint64_t *val)
5891 {
5892         struct drm_device *dev = connector->dev;
5893         struct amdgpu_device *adev = drm_to_adev(dev);
5894         struct dm_connector_state *dm_state =
5895                 to_dm_connector_state(state);
5896         int ret = -EINVAL;
5897
5898         if (property == dev->mode_config.scaling_mode_property) {
5899                 switch (dm_state->scaling) {
5900                 case RMX_CENTER:
5901                         *val = DRM_MODE_SCALE_CENTER;
5902                         break;
5903                 case RMX_ASPECT:
5904                         *val = DRM_MODE_SCALE_ASPECT;
5905                         break;
5906                 case RMX_FULL:
5907                         *val = DRM_MODE_SCALE_FULLSCREEN;
5908                         break;
5909                 case RMX_OFF:
5910                 default:
5911                         *val = DRM_MODE_SCALE_NONE;
5912                         break;
5913                 }
5914                 ret = 0;
5915         } else if (property == adev->mode_info.underscan_hborder_property) {
5916                 *val = dm_state->underscan_hborder;
5917                 ret = 0;
5918         } else if (property == adev->mode_info.underscan_vborder_property) {
5919                 *val = dm_state->underscan_vborder;
5920                 ret = 0;
5921         } else if (property == adev->mode_info.underscan_property) {
5922                 *val = dm_state->underscan_enable;
5923                 ret = 0;
5924         } else if (property == adev->mode_info.abm_level_property) {
5925                 *val = dm_state->abm_level;
5926                 ret = 0;
5927         }
5928
5929         return ret;
5930 }
5931
/* drm_connector_funcs.early_unregister: tear down the DP AUX channel. */
static void amdgpu_dm_connector_unregister(struct drm_connector *connector)
{
	struct amdgpu_dm_connector *amdgpu_dm_connector = to_amdgpu_dm_connector(connector);

	drm_dp_aux_unregister(&amdgpu_dm_connector->dm_dp_aux.aux);
}
5938
/*
 * drm_connector_funcs.destroy: release everything tied to the connector -
 * MST topology manager, backlight device (internal panels), emulated and
 * real sinks, CEC adapter, i2c adapter - then free the connector itself.
 */
static void amdgpu_dm_connector_destroy(struct drm_connector *connector)
{
	struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
	const struct dc_link *link = aconnector->dc_link;
	struct amdgpu_device *adev = drm_to_adev(connector->dev);
	struct amdgpu_display_manager *dm = &adev->dm;

	/*
	 * Call only if mst_mgr was initialized before since it's not done
	 * for all connector types.
	 */
	if (aconnector->mst_mgr.dev)
		drm_dp_mst_topology_mgr_destroy(&aconnector->mst_mgr);

#if defined(CONFIG_BACKLIGHT_CLASS_DEVICE) ||\
	defined(CONFIG_BACKLIGHT_CLASS_DEVICE_MODULE)

	/* The backlight device only exists for connected internal panels. */
	if ((link->connector_signal & (SIGNAL_TYPE_EDP | SIGNAL_TYPE_LVDS)) &&
	    link->type != dc_connection_none &&
	    dm->backlight_dev) {
		backlight_device_unregister(dm->backlight_dev);
		dm->backlight_dev = NULL;
	}
#endif

	if (aconnector->dc_em_sink)
		dc_sink_release(aconnector->dc_em_sink);
	aconnector->dc_em_sink = NULL;
	if (aconnector->dc_sink)
		dc_sink_release(aconnector->dc_sink);
	aconnector->dc_sink = NULL;

	drm_dp_cec_unregister_connector(&aconnector->dm_dp_aux.aux);
	drm_connector_unregister(connector);
	drm_connector_cleanup(connector);
	if (aconnector->i2c) {
		i2c_del_adapter(&aconnector->i2c->base);
		kfree(aconnector->i2c);
	}
	kfree(aconnector->dm_dp_aux.aux.name);

	kfree(connector);
}
5982
5983 void amdgpu_dm_connector_funcs_reset(struct drm_connector *connector)
5984 {
5985         struct dm_connector_state *state =
5986                 to_dm_connector_state(connector->state);
5987
5988         if (connector->state)
5989                 __drm_atomic_helper_connector_destroy_state(connector->state);
5990
5991         kfree(state);
5992
5993         state = kzalloc(sizeof(*state), GFP_KERNEL);
5994
5995         if (state) {
5996                 state->scaling = RMX_OFF;
5997                 state->underscan_enable = false;
5998                 state->underscan_hborder = 0;
5999                 state->underscan_vborder = 0;
6000                 state->base.max_requested_bpc = 8;
6001                 state->vcpi_slots = 0;
6002                 state->pbn = 0;
6003                 if (connector->connector_type == DRM_MODE_CONNECTOR_eDP)
6004                         state->abm_level = amdgpu_dm_abm_level;
6005
6006                 __drm_atomic_helper_connector_reset(connector, &state->base);
6007         }
6008 }
6009
6010 struct drm_connector_state *
6011 amdgpu_dm_connector_atomic_duplicate_state(struct drm_connector *connector)
6012 {
6013         struct dm_connector_state *state =
6014                 to_dm_connector_state(connector->state);
6015
6016         struct dm_connector_state *new_state =
6017                         kmemdup(state, sizeof(*state), GFP_KERNEL);
6018
6019         if (!new_state)
6020                 return NULL;
6021
6022         __drm_atomic_helper_connector_duplicate_state(connector, &new_state->base);
6023
6024         new_state->freesync_capable = state->freesync_capable;
6025         new_state->abm_level = state->abm_level;
6026         new_state->scaling = state->scaling;
6027         new_state->underscan_enable = state->underscan_enable;
6028         new_state->underscan_hborder = state->underscan_hborder;
6029         new_state->underscan_vborder = state->underscan_vborder;
6030         new_state->vcpi_slots = state->vcpi_slots;
6031         new_state->pbn = state->pbn;
6032         return &new_state->base;
6033 }
6034
6035 static int
6036 amdgpu_dm_connector_late_register(struct drm_connector *connector)
6037 {
6038         struct amdgpu_dm_connector *amdgpu_dm_connector =
6039                 to_amdgpu_dm_connector(connector);
6040         int r;
6041
6042         if ((connector->connector_type == DRM_MODE_CONNECTOR_DisplayPort) ||
6043             (connector->connector_type == DRM_MODE_CONNECTOR_eDP)) {
6044                 amdgpu_dm_connector->dm_dp_aux.aux.dev = connector->kdev;
6045                 r = drm_dp_aux_register(&amdgpu_dm_connector->dm_dp_aux.aux);
6046                 if (r)
6047                         return r;
6048         }
6049
6050 #if defined(CONFIG_DEBUG_FS)
6051         connector_debugfs_init(amdgpu_dm_connector);
6052 #endif
6053
6054         return 0;
6055 }
6056
/* Connector callbacks wired into the DRM core for every amdgpu connector. */
static const struct drm_connector_funcs amdgpu_dm_connector_funcs = {
	.reset = amdgpu_dm_connector_funcs_reset,
	.detect = amdgpu_dm_connector_detect,
	.fill_modes = drm_helper_probe_single_connector_modes,
	.destroy = amdgpu_dm_connector_destroy,
	.atomic_duplicate_state = amdgpu_dm_connector_atomic_duplicate_state,
	.atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
	.atomic_set_property = amdgpu_dm_connector_atomic_set_property,
	.atomic_get_property = amdgpu_dm_connector_atomic_get_property,
	.late_register = amdgpu_dm_connector_late_register,
	.early_unregister = amdgpu_dm_connector_unregister
};
6069
/* drm_connector_helper_funcs.get_modes: delegate to the DM implementation. */
static int get_modes(struct drm_connector *connector)
{
	return amdgpu_dm_connector_get_modes(connector);
}
6074
/*
 * Create an emulated (virtual) DC sink from the connector's EDID override
 * blob. Used when the user forces a connector state without a physical
 * sink. If no EDID blob is attached, the connector is forced OFF instead.
 */
static void create_eml_sink(struct amdgpu_dm_connector *aconnector)
{
	struct dc_sink_init_data init_params = {
			.link = aconnector->dc_link,
			.sink_signal = SIGNAL_TYPE_VIRTUAL
	};
	struct edid *edid;

	if (!aconnector->base.edid_blob_ptr) {
		DRM_ERROR("No EDID firmware found on connector: %s ,forcing to OFF!\n",
				aconnector->base.name);

		aconnector->base.force = DRM_FORCE_OFF;
		aconnector->base.override_edid = false;
		return;
	}

	edid = (struct edid *) aconnector->base.edid_blob_ptr->data;

	aconnector->edid = edid;

	/* Register the emulated sink with DC; size covers base EDID block
	 * plus all extension blocks. */
	aconnector->dc_em_sink = dc_link_add_remote_sink(
		aconnector->dc_link,
		(uint8_t *)edid,
		(edid->extensions + 1) * EDID_LENGTH,
		&init_params);

	/* For forced-on connectors, prefer a real local sink when present,
	 * falling back to the emulated one; retain whichever is chosen. */
	if (aconnector->base.force == DRM_FORCE_ON) {
		aconnector->dc_sink = aconnector->dc_link->local_sink ?
		aconnector->dc_link->local_sink :
		aconnector->dc_em_sink;
		dc_sink_retain(aconnector->dc_sink);
	}
}
6109
6110 static void handle_edid_mgmt(struct amdgpu_dm_connector *aconnector)
6111 {
6112         struct dc_link *link = (struct dc_link *)aconnector->dc_link;
6113
6114         /*
6115          * In case of headless boot with force on for DP managed connector
6116          * Those settings have to be != 0 to get initial modeset
6117          */
6118         if (link->connector_signal == SIGNAL_TYPE_DISPLAY_PORT) {
6119                 link->verified_link_cap.lane_count = LANE_COUNT_FOUR;
6120                 link->verified_link_cap.link_rate = LINK_RATE_HIGH2;
6121         }
6122
6123
6124         aconnector->base.override_edid = true;
6125         create_eml_sink(aconnector);
6126 }
6127
/*
 * Create a dc_stream_state for @aconnector/@drm_mode and validate it with
 * DC, retrying with progressively lower bpc (down to 6) until validation
 * succeeds. If validation fails with DC_FAIL_ENC_VALIDATE, retry once more
 * forcing YCbCr420 output.
 *
 * Returns a validated stream (caller must release it) or NULL on failure.
 */
static struct dc_stream_state *
create_validate_stream_for_sink(struct amdgpu_dm_connector *aconnector,
				const struct drm_display_mode *drm_mode,
				const struct dm_connector_state *dm_state,
				const struct dc_stream_state *old_stream)
{
	struct drm_connector *connector = &aconnector->base;
	struct amdgpu_device *adev = drm_to_adev(connector->dev);
	struct dc_stream_state *stream;
	const struct drm_connector_state *drm_state = dm_state ? &dm_state->base : NULL;
	/* With no connector state, start from the 8 bpc default. */
	int requested_bpc = drm_state ? drm_state->max_requested_bpc : 8;
	enum dc_status dc_result = DC_OK;

	do {
		stream = create_stream_for_sink(aconnector, drm_mode,
						dm_state, old_stream,
						requested_bpc);
		if (stream == NULL) {
			DRM_ERROR("Failed to create stream for sink!\n");
			break;
		}

		dc_result = dc_validate_stream(adev->dm.dc, stream);

		if (dc_result != DC_OK) {
			DRM_DEBUG_KMS("Mode %dx%d (clk %d) failed DC validation with error %d (%s)\n",
				      drm_mode->hdisplay,
				      drm_mode->vdisplay,
				      drm_mode->clock,
				      dc_result,
				      dc_status_to_str(dc_result));

			dc_stream_release(stream);
			stream = NULL;
			requested_bpc -= 2; /* lower bpc to retry validation */
		}

	} while (stream == NULL && requested_bpc >= 6);

	/* Last resort: recurse once with YCbCr420 forced; the flag is
	 * restored afterwards so normal probing is unaffected. */
	if (dc_result == DC_FAIL_ENC_VALIDATE && !aconnector->force_yuv420_output) {
		DRM_DEBUG_KMS("Retry forcing YCbCr420 encoding\n");

		aconnector->force_yuv420_output = true;
		stream = create_validate_stream_for_sink(aconnector, drm_mode,
						dm_state, old_stream);
		aconnector->force_yuv420_output = false;
	}

	return stream;
}
6178
6179 enum drm_mode_status amdgpu_dm_connector_mode_valid(struct drm_connector *connector,
6180                                    struct drm_display_mode *mode)
6181 {
6182         int result = MODE_ERROR;
6183         struct dc_sink *dc_sink;
6184         /* TODO: Unhardcode stream count */
6185         struct dc_stream_state *stream;
6186         struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
6187
6188         if ((mode->flags & DRM_MODE_FLAG_INTERLACE) ||
6189                         (mode->flags & DRM_MODE_FLAG_DBLSCAN))
6190                 return result;
6191
6192         /*
6193          * Only run this the first time mode_valid is called to initilialize
6194          * EDID mgmt
6195          */
6196         if (aconnector->base.force != DRM_FORCE_UNSPECIFIED &&
6197                 !aconnector->dc_em_sink)
6198                 handle_edid_mgmt(aconnector);
6199
6200         dc_sink = to_amdgpu_dm_connector(connector)->dc_sink;
6201
6202         if (dc_sink == NULL && aconnector->base.force != DRM_FORCE_ON_DIGITAL &&
6203                                 aconnector->base.force != DRM_FORCE_ON) {
6204                 DRM_ERROR("dc_sink is NULL!\n");
6205                 goto fail;
6206         }
6207
6208         stream = create_validate_stream_for_sink(aconnector, mode, NULL, NULL);
6209         if (stream) {
6210                 dc_stream_release(stream);
6211                 result = MODE_OK;
6212         }
6213
6214 fail:
6215         /* TODO: error handling*/
6216         return result;
6217 }
6218
/*
 * Build the DC info packet carrying HDR static metadata from the connector
 * state's hdr_output_metadata blob: an HDMI Dynamic Range and Mastering
 * infoframe for HDMI, or the equivalent SDP for DP/eDP.
 *
 * Returns 0 on success (out->valid is set only when metadata is present),
 * a negative value on packing errors, unexpected packed length, or an
 * unsupported connector type.
 */
static int fill_hdr_info_packet(const struct drm_connector_state *state,
				struct dc_info_packet *out)
{
	struct hdmi_drm_infoframe frame;
	unsigned char buf[30]; /* 26 + 4 */
	ssize_t len;
	int ret, i;

	memset(out, 0, sizeof(*out));

	/* No metadata set: leave the packet zeroed and invalid. */
	if (!state->hdr_output_metadata)
		return 0;

	ret = drm_hdmi_infoframe_set_hdr_metadata(&frame, state);
	if (ret)
		return ret;

	len = hdmi_drm_infoframe_pack_only(&frame, buf, sizeof(buf));
	if (len < 0)
		return (int)len;

	/* Static metadata is a fixed 26 bytes + 4 byte header. */
	if (len != 30)
		return -EINVAL;

	/* Prepare the infopacket for DC. */
	switch (state->connector->connector_type) {
	case DRM_MODE_CONNECTOR_HDMIA:
		out->hb0 = 0x87; /* type */
		out->hb1 = 0x01; /* version */
		out->hb2 = 0x1A; /* length */
		out->sb[0] = buf[3]; /* checksum */
		i = 1;
		break;

	case DRM_MODE_CONNECTOR_DisplayPort:
	case DRM_MODE_CONNECTOR_eDP:
		out->hb0 = 0x00; /* sdp id, zero */
		out->hb1 = 0x87; /* type */
		out->hb2 = 0x1D; /* payload len - 1 */
		out->hb3 = (0x13 << 2); /* sdp version */
		out->sb[0] = 0x01; /* version */
		out->sb[1] = 0x1A; /* length */
		i = 2;
		break;

	default:
		return -EINVAL;
	}

	/* Copy the 26 metadata bytes that follow the 4-byte packed header. */
	memcpy(&out->sb[i], &buf[4], 26);
	out->valid = true;

	print_hex_dump(KERN_DEBUG, "HDR SB:", DUMP_PREFIX_NONE, 16, 1, out->sb,
		       sizeof(out->sb), false);

	return 0;
}
6277
6278 static bool
6279 is_hdr_metadata_different(const struct drm_connector_state *old_state,
6280                           const struct drm_connector_state *new_state)
6281 {
6282         struct drm_property_blob *old_blob = old_state->hdr_output_metadata;
6283         struct drm_property_blob *new_blob = new_state->hdr_output_metadata;
6284
6285         if (old_blob != new_blob) {
6286                 if (old_blob && new_blob &&
6287                     old_blob->length == new_blob->length)
6288                         return memcmp(old_blob->data, new_blob->data,
6289                                       old_blob->length);
6290
6291                 return true;
6292         }
6293
6294         return false;
6295 }
6296
/*
 * Connector .atomic_check: when the HDR output metadata changed, validate
 * that it can be packed into an infoframe and, when HDR is being entered
 * or exited, force a full modeset on the bound CRTC.
 */
static int
amdgpu_dm_connector_atomic_check(struct drm_connector *conn,
				 struct drm_atomic_state *state)
{
	struct drm_connector_state *new_con_state =
		drm_atomic_get_new_connector_state(state, conn);
	struct drm_connector_state *old_con_state =
		drm_atomic_get_old_connector_state(state, conn);
	struct drm_crtc *crtc = new_con_state->crtc;
	struct drm_crtc_state *new_crtc_state;
	int ret;

	trace_amdgpu_dm_connector_atomic_check(new_con_state);

	/* Connector not bound to a CRTC: nothing to validate. */
	if (!crtc)
		return 0;

	if (is_hdr_metadata_different(old_con_state, new_con_state)) {
		struct dc_info_packet hdr_infopacket;

		/* Reject metadata that cannot be packed into an infoframe. */
		ret = fill_hdr_info_packet(new_con_state, &hdr_infopacket);
		if (ret)
			return ret;

		new_crtc_state = drm_atomic_get_crtc_state(state, crtc);
		if (IS_ERR(new_crtc_state))
			return PTR_ERR(new_crtc_state);

		/*
		 * DC considers the stream backends changed if the
		 * static metadata changes. Forcing the modeset also
		 * gives a simple way for userspace to switch from
		 * 8bpc to 10bpc when setting the metadata to enter
		 * or exit HDR.
		 *
		 * Changing the static metadata after it's been
		 * set is permissible, however. So only force a
		 * modeset if we're entering or exiting HDR.
		 */
		new_crtc_state->mode_changed =
			!old_con_state->hdr_output_metadata ||
			!new_con_state->hdr_output_metadata;
	}

	return 0;
}
6343
/* Probe/validation helper vtable for amdgpu_dm connectors. */
static const struct drm_connector_helper_funcs
amdgpu_dm_connector_helper_funcs = {
	/*
	 * If hotplugging a second bigger display in FB Con mode, bigger resolution
	 * modes will be filtered by drm_mode_validate_size(), and those modes
	 * are missing after user start lightdm. So we need to renew modes list.
	 * in get_modes call back, not just return the modes count
	 */
	.get_modes = get_modes,
	.mode_valid = amdgpu_dm_connector_mode_valid,
	.atomic_check = amdgpu_dm_connector_atomic_check,
};
6356
/* Intentionally empty stub for drm_crtc_helper_funcs.disable. */
static void dm_crtc_helper_disable(struct drm_crtc *crtc)
{
}
6360
6361 static int count_crtc_active_planes(struct drm_crtc_state *new_crtc_state)
6362 {
6363         struct drm_atomic_state *state = new_crtc_state->state;
6364         struct drm_plane *plane;
6365         int num_active = 0;
6366
6367         drm_for_each_plane_mask(plane, state->dev, new_crtc_state->plane_mask) {
6368                 struct drm_plane_state *new_plane_state;
6369
6370                 /* Cursor planes are "fake". */
6371                 if (plane->type == DRM_PLANE_TYPE_CURSOR)
6372                         continue;
6373
6374                 new_plane_state = drm_atomic_get_new_plane_state(state, plane);
6375
6376                 if (!new_plane_state) {
6377                         /*
6378                          * The plane is enable on the CRTC and hasn't changed
6379                          * state. This means that it previously passed
6380                          * validation and is therefore enabled.
6381                          */
6382                         num_active += 1;
6383                         continue;
6384                 }
6385
6386                 /* We need a framebuffer to be considered enabled. */
6387                 num_active += (new_plane_state->fb != NULL);
6388         }
6389
6390         return num_active;
6391 }
6392
6393 static void dm_update_crtc_active_planes(struct drm_crtc *crtc,
6394                                          struct drm_crtc_state *new_crtc_state)
6395 {
6396         struct dm_crtc_state *dm_new_crtc_state =
6397                 to_dm_crtc_state(new_crtc_state);
6398
6399         dm_new_crtc_state->active_planes = 0;
6400
6401         if (!dm_new_crtc_state->stream)
6402                 return;
6403
6404         dm_new_crtc_state->active_planes =
6405                 count_crtc_active_planes(new_crtc_state);
6406 }
6407
/*
 * CRTC .atomic_check: refresh the active-plane count, require the primary
 * plane whenever the CRTC is enabled, and run DC stream validation on the
 * CRTC's stream (if one is attached).
 */
static int dm_crtc_helper_atomic_check(struct drm_crtc *crtc,
				       struct drm_atomic_state *state)
{
	struct drm_crtc_state *crtc_state = drm_atomic_get_new_crtc_state(state,
									  crtc);
	struct amdgpu_device *adev = drm_to_adev(crtc->dev);
	struct dc *dc = adev->dm.dc;
	struct dm_crtc_state *dm_crtc_state = to_dm_crtc_state(crtc_state);
	int ret = -EINVAL;

	trace_amdgpu_dm_crtc_atomic_check(crtc_state);

	dm_update_crtc_active_planes(crtc, crtc_state);

	/* A modeset being required without a stream indicates an
	 * inconsistent state; warn and reject. */
	if (unlikely(!dm_crtc_state->stream &&
		     modeset_required(crtc_state, NULL, dm_crtc_state->stream))) {
		WARN_ON(1);
		return ret;
	}

	/*
	 * We require the primary plane to be enabled whenever the CRTC is, otherwise
	 * drm_mode_cursor_universal may end up trying to enable the cursor plane while all other
	 * planes are disabled, which is not supported by the hardware. And there is legacy
	 * userspace which stops using the HW cursor altogether in response to the resulting EINVAL.
	 */
	if (crtc_state->enable &&
	    !(crtc_state->plane_mask & drm_plane_mask(crtc->primary))) {
		DRM_DEBUG_ATOMIC("Can't enable a CRTC without enabling the primary plane\n");
		return -EINVAL;
	}

	/* In some use cases, like reset, no stream is attached */
	if (!dm_crtc_state->stream)
		return 0;

	if (dc_validate_stream(dc, dm_crtc_state->stream) == DC_OK)
		return 0;

	DRM_DEBUG_ATOMIC("Failed DC stream validation\n");
	return ret;
}
6450
/* No fixup needed: accept every requested mode unchanged. */
static bool dm_crtc_helper_mode_fixup(struct drm_crtc *crtc,
				      const struct drm_display_mode *mode,
				      struct drm_display_mode *adjusted_mode)
{
	return true;
}
6457
/* CRTC helper vtable: atomic check, mode fixup and scanout-position query. */
static const struct drm_crtc_helper_funcs amdgpu_dm_crtc_helper_funcs = {
	.disable = dm_crtc_helper_disable,
	.atomic_check = dm_crtc_helper_atomic_check,
	.mode_fixup = dm_crtc_helper_mode_fixup,
	.get_scanout_position = amdgpu_crtc_get_scanout_position,
};
6464
/* Intentionally empty stub for drm_encoder_helper_funcs.disable. */
static void dm_encoder_helper_disable(struct drm_encoder *encoder)
{

}
6469
6470 static int convert_dc_color_depth_into_bpc (enum dc_color_depth display_color_depth)
6471 {
6472         switch (display_color_depth) {
6473                 case COLOR_DEPTH_666:
6474                         return 6;
6475                 case COLOR_DEPTH_888:
6476                         return 8;
6477                 case COLOR_DEPTH_101010:
6478                         return 10;
6479                 case COLOR_DEPTH_121212:
6480                         return 12;
6481                 case COLOR_DEPTH_141414:
6482                         return 14;
6483                 case COLOR_DEPTH_161616:
6484                         return 16;
6485                 default:
6486                         break;
6487                 }
6488         return 0;
6489 }
6490
/*
 * Encoder .atomic_check for DP MST connectors: compute the stream's PBN
 * from the adjusted mode and the negotiated color depth, then atomically
 * claim VCPI slots in the MST topology. Returns 0 immediately for non-MST
 * connectors or when neither connectors nor mode changed.
 */
static int dm_encoder_helper_atomic_check(struct drm_encoder *encoder,
					  struct drm_crtc_state *crtc_state,
					  struct drm_connector_state *conn_state)
{
	struct drm_atomic_state *state = crtc_state->state;
	struct drm_connector *connector = conn_state->connector;
	struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
	struct dm_connector_state *dm_new_connector_state = to_dm_connector_state(conn_state);
	const struct drm_display_mode *adjusted_mode = &crtc_state->adjusted_mode;
	struct drm_dp_mst_topology_mgr *mst_mgr;
	struct drm_dp_mst_port *mst_port;
	enum dc_color_depth color_depth;
	int clock, bpp = 0;
	bool is_y420 = false;

	/* Only MST connectors with a live sink need VCPI accounting. */
	if (!aconnector->port || !aconnector->dc_sink)
		return 0;

	mst_port = aconnector->port;
	mst_mgr = &aconnector->mst_port->mst_mgr;

	if (!crtc_state->connectors_changed && !crtc_state->mode_changed)
		return 0;

	/* Duplicated states keep their previously computed PBN. */
	if (!state->duplicated) {
		int max_bpc = conn_state->max_requested_bpc;
		is_y420 = drm_mode_is_420_also(&connector->display_info, adjusted_mode) &&
				aconnector->force_yuv420_output;
		color_depth = convert_color_depth_from_display_info(connector,
								    is_y420,
								    max_bpc);
		bpp = convert_dc_color_depth_into_bpc(color_depth) * 3;
		clock = adjusted_mode->clock;
		dm_new_connector_state->pbn = drm_dp_calc_pbn_mode(clock, bpp, false);
	}
	dm_new_connector_state->vcpi_slots = drm_dp_atomic_find_vcpi_slots(state,
									   mst_mgr,
									   mst_port,
									   dm_new_connector_state->pbn,
									   dm_mst_get_pbn_divider(aconnector->dc_link));
	if (dm_new_connector_state->vcpi_slots < 0) {
		DRM_DEBUG_ATOMIC("failed finding vcpi slots: %d\n", (int)dm_new_connector_state->vcpi_slots);
		return dm_new_connector_state->vcpi_slots;
	}
	return 0;
}
6537
/* Encoder helper vtable; non-static, so presumably shared with other DM
 * compilation units — confirm against the driver headers. */
const struct drm_encoder_helper_funcs amdgpu_dm_encoder_helper_funcs = {
	.disable = dm_encoder_helper_disable,
	.atomic_check = dm_encoder_helper_atomic_check
};
6542
#if defined(CONFIG_DRM_AMD_DC_DCN)
/*
 * For every MST connector in the atomic state, enable or disable DSC on
 * its MST port to match the backing DC stream's DSC config, and record the
 * resulting PBN/VCPI values in the dm connector state.
 *
 * Returns 0 on success or a negative error from the DSC enable call.
 */
static int dm_update_mst_vcpi_slots_for_dsc(struct drm_atomic_state *state,
					    struct dc_state *dc_state)
{
	struct dc_stream_state *stream = NULL;
	struct drm_connector *connector;
	struct drm_connector_state *new_con_state, *old_con_state;
	struct amdgpu_dm_connector *aconnector;
	struct dm_connector_state *dm_conn_state;
	int i, j, clock, bpp;
	int vcpi, pbn_div, pbn = 0;

	for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) {

		aconnector = to_amdgpu_dm_connector(connector);

		/* Skip non-MST connectors. */
		if (!aconnector->port)
			continue;

		if (!new_con_state || !new_con_state->crtc)
			continue;

		dm_conn_state = to_dm_connector_state(new_con_state);

		/* Find the DC stream whose context points back at this
		 * connector, if any. */
		for (j = 0; j < dc_state->stream_count; j++) {
			stream = dc_state->streams[j];
			if (!stream)
				continue;

			if ((struct amdgpu_dm_connector*)stream->dm_stream_context == aconnector)
				break;

			stream = NULL;
		}

		if (!stream)
			continue;

		/* DSC not in use: disable it on the port, keep current PBN. */
		if (stream->timing.flags.DSC != 1) {
			drm_dp_mst_atomic_enable_dsc(state,
						     aconnector->port,
						     dm_conn_state->pbn,
						     0,
						     false);
			continue;
		}

		/* Recompute PBN from the DSC-compressed rate, then enable
		 * DSC and claim the matching VCPI slots. */
		pbn_div = dm_mst_get_pbn_divider(stream->link);
		bpp = stream->timing.dsc_cfg.bits_per_pixel;
		clock = stream->timing.pix_clk_100hz / 10;
		pbn = drm_dp_calc_pbn_mode(clock, bpp, true);
		vcpi = drm_dp_mst_atomic_enable_dsc(state,
						    aconnector->port,
						    pbn, pbn_div,
						    true);
		if (vcpi < 0)
			return vcpi;

		dm_conn_state->pbn = pbn;
		dm_conn_state->vcpi_slots = vcpi;
	}
	return 0;
}
#endif
6607
6608 static void dm_drm_plane_reset(struct drm_plane *plane)
6609 {
6610         struct dm_plane_state *amdgpu_state = NULL;
6611
6612         if (plane->state)
6613                 plane->funcs->atomic_destroy_state(plane, plane->state);
6614
6615         amdgpu_state = kzalloc(sizeof(*amdgpu_state), GFP_KERNEL);
6616         WARN_ON(amdgpu_state == NULL);
6617
6618         if (amdgpu_state)
6619                 __drm_atomic_helper_plane_reset(plane, &amdgpu_state->base);
6620 }
6621
6622 static struct drm_plane_state *
6623 dm_drm_plane_duplicate_state(struct drm_plane *plane)
6624 {
6625         struct dm_plane_state *dm_plane_state, *old_dm_plane_state;
6626
6627         old_dm_plane_state = to_dm_plane_state(plane->state);
6628         dm_plane_state = kzalloc(sizeof(*dm_plane_state), GFP_KERNEL);
6629         if (!dm_plane_state)
6630                 return NULL;
6631
6632         __drm_atomic_helper_plane_duplicate_state(plane, &dm_plane_state->base);
6633
6634         if (old_dm_plane_state->dc_state) {
6635                 dm_plane_state->dc_state = old_dm_plane_state->dc_state;
6636                 dc_plane_state_retain(dm_plane_state->dc_state);
6637         }
6638
6639         return &dm_plane_state->base;
6640 }
6641
6642 static void dm_drm_plane_destroy_state(struct drm_plane *plane,
6643                                 struct drm_plane_state *state)
6644 {
6645         struct dm_plane_state *dm_plane_state = to_dm_plane_state(state);
6646
6647         if (dm_plane_state->dc_state)
6648                 dc_plane_state_release(dm_plane_state->dc_state);
6649
6650         drm_atomic_helper_plane_destroy_state(plane, state);
6651 }
6652
/* Plane vtable: atomic helpers plus DM-specific state handling that wraps
 * a dc_plane_state, and the modifier support check. */
static const struct drm_plane_funcs dm_plane_funcs = {
	.update_plane	= drm_atomic_helper_update_plane,
	.disable_plane	= drm_atomic_helper_disable_plane,
	.destroy	= drm_primary_helper_destroy,
	.reset = dm_drm_plane_reset,
	.atomic_duplicate_state = dm_drm_plane_duplicate_state,
	.atomic_destroy_state = dm_drm_plane_destroy_state,
	.format_mod_supported = dm_plane_format_mod_supported,
};
6662
/*
 * Plane .prepare_fb: reserve and pin the framebuffer BO in a displayable
 * domain, bind it into GART, record the resulting GPU address in the
 * amdgpu_framebuffer, and fill DC buffer attributes for newly created
 * plane states. Undone by dm_plane_helper_cleanup_fb().
 */
static int dm_plane_helper_prepare_fb(struct drm_plane *plane,
				      struct drm_plane_state *new_state)
{
	struct amdgpu_framebuffer *afb;
	struct drm_gem_object *obj;
	struct amdgpu_device *adev;
	struct amdgpu_bo *rbo;
	struct dm_plane_state *dm_plane_state_new, *dm_plane_state_old;
	struct list_head list;
	struct ttm_validate_buffer tv;
	struct ww_acquire_ctx ticket;
	uint32_t domain;
	int r;

	if (!new_state->fb) {
		DRM_DEBUG_KMS("No FB bound\n");
		return 0;
	}

	afb = to_amdgpu_framebuffer(new_state->fb);
	obj = new_state->fb->obj[0];
	rbo = gem_to_amdgpu_bo(obj);
	adev = amdgpu_ttm_adev(rbo->tbo.bdev);
	INIT_LIST_HEAD(&list);

	tv.bo = &rbo->tbo;
	tv.num_shared = 1;
	list_add(&tv.head, &list);

	/* Reserve the BO before pinning; backed off on every error path. */
	r = ttm_eu_reserve_buffers(&ticket, &list, false, NULL);
	if (r) {
		dev_err(adev->dev, "fail to reserve bo (%d)\n", r);
		return r;
	}

	/* Cursor planes must live in VRAM; others use any supported domain. */
	if (plane->type != DRM_PLANE_TYPE_CURSOR)
		domain = amdgpu_display_supported_domains(adev, rbo->flags);
	else
		domain = AMDGPU_GEM_DOMAIN_VRAM;

	r = amdgpu_bo_pin(rbo, domain);
	if (unlikely(r != 0)) {
		if (r != -ERESTARTSYS)
			DRM_ERROR("Failed to pin framebuffer with error %d\n", r);
		ttm_eu_backoff_reservation(&ticket, &list);
		return r;
	}

	r = amdgpu_ttm_alloc_gart(&rbo->tbo);
	if (unlikely(r != 0)) {
		amdgpu_bo_unpin(rbo);
		ttm_eu_backoff_reservation(&ticket, &list);
		DRM_ERROR("%p bind failed\n", rbo);
		return r;
	}

	ttm_eu_backoff_reservation(&ticket, &list);

	afb->address = amdgpu_bo_gpu_offset(rbo);

	/* Hold a BO reference while the FB is in use; dropped in cleanup_fb. */
	amdgpu_bo_ref(rbo);

	/**
	 * We don't do surface updates on planes that have been newly created,
	 * but we also don't have the afb->address during atomic check.
	 *
	 * Fill in buffer attributes depending on the address here, but only on
	 * newly created planes since they're not being used by DC yet and this
	 * won't modify global state.
	 */
	dm_plane_state_old = to_dm_plane_state(plane->state);
	dm_plane_state_new = to_dm_plane_state(new_state);

	if (dm_plane_state_new->dc_state &&
	    dm_plane_state_old->dc_state != dm_plane_state_new->dc_state) {
		struct dc_plane_state *plane_state =
			dm_plane_state_new->dc_state;
		bool force_disable_dcc = !plane_state->dcc.enable;

		fill_plane_buffer_attributes(
			adev, afb, plane_state->format, plane_state->rotation,
			afb->tiling_flags,
			&plane_state->tiling_info, &plane_state->plane_size,
			&plane_state->dcc, &plane_state->address,
			afb->tmz_surface, force_disable_dcc);
	}

	return 0;
}
6752
6753 static void dm_plane_helper_cleanup_fb(struct drm_plane *plane,
6754                                        struct drm_plane_state *old_state)
6755 {
6756         struct amdgpu_bo *rbo;
6757         int r;
6758
6759         if (!old_state->fb)
6760                 return;
6761
6762         rbo = gem_to_amdgpu_bo(old_state->fb->obj[0]);
6763         r = amdgpu_bo_reserve(rbo, false);
6764         if (unlikely(r)) {
6765                 DRM_ERROR("failed to reserve rbo before unpin\n");
6766                 return;
6767         }
6768
6769         amdgpu_bo_unpin(rbo);
6770         amdgpu_bo_unreserve(rbo);
6771         amdgpu_bo_unref(&rbo);
6772 }
6773
/*
 * Validate a plane state's on-screen viewport against the CRTC mode and
 * check its scaling against the plane's DC min/max scaling caps via the
 * common DRM atomic plane-state helper.
 *
 * Returns 0 when valid, -EINVAL when the visible viewport is empty or
 * below the minimum size, or whatever the DRM helper returns.
 */
static int dm_plane_helper_check_state(struct drm_plane_state *state,
				       struct drm_crtc_state *new_crtc_state)
{
	struct drm_framebuffer *fb = state->fb;
	int min_downscale, max_upscale;
	int min_scale = 0;
	int max_scale = INT_MAX;

	/* Plane enabled? Validate viewport and get scaling factors from plane caps. */
	if (fb && state->crtc) {
		/* Validate viewport to cover the case when only the position changes */
		if (state->plane->type != DRM_PLANE_TYPE_CURSOR) {
			int viewport_width = state->crtc_w;
			int viewport_height = state->crtc_h;

			/* Clip the plane rectangle to the CRTC display area. */
			if (state->crtc_x < 0)
				viewport_width += state->crtc_x;
			else if (state->crtc_x + state->crtc_w > new_crtc_state->mode.crtc_hdisplay)
				viewport_width = new_crtc_state->mode.crtc_hdisplay - state->crtc_x;

			if (state->crtc_y < 0)
				viewport_height += state->crtc_y;
			else if (state->crtc_y + state->crtc_h > new_crtc_state->mode.crtc_vdisplay)
				viewport_height = new_crtc_state->mode.crtc_vdisplay - state->crtc_y;

			if (viewport_width < 0 || viewport_height < 0) {
				DRM_DEBUG_ATOMIC("Plane completely outside of screen\n");
				return -EINVAL;
			} else if (viewport_width < MIN_VIEWPORT_SIZE*2) { /* x2 for width is because of pipe-split. */
				DRM_DEBUG_ATOMIC("Viewport width %d smaller than %d\n", viewport_width, MIN_VIEWPORT_SIZE*2);
				return -EINVAL;
			} else if (viewport_height < MIN_VIEWPORT_SIZE) {
				DRM_DEBUG_ATOMIC("Viewport height %d smaller than %d\n", viewport_height, MIN_VIEWPORT_SIZE);
				return -EINVAL;
			}

		}

		/* Get min/max allowed scaling factors from plane caps. */
		get_min_max_dc_plane_scaling(state->crtc->dev, fb,
					     &min_downscale, &max_upscale);
		/*
		 * Convert to drm convention: 16.16 fixed point, instead of dc's
		 * 1.0 == 1000. Also drm scaling is src/dst instead of dc's
		 * dst/src, so min_scale = 1.0 / max_upscale, etc.
		 */
		min_scale = (1000 << 16) / max_upscale;
		max_scale = (1000 << 16) / min_downscale;
	}

	return drm_atomic_helper_check_plane_state(
		state, new_crtc_state, min_scale, max_scale, true, true);
}
6827
6828 static int dm_plane_atomic_check(struct drm_plane *plane,
6829                                  struct drm_atomic_state *state)
6830 {
6831         struct drm_plane_state *new_plane_state = drm_atomic_get_new_plane_state(state,
6832                                                                                  plane);
6833         struct amdgpu_device *adev = drm_to_adev(plane->dev);
6834         struct dc *dc = adev->dm.dc;
6835         struct dm_plane_state *dm_plane_state;
6836         struct dc_scaling_info scaling_info;
6837         struct drm_crtc_state *new_crtc_state;
6838         int ret;
6839
6840         trace_amdgpu_dm_plane_atomic_check(new_plane_state);
6841
6842         dm_plane_state = to_dm_plane_state(new_plane_state);
6843
6844         if (!dm_plane_state->dc_state)
6845                 return 0;
6846
6847         new_crtc_state =
6848                 drm_atomic_get_new_crtc_state(state,
6849                                               new_plane_state->crtc);
6850         if (!new_crtc_state)
6851                 return -EINVAL;
6852
6853         ret = dm_plane_helper_check_state(new_plane_state, new_crtc_state);
6854         if (ret)
6855                 return ret;
6856
6857         ret = fill_dc_scaling_info(new_plane_state, &scaling_info);
6858         if (ret)
6859                 return ret;
6860
6861         if (dc_validate_plane(dc, dm_plane_state->dc_state) == DC_OK)
6862                 return 0;
6863
6864         return -EINVAL;
6865 }
6866
6867 static int dm_plane_atomic_async_check(struct drm_plane *plane,
6868                                        struct drm_atomic_state *state)
6869 {
6870         /* Only support async updates on cursor planes. */
6871         if (plane->type != DRM_PLANE_TYPE_CURSOR)
6872                 return -EINVAL;
6873
6874         return 0;
6875 }
6876
/*
 * Commit an async cursor update: copy the new source/destination rectangle
 * into the live plane state and program the cursor hardware directly,
 * bypassing the full atomic commit machinery.
 */
static void dm_plane_atomic_async_update(struct drm_plane *plane,
					 struct drm_atomic_state *state)
{
	struct drm_plane_state *new_state = drm_atomic_get_new_plane_state(state,
									   plane);
	struct drm_plane_state *old_state =
		drm_atomic_get_old_plane_state(state, plane);

	trace_amdgpu_dm_atomic_update_cursor(new_state);

	/* Swap fbs so the previous fb is released along with new_state. */
	swap(plane->state->fb, new_state->fb);

	plane->state->src_x = new_state->src_x;
	plane->state->src_y = new_state->src_y;
	plane->state->src_w = new_state->src_w;
	plane->state->src_h = new_state->src_h;
	plane->state->crtc_x = new_state->crtc_x;
	plane->state->crtc_y = new_state->crtc_y;
	plane->state->crtc_w = new_state->crtc_w;
	plane->state->crtc_h = new_state->crtc_h;

	handle_cursor_update(plane, old_state);
}
6900
/* Atomic helper callbacks for DM planes (fb prepare/cleanup, validation,
 * and the async cursor fast path). */
static const struct drm_plane_helper_funcs dm_plane_helper_funcs = {
	.prepare_fb = dm_plane_helper_prepare_fb,
	.cleanup_fb = dm_plane_helper_cleanup_fb,
	.atomic_check = dm_plane_atomic_check,
	.atomic_async_check = dm_plane_atomic_async_check,
	.atomic_async_update = dm_plane_atomic_async_update
};
6908
6909 /*
6910  * TODO: these are currently initialized to rgb formats only.
6911  * For future use cases we should either initialize them dynamically based on
6912  * plane capabilities, or initialize this array to all formats, so internal drm
6913  * check will succeed, and let DC implement proper check
6914  */
6915 static const uint32_t rgb_formats[] = {
6916         DRM_FORMAT_XRGB8888,
6917         DRM_FORMAT_ARGB8888,
6918         DRM_FORMAT_RGBA8888,
6919         DRM_FORMAT_XRGB2101010,
6920         DRM_FORMAT_XBGR2101010,
6921         DRM_FORMAT_ARGB2101010,
6922         DRM_FORMAT_ABGR2101010,
6923         DRM_FORMAT_XBGR8888,
6924         DRM_FORMAT_ABGR8888,
6925         DRM_FORMAT_RGB565,
6926 };
6927
6928 static const uint32_t overlay_formats[] = {
6929         DRM_FORMAT_XRGB8888,
6930         DRM_FORMAT_ARGB8888,
6931         DRM_FORMAT_RGBA8888,
6932         DRM_FORMAT_XBGR8888,
6933         DRM_FORMAT_ABGR8888,
6934         DRM_FORMAT_RGB565
6935 };
6936
6937 static const u32 cursor_formats[] = {
6938         DRM_FORMAT_ARGB8888
6939 };
6940
6941 static int get_plane_formats(const struct drm_plane *plane,
6942                              const struct dc_plane_cap *plane_cap,
6943                              uint32_t *formats, int max_formats)
6944 {
6945         int i, num_formats = 0;
6946
6947         /*
6948          * TODO: Query support for each group of formats directly from
6949          * DC plane caps. This will require adding more formats to the
6950          * caps list.
6951          */
6952
6953         switch (plane->type) {
6954         case DRM_PLANE_TYPE_PRIMARY:
6955                 for (i = 0; i < ARRAY_SIZE(rgb_formats); ++i) {
6956                         if (num_formats >= max_formats)
6957                                 break;
6958
6959                         formats[num_formats++] = rgb_formats[i];
6960                 }
6961
6962                 if (plane_cap && plane_cap->pixel_format_support.nv12)
6963                         formats[num_formats++] = DRM_FORMAT_NV12;
6964                 if (plane_cap && plane_cap->pixel_format_support.p010)
6965                         formats[num_formats++] = DRM_FORMAT_P010;
6966                 if (plane_cap && plane_cap->pixel_format_support.fp16) {
6967                         formats[num_formats++] = DRM_FORMAT_XRGB16161616F;
6968                         formats[num_formats++] = DRM_FORMAT_ARGB16161616F;
6969                         formats[num_formats++] = DRM_FORMAT_XBGR16161616F;
6970                         formats[num_formats++] = DRM_FORMAT_ABGR16161616F;
6971                 }
6972                 break;
6973
6974         case DRM_PLANE_TYPE_OVERLAY:
6975                 for (i = 0; i < ARRAY_SIZE(overlay_formats); ++i) {
6976                         if (num_formats >= max_formats)
6977                                 break;
6978
6979                         formats[num_formats++] = overlay_formats[i];
6980                 }
6981                 break;
6982
6983         case DRM_PLANE_TYPE_CURSOR:
6984                 for (i = 0; i < ARRAY_SIZE(cursor_formats); ++i) {
6985                         if (num_formats >= max_formats)
6986                                 break;
6987
6988                         formats[num_formats++] = cursor_formats[i];
6989                 }
6990                 break;
6991         }
6992
6993         return num_formats;
6994 }
6995
/*
 * Initialize a DRM plane for DM: register it with the supported formats and
 * modifiers, then attach the optional properties (alpha/blend mode, YUV
 * color encoding/range, rotation) that the DC plane caps allow.
 *
 * Returns 0 on success or a negative errno from modifier/plane init.
 */
static int amdgpu_dm_plane_init(struct amdgpu_display_manager *dm,
				struct drm_plane *plane,
				unsigned long possible_crtcs,
				const struct dc_plane_cap *plane_cap)
{
	uint32_t formats[32];
	int num_formats;
	int res = -EPERM;	/* overwritten before first use */
	unsigned int supported_rotations;
	uint64_t *modifiers = NULL;

	num_formats = get_plane_formats(plane, plane_cap, formats,
					ARRAY_SIZE(formats));

	res = get_plane_modifiers(dm->adev, plane->type, &modifiers);
	if (res)
		return res;

	res = drm_universal_plane_init(adev_to_drm(dm->adev), plane, possible_crtcs,
				       &dm_plane_funcs, formats, num_formats,
				       modifiers, plane->type, NULL);
	/* The modifier list is copied by drm_universal_plane_init(). */
	kfree(modifiers);
	if (res)
		return res;

	if (plane->type == DRM_PLANE_TYPE_OVERLAY &&
	    plane_cap && plane_cap->per_pixel_alpha) {
		unsigned int blend_caps = BIT(DRM_MODE_BLEND_PIXEL_NONE) |
					  BIT(DRM_MODE_BLEND_PREMULTI);

		drm_plane_create_alpha_property(plane);
		drm_plane_create_blend_mode_property(plane, blend_caps);
	}

	if (plane->type == DRM_PLANE_TYPE_PRIMARY &&
	    plane_cap &&
	    (plane_cap->pixel_format_support.nv12 ||
	     plane_cap->pixel_format_support.p010)) {
		/* This only affects YUV formats. */
		drm_plane_create_color_properties(
			plane,
			BIT(DRM_COLOR_YCBCR_BT601) |
			BIT(DRM_COLOR_YCBCR_BT709) |
			BIT(DRM_COLOR_YCBCR_BT2020),
			BIT(DRM_COLOR_YCBCR_LIMITED_RANGE) |
			BIT(DRM_COLOR_YCBCR_FULL_RANGE),
			DRM_COLOR_YCBCR_BT709, DRM_COLOR_YCBCR_LIMITED_RANGE);
	}

	supported_rotations =
		DRM_MODE_ROTATE_0 | DRM_MODE_ROTATE_90 |
		DRM_MODE_ROTATE_180 | DRM_MODE_ROTATE_270;

	/* Rotation is only exposed on DCE8+ hardware, and never on cursors. */
	if (dm->adev->asic_type >= CHIP_BONAIRE &&
	    plane->type != DRM_PLANE_TYPE_CURSOR)
		drm_plane_create_rotation_property(plane, DRM_MODE_ROTATE_0,
						   supported_rotations);

	drm_plane_helper_add(plane, &dm_plane_helper_funcs);

	/* Create (reset) the plane state */
	if (plane->funcs->reset)
		plane->funcs->reset(plane);

	return 0;
}
7062
7063 static int amdgpu_dm_crtc_init(struct amdgpu_display_manager *dm,
7064                                struct drm_plane *plane,
7065                                uint32_t crtc_index)
7066 {
7067         struct amdgpu_crtc *acrtc = NULL;
7068         struct drm_plane *cursor_plane;
7069
7070         int res = -ENOMEM;
7071
7072         cursor_plane = kzalloc(sizeof(*cursor_plane), GFP_KERNEL);
7073         if (!cursor_plane)
7074                 goto fail;
7075
7076         cursor_plane->type = DRM_PLANE_TYPE_CURSOR;
7077         res = amdgpu_dm_plane_init(dm, cursor_plane, 0, NULL);
7078
7079         acrtc = kzalloc(sizeof(struct amdgpu_crtc), GFP_KERNEL);
7080         if (!acrtc)
7081                 goto fail;
7082
7083         res = drm_crtc_init_with_planes(
7084                         dm->ddev,
7085                         &acrtc->base,
7086                         plane,
7087                         cursor_plane,
7088                         &amdgpu_dm_crtc_funcs, NULL);
7089
7090         if (res)
7091                 goto fail;
7092
7093         drm_crtc_helper_add(&acrtc->base, &amdgpu_dm_crtc_helper_funcs);
7094
7095         /* Create (reset) the plane state */
7096         if (acrtc->base.funcs->reset)
7097                 acrtc->base.funcs->reset(&acrtc->base);
7098
7099         acrtc->max_cursor_width = dm->adev->dm.dc->caps.max_cursor_size;
7100         acrtc->max_cursor_height = dm->adev->dm.dc->caps.max_cursor_size;
7101
7102         acrtc->crtc_id = crtc_index;
7103         acrtc->base.enabled = false;
7104         acrtc->otg_inst = -1;
7105
7106         dm->adev->mode_info.crtcs[crtc_index] = acrtc;
7107         drm_crtc_enable_color_mgmt(&acrtc->base, MAX_COLOR_LUT_ENTRIES,
7108                                    true, MAX_COLOR_LUT_ENTRIES);
7109         drm_mode_crtc_set_gamma_size(&acrtc->base, MAX_COLOR_LEGACY_LUT_ENTRIES);
7110
7111         return 0;
7112
7113 fail:
7114         kfree(acrtc);
7115         kfree(cursor_plane);
7116         return res;
7117 }
7118
7119
7120 static int to_drm_connector_type(enum signal_type st)
7121 {
7122         switch (st) {
7123         case SIGNAL_TYPE_HDMI_TYPE_A:
7124                 return DRM_MODE_CONNECTOR_HDMIA;
7125         case SIGNAL_TYPE_EDP:
7126                 return DRM_MODE_CONNECTOR_eDP;
7127         case SIGNAL_TYPE_LVDS:
7128                 return DRM_MODE_CONNECTOR_LVDS;
7129         case SIGNAL_TYPE_RGB:
7130                 return DRM_MODE_CONNECTOR_VGA;
7131         case SIGNAL_TYPE_DISPLAY_PORT:
7132         case SIGNAL_TYPE_DISPLAY_PORT_MST:
7133                 return DRM_MODE_CONNECTOR_DisplayPort;
7134         case SIGNAL_TYPE_DVI_DUAL_LINK:
7135         case SIGNAL_TYPE_DVI_SINGLE_LINK:
7136                 return DRM_MODE_CONNECTOR_DVID;
7137         case SIGNAL_TYPE_VIRTUAL:
7138                 return DRM_MODE_CONNECTOR_VIRTUAL;
7139
7140         default:
7141                 return DRM_MODE_CONNECTOR_Unknown;
7142         }
7143 }
7144
/*
 * Return the encoder attached to @connector, or NULL if none.
 * Relies on the DM topology having exactly one encoder per connector,
 * so returning from the first loop iteration is sufficient.
 */
static struct drm_encoder *amdgpu_dm_connector_to_encoder(struct drm_connector *connector)
{
	struct drm_encoder *encoder;

	/* There is only one encoder per connector */
	drm_connector_for_each_possible_encoder(connector, encoder)
		return encoder;

	return NULL;
}
7155
/*
 * Cache the connector's preferred display mode in the encoder's
 * native_mode field.  native_mode.clock is zeroed first, so a clock of 0
 * afterwards means "no native mode found".
 */
static void amdgpu_dm_get_native_mode(struct drm_connector *connector)
{
	struct drm_encoder *encoder;
	struct amdgpu_encoder *amdgpu_encoder;

	encoder = amdgpu_dm_connector_to_encoder(connector);

	if (encoder == NULL)
		return;

	amdgpu_encoder = to_amdgpu_encoder(encoder);

	amdgpu_encoder->native_mode.clock = 0;

	if (!list_empty(&connector->probed_modes)) {
		struct drm_display_mode *preferred_mode = NULL;

		list_for_each_entry(preferred_mode,
				    &connector->probed_modes,
				    head) {
			if (preferred_mode->type & DRM_MODE_TYPE_PREFERRED)
				amdgpu_encoder->native_mode = *preferred_mode;

			/* NOTE(review): this break is unconditional, so only
			 * the head of probed_modes is ever examined.  The
			 * caller sorts probed_modes before invoking us, which
			 * presumably puts the wanted mode first — confirm
			 * this is the intended behavior. */
			break;
		}

	}
}
7184
7185 static struct drm_display_mode *
7186 amdgpu_dm_create_common_mode(struct drm_encoder *encoder,
7187                              char *name,
7188                              int hdisplay, int vdisplay)
7189 {
7190         struct drm_device *dev = encoder->dev;
7191         struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
7192         struct drm_display_mode *mode = NULL;
7193         struct drm_display_mode *native_mode = &amdgpu_encoder->native_mode;
7194
7195         mode = drm_mode_duplicate(dev, native_mode);
7196
7197         if (mode == NULL)
7198                 return NULL;
7199
7200         mode->hdisplay = hdisplay;
7201         mode->vdisplay = vdisplay;
7202         mode->type &= ~DRM_MODE_TYPE_PREFERRED;
7203         strscpy(mode->name, name, DRM_DISPLAY_MODE_LEN);
7204
7205         return mode;
7206
7207 }
7208
7209 static void amdgpu_dm_connector_add_common_modes(struct drm_encoder *encoder,
7210                                                  struct drm_connector *connector)
7211 {
7212         struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
7213         struct drm_display_mode *mode = NULL;
7214         struct drm_display_mode *native_mode = &amdgpu_encoder->native_mode;
7215         struct amdgpu_dm_connector *amdgpu_dm_connector =
7216                                 to_amdgpu_dm_connector(connector);
7217         int i;
7218         int n;
7219         struct mode_size {
7220                 char name[DRM_DISPLAY_MODE_LEN];
7221                 int w;
7222                 int h;
7223         } common_modes[] = {
7224                 {  "640x480",  640,  480},
7225                 {  "800x600",  800,  600},
7226                 { "1024x768", 1024,  768},
7227                 { "1280x720", 1280,  720},
7228                 { "1280x800", 1280,  800},
7229                 {"1280x1024", 1280, 1024},
7230                 { "1440x900", 1440,  900},
7231                 {"1680x1050", 1680, 1050},
7232                 {"1600x1200", 1600, 1200},
7233                 {"1920x1080", 1920, 1080},
7234                 {"1920x1200", 1920, 1200}
7235         };
7236
7237         n = ARRAY_SIZE(common_modes);
7238
7239         for (i = 0; i < n; i++) {
7240                 struct drm_display_mode *curmode = NULL;
7241                 bool mode_existed = false;
7242
7243                 if (common_modes[i].w > native_mode->hdisplay ||
7244                     common_modes[i].h > native_mode->vdisplay ||
7245                    (common_modes[i].w == native_mode->hdisplay &&
7246                     common_modes[i].h == native_mode->vdisplay))
7247                         continue;
7248
7249                 list_for_each_entry(curmode, &connector->probed_modes, head) {
7250                         if (common_modes[i].w == curmode->hdisplay &&
7251                             common_modes[i].h == curmode->vdisplay) {
7252                                 mode_existed = true;
7253                                 break;
7254                         }
7255                 }
7256
7257                 if (mode_existed)
7258                         continue;
7259
7260                 mode = amdgpu_dm_create_common_mode(encoder,
7261                                 common_modes[i].name, common_modes[i].w,
7262                                 common_modes[i].h);
7263                 drm_mode_probed_add(connector, mode);
7264                 amdgpu_dm_connector->num_modes++;
7265         }
7266 }
7267
/*
 * Populate the connector's mode list from @edid, refresh the cached native
 * mode, and restore the freesync capabilities that drm_add_edid_modes()
 * resets.  With no EDID the mode count is simply zeroed.
 */
static void amdgpu_dm_connector_ddc_get_modes(struct drm_connector *connector,
					      struct edid *edid)
{
	struct amdgpu_dm_connector *amdgpu_dm_connector =
			to_amdgpu_dm_connector(connector);

	if (edid) {
		/* empty probed_modes */
		INIT_LIST_HEAD(&connector->probed_modes);
		amdgpu_dm_connector->num_modes =
				drm_add_edid_modes(connector, edid);

		/* sorting the probed modes before calling function
		 * amdgpu_dm_get_native_mode() since EDID can have
		 * more than one preferred mode. The modes that are
		 * later in the probed mode list could be of higher
		 * and preferred resolution. For example, 3840x2160
		 * resolution in base EDID preferred timing and 4096x2160
		 * preferred resolution in DID extension block later.
		 */
		drm_mode_sort(&connector->probed_modes);
		amdgpu_dm_get_native_mode(connector);

		/* Freesync capabilities are reset by calling
		 * drm_add_edid_modes() and need to be
		 * restored here.
		 */
		amdgpu_dm_update_freesync_caps(connector, edid);
	} else {
		amdgpu_dm_connector->num_modes = 0;
	}
}
7300
7301 static bool is_duplicate_mode(struct amdgpu_dm_connector *aconnector,
7302                               struct drm_display_mode *mode)
7303 {
7304         struct drm_display_mode *m;
7305
7306         list_for_each_entry (m, &aconnector->base.probed_modes, head) {
7307                 if (drm_mode_equal(m, mode))
7308                         return true;
7309         }
7310
7311         return false;
7312 }
7313
/*
 * Synthesize additional fixed-refresh modes for freesync video: for each
 * standard FPS value within the connector's VRR range, duplicate the
 * highest-refresh probed mode and stretch its vertical blanking so it
 * refreshes at that rate.
 *
 * Returns the number of modes added to the probed list.
 */
static uint add_fs_modes(struct amdgpu_dm_connector *aconnector)
{
	const struct drm_display_mode *m;
	struct drm_display_mode *new_mode;
	uint i;
	uint32_t new_modes_count = 0;

	/* Standard FPS values
	 *
	 * 23.976   - TV/NTSC
	 * 24       - Cinema
	 * 25       - TV/PAL
	 * 29.97    - TV/NTSC
	 * 30       - TV/NTSC
	 * 48       - Cinema HFR
	 * 50       - TV/PAL
	 * 60       - Commonly used
	 * 48,72,96 - Multiples of 24
	 */
	const uint32_t common_rates[] = { 23976, 24000, 25000, 29970, 30000,
					 48000, 50000, 60000, 72000, 96000 };

	/*
	 * Find mode with highest refresh rate with the same resolution
	 * as the preferred mode. Some monitors report a preferred mode
	 * with lower resolution than the highest refresh rate supported.
	 */

	m = get_highest_refresh_rate_mode(aconnector, true);
	if (!m)
		return 0;

	for (i = 0; i < ARRAY_SIZE(common_rates); i++) {
		uint64_t target_vtotal, target_vtotal_diff;
		uint64_t num, den;

		/* Can only lower the refresh rate by stretching vblank. */
		if (drm_mode_vrefresh(m) * 1000 < common_rates[i])
			continue;

		/* Target rate must lie inside the monitor's VRR range. */
		if (common_rates[i] < aconnector->min_vfreq * 1000 ||
		    common_rates[i] > aconnector->max_vfreq * 1000)
			continue;

		/* vtotal needed for the target rate at the base mode's
		 * pixel clock and htotal: clock / (htotal * rate). */
		num = (unsigned long long)m->clock * 1000 * 1000;
		den = common_rates[i] * (unsigned long long)m->htotal;
		target_vtotal = div_u64(num, den);
		target_vtotal_diff = target_vtotal - m->vtotal;

		/* Check for illegal modes */
		if (m->vsync_start + target_vtotal_diff < m->vdisplay ||
		    m->vsync_end + target_vtotal_diff < m->vsync_start ||
		    m->vtotal + target_vtotal_diff < m->vsync_end)
			continue;

		new_mode = drm_mode_duplicate(aconnector->base.dev, m);
		if (!new_mode)
			goto out;

		new_mode->vtotal += (u16)target_vtotal_diff;
		new_mode->vsync_start += (u16)target_vtotal_diff;
		new_mode->vsync_end += (u16)target_vtotal_diff;
		new_mode->type &= ~DRM_MODE_TYPE_PREFERRED;
		new_mode->type |= DRM_MODE_TYPE_DRIVER;

		if (!is_duplicate_mode(aconnector, new_mode)) {
			drm_mode_probed_add(&aconnector->base, new_mode);
			new_modes_count += 1;
		} else
			drm_mode_destroy(aconnector->base.dev, new_mode);
	}
 out:
	return new_modes_count;
}
7387
7388 static void amdgpu_dm_connector_add_freesync_modes(struct drm_connector *connector,
7389                                                    struct edid *edid)
7390 {
7391         struct amdgpu_dm_connector *amdgpu_dm_connector =
7392                 to_amdgpu_dm_connector(connector);
7393
7394         if (!(amdgpu_freesync_vid_mode && edid))
7395                 return;
7396
7397         if (amdgpu_dm_connector->max_vfreq - amdgpu_dm_connector->min_vfreq > 10)
7398                 amdgpu_dm_connector->num_modes +=
7399                         add_fs_modes(amdgpu_dm_connector);
7400 }
7401
/*
 * DRM .get_modes callback: build the connector's mode list from the cached
 * EDID (plus common and freesync modes), or fall back to the no-EDID
 * defaults (640x480 base) when the EDID is absent/invalid.
 *
 * Returns the number of modes on the connector.
 */
static int amdgpu_dm_connector_get_modes(struct drm_connector *connector)
{
	struct amdgpu_dm_connector *amdgpu_dm_connector =
			to_amdgpu_dm_connector(connector);
	struct drm_encoder *encoder;
	struct edid *edid = amdgpu_dm_connector->edid;

	encoder = amdgpu_dm_connector_to_encoder(connector);

	if (!drm_edid_is_valid(edid)) {
		amdgpu_dm_connector->num_modes =
				drm_add_modes_noedid(connector, 640, 480);
	} else {
		amdgpu_dm_connector_ddc_get_modes(connector, edid);
		amdgpu_dm_connector_add_common_modes(encoder, connector);
		amdgpu_dm_connector_add_freesync_modes(connector, edid);
	}
	amdgpu_dm_fbc_init(connector);

	return amdgpu_dm_connector->num_modes;
}
7423
/*
 * Common connector initialization: set DM bookkeeping fields, configure
 * HPD polling and YCbCr 4:2:0 support per connector type, and attach the
 * DRM properties (scaling, underscan, max bpc, ABM, HDR metadata, VRR,
 * and optionally HDCP content protection).
 */
void amdgpu_dm_connector_init_helper(struct amdgpu_display_manager *dm,
				     struct amdgpu_dm_connector *aconnector,
				     int connector_type,
				     struct dc_link *link,
				     int link_index)
{
	struct amdgpu_device *adev = drm_to_adev(dm->ddev);

	/*
	 * Some of the properties below require access to state, like bpc.
	 * Allocate some default initial connector state with our reset helper.
	 */
	if (aconnector->base.funcs->reset)
		aconnector->base.funcs->reset(&aconnector->base);

	aconnector->connector_id = link_index;
	aconnector->dc_link = link;
	aconnector->base.interlace_allowed = false;
	aconnector->base.doublescan_allowed = false;
	aconnector->base.stereo_allowed = false;
	aconnector->base.dpms = DRM_MODE_DPMS_OFF;
	aconnector->hpd.hpd = AMDGPU_HPD_NONE; /* not used */
	aconnector->audio_inst = -1;
	mutex_init(&aconnector->hpd_lock);

	/*
	 * configure support HPD hot plug connector_>polled default value is 0
	 * which means HPD hot plug not supported
	 */
	switch (connector_type) {
	case DRM_MODE_CONNECTOR_HDMIA:
		aconnector->base.polled = DRM_CONNECTOR_POLL_HPD;
		aconnector->base.ycbcr_420_allowed =
			link->link_enc->features.hdmi_ycbcr420_supported ? true : false;
		break;
	case DRM_MODE_CONNECTOR_DisplayPort:
		aconnector->base.polled = DRM_CONNECTOR_POLL_HPD;
		aconnector->base.ycbcr_420_allowed =
			link->link_enc->features.dp_ycbcr420_supported ? true : false;
		break;
	case DRM_MODE_CONNECTOR_DVID:
		aconnector->base.polled = DRM_CONNECTOR_POLL_HPD;
		break;
	default:
		break;
	}

	drm_object_attach_property(&aconnector->base.base,
				dm->ddev->mode_config.scaling_mode_property,
				DRM_MODE_SCALE_NONE);

	drm_object_attach_property(&aconnector->base.base,
				adev->mode_info.underscan_property,
				UNDERSCAN_OFF);
	drm_object_attach_property(&aconnector->base.base,
				adev->mode_info.underscan_hborder_property,
				0);
	drm_object_attach_property(&aconnector->base.base,
				adev->mode_info.underscan_vborder_property,
				0);

	/* MST ports get max_bpc from their root connector. */
	if (!aconnector->mst_port)
		drm_connector_attach_max_bpc_property(&aconnector->base, 8, 16);

	/* This defaults to the max in the range, but we want 8bpc for non-edp. */
	aconnector->base.state->max_bpc = (connector_type == DRM_MODE_CONNECTOR_eDP) ? 16 : 8;
	aconnector->base.state->max_requested_bpc = aconnector->base.state->max_bpc;

	/* ABM (backlight modulation) needs DMCU firmware or a DMUB service. */
	if (connector_type == DRM_MODE_CONNECTOR_eDP &&
	    (dc_is_dmcu_initialized(adev->dm.dc) || adev->dm.dc->ctx->dmub_srv)) {
		drm_object_attach_property(&aconnector->base.base,
				adev->mode_info.abm_level_property, 0);
	}

	if (connector_type == DRM_MODE_CONNECTOR_HDMIA ||
	    connector_type == DRM_MODE_CONNECTOR_DisplayPort ||
	    connector_type == DRM_MODE_CONNECTOR_eDP) {
		drm_connector_attach_hdr_output_metadata_property(&aconnector->base);

		if (!aconnector->mst_port)
			drm_connector_attach_vrr_capable_property(&aconnector->base);

#ifdef CONFIG_DRM_AMD_DC_HDCP
		if (adev->dm.hdcp_workqueue)
			drm_connector_attach_content_protection_property(&aconnector->base, true);
#endif
	}
}
7512
7513 static int amdgpu_dm_i2c_xfer(struct i2c_adapter *i2c_adap,
7514                               struct i2c_msg *msgs, int num)
7515 {
7516         struct amdgpu_i2c_adapter *i2c = i2c_get_adapdata(i2c_adap);
7517         struct ddc_service *ddc_service = i2c->ddc_service;
7518         struct i2c_command cmd;
7519         int i;
7520         int result = -EIO;
7521
7522         cmd.payloads = kcalloc(num, sizeof(struct i2c_payload), GFP_KERNEL);
7523
7524         if (!cmd.payloads)
7525                 return result;
7526
7527         cmd.number_of_payloads = num;
7528         cmd.engine = I2C_COMMAND_ENGINE_DEFAULT;
7529         cmd.speed = 100;
7530
7531         for (i = 0; i < num; i++) {
7532                 cmd.payloads[i].write = !(msgs[i].flags & I2C_M_RD);
7533                 cmd.payloads[i].address = msgs[i].addr;
7534                 cmd.payloads[i].length = msgs[i].len;
7535                 cmd.payloads[i].data = msgs[i].buf;
7536         }
7537
7538         if (dc_submit_i2c(
7539                         ddc_service->ctx->dc,
7540                         ddc_service->ddc_pin->hw_info.ddc_channel,
7541                         &cmd))
7542                 result = num;
7543
7544         kfree(cmd.payloads);
7545         return result;
7546 }
7547
/* Advertise plain I2C plus emulated SMBus support for the DM i2c adapter. */
static u32 amdgpu_dm_i2c_func(struct i2c_adapter *adap)
{
	return I2C_FUNC_I2C | I2C_FUNC_SMBUS_EMUL;
}
7552
/* i2c algorithm backed by DC's hardware i2c engine. */
static const struct i2c_algorithm amdgpu_dm_i2c_algo = {
	.master_xfer = amdgpu_dm_i2c_xfer,
	.functionality = amdgpu_dm_i2c_func,
};
7557
/*
 * Allocate and initialize an i2c adapter wrapping @ddc_service's hardware
 * i2c pin; the adapter is named after @link_index.
 *
 * Returns the new adapter, or NULL on allocation failure.  The caller owns
 * the returned adapter and must register it with i2c_add_adapter().
 * NOTE(review): @res is never written; callers must rely solely on the
 * NULL return to detect failure.
 */
static struct amdgpu_i2c_adapter *
create_i2c(struct ddc_service *ddc_service,
	   int link_index,
	   int *res)
{
	struct amdgpu_device *adev = ddc_service->ctx->driver_context;
	struct amdgpu_i2c_adapter *i2c;

	i2c = kzalloc(sizeof(struct amdgpu_i2c_adapter), GFP_KERNEL);
	if (!i2c)
		return NULL;
	i2c->base.owner = THIS_MODULE;
	i2c->base.class = I2C_CLASS_DDC;
	i2c->base.dev.parent = &adev->pdev->dev;
	i2c->base.algo = &amdgpu_dm_i2c_algo;
	snprintf(i2c->base.name, sizeof(i2c->base.name), "AMDGPU DM i2c hw bus %d", link_index);
	i2c_set_adapdata(&i2c->base, i2c);
	i2c->ddc_service = ddc_service;
	/* Record which hw DDC channel this adapter drives. */
	i2c->ddc_service->ddc_pin->hw_info.ddc_channel = link_index;

	return i2c;
}
7580
7581
7582 /*
7583  * Note: this function assumes that dc_link_detect() was called for the
7584  * dc_link which will be represented by this aconnector.
7585  */
7586 static int amdgpu_dm_connector_init(struct amdgpu_display_manager *dm,
7587                                     struct amdgpu_dm_connector *aconnector,
7588                                     uint32_t link_index,
7589                                     struct amdgpu_encoder *aencoder)
7590 {
7591         int res = 0;
7592         int connector_type;
7593         struct dc *dc = dm->dc;
7594         struct dc_link *link = dc_get_link_at_index(dc, link_index);
7595         struct amdgpu_i2c_adapter *i2c;
7596
7597         link->priv = aconnector;
7598
7599         DRM_DEBUG_DRIVER("%s()\n", __func__);
7600
7601         i2c = create_i2c(link->ddc, link->link_index, &res);
7602         if (!i2c) {
7603                 DRM_ERROR("Failed to create i2c adapter data\n");
7604                 return -ENOMEM;
7605         }
7606
7607         aconnector->i2c = i2c;
7608         res = i2c_add_adapter(&i2c->base);
7609
7610         if (res) {
7611                 DRM_ERROR("Failed to register hw i2c %d\n", link->link_index);
7612                 goto out_free;
7613         }
7614
7615         connector_type = to_drm_connector_type(link->connector_signal);
7616
7617         res = drm_connector_init_with_ddc(
7618                         dm->ddev,
7619                         &aconnector->base,
7620                         &amdgpu_dm_connector_funcs,
7621                         connector_type,
7622                         &i2c->base);
7623
7624         if (res) {
7625                 DRM_ERROR("connector_init failed\n");
7626                 aconnector->connector_id = -1;
7627                 goto out_free;
7628         }
7629
7630         drm_connector_helper_add(
7631                         &aconnector->base,
7632                         &amdgpu_dm_connector_helper_funcs);
7633
7634         amdgpu_dm_connector_init_helper(
7635                 dm,
7636                 aconnector,
7637                 connector_type,
7638                 link,
7639                 link_index);
7640
7641         drm_connector_attach_encoder(
7642                 &aconnector->base, &aencoder->base);
7643
7644         if (connector_type == DRM_MODE_CONNECTOR_DisplayPort
7645                 || connector_type == DRM_MODE_CONNECTOR_eDP)
7646                 amdgpu_dm_initialize_dp_connector(dm, aconnector, link->link_index);
7647
7648 out_free:
7649         if (res) {
7650                 kfree(i2c);
7651                 aconnector->i2c = NULL;
7652         }
7653         return res;
7654 }
7655
7656 int amdgpu_dm_get_encoder_crtc_mask(struct amdgpu_device *adev)
7657 {
7658         switch (adev->mode_info.num_crtc) {
7659         case 1:
7660                 return 0x1;
7661         case 2:
7662                 return 0x3;
7663         case 3:
7664                 return 0x7;
7665         case 4:
7666                 return 0xf;
7667         case 5:
7668                 return 0x1f;
7669         case 6:
7670         default:
7671                 return 0x3f;
7672         }
7673 }
7674
7675 static int amdgpu_dm_encoder_init(struct drm_device *dev,
7676                                   struct amdgpu_encoder *aencoder,
7677                                   uint32_t link_index)
7678 {
7679         struct amdgpu_device *adev = drm_to_adev(dev);
7680
7681         int res = drm_encoder_init(dev,
7682                                    &aencoder->base,
7683                                    &amdgpu_dm_encoder_funcs,
7684                                    DRM_MODE_ENCODER_TMDS,
7685                                    NULL);
7686
7687         aencoder->base.possible_crtcs = amdgpu_dm_get_encoder_crtc_mask(adev);
7688
7689         if (!res)
7690                 aencoder->encoder_id = link_index;
7691         else
7692                 aencoder->encoder_id = -1;
7693
7694         drm_encoder_helper_add(&aencoder->base, &amdgpu_dm_encoder_helper_funcs);
7695
7696         return res;
7697 }
7698
/*
 * Enable or disable the per-CRTC interrupts used by DM: the pageflip
 * IRQ and, when secure display is built in, the vline0 IRQ, together
 * with DRM vblank handling.
 *
 * Ordering is deliberate: on enable, vblank is turned on before the
 * IRQs are acquired; on disable, the IRQs are released first and
 * vblank turned off last (the reverse sequence).
 */
static void manage_dm_interrupts(struct amdgpu_device *adev,
				 struct amdgpu_crtc *acrtc,
				 bool enable)
{
	/*
	 * We have no guarantee that the frontend index maps to the same
	 * backend index - some even map to more than one.
	 *
	 * TODO: Use a different interrupt or check DC itself for the mapping.
	 */
	int irq_type =
		amdgpu_display_crtc_idx_to_irq_type(
			adev,
			acrtc->crtc_id);

	if (enable) {
		drm_crtc_vblank_on(&acrtc->base);
		amdgpu_irq_get(
			adev,
			&adev->pageflip_irq,
			irq_type);
#if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
		amdgpu_irq_get(
			adev,
			&adev->vline0_irq,
			irq_type);
#endif
	} else {
#if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
		amdgpu_irq_put(
			adev,
			&adev->vline0_irq,
			irq_type);
#endif
		amdgpu_irq_put(
			adev,
			&adev->pageflip_irq,
			irq_type);
		drm_crtc_vblank_off(&acrtc->base);
	}
}
7740
7741 static void dm_update_pflip_irq_state(struct amdgpu_device *adev,
7742                                       struct amdgpu_crtc *acrtc)
7743 {
7744         int irq_type =
7745                 amdgpu_display_crtc_idx_to_irq_type(adev, acrtc->crtc_id);
7746
7747         /**
7748          * This reads the current state for the IRQ and force reapplies
7749          * the setting to hardware.
7750          */
7751         amdgpu_irq_update(adev, &adev->pageflip_irq, irq_type);
7752 }
7753
7754 static bool
7755 is_scaling_state_different(const struct dm_connector_state *dm_state,
7756                            const struct dm_connector_state *old_dm_state)
7757 {
7758         if (dm_state->scaling != old_dm_state->scaling)
7759                 return true;
7760         if (!dm_state->underscan_enable && old_dm_state->underscan_enable) {
7761                 if (old_dm_state->underscan_hborder != 0 && old_dm_state->underscan_vborder != 0)
7762                         return true;
7763         } else  if (dm_state->underscan_enable && !old_dm_state->underscan_enable) {
7764                 if (dm_state->underscan_hborder != 0 && dm_state->underscan_vborder != 0)
7765                         return true;
7766         } else if (dm_state->underscan_hborder != old_dm_state->underscan_hborder ||
7767                    dm_state->underscan_vborder != old_dm_state->underscan_vborder)
7768                 return true;
7769         return false;
7770 }
7771
7772 #ifdef CONFIG_DRM_AMD_DC_HDCP
/*
 * Decide whether the HDCP (content protection) state machine needs to
 * act on this connector, given its old and new DRM connector states.
 *
 * Note: this may rewrite state->content_protection in place to
 * normalize transitional values (see the per-case comments below).
 * dm_con_state is derived from the connector's *current* state, not
 * from @state.  @hdcp_w is currently unused.
 *
 * Return: true when HDCP must be (re)configured for this connector.
 */
static bool is_content_protection_different(struct drm_connector_state *state,
					    const struct drm_connector_state *old_state,
					    const struct drm_connector *connector, struct hdcp_workqueue *hdcp_w)
{
	struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
	struct dm_connector_state *dm_con_state = to_dm_connector_state(connector->state);

	/* Handle: Type0/1 change */
	if (old_state->hdcp_content_type != state->hdcp_content_type &&
	    state->content_protection != DRM_MODE_CONTENT_PROTECTION_UNDESIRED) {
		state->content_protection = DRM_MODE_CONTENT_PROTECTION_DESIRED;
		return true;
	}

	/* CP is being re enabled, ignore this
	 *
	 * Handles:	ENABLED -> DESIRED
	 */
	if (old_state->content_protection == DRM_MODE_CONTENT_PROTECTION_ENABLED &&
	    state->content_protection == DRM_MODE_CONTENT_PROTECTION_DESIRED) {
		state->content_protection = DRM_MODE_CONTENT_PROTECTION_ENABLED;
		return false;
	}

	/* S3 resume case, since old state will always be 0 (UNDESIRED) and the restored state will be ENABLED
	 *
	 * Handles:	UNDESIRED -> ENABLED
	 */
	if (old_state->content_protection == DRM_MODE_CONTENT_PROTECTION_UNDESIRED &&
	    state->content_protection == DRM_MODE_CONTENT_PROTECTION_ENABLED)
		state->content_protection = DRM_MODE_CONTENT_PROTECTION_DESIRED;

	/* Check if something is connected/enabled, otherwise we start hdcp but nothing is connected/enabled
	 * hot-plug, headless s3, dpms
	 *
	 * Handles:	DESIRED -> DESIRED (Special case)
	 */
	if (dm_con_state->update_hdcp && state->content_protection == DRM_MODE_CONTENT_PROTECTION_DESIRED &&
	    connector->dpms == DRM_MODE_DPMS_ON && aconnector->dc_sink != NULL) {
		dm_con_state->update_hdcp = false;
		return true;
	}

	/*
	 * Handles:	UNDESIRED -> UNDESIRED
	 *		DESIRED -> DESIRED
	 *		ENABLED -> ENABLED
	 */
	if (old_state->content_protection == state->content_protection)
		return false;

	/*
	 * Handles:	UNDESIRED -> DESIRED
	 *		DESIRED -> UNDESIRED
	 *		ENABLED -> UNDESIRED
	 */
	if (state->content_protection != DRM_MODE_CONTENT_PROTECTION_ENABLED)
		return true;

	/*
	 * Handles:	DESIRED -> ENABLED
	 */
	return false;
}
7837
7838 #endif
7839 static void remove_stream(struct amdgpu_device *adev,
7840                           struct amdgpu_crtc *acrtc,
7841                           struct dc_stream_state *stream)
7842 {
7843         /* this is the update mode case */
7844
7845         acrtc->otg_inst = -1;
7846         acrtc->enabled = false;
7847 }
7848
7849 static int get_cursor_position(struct drm_plane *plane, struct drm_crtc *crtc,
7850                                struct dc_cursor_position *position)
7851 {
7852         struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
7853         int x, y;
7854         int xorigin = 0, yorigin = 0;
7855
7856         if (!crtc || !plane->state->fb)
7857                 return 0;
7858
7859         if ((plane->state->crtc_w > amdgpu_crtc->max_cursor_width) ||
7860             (plane->state->crtc_h > amdgpu_crtc->max_cursor_height)) {
7861                 DRM_ERROR("%s: bad cursor width or height %d x %d\n",
7862                           __func__,
7863                           plane->state->crtc_w,
7864                           plane->state->crtc_h);
7865                 return -EINVAL;
7866         }
7867
7868         x = plane->state->crtc_x;
7869         y = plane->state->crtc_y;
7870
7871         if (x <= -amdgpu_crtc->max_cursor_width ||
7872             y <= -amdgpu_crtc->max_cursor_height)
7873                 return 0;
7874
7875         if (x < 0) {
7876                 xorigin = min(-x, amdgpu_crtc->max_cursor_width - 1);
7877                 x = 0;
7878         }
7879         if (y < 0) {
7880                 yorigin = min(-y, amdgpu_crtc->max_cursor_height - 1);
7881                 y = 0;
7882         }
7883         position->enable = true;
7884         position->translate_by_source = true;
7885         position->x = x;
7886         position->y = y;
7887         position->x_hotspot = xorigin;
7888         position->y_hotspot = yorigin;
7889
7890         return 0;
7891 }
7892
/*
 * Program the hardware cursor for @plane: either hide it (no position
 * computed) or push the new attributes (address, size, pitch) and
 * position to DC.  All DC calls are made under dm.dc_lock.
 */
static void handle_cursor_update(struct drm_plane *plane,
				 struct drm_plane_state *old_plane_state)
{
	struct amdgpu_device *adev = drm_to_adev(plane->dev);
	struct amdgpu_framebuffer *afb = to_amdgpu_framebuffer(plane->state->fb);
	/* No new FB means the cursor is being disabled; use the old CRTC. */
	struct drm_crtc *crtc = afb ? plane->state->crtc : old_plane_state->crtc;
	struct dm_crtc_state *crtc_state = crtc ? to_dm_crtc_state(crtc->state) : NULL;
	struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
	uint64_t address = afb ? afb->address : 0;
	struct dc_cursor_position position = {0};
	struct dc_cursor_attributes attributes;
	int ret;

	/* Nothing to do if the cursor was unset and stays unset. */
	if (!plane->state->fb && !old_plane_state->fb)
		return;

	DC_LOG_CURSOR("%s: crtc_id=%d with size %d to %d\n",
		      __func__,
		      amdgpu_crtc->crtc_id,
		      plane->state->crtc_w,
		      plane->state->crtc_h);

	ret = get_cursor_position(plane, crtc, &position);
	if (ret)
		return;

	if (!position.enable) {
		/* turn off cursor */
		if (crtc_state && crtc_state->stream) {
			mutex_lock(&adev->dm.dc_lock);
			dc_stream_set_cursor_position(crtc_state->stream,
						      &position);
			mutex_unlock(&adev->dm.dc_lock);
		}
		return;
	}

	amdgpu_crtc->cursor_width = plane->state->crtc_w;
	amdgpu_crtc->cursor_height = plane->state->crtc_h;

	memset(&attributes, 0, sizeof(attributes));
	attributes.address.high_part = upper_32_bits(address);
	attributes.address.low_part  = lower_32_bits(address);
	attributes.width             = plane->state->crtc_w;
	attributes.height            = plane->state->crtc_h;
	attributes.color_format      = CURSOR_MODE_COLOR_PRE_MULTIPLIED_ALPHA;
	attributes.rotation_angle    = 0;
	attributes.attribute_flags.value = 0;

	/* Pitch in pixels, derived from the FB's byte pitch. */
	attributes.pitch = afb->base.pitches[0] / afb->base.format->cpp[0];

	if (crtc_state->stream) {
		mutex_lock(&adev->dm.dc_lock);
		if (!dc_stream_set_cursor_attributes(crtc_state->stream,
							 &attributes))
			DRM_ERROR("DC failed to set cursor attributes\n");

		if (!dc_stream_set_cursor_position(crtc_state->stream,
						   &position))
			DRM_ERROR("DC failed to set cursor position\n");
		mutex_unlock(&adev->dm.dc_lock);
	}
}
7956
/*
 * Move the pending pageflip event from the CRTC's atomic state onto
 * the amdgpu_crtc and mark the flip as submitted.  Must be called with
 * the DRM event_lock held (asserted below).
 */
static void prepare_flip_isr(struct amdgpu_crtc *acrtc)
{

	assert_spin_locked(&acrtc->base.dev->event_lock);
	/* A previous event still pending here would be lost. */
	WARN_ON(acrtc->event);

	acrtc->event = acrtc->base.state->event;

	/* Set the flip status */
	acrtc->pflip_status = AMDGPU_FLIP_SUBMITTED;

	/* Mark this event as consumed */
	acrtc->base.state->event = NULL;

	DC_LOG_PFLIP("crtc:%d, pflip_stat:AMDGPU_FLIP_SUBMITTED\n",
		     acrtc->crtc_id);
}
7974
/*
 * Recompute the VRR (freesync) state for a stream at flip time: run
 * the freesync preflip handling for @surface (if given), rebuild the
 * VRR infopacket, and publish the results to @new_crtc_state,
 * @new_stream, and the IRQ-visible copy in acrtc->dm_irq_params.
 * All state updates happen under the DRM event_lock.
 */
static void update_freesync_state_on_stream(
	struct amdgpu_display_manager *dm,
	struct dm_crtc_state *new_crtc_state,
	struct dc_stream_state *new_stream,
	struct dc_plane_state *surface,
	u32 flip_timestamp_in_us)
{
	struct mod_vrr_params vrr_params;
	struct dc_info_packet vrr_infopacket = {0};
	struct amdgpu_device *adev = dm->adev;
	struct amdgpu_crtc *acrtc = to_amdgpu_crtc(new_crtc_state->base.crtc);
	unsigned long flags;
	bool pack_sdp_v1_3 = false;

	if (!new_stream)
		return;

	/*
	 * TODO: Determine why min/max totals and vrefresh can be 0 here.
	 * For now it's sufficient to just guard against these conditions.
	 */

	if (!new_stream->timing.h_total || !new_stream->timing.v_total)
		return;

	spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);
	vrr_params = acrtc->dm_irq_params.vrr_params;

	if (surface) {
		mod_freesync_handle_preflip(
			dm->freesync_module,
			surface,
			new_stream,
			flip_timestamp_in_us,
			&vrr_params);

		/* Pre-AI (pre-DCE12) hardware handles v_update in software. */
		if (adev->family < AMDGPU_FAMILY_AI &&
		    amdgpu_dm_vrr_active(new_crtc_state)) {
			mod_freesync_handle_v_update(dm->freesync_module,
						     new_stream, &vrr_params);

			/* Need to call this before the frame ends. */
			dc_stream_adjust_vmin_vmax(dm->dc,
						   new_crtc_state->stream,
						   &vrr_params.adjust);
		}
	}

	mod_freesync_build_vrr_infopacket(
		dm->freesync_module,
		new_stream,
		&vrr_params,
		PACKET_TYPE_VRR,
		TRANSFER_FUNC_UNKNOWN,
		&vrr_infopacket,
		pack_sdp_v1_3);

	/* Track whether the timing adjustment changed vs the IRQ copy. */
	new_crtc_state->freesync_timing_changed |=
		(memcmp(&acrtc->dm_irq_params.vrr_params.adjust,
			&vrr_params.adjust,
			sizeof(vrr_params.adjust)) != 0);

	new_crtc_state->freesync_vrr_info_changed |=
		(memcmp(&new_crtc_state->vrr_infopacket,
			&vrr_infopacket,
			sizeof(vrr_infopacket)) != 0);

	/* Publish the new state for the IRQ handlers and the stream. */
	acrtc->dm_irq_params.vrr_params = vrr_params;
	new_crtc_state->vrr_infopacket = vrr_infopacket;

	new_stream->adjust = acrtc->dm_irq_params.vrr_params.adjust;
	new_stream->vrr_infopacket = vrr_infopacket;

	if (new_crtc_state->freesync_vrr_info_changed)
		DRM_DEBUG_KMS("VRR packet update: crtc=%u enabled=%d state=%d",
			      new_crtc_state->base.crtc->base.id,
			      (int)new_crtc_state->base.vrr_enabled,
			      (int)vrr_params.state);

	spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
}
8056
/*
 * Derive the VRR parameters for @new_crtc_state's stream from its
 * freesync config and copy them - along with the config and active
 * plane count - into acrtc->dm_irq_params for use by the IRQ handlers.
 * All updates happen under the DRM event_lock.
 */
static void update_stream_irq_parameters(
	struct amdgpu_display_manager *dm,
	struct dm_crtc_state *new_crtc_state)
{
	struct dc_stream_state *new_stream = new_crtc_state->stream;
	struct mod_vrr_params vrr_params;
	struct mod_freesync_config config = new_crtc_state->freesync_config;
	struct amdgpu_device *adev = dm->adev;
	struct amdgpu_crtc *acrtc = to_amdgpu_crtc(new_crtc_state->base.crtc);
	unsigned long flags;

	if (!new_stream)
		return;

	/*
	 * TODO: Determine why min/max totals and vrefresh can be 0 here.
	 * For now it's sufficient to just guard against these conditions.
	 */
	if (!new_stream->timing.h_total || !new_stream->timing.v_total)
		return;

	spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);
	vrr_params = acrtc->dm_irq_params.vrr_params;

	if (new_crtc_state->vrr_supported &&
	    config.min_refresh_in_uhz &&
	    config.max_refresh_in_uhz) {
		/*
		 * if freesync compatible mode was set, config.state will be set
		 * in atomic check
		 */
		if (config.state == VRR_STATE_ACTIVE_FIXED && config.fixed_refresh_in_uhz &&
		    (!drm_atomic_crtc_needs_modeset(&new_crtc_state->base) ||
		     new_crtc_state->freesync_config.state == VRR_STATE_ACTIVE_FIXED)) {
			vrr_params.max_refresh_in_uhz = config.max_refresh_in_uhz;
			vrr_params.min_refresh_in_uhz = config.min_refresh_in_uhz;
			vrr_params.fixed_refresh_in_uhz = config.fixed_refresh_in_uhz;
			vrr_params.state = VRR_STATE_ACTIVE_FIXED;
		} else {
			/* Otherwise VRR follows the userspace enable flag. */
			config.state = new_crtc_state->base.vrr_enabled ?
						     VRR_STATE_ACTIVE_VARIABLE :
						     VRR_STATE_INACTIVE;
		}
	} else {
		config.state = VRR_STATE_UNSUPPORTED;
	}

	mod_freesync_build_vrr_params(dm->freesync_module,
				      new_stream,
				      &config, &vrr_params);

	/* Track whether the timing adjustment changed vs the IRQ copy. */
	new_crtc_state->freesync_timing_changed |=
		(memcmp(&acrtc->dm_irq_params.vrr_params.adjust,
			&vrr_params.adjust, sizeof(vrr_params.adjust)) != 0);

	new_crtc_state->freesync_config = config;
	/* Copy state for access from DM IRQ handler */
	acrtc->dm_irq_params.freesync_config = config;
	acrtc->dm_irq_params.active_planes = new_crtc_state->active_planes;
	acrtc->dm_irq_params.vrr_params = vrr_params;
	spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
}
8119
8120 static void amdgpu_dm_handle_vrr_transition(struct dm_crtc_state *old_state,
8121                                             struct dm_crtc_state *new_state)
8122 {
8123         bool old_vrr_active = amdgpu_dm_vrr_active(old_state);
8124         bool new_vrr_active = amdgpu_dm_vrr_active(new_state);
8125
8126         if (!old_vrr_active && new_vrr_active) {
8127                 /* Transition VRR inactive -> active:
8128                  * While VRR is active, we must not disable vblank irq, as a
8129                  * reenable after disable would compute bogus vblank/pflip
8130                  * timestamps if it likely happened inside display front-porch.
8131                  *
8132                  * We also need vupdate irq for the actual core vblank handling
8133                  * at end of vblank.
8134                  */
8135                 dm_set_vupdate_irq(new_state->base.crtc, true);
8136                 drm_crtc_vblank_get(new_state->base.crtc);
8137                 DRM_DEBUG_DRIVER("%s: crtc=%u VRR off->on: Get vblank ref\n",
8138                                  __func__, new_state->base.crtc->base.id);
8139         } else if (old_vrr_active && !new_vrr_active) {
8140                 /* Transition VRR active -> inactive:
8141                  * Allow vblank irq disable again for fixed refresh rate.
8142                  */
8143                 dm_set_vupdate_irq(new_state->base.crtc, false);
8144                 drm_crtc_vblank_put(new_state->base.crtc);
8145                 DRM_DEBUG_DRIVER("%s: crtc=%u VRR on->off: Drop vblank ref\n",
8146                                  __func__, new_state->base.crtc->base.id);
8147         }
8148 }
8149
8150 static void amdgpu_dm_commit_cursors(struct drm_atomic_state *state)
8151 {
8152         struct drm_plane *plane;
8153         struct drm_plane_state *old_plane_state, *new_plane_state;
8154         int i;
8155
8156         /*
8157          * TODO: Make this per-stream so we don't issue redundant updates for
8158          * commits with multiple streams.
8159          */
8160         for_each_oldnew_plane_in_state(state, plane, old_plane_state,
8161                                        new_plane_state, i)
8162                 if (plane->type == DRM_PLANE_TYPE_CURSOR)
8163                         handle_cursor_update(plane, old_plane_state);
8164 }
8165
8166 static void amdgpu_dm_commit_planes(struct drm_atomic_state *state,
8167                                     struct dc_state *dc_state,
8168                                     struct drm_device *dev,
8169                                     struct amdgpu_display_manager *dm,
8170                                     struct drm_crtc *pcrtc,
8171                                     bool wait_for_vblank)
8172 {
8173         uint32_t i;
8174         uint64_t timestamp_ns;
8175         struct drm_plane *plane;
8176         struct drm_plane_state *old_plane_state, *new_plane_state;
8177         struct amdgpu_crtc *acrtc_attach = to_amdgpu_crtc(pcrtc);
8178         struct drm_crtc_state *new_pcrtc_state =
8179                         drm_atomic_get_new_crtc_state(state, pcrtc);
8180         struct dm_crtc_state *acrtc_state = to_dm_crtc_state(new_pcrtc_state);
8181         struct dm_crtc_state *dm_old_crtc_state =
8182                         to_dm_crtc_state(drm_atomic_get_old_crtc_state(state, pcrtc));
8183         int planes_count = 0, vpos, hpos;
8184         long r;
8185         unsigned long flags;
8186         struct amdgpu_bo *abo;
8187         uint32_t target_vblank, last_flip_vblank;
8188         bool vrr_active = amdgpu_dm_vrr_active(acrtc_state);
8189         bool pflip_present = false;
8190         struct {
8191                 struct dc_surface_update surface_updates[MAX_SURFACES];
8192                 struct dc_plane_info plane_infos[MAX_SURFACES];
8193                 struct dc_scaling_info scaling_infos[MAX_SURFACES];
8194                 struct dc_flip_addrs flip_addrs[MAX_SURFACES];
8195                 struct dc_stream_update stream_update;
8196         } *bundle;
8197
8198         bundle = kzalloc(sizeof(*bundle), GFP_KERNEL);
8199
8200         if (!bundle) {
8201                 dm_error("Failed to allocate update bundle\n");
8202                 goto cleanup;
8203         }
8204
8205         /*
8206          * Disable the cursor first if we're disabling all the planes.
8207          * It'll remain on the screen after the planes are re-enabled
8208          * if we don't.
8209          */
8210         if (acrtc_state->active_planes == 0)
8211                 amdgpu_dm_commit_cursors(state);
8212
8213         /* update planes when needed */
8214         for_each_oldnew_plane_in_state(state, plane, old_plane_state, new_plane_state, i) {
8215                 struct drm_crtc *crtc = new_plane_state->crtc;
8216                 struct drm_crtc_state *new_crtc_state;
8217                 struct drm_framebuffer *fb = new_plane_state->fb;
8218                 struct amdgpu_framebuffer *afb = (struct amdgpu_framebuffer *)fb;
8219                 bool plane_needs_flip;
8220                 struct dc_plane_state *dc_plane;
8221                 struct dm_plane_state *dm_new_plane_state = to_dm_plane_state(new_plane_state);
8222
8223                 /* Cursor plane is handled after stream updates */
8224                 if (plane->type == DRM_PLANE_TYPE_CURSOR)
8225                         continue;
8226
8227                 if (!fb || !crtc || pcrtc != crtc)
8228                         continue;
8229
8230                 new_crtc_state = drm_atomic_get_new_crtc_state(state, crtc);
8231                 if (!new_crtc_state->active)
8232                         continue;
8233
8234                 dc_plane = dm_new_plane_state->dc_state;
8235
8236                 bundle->surface_updates[planes_count].surface = dc_plane;
8237                 if (new_pcrtc_state->color_mgmt_changed) {
8238                         bundle->surface_updates[planes_count].gamma = dc_plane->gamma_correction;
8239                         bundle->surface_updates[planes_count].in_transfer_func = dc_plane->in_transfer_func;
8240                         bundle->surface_updates[planes_count].gamut_remap_matrix = &dc_plane->gamut_remap_matrix;
8241                 }
8242
8243                 fill_dc_scaling_info(new_plane_state,
8244                                      &bundle->scaling_infos[planes_count]);
8245
8246                 bundle->surface_updates[planes_count].scaling_info =
8247                         &bundle->scaling_infos[planes_count];
8248
8249                 plane_needs_flip = old_plane_state->fb && new_plane_state->fb;
8250
8251                 pflip_present = pflip_present || plane_needs_flip;
8252
8253                 if (!plane_needs_flip) {
8254                         planes_count += 1;
8255                         continue;
8256                 }
8257
8258                 abo = gem_to_amdgpu_bo(fb->obj[0]);
8259
8260                 /*
8261                  * Wait for all fences on this FB. Do limited wait to avoid
8262                  * deadlock during GPU reset when this fence will not signal
8263                  * but we hold reservation lock for the BO.
8264                  */
8265                 r = dma_resv_wait_timeout_rcu(abo->tbo.base.resv, true,
8266                                                         false,
8267                                                         msecs_to_jiffies(5000));
8268                 if (unlikely(r <= 0))
8269                         DRM_ERROR("Waiting for fences timed out!");
8270
8271                 fill_dc_plane_info_and_addr(
8272                         dm->adev, new_plane_state,
8273                         afb->tiling_flags,
8274                         &bundle->plane_infos[planes_count],
8275                         &bundle->flip_addrs[planes_count].address,
8276                         afb->tmz_surface, false);
8277
8278                 DRM_DEBUG_ATOMIC("plane: id=%d dcc_en=%d\n",
8279                                  new_plane_state->plane->index,
8280                                  bundle->plane_infos[planes_count].dcc.enable);
8281
8282                 bundle->surface_updates[planes_count].plane_info =
8283                         &bundle->plane_infos[planes_count];
8284
8285                 /*
8286                  * Only allow immediate flips for fast updates that don't
8287                  * change FB pitch, DCC state, rotation or mirroing.
8288                  */
8289                 bundle->flip_addrs[planes_count].flip_immediate =
8290                         crtc->state->async_flip &&
8291                         acrtc_state->update_type == UPDATE_TYPE_FAST;
8292
8293                 timestamp_ns = ktime_get_ns();
8294                 bundle->flip_addrs[planes_count].flip_timestamp_in_us = div_u64(timestamp_ns, 1000);
8295                 bundle->surface_updates[planes_count].flip_addr = &bundle->flip_addrs[planes_count];
8296                 bundle->surface_updates[planes_count].surface = dc_plane;
8297
8298                 if (!bundle->surface_updates[planes_count].surface) {
8299                         DRM_ERROR("No surface for CRTC: id=%d\n",
8300                                         acrtc_attach->crtc_id);
8301                         continue;
8302                 }
8303
8304                 if (plane == pcrtc->primary)
8305                         update_freesync_state_on_stream(
8306                                 dm,
8307                                 acrtc_state,
8308                                 acrtc_state->stream,
8309                                 dc_plane,
8310                                 bundle->flip_addrs[planes_count].flip_timestamp_in_us);
8311
8312                 DRM_DEBUG_ATOMIC("%s Flipping to hi: 0x%x, low: 0x%x\n",
8313                                  __func__,
8314                                  bundle->flip_addrs[planes_count].address.grph.addr.high_part,
8315                                  bundle->flip_addrs[planes_count].address.grph.addr.low_part);
8316
8317                 planes_count += 1;
8318
8319         }
8320
8321         if (pflip_present) {
8322                 if (!vrr_active) {
8323                         /* Use old throttling in non-vrr fixed refresh rate mode
8324                          * to keep flip scheduling based on target vblank counts
8325                          * working in a backwards compatible way, e.g., for
8326                          * clients using the GLX_OML_sync_control extension or
8327                          * DRI3/Present extension with defined target_msc.
8328                          */
8329                         last_flip_vblank = amdgpu_get_vblank_counter_kms(pcrtc);
8330                 }
8331                 else {
8332                         /* For variable refresh rate mode only:
8333                          * Get vblank of last completed flip to avoid > 1 vrr
8334                          * flips per video frame by use of throttling, but allow
8335                          * flip programming anywhere in the possibly large
8336                          * variable vrr vblank interval for fine-grained flip
8337                          * timing control and more opportunity to avoid stutter
8338                          * on late submission of flips.
8339                          */
8340                         spin_lock_irqsave(&pcrtc->dev->event_lock, flags);
8341                         last_flip_vblank = acrtc_attach->dm_irq_params.last_flip_vblank;
8342                         spin_unlock_irqrestore(&pcrtc->dev->event_lock, flags);
8343                 }
8344
8345                 target_vblank = last_flip_vblank + wait_for_vblank;
8346
8347                 /*
8348                  * Wait until we're out of the vertical blank period before the one
8349                  * targeted by the flip
8350                  */
8351                 while ((acrtc_attach->enabled &&
8352                         (amdgpu_display_get_crtc_scanoutpos(dm->ddev, acrtc_attach->crtc_id,
8353                                                             0, &vpos, &hpos, NULL,
8354                                                             NULL, &pcrtc->hwmode)
8355                          & (DRM_SCANOUTPOS_VALID | DRM_SCANOUTPOS_IN_VBLANK)) ==
8356                         (DRM_SCANOUTPOS_VALID | DRM_SCANOUTPOS_IN_VBLANK) &&
8357                         (int)(target_vblank -
8358                           amdgpu_get_vblank_counter_kms(pcrtc)) > 0)) {
8359                         usleep_range(1000, 1100);
8360                 }
8361
8362                 /**
8363                  * Prepare the flip event for the pageflip interrupt to handle.
8364                  *
8365                  * This only works in the case where we've already turned on the
8366                  * appropriate hardware blocks (eg. HUBP) so in the transition case
8367                  * from 0 -> n planes we have to skip a hardware generated event
8368                  * and rely on sending it from software.
8369                  */
8370                 if (acrtc_attach->base.state->event &&
8371                     acrtc_state->active_planes > 0) {
8372                         drm_crtc_vblank_get(pcrtc);
8373
8374                         spin_lock_irqsave(&pcrtc->dev->event_lock, flags);
8375
8376                         WARN_ON(acrtc_attach->pflip_status != AMDGPU_FLIP_NONE);
8377                         prepare_flip_isr(acrtc_attach);
8378
8379                         spin_unlock_irqrestore(&pcrtc->dev->event_lock, flags);
8380                 }
8381
8382                 if (acrtc_state->stream) {
8383                         if (acrtc_state->freesync_vrr_info_changed)
8384                                 bundle->stream_update.vrr_infopacket =
8385                                         &acrtc_state->stream->vrr_infopacket;
8386                 }
8387         }
8388
8389         /* Update the planes if changed or disable if we don't have any. */
8390         if ((planes_count || acrtc_state->active_planes == 0) &&
8391                 acrtc_state->stream) {
8392                 bundle->stream_update.stream = acrtc_state->stream;
8393                 if (new_pcrtc_state->mode_changed) {
8394                         bundle->stream_update.src = acrtc_state->stream->src;
8395                         bundle->stream_update.dst = acrtc_state->stream->dst;
8396                 }
8397
8398                 if (new_pcrtc_state->color_mgmt_changed) {
8399                         /*
8400                          * TODO: This isn't fully correct since we've actually
8401                          * already modified the stream in place.
8402                          */
8403                         bundle->stream_update.gamut_remap =
8404                                 &acrtc_state->stream->gamut_remap_matrix;
8405                         bundle->stream_update.output_csc_transform =
8406                                 &acrtc_state->stream->csc_color_matrix;
8407                         bundle->stream_update.out_transfer_func =
8408                                 acrtc_state->stream->out_transfer_func;
8409                 }
8410
8411                 acrtc_state->stream->abm_level = acrtc_state->abm_level;
8412                 if (acrtc_state->abm_level != dm_old_crtc_state->abm_level)
8413                         bundle->stream_update.abm_level = &acrtc_state->abm_level;
8414
8415                 /*
8416                  * If FreeSync state on the stream has changed then we need to
8417                  * re-adjust the min/max bounds now that DC doesn't handle this
8418                  * as part of commit.
8419                  */
8420                 if (is_dc_timing_adjust_needed(dm_old_crtc_state, acrtc_state)) {
8421                         spin_lock_irqsave(&pcrtc->dev->event_lock, flags);
8422                         dc_stream_adjust_vmin_vmax(
8423                                 dm->dc, acrtc_state->stream,
8424                                 &acrtc_attach->dm_irq_params.vrr_params.adjust);
8425                         spin_unlock_irqrestore(&pcrtc->dev->event_lock, flags);
8426                 }
8427                 mutex_lock(&dm->dc_lock);
8428                 if ((acrtc_state->update_type > UPDATE_TYPE_FAST) &&
8429                                 acrtc_state->stream->link->psr_settings.psr_allow_active)
8430                         amdgpu_dm_psr_disable(acrtc_state->stream);
8431
8432                 dc_commit_updates_for_stream(dm->dc,
8433                                                      bundle->surface_updates,
8434                                                      planes_count,
8435                                                      acrtc_state->stream,
8436                                                      &bundle->stream_update,
8437                                                      dc_state);
8438
8439                 /**
8440                  * Enable or disable the interrupts on the backend.
8441                  *
8442                  * Most pipes are put into power gating when unused.
8443                  *
8444                  * When power gating is enabled on a pipe we lose the
8445                  * interrupt enablement state when power gating is disabled.
8446                  *
8447                  * So we need to update the IRQ control state in hardware
8448                  * whenever the pipe turns on (since it could be previously
8449                  * power gated) or off (since some pipes can't be power gated
8450                  * on some ASICs).
8451                  */
8452                 if (dm_old_crtc_state->active_planes != acrtc_state->active_planes)
8453                         dm_update_pflip_irq_state(drm_to_adev(dev),
8454                                                   acrtc_attach);
8455
8456                 if ((acrtc_state->update_type > UPDATE_TYPE_FAST) &&
8457                                 acrtc_state->stream->link->psr_settings.psr_version != DC_PSR_VERSION_UNSUPPORTED &&
8458                                 !acrtc_state->stream->link->psr_settings.psr_feature_enabled)
8459                         amdgpu_dm_link_setup_psr(acrtc_state->stream);
8460                 else if ((acrtc_state->update_type == UPDATE_TYPE_FAST) &&
8461                                 acrtc_state->stream->link->psr_settings.psr_feature_enabled &&
8462                                 !acrtc_state->stream->link->psr_settings.psr_allow_active) {
8463                         amdgpu_dm_psr_enable(acrtc_state->stream);
8464                 }
8465
8466                 mutex_unlock(&dm->dc_lock);
8467         }
8468
8469         /*
8470          * Update cursor state *after* programming all the planes.
8471          * This avoids redundant programming in the case where we're going
8472          * to be disabling a single plane - those pipes are being disabled.
8473          */
8474         if (acrtc_state->active_planes)
8475                 amdgpu_dm_commit_cursors(state);
8476
8477 cleanup:
8478         kfree(bundle);
8479 }
8480
8481 static void amdgpu_dm_commit_audio(struct drm_device *dev,
8482                                    struct drm_atomic_state *state)
8483 {
8484         struct amdgpu_device *adev = drm_to_adev(dev);
8485         struct amdgpu_dm_connector *aconnector;
8486         struct drm_connector *connector;
8487         struct drm_connector_state *old_con_state, *new_con_state;
8488         struct drm_crtc_state *new_crtc_state;
8489         struct dm_crtc_state *new_dm_crtc_state;
8490         const struct dc_stream_status *status;
8491         int i, inst;
8492
8493         /* Notify device removals. */
8494         for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) {
8495                 if (old_con_state->crtc != new_con_state->crtc) {
8496                         /* CRTC changes require notification. */
8497                         goto notify;
8498                 }
8499
8500                 if (!new_con_state->crtc)
8501                         continue;
8502
8503                 new_crtc_state = drm_atomic_get_new_crtc_state(
8504                         state, new_con_state->crtc);
8505
8506                 if (!new_crtc_state)
8507                         continue;
8508
8509                 if (!drm_atomic_crtc_needs_modeset(new_crtc_state))
8510                         continue;
8511
8512         notify:
8513                 aconnector = to_amdgpu_dm_connector(connector);
8514
8515                 mutex_lock(&adev->dm.audio_lock);
8516                 inst = aconnector->audio_inst;
8517                 aconnector->audio_inst = -1;
8518                 mutex_unlock(&adev->dm.audio_lock);
8519
8520                 amdgpu_dm_audio_eld_notify(adev, inst);
8521         }
8522
8523         /* Notify audio device additions. */
8524         for_each_new_connector_in_state(state, connector, new_con_state, i) {
8525                 if (!new_con_state->crtc)
8526                         continue;
8527
8528                 new_crtc_state = drm_atomic_get_new_crtc_state(
8529                         state, new_con_state->crtc);
8530
8531                 if (!new_crtc_state)
8532                         continue;
8533
8534                 if (!drm_atomic_crtc_needs_modeset(new_crtc_state))
8535                         continue;
8536
8537                 new_dm_crtc_state = to_dm_crtc_state(new_crtc_state);
8538                 if (!new_dm_crtc_state->stream)
8539                         continue;
8540
8541                 status = dc_stream_get_status(new_dm_crtc_state->stream);
8542                 if (!status)
8543                         continue;
8544
8545                 aconnector = to_amdgpu_dm_connector(connector);
8546
8547                 mutex_lock(&adev->dm.audio_lock);
8548                 inst = status->audio_inst;
8549                 aconnector->audio_inst = inst;
8550                 mutex_unlock(&adev->dm.audio_lock);
8551
8552                 amdgpu_dm_audio_eld_notify(adev, inst);
8553         }
8554 }
8555
/**
 * amdgpu_dm_crtc_copy_transient_flags - copy mirrored flags from DRM to DC
 * @crtc_state: the DRM CRTC state
 * @stream_state: the DC stream state.
 *
 * Copy the mirrored transient state flags from DRM, to DC. It is used to bring
 * a dc_stream_state's flags in sync with a drm_crtc_state's flags.
 */
static void amdgpu_dm_crtc_copy_transient_flags(struct drm_crtc_state *crtc_state,
						struct dc_stream_state *stream_state)
{
	/* mode_changed mirrors whether this commit performs a full modeset. */
	stream_state->mode_changed = drm_atomic_crtc_needs_modeset(crtc_state);
}
8569
/**
 * amdgpu_dm_atomic_commit_tail() - AMDgpu DM's commit tail implementation.
 * @state: The atomic state to commit
 *
 * This will tell DC to commit the constructed DC state from atomic_check,
 * programming the hardware. Any failures here implies a hardware failure, since
 * atomic check should have filtered anything non-kosher.
 */
static void amdgpu_dm_atomic_commit_tail(struct drm_atomic_state *state)
{
	struct drm_device *dev = state->dev;
	struct amdgpu_device *adev = drm_to_adev(dev);
	struct amdgpu_display_manager *dm = &adev->dm;
	struct dm_atomic_state *dm_state;
	/*
	 * dc_state_temp is only allocated (and released at the end) when the
	 * commit carried no private DM state object.
	 */
	struct dc_state *dc_state = NULL, *dc_state_temp = NULL;
	uint32_t i, j;
	struct drm_crtc *crtc;
	struct drm_crtc_state *old_crtc_state, *new_crtc_state;
	unsigned long flags;
	bool wait_for_vblank = true;
	struct drm_connector *connector;
	struct drm_connector_state *old_con_state, *new_con_state;
	struct dm_crtc_state *dm_old_crtc_state, *dm_new_crtc_state;
	int crtc_disable_count = 0;
	bool mode_set_reset_required = false;

	trace_amdgpu_dm_atomic_commit_tail_begin(state);

	drm_atomic_helper_update_legacy_modeset_state(dev, state);

	dm_state = dm_atomic_get_new_state(state);
	if (dm_state && dm_state->context) {
		dc_state = dm_state->context;
	} else {
		/* No state changes, retain current state. */
		dc_state_temp = dc_create_state(dm->dc);
		ASSERT(dc_state_temp);
		dc_state = dc_state_temp;
		dc_resource_state_copy_construct_current(dm->dc, dc_state);
	}

	/*
	 * Quiesce interrupts and drop the stream reference for every CRTC
	 * being disabled or fully modeset, before any HW programming below.
	 */
	for_each_oldnew_crtc_in_state (state, crtc, old_crtc_state,
				       new_crtc_state, i) {
		struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);

		dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);

		if (old_crtc_state->active &&
		    (!new_crtc_state->active ||
		     drm_atomic_crtc_needs_modeset(new_crtc_state))) {
			manage_dm_interrupts(adev, acrtc, false);
			dc_stream_release(dm_old_crtc_state->stream);
		}
	}

	drm_atomic_helper_calc_timestamping_constants(state);

	/* update changed items */
	for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
		struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);

		dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
		dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);

		DRM_DEBUG_ATOMIC(
			"amdgpu_crtc id:%d crtc_state_flags: enable:%d, active:%d, "
			"planes_changed:%d, mode_changed:%d,active_changed:%d,"
			"connectors_changed:%d\n",
			acrtc->crtc_id,
			new_crtc_state->enable,
			new_crtc_state->active,
			new_crtc_state->planes_changed,
			new_crtc_state->mode_changed,
			new_crtc_state->active_changed,
			new_crtc_state->connectors_changed);

		/* Disable cursor if disabling crtc */
		if (old_crtc_state->active && !new_crtc_state->active) {
			struct dc_cursor_position position;

			/* A zeroed position with enable=false hides the cursor. */
			memset(&position, 0, sizeof(position));
			mutex_lock(&dm->dc_lock);
			dc_stream_set_cursor_position(dm_old_crtc_state->stream, &position);
			mutex_unlock(&dm->dc_lock);
		}

		/* Copy all transient state flags into dc state */
		if (dm_new_crtc_state->stream) {
			amdgpu_dm_crtc_copy_transient_flags(&dm_new_crtc_state->base,
							    dm_new_crtc_state->stream);
		}

		/* handles headless hotplug case, updating new_state and
		 * aconnector as needed
		 */

		if (modeset_required(new_crtc_state, dm_new_crtc_state->stream, dm_old_crtc_state->stream)) {

			DRM_DEBUG_ATOMIC("Atomic commit: SET crtc id %d: [%p]\n", acrtc->crtc_id, acrtc);

			if (!dm_new_crtc_state->stream) {
				/*
				 * this could happen because of issues with
				 * userspace notifications delivery.
				 * In this case userspace tries to set mode on
				 * display which is disconnected in fact.
				 * dc_sink is NULL in this case on aconnector.
				 * We expect reset mode will come soon.
				 *
				 * This can also happen when unplug is done
				 * during resume sequence ended
				 *
				 * In this case, we want to pretend we still
				 * have a sink to keep the pipe running so that
				 * hw state is consistent with the sw state
				 */
				DRM_DEBUG_DRIVER("%s: Failed to create new stream for crtc %d\n",
						__func__, acrtc->base.base.id);
				continue;
			}

			if (dm_old_crtc_state->stream)
				remove_stream(adev, acrtc, dm_old_crtc_state->stream);

			/*
			 * Keep the device awake while the CRTC is active; the
			 * matching put is issued per disabled CRTC at the end
			 * of this function.
			 */
			pm_runtime_get_noresume(dev->dev);

			acrtc->enabled = true;
			acrtc->hw_mode = new_crtc_state->mode;
			crtc->hwmode = new_crtc_state->mode;
			mode_set_reset_required = true;
		} else if (modereset_required(new_crtc_state)) {
			DRM_DEBUG_ATOMIC("Atomic commit: RESET. crtc id %d:[%p]\n", acrtc->crtc_id, acrtc);
			/* i.e. reset mode */
			if (dm_old_crtc_state->stream)
				remove_stream(adev, acrtc, dm_old_crtc_state->stream);

			mode_set_reset_required = true;
		}
	} /* for_each_crtc_in_state() */

	if (dc_state) {
		/* if there mode set or reset, disable eDP PSR */
		if (mode_set_reset_required)
			amdgpu_dm_psr_disable_all(dm);

		dm_enable_per_frame_crtc_master_sync(dc_state);
		mutex_lock(&dm->dc_lock);
		/* Program the constructed DC state; failure implies a HW fault. */
		WARN_ON(!dc_commit_state(dm->dc, dc_state));
#if defined(CONFIG_DRM_AMD_DC_DCN)
               /* Allow idle optimization when vblank count is 0 for display off */
               if (dm->active_vblank_irq_count == 0)
                   dc_allow_idle_optimizations(dm->dc,true);
#endif
		mutex_unlock(&dm->dc_lock);
	}

	/*
	 * Cache the OTG instance DC assigned to each active stream so the
	 * IRQ/vblank paths can resolve it later.
	 */
	for_each_new_crtc_in_state(state, crtc, new_crtc_state, i) {
		struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);

		dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);

		if (dm_new_crtc_state->stream != NULL) {
			const struct dc_stream_status *status =
					dc_stream_get_status(dm_new_crtc_state->stream);

			if (!status)
				status = dc_stream_get_status_from_state(dc_state,
									 dm_new_crtc_state->stream);
			if (!status)
				DC_ERR("got no status for stream %p on acrtc%p\n", dm_new_crtc_state->stream, acrtc);
			else
				acrtc->otg_inst = status->primary_otg_inst;
		}
	}
#ifdef CONFIG_DRM_AMD_DC_HDCP
	for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) {
		struct dm_connector_state *dm_new_con_state = to_dm_connector_state(new_con_state);
		struct amdgpu_crtc *acrtc = to_amdgpu_crtc(dm_new_con_state->base.crtc);
		struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);

		new_crtc_state = NULL;

		if (acrtc)
			new_crtc_state = drm_atomic_get_new_crtc_state(state, &acrtc->base);

		/*
		 * NOTE(review): new_crtc_state can be NULL here and
		 * to_dm_crtc_state() is a container_of(); the NULL check on
		 * dm_new_crtc_state below is only sound if 'base' is the
		 * first member of struct dm_crtc_state — confirm.
		 */
		dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);

		/*
		 * HDCP cannot stay enabled on a connector whose stream has
		 * gone away: reset the display and re-request protection.
		 */
		if (dm_new_crtc_state && dm_new_crtc_state->stream == NULL &&
		    connector->state->content_protection == DRM_MODE_CONTENT_PROTECTION_ENABLED) {
			hdcp_reset_display(adev->dm.hdcp_workqueue, aconnector->dc_link->link_index);
			new_con_state->content_protection = DRM_MODE_CONTENT_PROTECTION_DESIRED;
			dm_new_con_state->update_hdcp = true;
			continue;
		}

		if (is_content_protection_different(new_con_state, old_con_state, connector, adev->dm.hdcp_workqueue))
			hdcp_update_display(
				adev->dm.hdcp_workqueue, aconnector->dc_link->link_index, aconnector,
				new_con_state->hdcp_content_type,
				new_con_state->content_protection == DRM_MODE_CONTENT_PROTECTION_DESIRED);
	}
#endif

	/* Handle connector state changes */
	for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) {
		struct dm_connector_state *dm_new_con_state = to_dm_connector_state(new_con_state);
		struct dm_connector_state *dm_old_con_state = to_dm_connector_state(old_con_state);
		struct amdgpu_crtc *acrtc = to_amdgpu_crtc(dm_new_con_state->base.crtc);
		struct dc_surface_update dummy_updates[MAX_SURFACES];
		struct dc_stream_update stream_update;
		struct dc_info_packet hdr_packet;
		struct dc_stream_status *status = NULL;
		bool abm_changed, hdr_changed, scaling_changed;

		memset(&dummy_updates, 0, sizeof(dummy_updates));
		memset(&stream_update, 0, sizeof(stream_update));

		if (acrtc) {
			new_crtc_state = drm_atomic_get_new_crtc_state(state, &acrtc->base);
			old_crtc_state = drm_atomic_get_old_crtc_state(state, &acrtc->base);
		}

		/* Skip any modesets/resets */
		if (!acrtc || drm_atomic_crtc_needs_modeset(new_crtc_state))
			continue;

		dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
		dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);

		/* Only push a stream update if scaling, ABM or HDR changed. */
		scaling_changed = is_scaling_state_different(dm_new_con_state,
							     dm_old_con_state);

		abm_changed = dm_new_crtc_state->abm_level !=
			      dm_old_crtc_state->abm_level;

		hdr_changed =
			is_hdr_metadata_different(old_con_state, new_con_state);

		if (!scaling_changed && !abm_changed && !hdr_changed)
			continue;

		stream_update.stream = dm_new_crtc_state->stream;
		if (scaling_changed) {
			update_stream_scaling_settings(&dm_new_con_state->base.crtc->mode,
					dm_new_con_state, dm_new_crtc_state->stream);

			stream_update.src = dm_new_crtc_state->stream->src;
			stream_update.dst = dm_new_crtc_state->stream->dst;
		}

		if (abm_changed) {
			dm_new_crtc_state->stream->abm_level = dm_new_crtc_state->abm_level;

			stream_update.abm_level = &dm_new_crtc_state->abm_level;
		}

		if (hdr_changed) {
			/* hdr_packet is stack-local; DC copies it during commit. */
			fill_hdr_info_packet(new_con_state, &hdr_packet);
			stream_update.hdr_static_metadata = &hdr_packet;
		}

		status = dc_stream_get_status(dm_new_crtc_state->stream);
		/*
		 * NOTE(review): status is dereferenced right after the
		 * WARN_ON(!status); a NULL status would still oops here.
		 */
		WARN_ON(!status);
		WARN_ON(!status->plane_count);

		/*
		 * TODO: DC refuses to perform stream updates without a dc_surface_update.
		 * Here we create an empty update on each plane.
		 * To fix this, DC should permit updating only stream properties.
		 */
		for (j = 0; j < status->plane_count; j++)
			dummy_updates[j].surface = status->plane_states[0];


		mutex_lock(&dm->dc_lock);
		dc_commit_updates_for_stream(dm->dc,
						     dummy_updates,
						     status->plane_count,
						     dm_new_crtc_state->stream,
						     &stream_update,
						     dc_state);
		mutex_unlock(&dm->dc_lock);
	}

	/* Count number of newly disabled CRTCs for dropping PM refs later. */
	for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state,
				      new_crtc_state, i) {
		if (old_crtc_state->active && !new_crtc_state->active)
			crtc_disable_count++;

		dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
		dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);

		/* For freesync config update on crtc state and params for irq */
		update_stream_irq_parameters(dm, dm_new_crtc_state);

		/* Handle vrr on->off / off->on transitions */
		amdgpu_dm_handle_vrr_transition(dm_old_crtc_state,
						dm_new_crtc_state);
	}

	/**
	 * Enable interrupts for CRTCs that are newly enabled or went through
	 * a modeset. It was intentionally deferred until after the front end
	 * state was modified to wait until the OTG was on and so the IRQ
	 * handlers didn't access stale or invalid state.
	 */
	for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
		struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
#ifdef CONFIG_DEBUG_FS
		bool configure_crc = false;
		enum amdgpu_dm_pipe_crc_source cur_crc_src;
#endif
		dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);

		if (new_crtc_state->active &&
		    (!old_crtc_state->active ||
		     drm_atomic_crtc_needs_modeset(new_crtc_state))) {
			/* Balances the dc_stream_release() in the disable loop above. */
			dc_stream_retain(dm_new_crtc_state->stream);
			acrtc->dm_irq_params.stream = dm_new_crtc_state->stream;
			manage_dm_interrupts(adev, acrtc, true);

#ifdef CONFIG_DEBUG_FS
			/**
			 * Frontend may have changed so reapply the CRC capture
			 * settings for the stream.
			 */
			dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
			/* crc_src is written from IRQ context; read it under event_lock. */
			spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);
			cur_crc_src = acrtc->dm_irq_params.crc_src;
			spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);

			if (amdgpu_dm_is_valid_crc_source(cur_crc_src)) {
				configure_crc = true;
#if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
				if (amdgpu_dm_crc_window_is_activated(crtc))
					configure_crc = false;
#endif
			}

			if (configure_crc)
				amdgpu_dm_crtc_configure_crc_source(
					crtc, dm_new_crtc_state, cur_crc_src);
#endif
		}
	}

	/* Async flips must not be throttled by waiting for vblank. */
	for_each_new_crtc_in_state(state, crtc, new_crtc_state, j)
		if (new_crtc_state->async_flip)
			wait_for_vblank = false;

	/* update planes when needed per crtc*/
	for_each_new_crtc_in_state(state, crtc, new_crtc_state, j) {
		dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);

		if (dm_new_crtc_state->stream)
			amdgpu_dm_commit_planes(state, dc_state, dev,
						dm, crtc, wait_for_vblank);
	}

	/* Update audio instances for each connector. */
	amdgpu_dm_commit_audio(dev, state);

	/*
	 * send vblank event on all events not handled in flip and
	 * mark consumed event for drm_atomic_helper_commit_hw_done
	 */
	spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);
	for_each_new_crtc_in_state(state, crtc, new_crtc_state, i) {

		if (new_crtc_state->event)
			drm_send_event_locked(dev, &new_crtc_state->event->base);

		new_crtc_state->event = NULL;
	}
	spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);

	/* Signal HW programming completion */
	drm_atomic_helper_commit_hw_done(state);

	if (wait_for_vblank)
		drm_atomic_helper_wait_for_flip_done(dev, state);

	drm_atomic_helper_cleanup_planes(dev, state);

	/* return the stolen vga memory back to VRAM */
	if (!adev->mman.keep_stolen_vga_memory)
		amdgpu_bo_free_kernel(&adev->mman.stolen_vga_memory, NULL, NULL);
	amdgpu_bo_free_kernel(&adev->mman.stolen_extended_memory, NULL, NULL);

	/*
	 * Finally, drop a runtime PM reference for each newly disabled CRTC,
	 * so we can put the GPU into runtime suspend if we're not driving any
	 * displays anymore
	 */
	for (i = 0; i < crtc_disable_count; i++)
		pm_runtime_put_autosuspend(dev->dev);
	pm_runtime_mark_last_busy(dev->dev);

	/* Release the fallback state created when no DM state was present. */
	if (dc_state_temp)
		dc_release_state(dc_state_temp);
}
8972
8973
8974 static int dm_force_atomic_commit(struct drm_connector *connector)
8975 {
8976         int ret = 0;
8977         struct drm_device *ddev = connector->dev;
8978         struct drm_atomic_state *state = drm_atomic_state_alloc(ddev);
8979         struct amdgpu_crtc *disconnected_acrtc = to_amdgpu_crtc(connector->encoder->crtc);
8980         struct drm_plane *plane = disconnected_acrtc->base.primary;
8981         struct drm_connector_state *conn_state;
8982         struct drm_crtc_state *crtc_state;
8983         struct drm_plane_state *plane_state;
8984
8985         if (!state)
8986                 return -ENOMEM;
8987
8988         state->acquire_ctx = ddev->mode_config.acquire_ctx;
8989
8990         /* Construct an atomic state to restore previous display setting */
8991
8992         /*
8993          * Attach connectors to drm_atomic_state
8994          */
8995         conn_state = drm_atomic_get_connector_state(state, connector);
8996
8997         ret = PTR_ERR_OR_ZERO(conn_state);
8998         if (ret)
8999                 goto out;
9000
9001         /* Attach crtc to drm_atomic_state*/
9002         crtc_state = drm_atomic_get_crtc_state(state, &disconnected_acrtc->base);
9003
9004         ret = PTR_ERR_OR_ZERO(crtc_state);
9005         if (ret)
9006                 goto out;
9007
9008         /* force a restore */
9009         crtc_state->mode_changed = true;
9010
9011         /* Attach plane to drm_atomic_state */
9012         plane_state = drm_atomic_get_plane_state(state, plane);
9013
9014         ret = PTR_ERR_OR_ZERO(plane_state);
9015         if (ret)
9016                 goto out;
9017
9018         /* Call commit internally with the state we just constructed */
9019         ret = drm_atomic_commit(state);
9020
9021 out:
9022         drm_atomic_state_put(state);
9023         if (ret)
9024                 DRM_ERROR("Restoring old state failed with %i\n", ret);
9025
9026         return ret;
9027 }
9028
9029 /*
9030  * This function handles all cases when set mode does not come upon hotplug.
9031  * This includes when a display is unplugged then plugged back into the
 * same port and when running without usermode desktop manager support
9033  */
9034 void dm_restore_drm_connector_state(struct drm_device *dev,
9035                                     struct drm_connector *connector)
9036 {
9037         struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
9038         struct amdgpu_crtc *disconnected_acrtc;
9039         struct dm_crtc_state *acrtc_state;
9040
9041         if (!aconnector->dc_sink || !connector->state || !connector->encoder)
9042                 return;
9043
9044         disconnected_acrtc = to_amdgpu_crtc(connector->encoder->crtc);
9045         if (!disconnected_acrtc)
9046                 return;
9047
9048         acrtc_state = to_dm_crtc_state(disconnected_acrtc->base.state);
9049         if (!acrtc_state->stream)
9050                 return;
9051
9052         /*
9053          * If the previous sink is not released and different from the current,
9054          * we deduce we are in a state where we can not rely on usermode call
9055          * to turn on the display, so we do it here
9056          */
9057         if (acrtc_state->stream->sink != aconnector->dc_sink)
9058                 dm_force_atomic_commit(&aconnector->base);
9059 }
9060
9061 /*
9062  * Grabs all modesetting locks to serialize against any blocking commits,
9063  * Waits for completion of all non blocking commits.
9064  */
9065 static int do_aquire_global_lock(struct drm_device *dev,
9066                                  struct drm_atomic_state *state)
9067 {
9068         struct drm_crtc *crtc;
9069         struct drm_crtc_commit *commit;
9070         long ret;
9071
9072         /*
9073          * Adding all modeset locks to aquire_ctx will
9074          * ensure that when the framework release it the
9075          * extra locks we are locking here will get released to
9076          */
9077         ret = drm_modeset_lock_all_ctx(dev, state->acquire_ctx);
9078         if (ret)
9079                 return ret;
9080
9081         list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
9082                 spin_lock(&crtc->commit_lock);
9083                 commit = list_first_entry_or_null(&crtc->commit_list,
9084                                 struct drm_crtc_commit, commit_entry);
9085                 if (commit)
9086                         drm_crtc_commit_get(commit);
9087                 spin_unlock(&crtc->commit_lock);
9088
9089                 if (!commit)
9090                         continue;
9091
9092                 /*
9093                  * Make sure all pending HW programming completed and
9094                  * page flips done
9095                  */
9096                 ret = wait_for_completion_interruptible_timeout(&commit->hw_done, 10*HZ);
9097
9098                 if (ret > 0)
9099                         ret = wait_for_completion_interruptible_timeout(
9100                                         &commit->flip_done, 10*HZ);
9101
9102                 if (ret == 0)
9103                         DRM_ERROR("[CRTC:%d:%s] hw_done or flip_done "
9104                                   "timed out\n", crtc->base.id, crtc->name);
9105
9106                 drm_crtc_commit_put(commit);
9107         }
9108
9109         return ret < 0 ? ret : 0;
9110 }
9111
9112 static void get_freesync_config_for_crtc(
9113         struct dm_crtc_state *new_crtc_state,
9114         struct dm_connector_state *new_con_state)
9115 {
9116         struct mod_freesync_config config = {0};
9117         struct amdgpu_dm_connector *aconnector =
9118                         to_amdgpu_dm_connector(new_con_state->base.connector);
9119         struct drm_display_mode *mode = &new_crtc_state->base.mode;
9120         int vrefresh = drm_mode_vrefresh(mode);
9121         bool fs_vid_mode = false;
9122
9123         new_crtc_state->vrr_supported = new_con_state->freesync_capable &&
9124                                         vrefresh >= aconnector->min_vfreq &&
9125                                         vrefresh <= aconnector->max_vfreq;
9126
9127         if (new_crtc_state->vrr_supported) {
9128                 new_crtc_state->stream->ignore_msa_timing_param = true;
9129                 fs_vid_mode = new_crtc_state->freesync_config.state == VRR_STATE_ACTIVE_FIXED;
9130
9131                 config.min_refresh_in_uhz = aconnector->min_vfreq * 1000000;
9132                 config.max_refresh_in_uhz = aconnector->max_vfreq * 1000000;
9133                 config.vsif_supported = true;
9134                 config.btr = true;
9135
9136                 if (fs_vid_mode) {
9137                         config.state = VRR_STATE_ACTIVE_FIXED;
9138                         config.fixed_refresh_in_uhz = new_crtc_state->freesync_config.fixed_refresh_in_uhz;
9139                         goto out;
9140                 } else if (new_crtc_state->base.vrr_enabled) {
9141                         config.state = VRR_STATE_ACTIVE_VARIABLE;
9142                 } else {
9143                         config.state = VRR_STATE_INACTIVE;
9144                 }
9145         }
9146 out:
9147         new_crtc_state->freesync_config = config;
9148 }
9149
9150 static void reset_freesync_config_for_crtc(
9151         struct dm_crtc_state *new_crtc_state)
9152 {
9153         new_crtc_state->vrr_supported = false;
9154
9155         memset(&new_crtc_state->vrr_infopacket, 0,
9156                sizeof(new_crtc_state->vrr_infopacket));
9157 }
9158
9159 static bool
9160 is_timing_unchanged_for_freesync(struct drm_crtc_state *old_crtc_state,
9161                                  struct drm_crtc_state *new_crtc_state)
9162 {
9163         struct drm_display_mode old_mode, new_mode;
9164
9165         if (!old_crtc_state || !new_crtc_state)
9166                 return false;
9167
9168         old_mode = old_crtc_state->mode;
9169         new_mode = new_crtc_state->mode;
9170
9171         if (old_mode.clock       == new_mode.clock &&
9172             old_mode.hdisplay    == new_mode.hdisplay &&
9173             old_mode.vdisplay    == new_mode.vdisplay &&
9174             old_mode.htotal      == new_mode.htotal &&
9175             old_mode.vtotal      != new_mode.vtotal &&
9176             old_mode.hsync_start == new_mode.hsync_start &&
9177             old_mode.vsync_start != new_mode.vsync_start &&
9178             old_mode.hsync_end   == new_mode.hsync_end &&
9179             old_mode.vsync_end   != new_mode.vsync_end &&
9180             old_mode.hskew       == new_mode.hskew &&
9181             old_mode.vscan       == new_mode.vscan &&
9182             (old_mode.vsync_end - old_mode.vsync_start) ==
9183             (new_mode.vsync_end - new_mode.vsync_start))
9184                 return true;
9185
9186         return false;
9187 }
9188
9189 static void set_freesync_fixed_config(struct dm_crtc_state *dm_new_crtc_state) {
9190         uint64_t num, den, res;
9191         struct drm_crtc_state *new_crtc_state = &dm_new_crtc_state->base;
9192
9193         dm_new_crtc_state->freesync_config.state = VRR_STATE_ACTIVE_FIXED;
9194
9195         num = (unsigned long long)new_crtc_state->mode.clock * 1000 * 1000000;
9196         den = (unsigned long long)new_crtc_state->mode.htotal *
9197               (unsigned long long)new_crtc_state->mode.vtotal;
9198
9199         res = div_u64(num, den);
9200         dm_new_crtc_state->freesync_config.fixed_refresh_in_uhz = res;
9201 }
9202
/*
 * Validate and apply the CRTC-level portion of an atomic state to the DC
 * context: create/validate a new dc stream for an enabled CRTC, remove the
 * old stream for a disabled/changed one, and perform the fast (non-modeset)
 * stream updates (scaling, ABM, color management, freesync).
 *
 * @dm: display manager owning the dc context
 * @state: the overall atomic state being checked
 * @crtc / @old_crtc_state / @new_crtc_state: CRTC under consideration
 * @enable: true when called for the "add streams" pass, false for removal
 * @lock_and_validation_needed: set to true when a stream was added/removed,
 *	so the caller knows full dc validation and global locking is needed
 *
 * Returns 0 on success or a negative error code.
 */
static int dm_update_crtc_state(struct amdgpu_display_manager *dm,
                                struct drm_atomic_state *state,
                                struct drm_crtc *crtc,
                                struct drm_crtc_state *old_crtc_state,
                                struct drm_crtc_state *new_crtc_state,
                                bool enable,
                                bool *lock_and_validation_needed)
{
        struct dm_atomic_state *dm_state = NULL;
        struct dm_crtc_state *dm_old_crtc_state, *dm_new_crtc_state;
        struct dc_stream_state *new_stream;
        int ret = 0;

        /*
         * TODO Move this code into dm_crtc_atomic_check once we get rid of dc_validation_set
         * update changed items
         */
        struct amdgpu_crtc *acrtc = NULL;
        struct amdgpu_dm_connector *aconnector = NULL;
        struct drm_connector_state *drm_new_conn_state = NULL, *drm_old_conn_state = NULL;
        struct dm_connector_state *dm_new_conn_state = NULL, *dm_old_conn_state = NULL;

        new_stream = NULL;

        dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
        dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
        acrtc = to_amdgpu_crtc(crtc);
        aconnector = amdgpu_dm_find_first_crtc_matching_connector(state, crtc);

        /* TODO This hack should go away */
        if (aconnector && enable) {
                /* Make sure fake sink is created in plug-in scenario */
                drm_new_conn_state = drm_atomic_get_new_connector_state(state,
                                                            &aconnector->base);
                drm_old_conn_state = drm_atomic_get_old_connector_state(state,
                                                            &aconnector->base);

                if (IS_ERR(drm_new_conn_state)) {
                        ret = PTR_ERR_OR_ZERO(drm_new_conn_state);
                        goto fail;
                }

                dm_new_conn_state = to_dm_connector_state(drm_new_conn_state);
                dm_old_conn_state = to_dm_connector_state(drm_old_conn_state);

                if (!drm_atomic_crtc_needs_modeset(new_crtc_state))
                        goto skip_modeset;

                /*
                 * Build and validate a dc stream for the requested mode;
                 * new_stream holds an extra reference released at
                 * skip_modeset/fail below.
                 */
                new_stream = create_validate_stream_for_sink(aconnector,
                                                             &new_crtc_state->mode,
                                                             dm_new_conn_state,
                                                             dm_old_crtc_state->stream);

                /*
                 * we can have no stream on ACTION_SET if a display
                 * was disconnected during S3, in this case it is not an
                 * error, the OS will be updated after detection, and
                 * will do the right thing on next atomic commit
                 */

                if (!new_stream) {
                        DRM_DEBUG_DRIVER("%s: Failed to create new stream for crtc %d\n",
                                        __func__, acrtc->base.base.id);
                        ret = -ENOMEM;
                        goto fail;
                }

                /*
                 * TODO: Check VSDB bits to decide whether this should
                 * be enabled or not.
                 */
                new_stream->triggered_crtc_reset.enabled =
                        dm->force_timing_sync;

                dm_new_crtc_state->abm_level = dm_new_conn_state->abm_level;

                ret = fill_hdr_info_packet(drm_new_conn_state,
                                           &new_stream->hdr_static_metadata);
                if (ret)
                        goto fail;

                /*
                 * If we already removed the old stream from the context
                 * (and set the new stream to NULL) then we can't reuse
                 * the old stream even if the stream and scaling are unchanged.
                 * We'll hit the BUG_ON and black screen.
                 *
                 * TODO: Refactor this function to allow this check to work
                 * in all conditions.
                 */
                if (amdgpu_freesync_vid_mode &&
                    dm_new_crtc_state->stream &&
                    is_timing_unchanged_for_freesync(new_crtc_state, old_crtc_state))
                        goto skip_modeset;

                /* An unchanged stream lets us avoid a full modeset below. */
                if (dm_new_crtc_state->stream &&
                    dc_is_stream_unchanged(new_stream, dm_old_crtc_state->stream) &&
                    dc_is_stream_scaling_unchanged(new_stream, dm_old_crtc_state->stream)) {
                        new_crtc_state->mode_changed = false;
                        DRM_DEBUG_DRIVER("Mode change not required, setting mode_changed to %d",
                                         new_crtc_state->mode_changed);
                }
        }

        /* mode_changed flag may get updated above, need to check again */
        if (!drm_atomic_crtc_needs_modeset(new_crtc_state))
                goto skip_modeset;

        DRM_DEBUG_ATOMIC(
                "amdgpu_crtc id:%d crtc_state_flags: enable:%d, active:%d, "
                "planes_changed:%d, mode_changed:%d,active_changed:%d,"
                "connectors_changed:%d\n",
                acrtc->crtc_id,
                new_crtc_state->enable,
                new_crtc_state->active,
                new_crtc_state->planes_changed,
                new_crtc_state->mode_changed,
                new_crtc_state->active_changed,
                new_crtc_state->connectors_changed);

        /* Remove stream for any changed/disabled CRTC */
        if (!enable) {

                if (!dm_old_crtc_state->stream)
                        goto skip_modeset;

                /*
                 * Freesync video mode: a pure front-porch change keeps the
                 * stream and switches to a fixed refresh instead of doing a
                 * full modeset.
                 */
                if (amdgpu_freesync_vid_mode && dm_new_crtc_state->stream &&
                    is_timing_unchanged_for_freesync(new_crtc_state,
                                                     old_crtc_state)) {
                        new_crtc_state->mode_changed = false;
                        DRM_DEBUG_DRIVER(
                                "Mode change not required for front porch change, "
                                "setting mode_changed to %d",
                                new_crtc_state->mode_changed);

                        set_freesync_fixed_config(dm_new_crtc_state);

                        goto skip_modeset;
                } else if (amdgpu_freesync_vid_mode && aconnector &&
                           is_freesync_video_mode(&new_crtc_state->mode,
                                                  aconnector)) {
                        set_freesync_fixed_config(dm_new_crtc_state);
                }

                ret = dm_atomic_get_state(state, &dm_state);
                if (ret)
                        goto fail;

                DRM_DEBUG_DRIVER("Disabling DRM crtc: %d\n",
                                crtc->base.id);

                /* i.e. reset mode */
                if (dc_remove_stream_from_ctx(
                                dm->dc,
                                dm_state->context,
                                dm_old_crtc_state->stream) != DC_OK) {
                        ret = -EINVAL;
                        goto fail;
                }

                /* Drop the CRTC state's reference to the outgoing stream. */
                dc_stream_release(dm_old_crtc_state->stream);
                dm_new_crtc_state->stream = NULL;

                reset_freesync_config_for_crtc(dm_new_crtc_state);

                *lock_and_validation_needed = true;

        } else {/* Add stream for any updated/enabled CRTC */
                /*
                 * Quick fix to prevent NULL pointer on new_stream when
                 * added MST connectors not found in existing crtc_state in the chained mode
                 * TODO: need to dig out the root cause of that
                 */
                if (!aconnector || (!aconnector->dc_sink && aconnector->mst_port))
                        goto skip_modeset;

                if (modereset_required(new_crtc_state))
                        goto skip_modeset;

                if (modeset_required(new_crtc_state, new_stream,
                                     dm_old_crtc_state->stream)) {

                        WARN_ON(dm_new_crtc_state->stream);

                        ret = dm_atomic_get_state(state, &dm_state);
                        if (ret)
                                goto fail;

                        dm_new_crtc_state->stream = new_stream;

                        /* The CRTC state now owns a reference of its own. */
                        dc_stream_retain(new_stream);

                        DRM_DEBUG_ATOMIC("Enabling DRM crtc: %d\n",
                                         crtc->base.id);

                        if (dc_add_stream_to_ctx(
                                        dm->dc,
                                        dm_state->context,
                                        dm_new_crtc_state->stream) != DC_OK) {
                                ret = -EINVAL;
                                goto fail;
                        }

                        *lock_and_validation_needed = true;
                }
        }

skip_modeset:
        /* Release extra reference */
        if (new_stream)
                 dc_stream_release(new_stream);

        /*
         * We want to do dc stream updates that do not require a
         * full modeset below.
         */
        if (!(enable && aconnector && new_crtc_state->active))
                return 0;
        /*
         * Given above conditions, the dc state cannot be NULL because:
         * 1. We're in the process of enabling CRTCs (just been added
         *    to the dc context, or already is on the context)
         * 2. Has a valid connector attached, and
         * 3. Is currently active and enabled.
         * => The dc stream state currently exists.
         */
        BUG_ON(dm_new_crtc_state->stream == NULL);

        /* Scaling or underscan settings */
        if (is_scaling_state_different(dm_old_conn_state, dm_new_conn_state))
                update_stream_scaling_settings(
                        &new_crtc_state->mode, dm_new_conn_state, dm_new_crtc_state->stream);

        /* ABM settings */
        dm_new_crtc_state->abm_level = dm_new_conn_state->abm_level;

        /*
         * Color management settings. We also update color properties
         * when a modeset is needed, to ensure it gets reprogrammed.
         */
        if (dm_new_crtc_state->base.color_mgmt_changed ||
            drm_atomic_crtc_needs_modeset(new_crtc_state)) {
                ret = amdgpu_dm_update_crtc_color_mgmt(dm_new_crtc_state);
                if (ret)
                        goto fail;
        }

        /* Update Freesync settings. */
        get_freesync_config_for_crtc(dm_new_crtc_state,
                                     dm_new_conn_state);

        return ret;

fail:
        if (new_stream)
                dc_stream_release(new_stream);
        return ret;
}
9461
9462 static bool should_reset_plane(struct drm_atomic_state *state,
9463                                struct drm_plane *plane,
9464                                struct drm_plane_state *old_plane_state,
9465                                struct drm_plane_state *new_plane_state)
9466 {
9467         struct drm_plane *other;
9468         struct drm_plane_state *old_other_state, *new_other_state;
9469         struct drm_crtc_state *new_crtc_state;
9470         int i;
9471
9472         /*
9473          * TODO: Remove this hack once the checks below are sufficient
9474          * enough to determine when we need to reset all the planes on
9475          * the stream.
9476          */
9477         if (state->allow_modeset)
9478                 return true;
9479
9480         /* Exit early if we know that we're adding or removing the plane. */
9481         if (old_plane_state->crtc != new_plane_state->crtc)
9482                 return true;
9483
9484         /* old crtc == new_crtc == NULL, plane not in context. */
9485         if (!new_plane_state->crtc)
9486                 return false;
9487
9488         new_crtc_state =
9489                 drm_atomic_get_new_crtc_state(state, new_plane_state->crtc);
9490
9491         if (!new_crtc_state)
9492                 return true;
9493
9494         /* CRTC Degamma changes currently require us to recreate planes. */
9495         if (new_crtc_state->color_mgmt_changed)
9496                 return true;
9497
9498         if (drm_atomic_crtc_needs_modeset(new_crtc_state))
9499                 return true;
9500
9501         /*
9502          * If there are any new primary or overlay planes being added or
9503          * removed then the z-order can potentially change. To ensure
9504          * correct z-order and pipe acquisition the current DC architecture
9505          * requires us to remove and recreate all existing planes.
9506          *
9507          * TODO: Come up with a more elegant solution for this.
9508          */
9509         for_each_oldnew_plane_in_state(state, other, old_other_state, new_other_state, i) {
9510                 struct amdgpu_framebuffer *old_afb, *new_afb;
9511                 if (other->type == DRM_PLANE_TYPE_CURSOR)
9512                         continue;
9513
9514                 if (old_other_state->crtc != new_plane_state->crtc &&
9515                     new_other_state->crtc != new_plane_state->crtc)
9516                         continue;
9517
9518                 if (old_other_state->crtc != new_other_state->crtc)
9519                         return true;
9520
9521                 /* Src/dst size and scaling updates. */
9522                 if (old_other_state->src_w != new_other_state->src_w ||
9523                     old_other_state->src_h != new_other_state->src_h ||
9524                     old_other_state->crtc_w != new_other_state->crtc_w ||
9525                     old_other_state->crtc_h != new_other_state->crtc_h)
9526                         return true;
9527
9528                 /* Rotation / mirroring updates. */
9529                 if (old_other_state->rotation != new_other_state->rotation)
9530                         return true;
9531
9532                 /* Blending updates. */
9533                 if (old_other_state->pixel_blend_mode !=
9534                     new_other_state->pixel_blend_mode)
9535                         return true;
9536
9537                 /* Alpha updates. */
9538                 if (old_other_state->alpha != new_other_state->alpha)
9539                         return true;
9540
9541                 /* Colorspace changes. */
9542                 if (old_other_state->color_range != new_other_state->color_range ||
9543                     old_other_state->color_encoding != new_other_state->color_encoding)
9544                         return true;
9545
9546                 /* Framebuffer checks fall at the end. */
9547                 if (!old_other_state->fb || !new_other_state->fb)
9548                         continue;
9549
9550                 /* Pixel format changes can require bandwidth updates. */
9551                 if (old_other_state->fb->format != new_other_state->fb->format)
9552                         return true;
9553
9554                 old_afb = (struct amdgpu_framebuffer *)old_other_state->fb;
9555                 new_afb = (struct amdgpu_framebuffer *)new_other_state->fb;
9556
9557                 /* Tiling and DCC changes also require bandwidth updates. */
9558                 if (old_afb->tiling_flags != new_afb->tiling_flags ||
9559                     old_afb->base.modifier != new_afb->base.modifier)
9560                         return true;
9561         }
9562
9563         return false;
9564 }
9565
9566 static int dm_check_cursor_fb(struct amdgpu_crtc *new_acrtc,
9567                               struct drm_plane_state *new_plane_state,
9568                               struct drm_framebuffer *fb)
9569 {
9570         struct amdgpu_device *adev = drm_to_adev(new_acrtc->base.dev);
9571         struct amdgpu_framebuffer *afb = to_amdgpu_framebuffer(fb);
9572         unsigned int pitch;
9573         bool linear;
9574
9575         if (fb->width > new_acrtc->max_cursor_width ||
9576             fb->height > new_acrtc->max_cursor_height) {
9577                 DRM_DEBUG_ATOMIC("Bad cursor FB size %dx%d\n",
9578                                  new_plane_state->fb->width,
9579                                  new_plane_state->fb->height);
9580                 return -EINVAL;
9581         }
9582         if (new_plane_state->src_w != fb->width << 16 ||
9583             new_plane_state->src_h != fb->height << 16) {
9584                 DRM_DEBUG_ATOMIC("Cropping not supported for cursor plane\n");
9585                 return -EINVAL;
9586         }
9587
9588         /* Pitch in pixels */
9589         pitch = fb->pitches[0] / fb->format->cpp[0];
9590
9591         if (fb->width != pitch) {
9592                 DRM_DEBUG_ATOMIC("Cursor FB width %d doesn't match pitch %d",
9593                                  fb->width, pitch);
9594                 return -EINVAL;
9595         }
9596
9597         switch (pitch) {
9598         case 64:
9599         case 128:
9600         case 256:
9601                 /* FB pitch is supported by cursor plane */
9602                 break;
9603         default:
9604                 DRM_DEBUG_ATOMIC("Bad cursor FB pitch %d px\n", pitch);
9605                 return -EINVAL;
9606         }
9607
9608         /* Core DRM takes care of checking FB modifiers, so we only need to
9609          * check tiling flags when the FB doesn't have a modifier. */
9610         if (!(fb->flags & DRM_MODE_FB_MODIFIERS)) {
9611                 if (adev->family < AMDGPU_FAMILY_AI) {
9612                         linear = AMDGPU_TILING_GET(afb->tiling_flags, ARRAY_MODE) != DC_ARRAY_2D_TILED_THIN1 &&
9613                                  AMDGPU_TILING_GET(afb->tiling_flags, ARRAY_MODE) != DC_ARRAY_1D_TILED_THIN1 &&
9614                                  AMDGPU_TILING_GET(afb->tiling_flags, MICRO_TILE_MODE) == 0;
9615                 } else {
9616                         linear = AMDGPU_TILING_GET(afb->tiling_flags, SWIZZLE_MODE) == 0;
9617                 }
9618                 if (!linear) {
9619                         DRM_DEBUG_ATOMIC("Cursor FB not linear");
9620                         return -EINVAL;
9621                 }
9622         }
9623
9624         return 0;
9625 }
9626
/**
 * dm_update_plane_state() - Validate one plane's new atomic state and mirror
 * the change into the DC (Display Core) validation context.
 * @dc: Display Core instance
 * @state: overall DRM atomic state being checked
 * @plane: DRM plane under consideration
 * @old_plane_state: currently-committed plane state
 * @new_plane_state: proposed plane state
 * @enable: false = removal pass (detach dc_plane_state from the old stream),
 *          true = addition pass (create and attach a new dc_plane_state);
 *          the caller runs both passes over all planes
 * @lock_and_validation_needed: set to true when the change requires DC
 *          global validation and the global lock later in atomic check
 *
 * Return: 0 on success or when there is nothing to do for this plane,
 * negative errno on failure.
 */
static int dm_update_plane_state(struct dc *dc,
				 struct drm_atomic_state *state,
				 struct drm_plane *plane,
				 struct drm_plane_state *old_plane_state,
				 struct drm_plane_state *new_plane_state,
				 bool enable,
				 bool *lock_and_validation_needed)
{

	struct dm_atomic_state *dm_state = NULL;
	struct drm_crtc *new_plane_crtc, *old_plane_crtc;
	struct drm_crtc_state *old_crtc_state, *new_crtc_state;
	struct dm_crtc_state *dm_new_crtc_state, *dm_old_crtc_state;
	struct dm_plane_state *dm_new_plane_state, *dm_old_plane_state;
	struct amdgpu_crtc *new_acrtc;
	bool needs_reset;
	int ret = 0;


	new_plane_crtc = new_plane_state->crtc;
	old_plane_crtc = old_plane_state->crtc;
	dm_new_plane_state = to_dm_plane_state(new_plane_state);
	dm_old_plane_state = to_dm_plane_state(old_plane_state);

	if (plane->type == DRM_PLANE_TYPE_CURSOR) {
		/* Cursor planes never get a dc_plane_state; they are only
		 * sanity-checked here. Nothing to do on the removal pass or
		 * when the plane is being disabled.
		 */
		if (!enable || !new_plane_crtc ||
			drm_atomic_plane_disabling(plane->state, new_plane_state))
			return 0;

		new_acrtc = to_amdgpu_crtc(new_plane_crtc);

		/* Cursor source cropping is not supported by the hardware. */
		if (new_plane_state->src_x != 0 || new_plane_state->src_y != 0) {
			DRM_DEBUG_ATOMIC("Cropping not supported for cursor plane\n");
			return -EINVAL;
		}

		if (new_plane_state->fb) {
			ret = dm_check_cursor_fb(new_acrtc, new_plane_state,
						 new_plane_state->fb);
			if (ret)
				return ret;
		}

		return 0;
	}

	needs_reset = should_reset_plane(state, plane, old_plane_state,
					 new_plane_state);

	/* Remove any changed/removed planes */
	if (!enable) {
		/* Only planes that need a reset (and were attached to a CRTC
		 * with a live stream) have a dc_plane_state to tear down.
		 */
		if (!needs_reset)
			return 0;

		if (!old_plane_crtc)
			return 0;

		old_crtc_state = drm_atomic_get_old_crtc_state(
				state, old_plane_crtc);
		dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);

		if (!dm_old_crtc_state->stream)
			return 0;

		DRM_DEBUG_ATOMIC("Disabling DRM plane: %d on DRM crtc %d\n",
				plane->base.id, old_plane_crtc->base.id);

		ret = dm_atomic_get_state(state, &dm_state);
		if (ret)
			return ret;

		if (!dc_remove_plane_from_context(
				dc,
				dm_old_crtc_state->stream,
				dm_old_plane_state->dc_state,
				dm_state->context)) {

			return -EINVAL;
		}


		/* Drop our reference; the new plane state no longer owns a
		 * DC plane after removal.
		 */
		dc_plane_state_release(dm_old_plane_state->dc_state);
		dm_new_plane_state->dc_state = NULL;

		*lock_and_validation_needed = true;

	} else { /* Add new planes */
		struct dc_plane_state *dc_new_plane_state;

		if (drm_atomic_plane_disabling(plane->state, new_plane_state))
			return 0;

		if (!new_plane_crtc)
			return 0;

		new_crtc_state = drm_atomic_get_new_crtc_state(state, new_plane_crtc);
		dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);

		if (!dm_new_crtc_state->stream)
			return 0;

		if (!needs_reset)
			return 0;

		ret = dm_plane_helper_check_state(new_plane_state, new_crtc_state);
		if (ret)
			return ret;

		/* The removal pass should already have detached any previous
		 * DC plane state for this plane.
		 */
		WARN_ON(dm_new_plane_state->dc_state);

		dc_new_plane_state = dc_create_plane_state(dc);
		if (!dc_new_plane_state)
			return -ENOMEM;

		DRM_DEBUG_ATOMIC("Enabling DRM plane: %d on DRM crtc %d\n",
				 plane->base.id, new_plane_crtc->base.id);

		ret = fill_dc_plane_attributes(
			drm_to_adev(new_plane_crtc->dev),
			dc_new_plane_state,
			new_plane_state,
			new_crtc_state);
		if (ret) {
			dc_plane_state_release(dc_new_plane_state);
			return ret;
		}

		ret = dm_atomic_get_state(state, &dm_state);
		if (ret) {
			dc_plane_state_release(dc_new_plane_state);
			return ret;
		}

		/*
		 * Any atomic check errors that occur after this will
		 * not need a release. The plane state will be attached
		 * to the stream, and therefore part of the atomic
		 * state. It'll be released when the atomic state is
		 * cleaned.
		 */
		if (!dc_add_plane_to_context(
				dc,
				dm_new_crtc_state->stream,
				dc_new_plane_state,
				dm_state->context)) {

			dc_plane_state_release(dc_new_plane_state);
			return -EINVAL;
		}

		dm_new_plane_state->dc_state = dc_new_plane_state;

		/* Tell DC to do a full surface update every time there
		 * is a plane change. Inefficient, but works for now.
		 */
		dm_new_plane_state->dc_state->update_flags.bits.full_update = 1;

		*lock_and_validation_needed = true;
	}


	return ret;
}
9790
9791 static int dm_check_crtc_cursor(struct drm_atomic_state *state,
9792                                 struct drm_crtc *crtc,
9793                                 struct drm_crtc_state *new_crtc_state)
9794 {
9795         struct drm_plane_state *new_cursor_state, *new_primary_state;
9796         int cursor_scale_w, cursor_scale_h, primary_scale_w, primary_scale_h;
9797
9798         /* On DCE and DCN there is no dedicated hardware cursor plane. We get a
9799          * cursor per pipe but it's going to inherit the scaling and
9800          * positioning from the underlying pipe. Check the cursor plane's
9801          * blending properties match the primary plane's. */
9802
9803         new_cursor_state = drm_atomic_get_new_plane_state(state, crtc->cursor);
9804         new_primary_state = drm_atomic_get_new_plane_state(state, crtc->primary);
9805         if (!new_cursor_state || !new_primary_state ||
9806             !new_cursor_state->fb || !new_primary_state->fb) {
9807                 return 0;
9808         }
9809
9810         cursor_scale_w = new_cursor_state->crtc_w * 1000 /
9811                          (new_cursor_state->src_w >> 16);
9812         cursor_scale_h = new_cursor_state->crtc_h * 1000 /
9813                          (new_cursor_state->src_h >> 16);
9814
9815         primary_scale_w = new_primary_state->crtc_w * 1000 /
9816                          (new_primary_state->src_w >> 16);
9817         primary_scale_h = new_primary_state->crtc_h * 1000 /
9818                          (new_primary_state->src_h >> 16);
9819
9820         if (cursor_scale_w != primary_scale_w ||
9821             cursor_scale_h != primary_scale_h) {
9822                 DRM_DEBUG_ATOMIC("Cursor plane scaling doesn't match primary plane\n");
9823                 return -EINVAL;
9824         }
9825
9826         return 0;
9827 }
9828
9829 #if defined(CONFIG_DRM_AMD_DC_DCN)
9830 static int add_affected_mst_dsc_crtcs(struct drm_atomic_state *state, struct drm_crtc *crtc)
9831 {
9832         struct drm_connector *connector;
9833         struct drm_connector_state *conn_state;
9834         struct amdgpu_dm_connector *aconnector = NULL;
9835         int i;
9836         for_each_new_connector_in_state(state, connector, conn_state, i) {
9837                 if (conn_state->crtc != crtc)
9838                         continue;
9839
9840                 aconnector = to_amdgpu_dm_connector(connector);
9841                 if (!aconnector->port || !aconnector->mst_port)
9842                         aconnector = NULL;
9843                 else
9844                         break;
9845         }
9846
9847         if (!aconnector)
9848                 return 0;
9849
9850         return drm_dp_mst_add_affected_dsc_crtcs(state, &aconnector->mst_port->mst_mgr);
9851 }
9852 #endif
9853
9854 /**
9855  * amdgpu_dm_atomic_check() - Atomic check implementation for AMDgpu DM.
9856  * @dev: The DRM device
9857  * @state: The atomic state to commit
9858  *
9859  * Validate that the given atomic state is programmable by DC into hardware.
9860  * This involves constructing a &struct dc_state reflecting the new hardware
9861  * state we wish to commit, then querying DC to see if it is programmable. It's
9862  * important not to modify the existing DC state. Otherwise, atomic_check
9863  * may unexpectedly commit hardware changes.
9864  *
9865  * When validating the DC state, it's important that the right locks are
9866  * acquired. For full updates case which removes/adds/updates streams on one
9867  * CRTC while flipping on another CRTC, acquiring global lock will guarantee
9868  * that any such full update commit will wait for completion of any outstanding
9869  * flip using DRMs synchronization events.
9870  *
9871  * Note that DM adds the affected connectors for all CRTCs in state, when that
9872  * might not seem necessary. This is because DC stream creation requires the
9873  * DC sink, which is tied to the DRM connector state. Cleaning this up should
9874  * be possible but non-trivial - a possible TODO item.
9875  *
 * Return: 0 on success, or a negative error code if validation failed.
9877  */
9878 static int amdgpu_dm_atomic_check(struct drm_device *dev,
9879                                   struct drm_atomic_state *state)
9880 {
9881         struct amdgpu_device *adev = drm_to_adev(dev);
9882         struct dm_atomic_state *dm_state = NULL;
9883         struct dc *dc = adev->dm.dc;
9884         struct drm_connector *connector;
9885         struct drm_connector_state *old_con_state, *new_con_state;
9886         struct drm_crtc *crtc;
9887         struct drm_crtc_state *old_crtc_state, *new_crtc_state;
9888         struct drm_plane *plane;
9889         struct drm_plane_state *old_plane_state, *new_plane_state;
9890         enum dc_status status;
9891         int ret, i;
9892         bool lock_and_validation_needed = false;
9893         struct dm_crtc_state *dm_old_crtc_state;
9894
9895         trace_amdgpu_dm_atomic_check_begin(state);
9896
9897         ret = drm_atomic_helper_check_modeset(dev, state);
9898         if (ret)
9899                 goto fail;
9900
9901         /* Check connector changes */
9902         for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) {
9903                 struct dm_connector_state *dm_old_con_state = to_dm_connector_state(old_con_state);
9904                 struct dm_connector_state *dm_new_con_state = to_dm_connector_state(new_con_state);
9905
9906                 /* Skip connectors that are disabled or part of modeset already. */
9907                 if (!old_con_state->crtc && !new_con_state->crtc)
9908                         continue;
9909
9910                 if (!new_con_state->crtc)
9911                         continue;
9912
9913                 new_crtc_state = drm_atomic_get_crtc_state(state, new_con_state->crtc);
9914                 if (IS_ERR(new_crtc_state)) {
9915                         ret = PTR_ERR(new_crtc_state);
9916                         goto fail;
9917                 }
9918
9919                 if (dm_old_con_state->abm_level !=
9920                     dm_new_con_state->abm_level)
9921                         new_crtc_state->connectors_changed = true;
9922         }
9923
9924 #if defined(CONFIG_DRM_AMD_DC_DCN)
9925         if (dc_resource_is_dsc_encoding_supported(dc)) {
9926                 for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
9927                         if (drm_atomic_crtc_needs_modeset(new_crtc_state)) {
9928                                 ret = add_affected_mst_dsc_crtcs(state, crtc);
9929                                 if (ret)
9930                                         goto fail;
9931                         }
9932                 }
9933         }
9934 #endif
9935         for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
9936                 dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
9937
9938                 if (!drm_atomic_crtc_needs_modeset(new_crtc_state) &&
9939                     !new_crtc_state->color_mgmt_changed &&
9940                     old_crtc_state->vrr_enabled == new_crtc_state->vrr_enabled &&
9941                         dm_old_crtc_state->dsc_force_changed == false)
9942                         continue;
9943
9944                 if (!new_crtc_state->enable)
9945                         continue;
9946
9947                 ret = drm_atomic_add_affected_connectors(state, crtc);
9948                 if (ret)
9949                         return ret;
9950
9951                 ret = drm_atomic_add_affected_planes(state, crtc);
9952                 if (ret)
9953                         goto fail;
9954
9955                 if (dm_old_crtc_state->dsc_force_changed)
9956                         new_crtc_state->mode_changed = true;
9957         }
9958
9959         /*
9960          * Add all primary and overlay planes on the CRTC to the state
9961          * whenever a plane is enabled to maintain correct z-ordering
9962          * and to enable fast surface updates.
9963          */
9964         drm_for_each_crtc(crtc, dev) {
9965                 bool modified = false;
9966
9967                 for_each_oldnew_plane_in_state(state, plane, old_plane_state, new_plane_state, i) {
9968                         if (plane->type == DRM_PLANE_TYPE_CURSOR)
9969                                 continue;
9970
9971                         if (new_plane_state->crtc == crtc ||
9972                             old_plane_state->crtc == crtc) {
9973                                 modified = true;
9974                                 break;
9975                         }
9976                 }
9977
9978                 if (!modified)
9979                         continue;
9980
9981                 drm_for_each_plane_mask(plane, state->dev, crtc->state->plane_mask) {
9982                         if (plane->type == DRM_PLANE_TYPE_CURSOR)
9983                                 continue;
9984
9985                         new_plane_state =
9986                                 drm_atomic_get_plane_state(state, plane);
9987
9988                         if (IS_ERR(new_plane_state)) {
9989                                 ret = PTR_ERR(new_plane_state);
9990                                 goto fail;
9991                         }
9992                 }
9993         }
9994
9995         /* Remove exiting planes if they are modified */
9996         for_each_oldnew_plane_in_state_reverse(state, plane, old_plane_state, new_plane_state, i) {
9997                 ret = dm_update_plane_state(dc, state, plane,
9998                                             old_plane_state,
9999                                             new_plane_state,
10000                                             false,
10001                                             &lock_and_validation_needed);
10002                 if (ret)
10003                         goto fail;
10004         }
10005
10006         /* Disable all crtcs which require disable */
10007         for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
10008                 ret = dm_update_crtc_state(&adev->dm, state, crtc,
10009                                            old_crtc_state,
10010                                            new_crtc_state,
10011                                            false,
10012                                            &lock_and_validation_needed);
10013                 if (ret)
10014                         goto fail;
10015         }
10016
10017         /* Enable all crtcs which require enable */
10018         for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
10019                 ret = dm_update_crtc_state(&adev->dm, state, crtc,
10020                                            old_crtc_state,
10021                                            new_crtc_state,
10022                                            true,
10023                                            &lock_and_validation_needed);
10024                 if (ret)
10025                         goto fail;
10026         }
10027
10028         /* Add new/modified planes */
10029         for_each_oldnew_plane_in_state_reverse(state, plane, old_plane_state, new_plane_state, i) {
10030                 ret = dm_update_plane_state(dc, state, plane,
10031                                             old_plane_state,
10032                                             new_plane_state,
10033                                             true,
10034                                             &lock_and_validation_needed);
10035                 if (ret)
10036                         goto fail;
10037         }
10038
10039         /* Run this here since we want to validate the streams we created */
10040         ret = drm_atomic_helper_check_planes(dev, state);
10041         if (ret)
10042                 goto fail;
10043
10044         /* Check cursor planes scaling */
10045         for_each_new_crtc_in_state(state, crtc, new_crtc_state, i) {
10046                 ret = dm_check_crtc_cursor(state, crtc, new_crtc_state);
10047                 if (ret)
10048                         goto fail;
10049         }
10050
10051         if (state->legacy_cursor_update) {
10052                 /*
10053                  * This is a fast cursor update coming from the plane update
10054                  * helper, check if it can be done asynchronously for better
10055                  * performance.
10056                  */
10057                 state->async_update =
10058                         !drm_atomic_helper_async_check(dev, state);
10059
10060                 /*
10061                  * Skip the remaining global validation if this is an async
10062                  * update. Cursor updates can be done without affecting
10063                  * state or bandwidth calcs and this avoids the performance
10064                  * penalty of locking the private state object and
10065                  * allocating a new dc_state.
10066                  */
10067                 if (state->async_update)
10068                         return 0;
10069         }
10070
10071         /* Check scaling and underscan changes*/
10072         /* TODO Removed scaling changes validation due to inability to commit
10073          * new stream into context w\o causing full reset. Need to
10074          * decide how to handle.
10075          */
10076         for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) {
10077                 struct dm_connector_state *dm_old_con_state = to_dm_connector_state(old_con_state);
10078                 struct dm_connector_state *dm_new_con_state = to_dm_connector_state(new_con_state);
10079                 struct amdgpu_crtc *acrtc = to_amdgpu_crtc(dm_new_con_state->base.crtc);
10080
10081                 /* Skip any modesets/resets */
10082                 if (!acrtc || drm_atomic_crtc_needs_modeset(
10083                                 drm_atomic_get_new_crtc_state(state, &acrtc->base)))
10084                         continue;
10085
10086                 /* Skip any thing not scale or underscan changes */
10087                 if (!is_scaling_state_different(dm_new_con_state, dm_old_con_state))
10088                         continue;
10089
10090                 lock_and_validation_needed = true;
10091         }
10092
10093         /**
10094          * Streams and planes are reset when there are changes that affect
10095          * bandwidth. Anything that affects bandwidth needs to go through
10096          * DC global validation to ensure that the configuration can be applied
10097          * to hardware.
10098          *
10099          * We have to currently stall out here in atomic_check for outstanding
10100          * commits to finish in this case because our IRQ handlers reference
10101          * DRM state directly - we can end up disabling interrupts too early
10102          * if we don't.
10103          *
10104          * TODO: Remove this stall and drop DM state private objects.
10105          */
10106         if (lock_and_validation_needed) {
10107                 ret = dm_atomic_get_state(state, &dm_state);
10108                 if (ret)
10109                         goto fail;
10110
10111                 ret = do_aquire_global_lock(dev, state);
10112                 if (ret)
10113                         goto fail;
10114
10115 #if defined(CONFIG_DRM_AMD_DC_DCN)
10116                 if (!compute_mst_dsc_configs_for_state(state, dm_state->context))
10117                         goto fail;
10118
10119                 ret = dm_update_mst_vcpi_slots_for_dsc(state, dm_state->context);
10120                 if (ret)
10121                         goto fail;
10122 #endif
10123
10124                 /*
10125                  * Perform validation of MST topology in the state:
10126                  * We need to perform MST atomic check before calling
10127                  * dc_validate_global_state(), or there is a chance
10128                  * to get stuck in an infinite loop and hang eventually.
10129                  */
10130                 ret = drm_dp_mst_atomic_check(state);
10131                 if (ret)
10132                         goto fail;
10133                 status = dc_validate_global_state(dc, dm_state->context, false);
10134                 if (status != DC_OK) {
10135                         DC_LOG_WARNING("DC global validation failure: %s (%d)",
10136                                        dc_status_to_str(status), status);
10137                         ret = -EINVAL;
10138                         goto fail;
10139                 }
10140         } else {
10141                 /*
10142                  * The commit is a fast update. Fast updates shouldn't change
10143                  * the DC context, affect global validation, and can have their
10144                  * commit work done in parallel with other commits not touching
10145                  * the same resource. If we have a new DC context as part of
10146                  * the DM atomic state from validation we need to free it and
10147                  * retain the existing one instead.
10148                  *
10149                  * Furthermore, since the DM atomic state only contains the DC
10150                  * context and can safely be annulled, we can free the state
10151                  * and clear the associated private object now to free
10152                  * some memory and avoid a possible use-after-free later.
10153                  */
10154
10155                 for (i = 0; i < state->num_private_objs; i++) {
10156                         struct drm_private_obj *obj = state->private_objs[i].ptr;
10157
10158                         if (obj->funcs == adev->dm.atomic_obj.funcs) {
10159                                 int j = state->num_private_objs-1;
10160
10161                                 dm_atomic_destroy_state(obj,
10162                                                 state->private_objs[i].state);
10163
10164                                 /* If i is not at the end of the array then the
10165                                  * last element needs to be moved to where i was
10166                                  * before the array can safely be truncated.
10167                                  */
10168                                 if (i != j)
10169                                         state->private_objs[i] =
10170                                                 state->private_objs[j];
10171
10172                                 state->private_objs[j].ptr = NULL;
10173                                 state->private_objs[j].state = NULL;
10174                                 state->private_objs[j].old_state = NULL;
10175                                 state->private_objs[j].new_state = NULL;
10176
10177                                 state->num_private_objs = j;
10178                                 break;
10179                         }
10180                 }
10181         }
10182
10183         /* Store the overall update type for use later in atomic check. */
10184         for_each_new_crtc_in_state (state, crtc, new_crtc_state, i) {
10185                 struct dm_crtc_state *dm_new_crtc_state =
10186                         to_dm_crtc_state(new_crtc_state);
10187
10188                 dm_new_crtc_state->update_type = lock_and_validation_needed ?
10189                                                          UPDATE_TYPE_FULL :
10190                                                          UPDATE_TYPE_FAST;
10191         }
10192
10193         /* Must be success */
10194         WARN_ON(ret);
10195
10196         trace_amdgpu_dm_atomic_check_finish(state, ret);
10197
10198         return ret;
10199
10200 fail:
10201         if (ret == -EDEADLK)
10202                 DRM_DEBUG_DRIVER("Atomic check stopped to avoid deadlock.\n");
10203         else if (ret == -EINTR || ret == -EAGAIN || ret == -ERESTARTSYS)
10204                 DRM_DEBUG_DRIVER("Atomic check stopped due to signal.\n");
10205         else
10206                 DRM_DEBUG_DRIVER("Atomic check failed with err: %d \n", ret);
10207
10208         trace_amdgpu_dm_atomic_check_finish(state, ret);
10209
10210         return ret;
10211 }
10212
10213 static bool is_dp_capable_without_timing_msa(struct dc *dc,
10214                                              struct amdgpu_dm_connector *amdgpu_dm_connector)
10215 {
10216         uint8_t dpcd_data;
10217         bool capable = false;
10218
10219         if (amdgpu_dm_connector->dc_link &&
10220                 dm_helpers_dp_read_dpcd(
10221                                 NULL,
10222                                 amdgpu_dm_connector->dc_link,
10223                                 DP_DOWN_STREAM_PORT_COUNT,
10224                                 &dpcd_data,
10225                                 sizeof(dpcd_data))) {
10226                 capable = (dpcd_data & DP_MSA_TIMING_PAR_IGNORED) ? true:false;
10227         }
10228
10229         return capable;
10230 }
10231
10232 static bool parse_edid_cea(struct amdgpu_dm_connector *aconnector,
10233                 uint8_t *edid_ext, int len,
10234                 struct amdgpu_hdmi_vsdb_info *vsdb_info)
10235 {
10236         int i;
10237         struct amdgpu_device *adev = drm_to_adev(aconnector->base.dev);
10238         struct dc *dc = adev->dm.dc;
10239
10240         /* send extension block to DMCU for parsing */
10241         for (i = 0; i < len; i += 8) {
10242                 bool res;
10243                 int offset;
10244
10245                 /* send 8 bytes a time */
10246                 if (!dc_edid_parser_send_cea(dc, i, len, &edid_ext[i], 8))
10247                         return false;
10248
10249                 if (i+8 == len) {
10250                         /* EDID block sent completed, expect result */
10251                         int version, min_rate, max_rate;
10252
10253                         res = dc_edid_parser_recv_amd_vsdb(dc, &version, &min_rate, &max_rate);
10254                         if (res) {
10255                                 /* amd vsdb found */
10256                                 vsdb_info->freesync_supported = 1;
10257                                 vsdb_info->amd_vsdb_version = version;
10258                                 vsdb_info->min_refresh_rate_hz = min_rate;
10259                                 vsdb_info->max_refresh_rate_hz = max_rate;
10260                                 return true;
10261                         }
10262                         /* not amd vsdb */
10263                         return false;
10264                 }
10265
10266                 /* check for ack*/
10267                 res = dc_edid_parser_recv_cea_ack(dc, &offset);
10268                 if (!res)
10269                         return false;
10270         }
10271
10272         return false;
10273 }
10274
10275 static int parse_hdmi_amd_vsdb(struct amdgpu_dm_connector *aconnector,
10276                 struct edid *edid, struct amdgpu_hdmi_vsdb_info *vsdb_info)
10277 {
10278         uint8_t *edid_ext = NULL;
10279         int i;
10280         bool valid_vsdb_found = false;
10281
10282         /*----- drm_find_cea_extension() -----*/
10283         /* No EDID or EDID extensions */
10284         if (edid == NULL || edid->extensions == 0)
10285                 return -ENODEV;
10286
10287         /* Find CEA extension */
10288         for (i = 0; i < edid->extensions; i++) {
10289                 edid_ext = (uint8_t *)edid + EDID_LENGTH * (i + 1);
10290                 if (edid_ext[0] == CEA_EXT)
10291                         break;
10292         }
10293
10294         if (i == edid->extensions)
10295                 return -ENODEV;
10296
10297         /*----- cea_db_offsets() -----*/
10298         if (edid_ext[0] != CEA_EXT)
10299                 return -ENODEV;
10300
10301         valid_vsdb_found = parse_edid_cea(aconnector, edid_ext, EDID_LENGTH, vsdb_info);
10302
10303         return valid_vsdb_found ? i : -ENODEV;
10304 }
10305
10306 void amdgpu_dm_update_freesync_caps(struct drm_connector *connector,
10307                                         struct edid *edid)
10308 {
10309         int i = 0;
10310         struct detailed_timing *timing;
10311         struct detailed_non_pixel *data;
10312         struct detailed_data_monitor_range *range;
10313         struct amdgpu_dm_connector *amdgpu_dm_connector =
10314                         to_amdgpu_dm_connector(connector);
10315         struct dm_connector_state *dm_con_state = NULL;
10316
10317         struct drm_device *dev = connector->dev;
10318         struct amdgpu_device *adev = drm_to_adev(dev);
10319         bool freesync_capable = false;
10320         struct amdgpu_hdmi_vsdb_info vsdb_info = {0};
10321
10322         if (!connector->state) {
10323                 DRM_ERROR("%s - Connector has no state", __func__);
10324                 goto update;
10325         }
10326
10327         if (!edid) {
10328                 dm_con_state = to_dm_connector_state(connector->state);
10329
10330                 amdgpu_dm_connector->min_vfreq = 0;
10331                 amdgpu_dm_connector->max_vfreq = 0;
10332                 amdgpu_dm_connector->pixel_clock_mhz = 0;
10333
10334                 goto update;
10335         }
10336
10337         dm_con_state = to_dm_connector_state(connector->state);
10338
10339         if (!amdgpu_dm_connector->dc_sink) {
10340                 DRM_ERROR("dc_sink NULL, could not add free_sync module.\n");
10341                 goto update;
10342         }
10343         if (!adev->dm.freesync_module)
10344                 goto update;
10345
10346
10347         if (amdgpu_dm_connector->dc_sink->sink_signal == SIGNAL_TYPE_DISPLAY_PORT
10348                 || amdgpu_dm_connector->dc_sink->sink_signal == SIGNAL_TYPE_EDP) {
10349                 bool edid_check_required = false;
10350
10351                 if (edid) {
10352                         edid_check_required = is_dp_capable_without_timing_msa(
10353                                                 adev->dm.dc,
10354                                                 amdgpu_dm_connector);
10355                 }
10356
10357                 if (edid_check_required == true && (edid->version > 1 ||
10358                    (edid->version == 1 && edid->revision > 1))) {
10359                         for (i = 0; i < 4; i++) {
10360
10361                                 timing  = &edid->detailed_timings[i];
10362                                 data    = &timing->data.other_data;
10363                                 range   = &data->data.range;
10364                                 /*
10365                                  * Check if monitor has continuous frequency mode
10366                                  */
10367                                 if (data->type != EDID_DETAIL_MONITOR_RANGE)
10368                                         continue;
10369                                 /*
10370                                  * Check for flag range limits only. If flag == 1 then
10371                                  * no additional timing information provided.
10372                                  * Default GTF, GTF Secondary curve and CVT are not
10373                                  * supported
10374                                  */
10375                                 if (range->flags != 1)
10376                                         continue;
10377
10378                                 amdgpu_dm_connector->min_vfreq = range->min_vfreq;
10379                                 amdgpu_dm_connector->max_vfreq = range->max_vfreq;
10380                                 amdgpu_dm_connector->pixel_clock_mhz =
10381                                         range->pixel_clock_mhz * 10;
10382
10383                                 connector->display_info.monitor_range.min_vfreq = range->min_vfreq;
10384                                 connector->display_info.monitor_range.max_vfreq = range->max_vfreq;
10385
10386                                 break;
10387                         }
10388
10389                         if (amdgpu_dm_connector->max_vfreq -
10390                             amdgpu_dm_connector->min_vfreq > 10) {
10391
10392                                 freesync_capable = true;
10393                         }
10394                 }
10395         } else if (edid && amdgpu_dm_connector->dc_sink->sink_signal == SIGNAL_TYPE_HDMI_TYPE_A) {
10396                 i = parse_hdmi_amd_vsdb(amdgpu_dm_connector, edid, &vsdb_info);
10397                 if (i >= 0 && vsdb_info.freesync_supported) {
10398                         timing  = &edid->detailed_timings[i];
10399                         data    = &timing->data.other_data;
10400
10401                         amdgpu_dm_connector->min_vfreq = vsdb_info.min_refresh_rate_hz;
10402                         amdgpu_dm_connector->max_vfreq = vsdb_info.max_refresh_rate_hz;
10403                         if (amdgpu_dm_connector->max_vfreq - amdgpu_dm_connector->min_vfreq > 10)
10404                                 freesync_capable = true;
10405
10406                         connector->display_info.monitor_range.min_vfreq = vsdb_info.min_refresh_rate_hz;
10407                         connector->display_info.monitor_range.max_vfreq = vsdb_info.max_refresh_rate_hz;
10408                 }
10409         }
10410
10411 update:
10412         if (dm_con_state)
10413                 dm_con_state->freesync_capable = freesync_capable;
10414
10415         if (connector->vrr_capable_property)
10416                 drm_connector_set_vrr_capable_property(connector,
10417                                                        freesync_capable);
10418 }
10419
10420 static void amdgpu_dm_set_psr_caps(struct dc_link *link)
10421 {
10422         uint8_t dpcd_data[EDP_PSR_RECEIVER_CAP_SIZE];
10423
10424         if (!(link->connector_signal & SIGNAL_TYPE_EDP))
10425                 return;
10426         if (link->type == dc_connection_none)
10427                 return;
10428         if (dm_helpers_dp_read_dpcd(NULL, link, DP_PSR_SUPPORT,
10429                                         dpcd_data, sizeof(dpcd_data))) {
10430                 link->dpcd_caps.psr_caps.psr_version = dpcd_data[0];
10431
10432                 if (dpcd_data[0] == 0) {
10433                         link->psr_settings.psr_version = DC_PSR_VERSION_UNSUPPORTED;
10434                         link->psr_settings.psr_feature_enabled = false;
10435                 } else {
10436                         link->psr_settings.psr_version = DC_PSR_VERSION_1;
10437                         link->psr_settings.psr_feature_enabled = true;
10438                 }
10439
10440                 DRM_INFO("PSR support:%d\n", link->psr_settings.psr_feature_enabled);
10441         }
10442 }
10443
10444 /*
10445  * amdgpu_dm_link_setup_psr() - configure psr link
10446  * @stream: stream state
10447  *
10448  * Return: true if success
10449  */
10450 static bool amdgpu_dm_link_setup_psr(struct dc_stream_state *stream)
10451 {
10452         struct dc_link *link = NULL;
10453         struct psr_config psr_config = {0};
10454         struct psr_context psr_context = {0};
10455         bool ret = false;
10456
10457         if (stream == NULL)
10458                 return false;
10459
10460         link = stream->link;
10461
10462         psr_config.psr_version = link->dpcd_caps.psr_caps.psr_version;
10463
10464         if (psr_config.psr_version > 0) {
10465                 psr_config.psr_exit_link_training_required = 0x1;
10466                 psr_config.psr_frame_capture_indication_req = 0;
10467                 psr_config.psr_rfb_setup_time = 0x37;
10468                 psr_config.psr_sdp_transmit_line_num_deadline = 0x20;
10469                 psr_config.allow_smu_optimizations = 0x0;
10470
10471                 ret = dc_link_setup_psr(link, stream, &psr_config, &psr_context);
10472
10473         }
10474         DRM_DEBUG_DRIVER("PSR link: %d\n",      link->psr_settings.psr_feature_enabled);
10475
10476         return ret;
10477 }
10478
10479 /*
10480  * amdgpu_dm_psr_enable() - enable psr f/w
10481  * @stream: stream state
10482  *
10483  * Return: true if success
10484  */
10485 bool amdgpu_dm_psr_enable(struct dc_stream_state *stream)
10486 {
10487         struct dc_link *link = stream->link;
10488         unsigned int vsync_rate_hz = 0;
10489         struct dc_static_screen_params params = {0};
10490         /* Calculate number of static frames before generating interrupt to
10491          * enter PSR.
10492          */
10493         // Init fail safe of 2 frames static
10494         unsigned int num_frames_static = 2;
10495
10496         DRM_DEBUG_DRIVER("Enabling psr...\n");
10497
10498         vsync_rate_hz = div64_u64(div64_u64((
10499                         stream->timing.pix_clk_100hz * 100),
10500                         stream->timing.v_total),
10501                         stream->timing.h_total);
10502
10503         /* Round up
10504          * Calculate number of frames such that at least 30 ms of time has
10505          * passed.
10506          */
10507         if (vsync_rate_hz != 0) {
10508                 unsigned int frame_time_microsec = 1000000 / vsync_rate_hz;
10509                 num_frames_static = (30000 / frame_time_microsec) + 1;
10510         }
10511
10512         params.triggers.cursor_update = true;
10513         params.triggers.overlay_update = true;
10514         params.triggers.surface_update = true;
10515         params.num_frames = num_frames_static;
10516
10517         dc_stream_set_static_screen_params(link->ctx->dc,
10518                                            &stream, 1,
10519                                            &params);
10520
10521         return dc_link_set_psr_allow_active(link, true, false, false);
10522 }
10523
10524 /*
10525  * amdgpu_dm_psr_disable() - disable psr f/w
10526  * @stream:  stream state
10527  *
10528  * Return: true if success
10529  */
10530 static bool amdgpu_dm_psr_disable(struct dc_stream_state *stream)
10531 {
10532
10533         DRM_DEBUG_DRIVER("Disabling psr...\n");
10534
10535         return dc_link_set_psr_allow_active(stream->link, false, true, false);
10536 }
10537
10538 /*
10539  * amdgpu_dm_psr_disable() - disable psr f/w
10540  * if psr is enabled on any stream
10541  *
10542  * Return: true if success
10543  */
10544 static bool amdgpu_dm_psr_disable_all(struct amdgpu_display_manager *dm)
10545 {
10546         DRM_DEBUG_DRIVER("Disabling psr if psr is enabled on any stream\n");
10547         return dc_set_psr_allow_active(dm->dc, false);
10548 }
10549
10550 void amdgpu_dm_trigger_timing_sync(struct drm_device *dev)
10551 {
10552         struct amdgpu_device *adev = drm_to_adev(dev);
10553         struct dc *dc = adev->dm.dc;
10554         int i;
10555
10556         mutex_lock(&adev->dm.dc_lock);
10557         if (dc->current_state) {
10558                 for (i = 0; i < dc->current_state->stream_count; ++i)
10559                         dc->current_state->streams[i]
10560                                 ->triggered_crtc_reset.enabled =
10561                                 adev->dm.force_timing_sync;
10562
10563                 dm_enable_per_frame_crtc_master_sync(dc->current_state);
10564                 dc_trigger_sync(dc, dc->current_state);
10565         }
10566         mutex_unlock(&adev->dm.dc_lock);
10567 }
10568
10569 void dm_write_reg_func(const struct dc_context *ctx, uint32_t address,
10570                        uint32_t value, const char *func_name)
10571 {
10572 #ifdef DM_CHECK_ADDR_0
10573         if (address == 0) {
10574                 DC_ERR("invalid register write. address = 0");
10575                 return;
10576         }
10577 #endif
10578         cgs_write_register(ctx->cgs_device, address, value);
10579         trace_amdgpu_dc_wreg(&ctx->perf_trace->write_count, address, value);
10580 }
10581
10582 uint32_t dm_read_reg_func(const struct dc_context *ctx, uint32_t address,
10583                           const char *func_name)
10584 {
10585         uint32_t value;
10586 #ifdef DM_CHECK_ADDR_0
10587         if (address == 0) {
10588                 DC_ERR("invalid register read; address = 0\n");
10589                 return 0;
10590         }
10591 #endif
10592
10593         if (ctx->dmub_srv &&
10594             ctx->dmub_srv->reg_helper_offload.gather_in_progress &&
10595             !ctx->dmub_srv->reg_helper_offload.should_burst_write) {
10596                 ASSERT(false);
10597                 return 0;
10598         }
10599
10600         value = cgs_read_register(ctx->cgs_device, address);
10601
10602         trace_amdgpu_dc_rreg(&ctx->perf_trace->read_count, address, value);
10603
10604         return value;
10605 }