drm/connector: Add helper to compare HDR metadata
[platform/kernel/linux-starfive.git] drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
/*
 * Copyright 2015 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: AMD
 *
 */

/* The caprices of the preprocessor require that this be declared right here */
#define CREATE_TRACE_POINTS

#include "dm_services_types.h"
#include "dc.h"
#include "dc/inc/core_types.h"
#include "dal_asic_id.h"
#include "dmub/dmub_srv.h"
#include "dc/inc/hw/dmcu.h"
#include "dc/inc/hw/abm.h"
#include "dc/dc_dmub_srv.h"
#include "dc/dc_edid_parser.h"
#include "amdgpu_dm_trace.h"

#include "vid.h"
#include "amdgpu.h"
#include "amdgpu_display.h"
#include "amdgpu_ucode.h"
#include "atom.h"
#include "amdgpu_dm.h"
#ifdef CONFIG_DRM_AMD_DC_HDCP
#include "amdgpu_dm_hdcp.h"
#include <drm/drm_hdcp.h>
#endif
#include "amdgpu_pm.h"

#include "amd_shared.h"
#include "amdgpu_dm_irq.h"
#include "dm_helpers.h"
#include "amdgpu_dm_mst_types.h"
#if defined(CONFIG_DEBUG_FS)
#include "amdgpu_dm_debugfs.h"
#endif

#include "ivsrcid/ivsrcid_vislands30.h"

#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/types.h>
#include <linux/pm_runtime.h>
#include <linux/pci.h>
#include <linux/firmware.h>
#include <linux/component.h>

#include <drm/drm_atomic.h>
#include <drm/drm_atomic_uapi.h>
#include <drm/drm_atomic_helper.h>
#include <drm/drm_dp_mst_helper.h>
#include <drm/drm_fb_helper.h>
#include <drm/drm_fourcc.h>
#include <drm/drm_edid.h>
#include <drm/drm_vblank.h>
#include <drm/drm_audio_component.h>

#if defined(CONFIG_DRM_AMD_DC_DCN)
#include "ivsrcid/dcn/irqsrcs_dcn_1_0.h"

#include "dcn/dcn_1_0_offset.h"
#include "dcn/dcn_1_0_sh_mask.h"
#include "soc15_hw_ip.h"
#include "vega10_ip_offset.h"

#include "soc15_common.h"
#endif

#include "modules/inc/mod_freesync.h"
#include "modules/power/power_helpers.h"
#include "modules/inc/mod_info_packet.h"

#define FIRMWARE_RENOIR_DMUB "amdgpu/renoir_dmcub.bin"
MODULE_FIRMWARE(FIRMWARE_RENOIR_DMUB);
#define FIRMWARE_SIENNA_CICHLID_DMUB "amdgpu/sienna_cichlid_dmcub.bin"
MODULE_FIRMWARE(FIRMWARE_SIENNA_CICHLID_DMUB);
#define FIRMWARE_NAVY_FLOUNDER_DMUB "amdgpu/navy_flounder_dmcub.bin"
MODULE_FIRMWARE(FIRMWARE_NAVY_FLOUNDER_DMUB);
#define FIRMWARE_GREEN_SARDINE_DMUB "amdgpu/green_sardine_dmcub.bin"
MODULE_FIRMWARE(FIRMWARE_GREEN_SARDINE_DMUB);
#define FIRMWARE_VANGOGH_DMUB "amdgpu/vangogh_dmcub.bin"
MODULE_FIRMWARE(FIRMWARE_VANGOGH_DMUB);
#define FIRMWARE_DIMGREY_CAVEFISH_DMUB "amdgpu/dimgrey_cavefish_dmcub.bin"
MODULE_FIRMWARE(FIRMWARE_DIMGREY_CAVEFISH_DMUB);

#define FIRMWARE_RAVEN_DMCU             "amdgpu/raven_dmcu.bin"
MODULE_FIRMWARE(FIRMWARE_RAVEN_DMCU);

#define FIRMWARE_NAVI12_DMCU            "amdgpu/navi12_dmcu.bin"
MODULE_FIRMWARE(FIRMWARE_NAVI12_DMCU);

/* Number of bytes in PSP header for firmware. */
#define PSP_HEADER_BYTES 0x100

/* Number of bytes in PSP footer for firmware. */
#define PSP_FOOTER_BYTES 0x100

/**
 * DOC: overview
 *
 * The AMDgpu display manager, **amdgpu_dm** (or even simpler,
 * **dm**) sits between DRM and DC. It acts as a liaison, converting DRM
 * requests into DC requests, and DC responses into DRM responses.
 *
 * The root control structure is &struct amdgpu_display_manager.
 */
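
/*
 * A rough sketch of the flow through this file (illustrative, not
 * exhaustive):
 *
 *   userspace -> DRM atomic ioctl -> amdgpu_dm_atomic_check()
 *             -> amdgpu_dm_atomic_commit_tail() -> DC streams/planes/links
 *   DC IRQs   -> dm_crtc_high_irq() / dm_pflip_high_irq() -> DRM vblank and
 *             flip-completion events back to userspace
 */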

/* basic init/fini API */
static int amdgpu_dm_init(struct amdgpu_device *adev);
static void amdgpu_dm_fini(struct amdgpu_device *adev);
static bool is_freesync_video_mode(const struct drm_display_mode *mode, struct amdgpu_dm_connector *aconnector);

static enum drm_mode_subconnector get_subconnector_type(struct dc_link *link)
{
        switch (link->dpcd_caps.dongle_type) {
        case DISPLAY_DONGLE_NONE:
                return DRM_MODE_SUBCONNECTOR_Native;
        case DISPLAY_DONGLE_DP_VGA_CONVERTER:
                return DRM_MODE_SUBCONNECTOR_VGA;
        case DISPLAY_DONGLE_DP_DVI_CONVERTER:
        case DISPLAY_DONGLE_DP_DVI_DONGLE:
                return DRM_MODE_SUBCONNECTOR_DVID;
        case DISPLAY_DONGLE_DP_HDMI_CONVERTER:
        case DISPLAY_DONGLE_DP_HDMI_DONGLE:
                return DRM_MODE_SUBCONNECTOR_HDMIA;
        case DISPLAY_DONGLE_DP_HDMI_MISMATCHED_DONGLE:
        default:
                return DRM_MODE_SUBCONNECTOR_Unknown;
        }
}

static void update_subconnector_property(struct amdgpu_dm_connector *aconnector)
{
        struct dc_link *link = aconnector->dc_link;
        struct drm_connector *connector = &aconnector->base;
        enum drm_mode_subconnector subconnector = DRM_MODE_SUBCONNECTOR_Unknown;

        if (connector->connector_type != DRM_MODE_CONNECTOR_DisplayPort)
                return;

        if (aconnector->dc_sink)
                subconnector = get_subconnector_type(link);

        drm_object_property_set_value(&connector->base,
                        connector->dev->mode_config.dp_subconnector_property,
                        subconnector);
}

/*
 * Initializes drm_device display related structures, based on the information
 * provided by DAL. The DRM structures are: drm_crtc, drm_connector,
 * drm_encoder and drm_mode_config.
 *
 * Returns 0 on success
 */
static int amdgpu_dm_initialize_drm_device(struct amdgpu_device *adev);
/* removes and deallocates the drm structures, created by the above function */
static void amdgpu_dm_destroy_drm_device(struct amdgpu_display_manager *dm);

static int amdgpu_dm_plane_init(struct amdgpu_display_manager *dm,
                                struct drm_plane *plane,
                                unsigned long possible_crtcs,
                                const struct dc_plane_cap *plane_cap);
static int amdgpu_dm_crtc_init(struct amdgpu_display_manager *dm,
                               struct drm_plane *plane,
                               uint32_t link_index);
static int amdgpu_dm_connector_init(struct amdgpu_display_manager *dm,
                                    struct amdgpu_dm_connector *amdgpu_dm_connector,
                                    uint32_t link_index,
                                    struct amdgpu_encoder *amdgpu_encoder);
static int amdgpu_dm_encoder_init(struct drm_device *dev,
                                  struct amdgpu_encoder *aencoder,
                                  uint32_t link_index);

static int amdgpu_dm_connector_get_modes(struct drm_connector *connector);

static void amdgpu_dm_atomic_commit_tail(struct drm_atomic_state *state);

static int amdgpu_dm_atomic_check(struct drm_device *dev,
                                  struct drm_atomic_state *state);

static void handle_cursor_update(struct drm_plane *plane,
                                 struct drm_plane_state *old_plane_state);

static void amdgpu_dm_set_psr_caps(struct dc_link *link);
static bool amdgpu_dm_psr_enable(struct dc_stream_state *stream);
static bool amdgpu_dm_link_setup_psr(struct dc_stream_state *stream);
static bool amdgpu_dm_psr_disable(struct dc_stream_state *stream);
static bool amdgpu_dm_psr_disable_all(struct amdgpu_display_manager *dm);

static const struct drm_format_info *
amd_get_format_info(const struct drm_mode_fb_cmd2 *cmd);

static bool
is_timing_unchanged_for_freesync(struct drm_crtc_state *old_crtc_state,
                                 struct drm_crtc_state *new_crtc_state);

/**
 * dm_vblank_get_counter() - Get counter for number of vertical blanks
 * @adev: desired amdgpu device
 * @crtc: which CRTC to get the counter from
 *
 * Return: counter for vertical blanks
 */
static u32 dm_vblank_get_counter(struct amdgpu_device *adev, int crtc)
{
        if (crtc >= adev->mode_info.num_crtc)
                return 0;
        else {
                struct amdgpu_crtc *acrtc = adev->mode_info.crtcs[crtc];

                if (acrtc->dm_irq_params.stream == NULL) {
                        DRM_ERROR("dc_stream_state is NULL for crtc '%d'!\n",
                                  crtc);
                        return 0;
                }

                return dc_stream_get_vblank_counter(acrtc->dm_irq_params.stream);
        }
}
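
/*
 * Note: dm is expected to expose the helper above through amdgpu's display
 * hooks (as the ->vblank_get_counter callback), so callers normally reach it
 * via amdgpu_get_vblank_counter_kms() rather than directly; see its use for
 * flip throttling in dm_pflip_high_irq() below.
 */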

static int dm_crtc_get_scanoutpos(struct amdgpu_device *adev, int crtc,
                                  u32 *vbl, u32 *position)
{
        uint32_t v_blank_start, v_blank_end, h_position, v_position;

        if ((crtc < 0) || (crtc >= adev->mode_info.num_crtc))
                return -EINVAL;
        else {
                struct amdgpu_crtc *acrtc = adev->mode_info.crtcs[crtc];

                if (acrtc->dm_irq_params.stream == NULL) {
                        DRM_ERROR("dc_stream_state is NULL for crtc '%d'!\n",
                                  crtc);
                        return 0;
                }

                /*
                 * TODO rework base driver to use values directly.
                 * for now parse it back into reg-format
                 */
                dc_stream_get_scanoutpos(acrtc->dm_irq_params.stream,
                                         &v_blank_start,
                                         &v_blank_end,
                                         &h_position,
                                         &v_position);

                *position = v_position | (h_position << 16);
                *vbl = v_blank_start | (v_blank_end << 16);
        }

        return 0;
}
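
/*
 * Illustrative example: a caller can unpack the values packed by
 * dm_crtc_get_scanoutpos() as
 *
 *   u32 h_position = position >> 16, v_position = position & 0xffff;
 *   u32 v_blank_end = vbl >> 16, v_blank_start = vbl & 0xffff;
 *
 * mirroring the reg-format packing above.
 */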

static bool dm_is_idle(void *handle)
{
        /* XXX todo */
        return true;
}

static int dm_wait_for_idle(void *handle)
{
        /* XXX todo */
        return 0;
}

static bool dm_check_soft_reset(void *handle)
{
        return false;
}

static int dm_soft_reset(void *handle)
{
        /* XXX todo */
        return 0;
}

static struct amdgpu_crtc *
get_crtc_by_otg_inst(struct amdgpu_device *adev,
                     int otg_inst)
{
        struct drm_device *dev = adev_to_drm(adev);
        struct drm_crtc *crtc;
        struct amdgpu_crtc *amdgpu_crtc;

        if (otg_inst == -1) {
                WARN_ON(1);
                return adev->mode_info.crtcs[0];
        }

        list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
                amdgpu_crtc = to_amdgpu_crtc(crtc);

                if (amdgpu_crtc->otg_inst == otg_inst)
                        return amdgpu_crtc;
        }

        return NULL;
}
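
/*
 * The high-IRQ handlers below recover their CRTC with this helper from the
 * offset of the raised IRQ source within its type, e.g.
 *
 *   acrtc = get_crtc_by_otg_inst(adev, irq_params->irq_src - IRQ_TYPE_VBLANK);
 *
 * which relies on the sources of one IRQ type being laid out consecutively
 * per OTG instance.
 */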

static inline bool amdgpu_dm_vrr_active_irq(struct amdgpu_crtc *acrtc)
{
        return acrtc->dm_irq_params.freesync_config.state ==
                       VRR_STATE_ACTIVE_VARIABLE ||
               acrtc->dm_irq_params.freesync_config.state ==
                       VRR_STATE_ACTIVE_FIXED;
}

static inline bool amdgpu_dm_vrr_active(struct dm_crtc_state *dm_state)
{
        return dm_state->freesync_config.state == VRR_STATE_ACTIVE_VARIABLE ||
               dm_state->freesync_config.state == VRR_STATE_ACTIVE_FIXED;
}

static inline bool is_dc_timing_adjust_needed(struct dm_crtc_state *old_state,
                                              struct dm_crtc_state *new_state)
{
        if (new_state->freesync_config.state == VRR_STATE_ACTIVE_FIXED)
                return true;
        else if (amdgpu_dm_vrr_active(old_state) != amdgpu_dm_vrr_active(new_state))
                return true;
        else
                return false;
}

/**
 * dm_pflip_high_irq() - Handle pageflip interrupt
 * @interrupt_params: common IRQ parameters, used to look up the CRTC
 *
 * Handles the pageflip interrupt by notifying all interested parties
 * that the pageflip has been completed.
 */
static void dm_pflip_high_irq(void *interrupt_params)
{
        struct amdgpu_crtc *amdgpu_crtc;
        struct common_irq_params *irq_params = interrupt_params;
        struct amdgpu_device *adev = irq_params->adev;
        unsigned long flags;
        struct drm_pending_vblank_event *e;
        uint32_t vpos, hpos, v_blank_start, v_blank_end;
        bool vrr_active;

        amdgpu_crtc = get_crtc_by_otg_inst(adev, irq_params->irq_src - IRQ_TYPE_PFLIP);

        /* IRQ could occur when in initial stage */
        /* TODO work and BO cleanup */
        if (amdgpu_crtc == NULL) {
                DC_LOG_PFLIP("CRTC is null, returning.\n");
                return;
        }

        spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);

        if (amdgpu_crtc->pflip_status != AMDGPU_FLIP_SUBMITTED) {
                DC_LOG_PFLIP("amdgpu_crtc->pflip_status = %d != AMDGPU_FLIP_SUBMITTED(%d) on crtc:%d[%p]\n",
                             amdgpu_crtc->pflip_status,
                             AMDGPU_FLIP_SUBMITTED,
                             amdgpu_crtc->crtc_id,
                             amdgpu_crtc);
                spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
                return;
        }

        /* page flip completed. */
        e = amdgpu_crtc->event;
        amdgpu_crtc->event = NULL;

        if (!e)
                WARN_ON(1);

        vrr_active = amdgpu_dm_vrr_active_irq(amdgpu_crtc);

        /* Fixed refresh rate, or VRR scanout position outside front-porch? */
        if (!vrr_active ||
            !dc_stream_get_scanoutpos(amdgpu_crtc->dm_irq_params.stream, &v_blank_start,
                                      &v_blank_end, &hpos, &vpos) ||
            (vpos < v_blank_start)) {
                /* Update to correct count and vblank timestamp if racing with
                 * vblank irq. This also updates to the correct vblank timestamp
                 * even in VRR mode, as scanout is past the front-porch atm.
                 */
                drm_crtc_accurate_vblank_count(&amdgpu_crtc->base);

                /* Wake up userspace by sending the pageflip event with proper
                 * count and timestamp of vblank of flip completion.
                 */
                if (e) {
                        drm_crtc_send_vblank_event(&amdgpu_crtc->base, e);

                        /* Event sent, so done with vblank for this flip */
                        drm_crtc_vblank_put(&amdgpu_crtc->base);
                }
        } else if (e) {
                /* VRR active and inside front-porch: vblank count and
                 * timestamp for pageflip event will only be up to date after
                 * drm_crtc_handle_vblank() has been executed from late vblank
                 * irq handler after start of back-porch (vline 0). We queue the
                 * pageflip event for send-out by drm_crtc_handle_vblank() with
                 * updated timestamp and count, once it runs after us.
                 *
                 * We need to open-code this instead of using the helper
                 * drm_crtc_arm_vblank_event(), as that helper would
                 * call drm_crtc_accurate_vblank_count(), which we must
                 * not call in VRR mode while we are in front-porch!
                 */

                /* sequence will be replaced by real count during send-out. */
                e->sequence = drm_crtc_vblank_count(&amdgpu_crtc->base);
                e->pipe = amdgpu_crtc->crtc_id;

                list_add_tail(&e->base.link, &adev_to_drm(adev)->vblank_event_list);
                e = NULL;
        }

        /* Keep track of vblank of this flip for flip throttling. We use the
         * cooked hw counter, as that one is incremented at the start of this
         * vblank of pageflip completion, so last_flip_vblank is the forbidden
         * count for queueing new pageflips if vsync + VRR is enabled.
         */
        amdgpu_crtc->dm_irq_params.last_flip_vblank =
                amdgpu_get_vblank_counter_kms(&amdgpu_crtc->base);

        amdgpu_crtc->pflip_status = AMDGPU_FLIP_NONE;
        spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);

        DC_LOG_PFLIP("crtc:%d[%p], pflip_stat:AMDGPU_FLIP_NONE, vrr[%d]-fp %d\n",
                     amdgpu_crtc->crtc_id, amdgpu_crtc,
                     vrr_active, (int) !e);
}

static void dm_vupdate_high_irq(void *interrupt_params)
{
        struct common_irq_params *irq_params = interrupt_params;
        struct amdgpu_device *adev = irq_params->adev;
        struct amdgpu_crtc *acrtc;
        struct drm_device *drm_dev;
        struct drm_vblank_crtc *vblank;
        ktime_t frame_duration_ns, previous_timestamp;
        unsigned long flags;
        int vrr_active;

        acrtc = get_crtc_by_otg_inst(adev, irq_params->irq_src - IRQ_TYPE_VUPDATE);

        if (acrtc) {
                vrr_active = amdgpu_dm_vrr_active_irq(acrtc);
                drm_dev = acrtc->base.dev;
                vblank = &drm_dev->vblank[acrtc->base.index];
                previous_timestamp = atomic64_read(&irq_params->previous_timestamp);
                frame_duration_ns = vblank->time - previous_timestamp;

                if (frame_duration_ns > 0) {
                        trace_amdgpu_refresh_rate_track(acrtc->base.index,
                                                frame_duration_ns,
                                                ktime_divns(NSEC_PER_SEC, frame_duration_ns));
                        atomic64_set(&irq_params->previous_timestamp, vblank->time);
                }

                DC_LOG_VBLANK("crtc:%d, vupdate-vrr:%d\n",
                              acrtc->crtc_id,
                              vrr_active);

                /* Core vblank handling is done here after end of front-porch in
                 * vrr mode, as vblank timestamping will give valid results
                 * now that it runs after the front-porch. This will also deliver
                 * any page-flip completion events that were queued to us
                 * if a pageflip happened inside the front-porch.
                 */
                if (vrr_active) {
                        drm_crtc_handle_vblank(&acrtc->base);

                        /* BTR processing for pre-DCE12 ASICs */
                        if (acrtc->dm_irq_params.stream &&
                            adev->family < AMDGPU_FAMILY_AI) {
                                spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);
                                mod_freesync_handle_v_update(
                                    adev->dm.freesync_module,
                                    acrtc->dm_irq_params.stream,
                                    &acrtc->dm_irq_params.vrr_params);

                                dc_stream_adjust_vmin_vmax(
                                    adev->dm.dc,
                                    acrtc->dm_irq_params.stream,
                                    &acrtc->dm_irq_params.vrr_params.adjust);
                                spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
                        }
                }
        }
}

/**
 * dm_crtc_high_irq() - Handles CRTC interrupt
 * @interrupt_params: used for determining the CRTC instance
 *
 * Handles the CRTC/VSYNC interrupt by notifying DRM's VBLANK
 * event handler.
 */
static void dm_crtc_high_irq(void *interrupt_params)
{
        struct common_irq_params *irq_params = interrupt_params;
        struct amdgpu_device *adev = irq_params->adev;
        struct amdgpu_crtc *acrtc;
        unsigned long flags;
        int vrr_active;

        acrtc = get_crtc_by_otg_inst(adev, irq_params->irq_src - IRQ_TYPE_VBLANK);
        if (!acrtc)
                return;

        vrr_active = amdgpu_dm_vrr_active_irq(acrtc);

        DC_LOG_VBLANK("crtc:%d, vupdate-vrr:%d, planes:%d\n", acrtc->crtc_id,
                      vrr_active, acrtc->dm_irq_params.active_planes);

        /*
         * Core vblank handling at start of front-porch is only possible
         * in non-vrr mode, as only there vblank timestamping will give
         * valid results while done in front-porch. Otherwise defer it
         * to dm_vupdate_high_irq after end of front-porch.
         */
        if (!vrr_active)
                drm_crtc_handle_vblank(&acrtc->base);

        /*
         * The following must happen at start of vblank, for crc
         * computation and below-the-range btr support in vrr mode.
         */
        amdgpu_dm_crtc_handle_crc_irq(&acrtc->base);

        /* BTR updates need to happen before VUPDATE on Vega and above. */
        if (adev->family < AMDGPU_FAMILY_AI)
                return;

        spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);

        if (acrtc->dm_irq_params.stream &&
            acrtc->dm_irq_params.vrr_params.supported &&
            acrtc->dm_irq_params.freesync_config.state ==
                    VRR_STATE_ACTIVE_VARIABLE) {
                mod_freesync_handle_v_update(adev->dm.freesync_module,
                                             acrtc->dm_irq_params.stream,
                                             &acrtc->dm_irq_params.vrr_params);

                dc_stream_adjust_vmin_vmax(adev->dm.dc, acrtc->dm_irq_params.stream,
                                           &acrtc->dm_irq_params.vrr_params.adjust);
        }

        /*
         * If there aren't any active_planes then DCH HUBP may be clock-gated.
         * In that case, pageflip completion interrupts won't fire and pageflip
         * completion events won't get delivered. Prevent this by sending
         * pending pageflip events from here if a flip is still pending.
         *
         * If any planes are enabled, use dm_pflip_high_irq() instead, to
         * avoid race conditions between flip programming and completion,
         * which could cause too early flip completion events.
         */
        if (adev->family >= AMDGPU_FAMILY_RV &&
            acrtc->pflip_status == AMDGPU_FLIP_SUBMITTED &&
            acrtc->dm_irq_params.active_planes == 0) {
                if (acrtc->event) {
                        drm_crtc_send_vblank_event(&acrtc->base, acrtc->event);
                        acrtc->event = NULL;
                        drm_crtc_vblank_put(&acrtc->base);
                }
                acrtc->pflip_status = AMDGPU_FLIP_NONE;
        }

        spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
}

#if defined(CONFIG_DRM_AMD_DC_DCN)
#if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
/**
 * dm_dcn_vertical_interrupt0_high_irq() - Handles OTG Vertical interrupt0 for
 * DCN generation ASICs
 * @interrupt_params: interrupt parameters
 *
 * Used to set crc window/read out crc value at vertical line 0 position
 */
static void dm_dcn_vertical_interrupt0_high_irq(void *interrupt_params)
{
        struct common_irq_params *irq_params = interrupt_params;
        struct amdgpu_device *adev = irq_params->adev;
        struct amdgpu_crtc *acrtc;

        acrtc = get_crtc_by_otg_inst(adev, irq_params->irq_src - IRQ_TYPE_VLINE0);

        if (!acrtc)
                return;

        amdgpu_dm_crtc_handle_crc_window_irq(&acrtc->base);
}
#endif
#endif

static int dm_set_clockgating_state(void *handle,
                  enum amd_clockgating_state state)
{
        return 0;
}

static int dm_set_powergating_state(void *handle,
                  enum amd_powergating_state state)
{
        return 0;
}

/* Prototypes of private functions */
static int dm_early_init(void *handle);

/* Allocate memory for FBC compressed data */
static void amdgpu_dm_fbc_init(struct drm_connector *connector)
{
        struct drm_device *dev = connector->dev;
        struct amdgpu_device *adev = drm_to_adev(dev);
        struct dm_compressor_info *compressor = &adev->dm.compressor;
        struct amdgpu_dm_connector *aconn = to_amdgpu_dm_connector(connector);
        struct drm_display_mode *mode;
        unsigned long max_size = 0;

        if (adev->dm.dc->fbc_compressor == NULL)
                return;

        if (aconn->dc_link->connector_signal != SIGNAL_TYPE_EDP)
                return;

        if (compressor->bo_ptr)
                return;

        list_for_each_entry(mode, &connector->modes, head) {
                if (max_size < mode->htotal * mode->vtotal)
                        max_size = mode->htotal * mode->vtotal;
        }
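
        /*
         * max_size above is in pixels; the allocation below multiplies by 4,
         * which appears to size the compressor buffer for a worst case of
         * 4 bytes per pixel (32bpp).
         */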

        if (max_size) {
                int r = amdgpu_bo_create_kernel(adev, max_size * 4, PAGE_SIZE,
                            AMDGPU_GEM_DOMAIN_GTT, &compressor->bo_ptr,
                            &compressor->gpu_addr, &compressor->cpu_addr);

                if (r) {
                        DRM_ERROR("DM: Failed to initialize FBC\n");
                } else {
                        adev->dm.dc->ctx->fbc_gpu_addr = compressor->gpu_addr;
                        DRM_INFO("DM: FBC alloc %lu\n", max_size * 4);
                }
        }
}

static int amdgpu_dm_audio_component_get_eld(struct device *kdev, int port,
                                             int pipe, bool *enabled,
                                             unsigned char *buf, int max_bytes)
{
        struct drm_device *dev = dev_get_drvdata(kdev);
        struct amdgpu_device *adev = drm_to_adev(dev);
        struct drm_connector *connector;
        struct drm_connector_list_iter conn_iter;
        struct amdgpu_dm_connector *aconnector;
        int ret = 0;

        *enabled = false;

        mutex_lock(&adev->dm.audio_lock);

        drm_connector_list_iter_begin(dev, &conn_iter);
        drm_for_each_connector_iter(connector, &conn_iter) {
                aconnector = to_amdgpu_dm_connector(connector);
                if (aconnector->audio_inst != port)
                        continue;

                *enabled = true;
                ret = drm_eld_size(connector->eld);
                memcpy(buf, connector->eld, min(max_bytes, ret));

                break;
        }
        drm_connector_list_iter_end(&conn_iter);

        mutex_unlock(&adev->dm.audio_lock);

        DRM_DEBUG_KMS("Get ELD : idx=%d ret=%d en=%d\n", port, ret, *enabled);

        return ret;
}

static const struct drm_audio_component_ops amdgpu_dm_audio_component_ops = {
        .get_eld = amdgpu_dm_audio_component_get_eld,
};

static int amdgpu_dm_audio_component_bind(struct device *kdev,
                                          struct device *hda_kdev, void *data)
{
        struct drm_device *dev = dev_get_drvdata(kdev);
        struct amdgpu_device *adev = drm_to_adev(dev);
        struct drm_audio_component *acomp = data;

        acomp->ops = &amdgpu_dm_audio_component_ops;
        acomp->dev = kdev;
        adev->dm.audio_component = acomp;

        return 0;
}

static void amdgpu_dm_audio_component_unbind(struct device *kdev,
                                             struct device *hda_kdev, void *data)
{
        struct drm_device *dev = dev_get_drvdata(kdev);
        struct amdgpu_device *adev = drm_to_adev(dev);
        struct drm_audio_component *acomp = data;

        acomp->ops = NULL;
        acomp->dev = NULL;
        adev->dm.audio_component = NULL;
}

static const struct component_ops amdgpu_dm_audio_component_bind_ops = {
        .bind   = amdgpu_dm_audio_component_bind,
        .unbind = amdgpu_dm_audio_component_unbind,
};
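
/*
 * These component ops pair the display driver with the HDA audio driver via
 * the drm_audio_component interface: once bound, the audio side reaches back
 * through acomp->ops (i.e. amdgpu_dm_audio_component_get_eld() above) to
 * fetch the ELD of a connected display.
 */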

static int amdgpu_dm_audio_init(struct amdgpu_device *adev)
{
        int i, ret;

        if (!amdgpu_audio)
                return 0;

        adev->mode_info.audio.enabled = true;

        adev->mode_info.audio.num_pins = adev->dm.dc->res_pool->audio_count;

        for (i = 0; i < adev->mode_info.audio.num_pins; i++) {
                adev->mode_info.audio.pin[i].channels = -1;
                adev->mode_info.audio.pin[i].rate = -1;
                adev->mode_info.audio.pin[i].bits_per_sample = -1;
                adev->mode_info.audio.pin[i].status_bits = 0;
                adev->mode_info.audio.pin[i].category_code = 0;
                adev->mode_info.audio.pin[i].connected = false;
                adev->mode_info.audio.pin[i].id =
                        adev->dm.dc->res_pool->audios[i]->inst;
                adev->mode_info.audio.pin[i].offset = 0;
        }

        ret = component_add(adev->dev, &amdgpu_dm_audio_component_bind_ops);
        if (ret < 0)
                return ret;

        adev->dm.audio_registered = true;

        return 0;
}

static void amdgpu_dm_audio_fini(struct amdgpu_device *adev)
{
        if (!amdgpu_audio)
                return;

        if (!adev->mode_info.audio.enabled)
                return;

        if (adev->dm.audio_registered) {
                component_del(adev->dev, &amdgpu_dm_audio_component_bind_ops);
                adev->dm.audio_registered = false;
        }

        /* TODO: Disable audio? */

        adev->mode_info.audio.enabled = false;
}

static void amdgpu_dm_audio_eld_notify(struct amdgpu_device *adev, int pin)
{
        struct drm_audio_component *acomp = adev->dm.audio_component;

        if (acomp && acomp->audio_ops && acomp->audio_ops->pin_eld_notify) {
                DRM_DEBUG_KMS("Notify ELD: %d\n", pin);

                acomp->audio_ops->pin_eld_notify(acomp->audio_ops->audio_ptr,
                                                 pin, -1);
        }
}

static int dm_dmub_hw_init(struct amdgpu_device *adev)
{
        const struct dmcub_firmware_header_v1_0 *hdr;
        struct dmub_srv *dmub_srv = adev->dm.dmub_srv;
        struct dmub_srv_fb_info *fb_info = adev->dm.dmub_fb_info;
        const struct firmware *dmub_fw = adev->dm.dmub_fw;
        struct dmcu *dmcu = adev->dm.dc->res_pool->dmcu;
        struct abm *abm = adev->dm.dc->res_pool->abm;
        struct dmub_srv_hw_params hw_params;
        enum dmub_status status;
        const unsigned char *fw_inst_const, *fw_bss_data;
        uint32_t i, fw_inst_const_size, fw_bss_data_size;
        bool has_hw_support;

        if (!dmub_srv)
                /* DMUB isn't supported on the ASIC. */
                return 0;

        if (!fb_info) {
                DRM_ERROR("No framebuffer info for DMUB service.\n");
                return -EINVAL;
        }

        if (!dmub_fw) {
                /* Firmware required for DMUB support. */
                DRM_ERROR("No firmware provided for DMUB.\n");
                return -EINVAL;
        }

        status = dmub_srv_has_hw_support(dmub_srv, &has_hw_support);
        if (status != DMUB_STATUS_OK) {
                DRM_ERROR("Error checking HW support for DMUB: %d\n", status);
                return -EINVAL;
        }

        if (!has_hw_support) {
                DRM_INFO("DMUB unsupported on ASIC\n");
                return 0;
        }

        hdr = (const struct dmcub_firmware_header_v1_0 *)dmub_fw->data;

        fw_inst_const = dmub_fw->data +
                        le32_to_cpu(hdr->header.ucode_array_offset_bytes) +
                        PSP_HEADER_BYTES;

        fw_bss_data = dmub_fw->data +
                      le32_to_cpu(hdr->header.ucode_array_offset_bytes) +
                      le32_to_cpu(hdr->inst_const_bytes);

        /* Copy firmware and bios info into FB memory. */
        fw_inst_const_size = le32_to_cpu(hdr->inst_const_bytes) -
                             PSP_HEADER_BYTES - PSP_FOOTER_BYTES;

        fw_bss_data_size = le32_to_cpu(hdr->bss_data_bytes);

        /* if adev->firmware.load_type == AMDGPU_FW_LOAD_PSP,
         * amdgpu_ucode_init_single_fw will load dmub firmware
         * fw_inst_const part to cw0; otherwise, the firmware back door load
         * will be done by dm_dmub_hw_init
         */
        if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP) {
                memcpy(fb_info->fb[DMUB_WINDOW_0_INST_CONST].cpu_addr, fw_inst_const,
                       fw_inst_const_size);
        }

        if (fw_bss_data_size)
                memcpy(fb_info->fb[DMUB_WINDOW_2_BSS_DATA].cpu_addr,
                       fw_bss_data, fw_bss_data_size);

        /* Copy firmware bios info into FB memory. */
        memcpy(fb_info->fb[DMUB_WINDOW_3_VBIOS].cpu_addr, adev->bios,
               adev->bios_size);

        /* Reset regions that need to be reset. */
        memset(fb_info->fb[DMUB_WINDOW_4_MAILBOX].cpu_addr, 0,
               fb_info->fb[DMUB_WINDOW_4_MAILBOX].size);

        memset(fb_info->fb[DMUB_WINDOW_5_TRACEBUFF].cpu_addr, 0,
               fb_info->fb[DMUB_WINDOW_5_TRACEBUFF].size);

        memset(fb_info->fb[DMUB_WINDOW_6_FW_STATE].cpu_addr, 0,
               fb_info->fb[DMUB_WINDOW_6_FW_STATE].size);

        /* Initialize hardware. */
        memset(&hw_params, 0, sizeof(hw_params));
        hw_params.fb_base = adev->gmc.fb_start;
        hw_params.fb_offset = adev->gmc.aper_base;

        /* backdoor load firmware and trigger dmub running */
        if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP)
                hw_params.load_inst_const = true;

        if (dmcu)
                hw_params.psp_version = dmcu->psp_version;

        for (i = 0; i < fb_info->num_fb; ++i)
                hw_params.fb[i] = &fb_info->fb[i];

        status = dmub_srv_hw_init(dmub_srv, &hw_params);
        if (status != DMUB_STATUS_OK) {
                DRM_ERROR("Error initializing DMUB HW: %d\n", status);
                return -EINVAL;
        }

        /* Wait for firmware load to finish. */
        status = dmub_srv_wait_for_auto_load(dmub_srv, 100000);
        if (status != DMUB_STATUS_OK)
                DRM_WARN("Wait for DMUB auto-load failed: %d\n", status);

        /* Init DMCU and ABM if available. */
        if (dmcu && abm) {
                dmcu->funcs->dmcu_init(dmcu);
                abm->dmcu_is_running = dmcu->funcs->is_dmcu_initialized(dmcu);
        }

        adev->dm.dc->ctx->dmub_srv = dc_dmub_srv_create(adev->dm.dc, dmub_srv);
        if (!adev->dm.dc->ctx->dmub_srv) {
                DRM_ERROR("Couldn't allocate DC DMUB server!\n");
                return -ENOMEM;
        }

        DRM_INFO("DMUB hardware initialized: version=0x%08X\n",
                 adev->dm.dmcub_fw_version);

        return 0;
}

#if defined(CONFIG_DRM_AMD_DC_DCN)
#define DMUB_TRACE_MAX_READ 64
static void dm_dmub_trace_high_irq(void *interrupt_params)
{
        struct common_irq_params *irq_params = interrupt_params;
        struct amdgpu_device *adev = irq_params->adev;
        struct amdgpu_display_manager *dm = &adev->dm;
        struct dmcub_trace_buf_entry entry = { 0 };
        uint32_t count = 0;

        do {
                if (!dc_dmub_srv_get_dmub_outbox0_msg(dm->dc, &entry))
                        break;

                trace_amdgpu_dmub_trace_high_irq(entry.trace_code, entry.tick_count,
                                                 entry.param0, entry.param1);

                DRM_DEBUG_DRIVER("trace_code:%u, tick_count:%u, param0:%u, param1:%u\n",
                                 entry.trace_code, entry.tick_count, entry.param0, entry.param1);

                count++;

        } while (count <= DMUB_TRACE_MAX_READ);

        ASSERT(count <= DMUB_TRACE_MAX_READ);
}

static void mmhub_read_system_context(struct amdgpu_device *adev, struct dc_phy_addr_space_config *pa_config)
{
        uint64_t pt_base;
        uint32_t logical_addr_low;
        uint32_t logical_addr_high;
        uint32_t agp_base, agp_bot, agp_top;
        PHYSICAL_ADDRESS_LOC page_table_start, page_table_end, page_table_base;

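        /*
         * Granularity note (inferred from the shifts used below): the system
         * aperture bounds are expressed in 256 KiB units (>> 18), the AGP
         * window in 16 MiB units (>> 24), and the GART page-table addresses
         * are split into low/high register halves around bit 44.
         */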
        logical_addr_low = min(adev->gmc.fb_start, adev->gmc.agp_start) >> 18;
        pt_base = amdgpu_gmc_pd_addr(adev->gart.bo);

        if (adev->apu_flags & AMD_APU_IS_RAVEN2)
                /*
                 * Raven2 has a HW issue where it is unable to use the vram
                 * which is out of MC_VM_SYSTEM_APERTURE_HIGH_ADDR. So here is
                 * the workaround that increases the system aperture high
                 * address (add 1) to get rid of the VM fault and hardware hang.
                 */
                logical_addr_high = max((adev->gmc.fb_end >> 18) + 0x1, adev->gmc.agp_end >> 18);
        else
                logical_addr_high = max(adev->gmc.fb_end, adev->gmc.agp_end) >> 18;

        agp_base = 0;
        agp_bot = adev->gmc.agp_start >> 24;
        agp_top = adev->gmc.agp_end >> 24;

        page_table_start.high_part = (u32)(adev->gmc.gart_start >> 44) & 0xF;
        page_table_start.low_part = (u32)(adev->gmc.gart_start >> 12);
        page_table_end.high_part = (u32)(adev->gmc.gart_end >> 44) & 0xF;
        page_table_end.low_part = (u32)(adev->gmc.gart_end >> 12);
        page_table_base.high_part = upper_32_bits(pt_base) & 0xF;
        page_table_base.low_part = lower_32_bits(pt_base);

        pa_config->system_aperture.start_addr = (uint64_t)logical_addr_low << 18;
        pa_config->system_aperture.end_addr = (uint64_t)logical_addr_high << 18;

        pa_config->system_aperture.agp_base = (uint64_t)agp_base << 24;
        pa_config->system_aperture.agp_bot = (uint64_t)agp_bot << 24;
        pa_config->system_aperture.agp_top = (uint64_t)agp_top << 24;

        pa_config->system_aperture.fb_base = adev->gmc.fb_start;
        pa_config->system_aperture.fb_offset = adev->gmc.aper_base;
        pa_config->system_aperture.fb_top = adev->gmc.fb_end;

        pa_config->gart_config.page_table_start_addr = page_table_start.quad_part << 12;
        pa_config->gart_config.page_table_end_addr = page_table_end.quad_part << 12;
        pa_config->gart_config.page_table_base_addr = page_table_base.quad_part;

        pa_config->is_hvm_enabled = 0;
}

static void event_mall_stutter(struct work_struct *work)
{
        struct vblank_workqueue *vblank_work = container_of(work, struct vblank_workqueue, mall_work);
        struct amdgpu_display_manager *dm = vblank_work->dm;

        mutex_lock(&dm->dc_lock);

        if (vblank_work->enable)
                dm->active_vblank_irq_count++;
        else if (dm->active_vblank_irq_count)
                dm->active_vblank_irq_count--;

        dc_allow_idle_optimizations(dm->dc, dm->active_vblank_irq_count == 0);

        DRM_DEBUG_KMS("Allow idle optimizations (MALL): %d\n", dm->active_vblank_irq_count == 0);

        mutex_unlock(&dm->dc_lock);
}
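
/*
 * In effect, the work item above gates DC's idle optimizations (MALL stutter)
 * on vblank interrupt usage: optimizations are only allowed while no CRTC
 * holds an active vblank IRQ (active_vblank_irq_count == 0).
 */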

static struct vblank_workqueue *vblank_create_workqueue(struct amdgpu_device *adev, struct dc *dc)
{
        int max_caps = dc->caps.max_links;
        struct vblank_workqueue *vblank_work;
        int i = 0;

        vblank_work = kcalloc(max_caps, sizeof(*vblank_work), GFP_KERNEL);
        if (ZERO_OR_NULL_PTR(vblank_work)) {
                kfree(vblank_work);
                return NULL;
        }

        for (i = 0; i < max_caps; i++)
                INIT_WORK(&vblank_work[i].mall_work, event_mall_stutter);

        return vblank_work;
}
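
/*
 * Sketch of intended use (fields as consumed by event_mall_stutter() above;
 * the vblank enable/disable path elsewhere in dm fills them in before
 * queuing):
 *
 *   work = &dm->vblank_workqueue[otg_inst];
 *   work->dm = dm;
 *   work->enable = enable;
 *   schedule_work(&work->mall_work);
 */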
#endif

static int amdgpu_dm_init(struct amdgpu_device *adev)
{
        struct dc_init_data init_data;
#ifdef CONFIG_DRM_AMD_DC_HDCP
        struct dc_callback_init init_params;
#endif
        int r;

        adev->dm.ddev = adev_to_drm(adev);
        adev->dm.adev = adev;

        /* Zero all the fields */
        memset(&init_data, 0, sizeof(init_data));
#ifdef CONFIG_DRM_AMD_DC_HDCP
        memset(&init_params, 0, sizeof(init_params));
#endif

        mutex_init(&adev->dm.dc_lock);
        mutex_init(&adev->dm.audio_lock);
#if defined(CONFIG_DRM_AMD_DC_DCN)
        spin_lock_init(&adev->dm.vblank_lock);
#endif

        if (amdgpu_dm_irq_init(adev)) {
                DRM_ERROR("amdgpu: failed to initialize DM IRQ support.\n");
                goto error;
        }

        init_data.asic_id.chip_family = adev->family;

        init_data.asic_id.pci_revision_id = adev->pdev->revision;
        init_data.asic_id.hw_internal_rev = adev->external_rev_id;

        init_data.asic_id.vram_width = adev->gmc.vram_width;
        /* TODO: initialize init_data.asic_id.vram_type here!!!! */
        init_data.asic_id.atombios_base_address =
                adev->mode_info.atom_context->bios;

        init_data.driver = adev;

        adev->dm.cgs_device = amdgpu_cgs_create_device(adev);

        if (!adev->dm.cgs_device) {
                DRM_ERROR("amdgpu: failed to create cgs device.\n");
                goto error;
        }

        init_data.cgs_device = adev->dm.cgs_device;

        init_data.dce_environment = DCE_ENV_PRODUCTION_DRV;

        switch (adev->asic_type) {
        case CHIP_CARRIZO:
        case CHIP_STONEY:
        case CHIP_RAVEN:
        case CHIP_RENOIR:
                init_data.flags.gpu_vm_support = true;
                if (ASICREV_IS_GREEN_SARDINE(adev->external_rev_id))
                        init_data.flags.disable_dmcu = true;
                break;
#if defined(CONFIG_DRM_AMD_DC_DCN)
        case CHIP_VANGOGH:
                init_data.flags.gpu_vm_support = true;
                break;
#endif
        default:
                break;
        }

        if (amdgpu_dc_feature_mask & DC_FBC_MASK)
                init_data.flags.fbc_support = true;

        if (amdgpu_dc_feature_mask & DC_MULTI_MON_PP_MCLK_SWITCH_MASK)
                init_data.flags.multi_mon_pp_mclk_switch = true;

        if (amdgpu_dc_feature_mask & DC_DISABLE_FRACTIONAL_PWM_MASK)
                init_data.flags.disable_fractional_pwm = true;

        init_data.flags.power_down_display_on_boot = true;

        INIT_LIST_HEAD(&adev->dm.da_list);
        /* Display Core create. */
        adev->dm.dc = dc_create(&init_data);

        if (adev->dm.dc) {
                DRM_INFO("Display Core initialized with v%s!\n", DC_VER);
        } else {
                DRM_INFO("Display Core failed to initialize with v%s!\n", DC_VER);
                goto error;
        }

        if (amdgpu_dc_debug_mask & DC_DISABLE_PIPE_SPLIT) {
                adev->dm.dc->debug.force_single_disp_pipe_split = false;
                adev->dm.dc->debug.pipe_split_policy = MPC_SPLIT_AVOID;
        }

        if (adev->asic_type != CHIP_CARRIZO && adev->asic_type != CHIP_STONEY)
                adev->dm.dc->debug.disable_stutter = !(amdgpu_pp_feature_mask & PP_STUTTER_MODE);

        if (amdgpu_dc_debug_mask & DC_DISABLE_STUTTER)
                adev->dm.dc->debug.disable_stutter = true;

        if (amdgpu_dc_debug_mask & DC_DISABLE_DSC)
                adev->dm.dc->debug.disable_dsc = true;

        if (amdgpu_dc_debug_mask & DC_DISABLE_CLOCK_GATING)
                adev->dm.dc->debug.disable_clock_gate = true;

        r = dm_dmub_hw_init(adev);
        if (r) {
                DRM_ERROR("DMUB interface failed to initialize: status=%d\n", r);
                goto error;
        }

        dc_hardware_init(adev->dm.dc);

#if defined(CONFIG_DRM_AMD_DC_DCN)
        if (adev->apu_flags) {
                struct dc_phy_addr_space_config pa_config;

                mmhub_read_system_context(adev, &pa_config);

                /* Call the DC init_memory func */
                dc_setup_system_context(adev->dm.dc, &pa_config);
        }
#endif

        adev->dm.freesync_module = mod_freesync_create(adev->dm.dc);
        if (!adev->dm.freesync_module) {
                DRM_ERROR("amdgpu: failed to initialize freesync_module.\n");
        } else {
                DRM_DEBUG_DRIVER("amdgpu: freesync_module init done %p.\n",
                                 adev->dm.freesync_module);
        }

        amdgpu_dm_init_color_mod();

#if defined(CONFIG_DRM_AMD_DC_DCN)
        if (adev->dm.dc->caps.max_links > 0) {
                adev->dm.vblank_workqueue = vblank_create_workqueue(adev, adev->dm.dc);

                if (!adev->dm.vblank_workqueue)
                        DRM_ERROR("amdgpu: failed to initialize vblank_workqueue.\n");
                else
                        DRM_DEBUG_DRIVER("amdgpu: vblank_workqueue init done %p.\n", adev->dm.vblank_workqueue);
        }
#endif

#ifdef CONFIG_DRM_AMD_DC_HDCP
        if (adev->dm.dc->caps.max_links > 0 && adev->asic_type >= CHIP_RAVEN) {
                adev->dm.hdcp_workqueue = hdcp_create_workqueue(adev, &init_params.cp_psp, adev->dm.dc);

                if (!adev->dm.hdcp_workqueue)
                        DRM_ERROR("amdgpu: failed to initialize hdcp_workqueue.\n");
                else
                        DRM_DEBUG_DRIVER("amdgpu: hdcp_workqueue init done %p.\n", adev->dm.hdcp_workqueue);

                dc_init_callbacks(adev->dm.dc, &init_params);
        }
#endif
#if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
        adev->dm.crc_rd_wrk = amdgpu_dm_crtc_secure_display_create_work();
#endif
        if (amdgpu_dm_initialize_drm_device(adev)) {
                DRM_ERROR("amdgpu: failed to initialize sw for display support.\n");
                goto error;
        }

        /* create fake encoders for MST */
        dm_dp_create_fake_mst_encoders(adev);

        /* TODO: Add_display_info? */

        /* TODO use dynamic cursor width */
        adev_to_drm(adev)->mode_config.cursor_width = adev->dm.dc->caps.max_cursor_size;
        adev_to_drm(adev)->mode_config.cursor_height = adev->dm.dc->caps.max_cursor_size;

        if (drm_vblank_init(adev_to_drm(adev), adev->dm.display_indexes_num)) {
                DRM_ERROR("amdgpu: failed to initialize vblank for display support.\n");
                goto error;
        }

        DRM_DEBUG_DRIVER("KMS initialized.\n");

        return 0;
error:
        amdgpu_dm_fini(adev);

        return -EINVAL;
}

static void amdgpu_dm_fini(struct amdgpu_device *adev)
{
        int i;

        for (i = 0; i < adev->dm.display_indexes_num; i++) {
                drm_encoder_cleanup(&adev->dm.mst_encoders[i].base);
        }

        amdgpu_dm_audio_fini(adev);

        amdgpu_dm_destroy_drm_device(&adev->dm);

#if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
        if (adev->dm.crc_rd_wrk) {
                flush_work(&adev->dm.crc_rd_wrk->notify_ta_work);
                kfree(adev->dm.crc_rd_wrk);
                adev->dm.crc_rd_wrk = NULL;
        }
#endif
#ifdef CONFIG_DRM_AMD_DC_HDCP
        if (adev->dm.hdcp_workqueue) {
                hdcp_destroy(&adev->dev->kobj, adev->dm.hdcp_workqueue);
                adev->dm.hdcp_workqueue = NULL;
        }

        if (adev->dm.dc)
                dc_deinit_callbacks(adev->dm.dc);
#endif

#if defined(CONFIG_DRM_AMD_DC_DCN)
        if (adev->dm.vblank_workqueue) {
                adev->dm.vblank_workqueue->dm = NULL;
                kfree(adev->dm.vblank_workqueue);
                adev->dm.vblank_workqueue = NULL;
        }
#endif

        if (adev->dm.dc->ctx->dmub_srv) {
                dc_dmub_srv_destroy(&adev->dm.dc->ctx->dmub_srv);
                adev->dm.dc->ctx->dmub_srv = NULL;
        }

        if (adev->dm.dmub_bo)
                amdgpu_bo_free_kernel(&adev->dm.dmub_bo,
                                      &adev->dm.dmub_bo_gpu_addr,
                                      &adev->dm.dmub_bo_cpu_addr);

        /* DC Destroy TODO: Replace destroy DAL */
        if (adev->dm.dc)
                dc_destroy(&adev->dm.dc);
        /*
         * TODO: pageflip, vblank interrupt
         *
         * amdgpu_dm_irq_fini(adev);
         */

        if (adev->dm.cgs_device) {
                amdgpu_cgs_destroy_device(adev->dm.cgs_device);
                adev->dm.cgs_device = NULL;
        }
        if (adev->dm.freesync_module) {
                mod_freesync_destroy(adev->dm.freesync_module);
                adev->dm.freesync_module = NULL;
        }

        mutex_destroy(&adev->dm.audio_lock);
        mutex_destroy(&adev->dm.dc_lock);
}

static int load_dmcu_fw(struct amdgpu_device *adev)
{
        const char *fw_name_dmcu = NULL;
        int r;
        const struct dmcu_firmware_header_v1_0 *hdr;

        switch (adev->asic_type) {
#if defined(CONFIG_DRM_AMD_DC_SI)
        case CHIP_TAHITI:
        case CHIP_PITCAIRN:
        case CHIP_VERDE:
        case CHIP_OLAND:
#endif
        case CHIP_BONAIRE:
        case CHIP_HAWAII:
        case CHIP_KAVERI:
        case CHIP_KABINI:
        case CHIP_MULLINS:
        case CHIP_TONGA:
        case CHIP_FIJI:
        case CHIP_CARRIZO:
        case CHIP_STONEY:
        case CHIP_POLARIS11:
        case CHIP_POLARIS10:
        case CHIP_POLARIS12:
        case CHIP_VEGAM:
        case CHIP_VEGA10:
        case CHIP_VEGA12:
        case CHIP_VEGA20:
        case CHIP_NAVI10:
        case CHIP_NAVI14:
        case CHIP_RENOIR:
        case CHIP_SIENNA_CICHLID:
        case CHIP_NAVY_FLOUNDER:
        case CHIP_DIMGREY_CAVEFISH:
        case CHIP_VANGOGH:
                return 0;
        case CHIP_NAVI12:
                fw_name_dmcu = FIRMWARE_NAVI12_DMCU;
                break;
        case CHIP_RAVEN:
                if (ASICREV_IS_PICASSO(adev->external_rev_id))
                        fw_name_dmcu = FIRMWARE_RAVEN_DMCU;
                else if (ASICREV_IS_RAVEN2(adev->external_rev_id))
                        fw_name_dmcu = FIRMWARE_RAVEN_DMCU;
                else
                        return 0;
                break;
        default:
                DRM_ERROR("Unsupported ASIC type: 0x%X\n", adev->asic_type);
                return -EINVAL;
        }

        if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP) {
                DRM_DEBUG_KMS("dm: DMCU firmware not supported on direct or SMU loading\n");
                return 0;
        }

        r = request_firmware_direct(&adev->dm.fw_dmcu, fw_name_dmcu, adev->dev);
        if (r == -ENOENT) {
                /* DMCU firmware is not necessary, so don't raise a fuss if it's missing */
                DRM_DEBUG_KMS("dm: DMCU firmware not found\n");
                adev->dm.fw_dmcu = NULL;
                return 0;
        }
        if (r) {
                dev_err(adev->dev, "amdgpu_dm: Can't load firmware \"%s\"\n",
                        fw_name_dmcu);
                return r;
        }

        r = amdgpu_ucode_validate(adev->dm.fw_dmcu);
        if (r) {
                dev_err(adev->dev, "amdgpu_dm: Can't validate firmware \"%s\"\n",
                        fw_name_dmcu);
                release_firmware(adev->dm.fw_dmcu);
                adev->dm.fw_dmcu = NULL;
                return r;
        }

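        /*
         * The single DMCU blob is registered as two PSP ucode entries below:
         * the ERAM part (total ucode size minus intv_size_bytes) carries the
         * firmware proper, and the INTV part the interrupt vectors.
         */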
        hdr = (const struct dmcu_firmware_header_v1_0 *)adev->dm.fw_dmcu->data;
        adev->firmware.ucode[AMDGPU_UCODE_ID_DMCU_ERAM].ucode_id = AMDGPU_UCODE_ID_DMCU_ERAM;
        adev->firmware.ucode[AMDGPU_UCODE_ID_DMCU_ERAM].fw = adev->dm.fw_dmcu;
        adev->firmware.fw_size +=
                ALIGN(le32_to_cpu(hdr->header.ucode_size_bytes) - le32_to_cpu(hdr->intv_size_bytes), PAGE_SIZE);

        adev->firmware.ucode[AMDGPU_UCODE_ID_DMCU_INTV].ucode_id = AMDGPU_UCODE_ID_DMCU_INTV;
        adev->firmware.ucode[AMDGPU_UCODE_ID_DMCU_INTV].fw = adev->dm.fw_dmcu;
        adev->firmware.fw_size +=
                ALIGN(le32_to_cpu(hdr->intv_size_bytes), PAGE_SIZE);

        adev->dm.dmcu_fw_version = le32_to_cpu(hdr->header.ucode_version);

        DRM_DEBUG_KMS("PSP loading DMCU firmware\n");

        return 0;
}
1422
1423 static uint32_t amdgpu_dm_dmub_reg_read(void *ctx, uint32_t address)
1424 {
1425         struct amdgpu_device *adev = ctx;
1426
1427         return dm_read_reg(adev->dm.dc->ctx, address);
1428 }
1429
1430 static void amdgpu_dm_dmub_reg_write(void *ctx, uint32_t address,
1431                                      uint32_t value)
1432 {
1433         struct amdgpu_device *adev = ctx;
1434
1435         dm_write_reg(adev->dm.dc->ctx, address, value);
1436 }
1437
1438 static int dm_dmub_sw_init(struct amdgpu_device *adev)
1439 {
1440         struct dmub_srv_create_params create_params;
1441         struct dmub_srv_region_params region_params;
1442         struct dmub_srv_region_info region_info;
1443         struct dmub_srv_fb_params fb_params;
1444         struct dmub_srv_fb_info *fb_info;
1445         struct dmub_srv *dmub_srv;
1446         const struct dmcub_firmware_header_v1_0 *hdr;
1447         const char *fw_name_dmub;
1448         enum dmub_asic dmub_asic;
1449         enum dmub_status status;
1450         int r;
1451
1452         switch (adev->asic_type) {
1453         case CHIP_RENOIR:
1454                 dmub_asic = DMUB_ASIC_DCN21;
1455                 fw_name_dmub = FIRMWARE_RENOIR_DMUB;
1456                 if (ASICREV_IS_GREEN_SARDINE(adev->external_rev_id))
1457                         fw_name_dmub = FIRMWARE_GREEN_SARDINE_DMUB;
1458                 break;
1459         case CHIP_SIENNA_CICHLID:
1460                 dmub_asic = DMUB_ASIC_DCN30;
1461                 fw_name_dmub = FIRMWARE_SIENNA_CICHLID_DMUB;
1462                 break;
1463         case CHIP_NAVY_FLOUNDER:
1464                 dmub_asic = DMUB_ASIC_DCN30;
1465                 fw_name_dmub = FIRMWARE_NAVY_FLOUNDER_DMUB;
1466                 break;
1467         case CHIP_VANGOGH:
1468                 dmub_asic = DMUB_ASIC_DCN301;
1469                 fw_name_dmub = FIRMWARE_VANGOGH_DMUB;
1470                 break;
1471         case CHIP_DIMGREY_CAVEFISH:
1472                 dmub_asic = DMUB_ASIC_DCN302;
1473                 fw_name_dmub = FIRMWARE_DIMGREY_CAVEFISH_DMUB;
1474                 break;
1475
1476         default:
1477                 /* ASIC doesn't support DMUB. */
1478                 return 0;
1479         }
1480
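             /*
              * Note: DMUB firmware load/validate failures are deliberately
              * non-fatal here; DM init continues without a DMUB service.
              */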
1481         r = request_firmware_direct(&adev->dm.dmub_fw, fw_name_dmub, adev->dev);
1482         if (r) {
1483                 DRM_ERROR("DMUB firmware loading failed: %d\n", r);
1484                 return 0;
1485         }
1486
1487         r = amdgpu_ucode_validate(adev->dm.dmub_fw);
1488         if (r) {
1489                 DRM_ERROR("Couldn't validate DMUB firmware: %d\n", r);
1490                 return 0;
1491         }
1492
1493         hdr = (const struct dmcub_firmware_header_v1_0 *)adev->dm.dmub_fw->data;
1494
1495         if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) {
1496                 adev->firmware.ucode[AMDGPU_UCODE_ID_DMCUB].ucode_id =
1497                         AMDGPU_UCODE_ID_DMCUB;
1498                 adev->firmware.ucode[AMDGPU_UCODE_ID_DMCUB].fw =
1499                         adev->dm.dmub_fw;
1500                 adev->firmware.fw_size +=
1501                         ALIGN(le32_to_cpu(hdr->inst_const_bytes), PAGE_SIZE);
1502
1503                 DRM_INFO("Loading DMUB firmware via PSP: version=0x%08X\n",
1504                          le32_to_cpu(hdr->header.ucode_version));
1505         }
1506
1507         adev->dm.dmcub_fw_version = le32_to_cpu(hdr->header.ucode_version);
1508
1509         adev->dm.dmub_srv = kzalloc(sizeof(*adev->dm.dmub_srv), GFP_KERNEL);
1510         dmub_srv = adev->dm.dmub_srv;
1511
1512         if (!dmub_srv) {
1513                 DRM_ERROR("Failed to allocate DMUB service!\n");
1514                 return -ENOMEM;
1515         }
1516
1517         memset(&create_params, 0, sizeof(create_params));
1518         create_params.user_ctx = adev;
1519         create_params.funcs.reg_read = amdgpu_dm_dmub_reg_read;
1520         create_params.funcs.reg_write = amdgpu_dm_dmub_reg_write;
1521         create_params.asic = dmub_asic;
1522
1523         /* Create the DMUB service. */
1524         status = dmub_srv_create(dmub_srv, &create_params);
1525         if (status != DMUB_STATUS_OK) {
1526                 DRM_ERROR("Error creating DMUB service: %d\n", status);
1527                 return -EINVAL;
1528         }
1529
1530         /* Calculate the size of all the regions for the DMUB service. */
1531         memset(&region_params, 0, sizeof(region_params));
1532
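             /*
              * The firmware image wraps the instruction constants in PSP
              * signing header/footer bytes; point fw_inst_const past the
              * header so only the payload lands in the inst_const region.
              */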
1533         region_params.inst_const_size = le32_to_cpu(hdr->inst_const_bytes) -
1534                                         PSP_HEADER_BYTES - PSP_FOOTER_BYTES;
1535         region_params.bss_data_size = le32_to_cpu(hdr->bss_data_bytes);
1536         region_params.vbios_size = adev->bios_size;
1537         region_params.fw_bss_data = region_params.bss_data_size ?
1538                 adev->dm.dmub_fw->data +
1539                 le32_to_cpu(hdr->header.ucode_array_offset_bytes) +
1540                 le32_to_cpu(hdr->inst_const_bytes) : NULL;
1541         region_params.fw_inst_const =
1542                 adev->dm.dmub_fw->data +
1543                 le32_to_cpu(hdr->header.ucode_array_offset_bytes) +
1544                 PSP_HEADER_BYTES;
1545
1546         status = dmub_srv_calc_region_info(dmub_srv, &region_params,
1547                                            &region_info);
1548
1549         if (status != DMUB_STATUS_OK) {
1550                 DRM_ERROR("Error calculating DMUB region info: %d\n", status);
1551                 return -EINVAL;
1552         }
1553
1554         /*
1555          * Allocate a framebuffer based on the total size of all the regions.
1556          * TODO: Move this into GART.
1557          */
1558         r = amdgpu_bo_create_kernel(adev, region_info.fb_size, PAGE_SIZE,
1559                                     AMDGPU_GEM_DOMAIN_VRAM, &adev->dm.dmub_bo,
1560                                     &adev->dm.dmub_bo_gpu_addr,
1561                                     &adev->dm.dmub_bo_cpu_addr);
1562         if (r)
1563                 return r;
1564
1565         /* Rebase the regions on the framebuffer address. */
1566         memset(&fb_params, 0, sizeof(fb_params));
1567         fb_params.cpu_addr = adev->dm.dmub_bo_cpu_addr;
1568         fb_params.gpu_addr = adev->dm.dmub_bo_gpu_addr;
1569         fb_params.region_info = &region_info;
1570
1571         adev->dm.dmub_fb_info =
1572                 kzalloc(sizeof(*adev->dm.dmub_fb_info), GFP_KERNEL);
1573         fb_info = adev->dm.dmub_fb_info;
1574
1575         if (!fb_info) {
1576                 DRM_ERROR(
1577                         "Failed to allocate framebuffer info for DMUB service!\n");
1578                 return -ENOMEM;
1579         }
1580
1581         status = dmub_srv_calc_fb_info(dmub_srv, &fb_params, fb_info);
1582         if (status != DMUB_STATUS_OK) {
1583                 DRM_ERROR("Error calculating DMUB FB info: %d\n", status);
1584                 return -EINVAL;
1585         }
1586
1587         return 0;
1588 }
1589
1590 static int dm_sw_init(void *handle)
1591 {
1592         struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1593         int r;
1594
1595         r = dm_dmub_sw_init(adev);
1596         if (r)
1597                 return r;
1598
1599         return load_dmcu_fw(adev);
1600 }
1601
1602 static int dm_sw_fini(void *handle)
1603 {
1604         struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1605
1606         kfree(adev->dm.dmub_fb_info);
1607         adev->dm.dmub_fb_info = NULL;
1608
1609         if (adev->dm.dmub_srv) {
1610                 dmub_srv_destroy(adev->dm.dmub_srv);
1611                 adev->dm.dmub_srv = NULL;
1612         }
1613
1614         release_firmware(adev->dm.dmub_fw);
1615         adev->dm.dmub_fw = NULL;
1616
1617         release_firmware(adev->dm.fw_dmcu);
1618         adev->dm.fw_dmcu = NULL;
1619
1620         return 0;
1621 }
1622
1623 static int detect_mst_link_for_all_connectors(struct drm_device *dev)
1624 {
1625         struct amdgpu_dm_connector *aconnector;
1626         struct drm_connector *connector;
1627         struct drm_connector_list_iter iter;
1628         int ret = 0;
1629
1630         drm_connector_list_iter_begin(dev, &iter);
1631         drm_for_each_connector_iter(connector, &iter) {
1632                 aconnector = to_amdgpu_dm_connector(connector);
1633                 if (aconnector->dc_link->type == dc_connection_mst_branch &&
1634                     aconnector->mst_mgr.aux) {
1635                         DRM_DEBUG_DRIVER("DM_MST: starting TM on aconnector: %p [id: %d]\n",
1636                                          aconnector,
1637                                          aconnector->base.base.id);
1638
1639                         ret = drm_dp_mst_topology_mgr_set_mst(&aconnector->mst_mgr, true);
1640                         if (ret < 0) {
1641                                 DRM_ERROR("DM_MST: Failed to start MST\n");
1642                                 aconnector->dc_link->type =
1643                                         dc_connection_single;
1644                                 break;
1645                         }
1646                 }
1647         }
1648         drm_connector_list_iter_end(&iter);
1649
1650         return ret;
1651 }
1652
1653 static int dm_late_init(void *handle)
1654 {
1655         struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1656
1657         struct dmcu_iram_parameters params;
1658         unsigned int linear_lut[16];
1659         int i;
1660         struct dmcu *dmcu = NULL;
1661         bool ret = true;
1662
1663         dmcu = adev->dm.dc->res_pool->dmcu;
1664
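             /* Build a 16-point linear backlight LUT spanning 0..0xFFFF. */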
1665         for (i = 0; i < 16; i++)
1666                 linear_lut[i] = 0xFFFF * i / 15;
1667
1668         params.set = 0;
1669         params.backlight_ramping_start = 0xCCCC;
1670         params.backlight_ramping_reduction = 0xCCCCCCCC;
1671         params.backlight_lut_array_size = 16;
1672         params.backlight_lut_array = linear_lut;
1673
1674         /* Min backlight level after ABM reduction; don't allow below 1%:
1675          * 0xFFFF * 0.01 = 0x28F
1676          */
1677         params.min_abm_backlight = 0x28F;
1678
1679         /* If ABM is implemented on DMCUB, the DMCU object will be NULL;
1680          * ABM 2.4 and up are implemented on DMCUB.
1681          */
1683         if (dmcu)
1684                 ret = dmcu_load_iram(dmcu, params);
1685         else if (adev->dm.dc->ctx->dmub_srv)
1686                 ret = dmub_init_abm_config(adev->dm.dc->res_pool, params);
1687
1688         if (!ret)
1689                 return -EINVAL;
1690
1691         return detect_mst_link_for_all_connectors(adev_to_drm(adev));
1692 }
1693
1694 static void s3_handle_mst(struct drm_device *dev, bool suspend)
1695 {
1696         struct amdgpu_dm_connector *aconnector;
1697         struct drm_connector *connector;
1698         struct drm_connector_list_iter iter;
1699         struct drm_dp_mst_topology_mgr *mgr;
1700         int ret;
1701         bool need_hotplug = false;
1702
1703         drm_connector_list_iter_begin(dev, &iter);
1704         drm_for_each_connector_iter(connector, &iter) {
1705                 aconnector = to_amdgpu_dm_connector(connector);
1706                 if (aconnector->dc_link->type != dc_connection_mst_branch ||
1707                     aconnector->mst_port)
1708                         continue;
1709
1710                 mgr = &aconnector->mst_mgr;
1711
1712                 if (suspend) {
1713                         drm_dp_mst_topology_mgr_suspend(mgr);
1714                 } else {
1715                         ret = drm_dp_mst_topology_mgr_resume(mgr, true);
1716                         if (ret < 0) {
1717                                 drm_dp_mst_topology_mgr_set_mst(mgr, false);
1718                                 need_hotplug = true;
1719                         }
1720                 }
1721         }
1722         drm_connector_list_iter_end(&iter);
1723
1724         if (need_hotplug)
1725                 drm_kms_helper_hotplug_event(dev);
1726 }
1727
1728 static int amdgpu_dm_smu_write_watermarks_table(struct amdgpu_device *adev)
1729 {
1730         struct smu_context *smu = &adev->smu;
1731         int ret = 0;
1732
1733         if (!is_support_sw_smu(adev))
1734                 return 0;
1735
1736         /* This interface is for dGPU Navi1x. The Linux dc-pplib interface
1737          * depends on the Windows driver's dc implementation.
1738          * For Navi1x, the clock settings of the dcn watermarks are fixed and
1739          * should be passed to smu during boot up and on resume from S3.
1740          * Boot up: dc calculates the dcn watermark clock settings within
1741          * dc_create, dcn20_resource_construct,
1742          * then calls the pplib functions below to pass the settings to smu:
1743          * smu_set_watermarks_for_clock_ranges
1744          * smu_set_watermarks_table
1745          * navi10_set_watermarks_table
1746          * smu_write_watermarks_table
1747          *
1748          * For Renoir, the clock settings of the dcn watermarks are also fixed
1749          * values. dc implements a different flow for the Windows driver:
1750          * dc_hardware_init / dc_set_power_state
1751          * dcn10_init_hw
1752          * notify_wm_ranges
1753          * set_wm_ranges
1754          * -- Linux
1755          * smu_set_watermarks_for_clock_ranges
1756          * renoir_set_watermarks_table
1757          * smu_write_watermarks_table
1758          *
1759          * For Linux,
1760          * dc_hardware_init -> amdgpu_dm_init
1761          * dc_set_power_state --> dm_resume
1762          *
1763          * Therefore, this function applies to Navi10/12/14 but not to Renoir.
1764          */
1766         switch (adev->asic_type) {
1767         case CHIP_NAVI10:
1768         case CHIP_NAVI14:
1769         case CHIP_NAVI12:
1770                 break;
1771         default:
1772                 return 0;
1773         }
1774
1775         ret = smu_write_watermarks_table(smu);
1776         if (ret) {
1777                 DRM_ERROR("Failed to update WMTABLE!\n");
1778                 return ret;
1779         }
1780
1781         return 0;
1782 }
1783
1784 /**
1785  * dm_hw_init() - Initialize DC device
1786  * @handle: The base driver device containing the amdgpu_dm device.
1787  *
1788  * Initialize the &struct amdgpu_display_manager device. This involves calling
1789  * the initializers of each DM component, then populating the struct with them.
1790  *
1791  * Although the function implies hardware initialization, both hardware and
1792  * software are initialized here. Splitting them out to their relevant init
1793  * hooks is a future TODO item.
1794  *
1795  * Some notable things that are initialized here:
1796  *
1797  * - Display Core, both software and hardware
1798  * - DC modules that we need (freesync and color management)
1799  * - DRM software states
1800  * - Interrupt sources and handlers
1801  * - Vblank support
1802  * - Debug FS entries, if enabled
1803  */
1804 static int dm_hw_init(void *handle)
1805 {
1806         struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1807         /* Create DAL display manager */
1808         amdgpu_dm_init(adev);
1809         amdgpu_dm_hpd_init(adev);
1810
1811         return 0;
1812 }
1813
1814 /**
1815  * dm_hw_fini() - Teardown DC device
1816  * @handle: The base driver device containing the amdgpu_dm device.
1817  *
1818  * Teardown components within &struct amdgpu_display_manager that require
1819  * cleanup. This involves cleaning up the DRM device, DC, and any modules that
1820  * were loaded. Also flush IRQ workqueues and disable them.
1821  */
1822 static int dm_hw_fini(void *handle)
1823 {
1824         struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1825
1826         amdgpu_dm_hpd_fini(adev);
1827
1828         amdgpu_dm_irq_fini(adev);
1829         amdgpu_dm_fini(adev);
1830         return 0;
1831 }
1832
1833
1834 static int dm_enable_vblank(struct drm_crtc *crtc);
1835 static void dm_disable_vblank(struct drm_crtc *crtc);
1836
1837 static void dm_gpureset_toggle_interrupts(struct amdgpu_device *adev,
1838                                  struct dc_state *state, bool enable)
1839 {
1840         enum dc_irq_source irq_source;
1841         struct amdgpu_crtc *acrtc;
1842         int rc = -EBUSY;
1843         int i = 0;
1844
1845         for (i = 0; i < state->stream_count; i++) {
1846                 acrtc = get_crtc_by_otg_inst(
1847                                 adev, state->stream_status[i].primary_otg_inst);
1848
1849                 if (acrtc && state->stream_status[i].plane_count != 0) {
1850                         irq_source = IRQ_TYPE_PFLIP + acrtc->otg_inst;
1851                         rc = dc_interrupt_set(adev->dm.dc, irq_source, enable) ? 0 : -EBUSY;
1852                         DRM_DEBUG_VBL("crtc %d - pflip irq %sabling: r=%d\n",
1853                                       acrtc->crtc_id, enable ? "en" : "dis", rc);
1854                         if (rc)
1855                                 DRM_WARN("Failed to %s pflip interrupts\n",
1856                                          enable ? "enable" : "disable");
1857
1858                         if (enable) {
1859                                 rc = dm_enable_vblank(&acrtc->base);
1860                                 if (rc)
1861                                         DRM_WARN("Failed to enable vblank interrupts\n");
1862                         } else {
1863                                 dm_disable_vblank(&acrtc->base);
1864                         }
1865
1866                 }
1867         }
1868
1869 }
1870
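     /*
      * Commit a copy of the current DC state with all planes and streams
      * removed, bringing the hardware to a zero-stream state (used around
      * GPU reset).
      */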
1871 static enum dc_status amdgpu_dm_commit_zero_streams(struct dc *dc)
1872 {
1873         struct dc_state *context = NULL;
1874         enum dc_status res = DC_ERROR_UNEXPECTED;
1875         int i;
1876         struct dc_stream_state *del_streams[MAX_PIPES];
1877         int del_streams_count = 0;
1878
1879         memset(del_streams, 0, sizeof(del_streams));
1880
1881         context = dc_create_state(dc);
1882         if (context == NULL)
1883                 goto context_alloc_fail;
1884
1885         dc_resource_state_copy_construct_current(dc, context);
1886
1887         /* First remove from context all streams */
1888         for (i = 0; i < context->stream_count; i++) {
1889                 struct dc_stream_state *stream = context->streams[i];
1890
1891                 del_streams[del_streams_count++] = stream;
1892         }
1893
1894         /* Remove all planes for removed streams and then remove the streams */
1895         for (i = 0; i < del_streams_count; i++) {
1896                 if (!dc_rem_all_planes_for_stream(dc, del_streams[i], context)) {
1897                         res = DC_FAIL_DETACH_SURFACES;
1898                         goto fail;
1899                 }
1900
1901                 res = dc_remove_stream_from_ctx(dc, context, del_streams[i]);
1902                 if (res != DC_OK)
1903                         goto fail;
1904         }
1905
1906
1907         res = dc_validate_global_state(dc, context, false);
1908
1909         if (res != DC_OK) {
1910                 DRM_ERROR("%s: resource validation failed, dc_status: %d\n", __func__, res);
1911                 goto fail;
1912         }
1913
1914         res = dc_commit_state(dc, context);
1915
1916 fail:
1917         dc_release_state(context);
1918
1919 context_alloc_fail:
1920         return res;
1921 }
1922
1923 static int dm_suspend(void *handle)
1924 {
1925         struct amdgpu_device *adev = handle;
1926         struct amdgpu_display_manager *dm = &adev->dm;
1927         int ret = 0;
1928
1929         if (amdgpu_in_reset(adev)) {
1930                 mutex_lock(&dm->dc_lock);
1931
1932 #if defined(CONFIG_DRM_AMD_DC_DCN)
1933                 dc_allow_idle_optimizations(adev->dm.dc, false);
1934 #endif
1935
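                     /*
                      * Stash the current DC state so dm_resume() can re-commit
                      * it once the GPU reset completes.
                      */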
1936                 dm->cached_dc_state = dc_copy_state(dm->dc->current_state);
1937
1938                 dm_gpureset_toggle_interrupts(adev, dm->cached_dc_state, false);
1939
1940                 amdgpu_dm_commit_zero_streams(dm->dc);
1941
1942                 amdgpu_dm_irq_suspend(adev);
1943
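                     /* Keep dc_lock held across the reset; dm_resume() drops it. */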
1944                 return ret;
1945         }
1946
1947 #ifdef CONFIG_DRM_AMD_SECURE_DISPLAY
1948         amdgpu_dm_crtc_secure_display_suspend(adev);
1949 #endif
1950         WARN_ON(adev->dm.cached_state);
1951         adev->dm.cached_state = drm_atomic_helper_suspend(adev_to_drm(adev));
1952
1953         s3_handle_mst(adev_to_drm(adev), true);
1954
1955         amdgpu_dm_irq_suspend(adev);
1956
1957
1958         dc_set_power_state(dm->dc, DC_ACPI_CM_POWER_STATE_D3);
1959
1960         return 0;
1961 }
1962
1963 static struct amdgpu_dm_connector *
1964 amdgpu_dm_find_first_crtc_matching_connector(struct drm_atomic_state *state,
1965                                              struct drm_crtc *crtc)
1966 {
1967         uint32_t i;
1968         struct drm_connector_state *new_con_state;
1969         struct drm_connector *connector;
1970         struct drm_crtc *crtc_from_state;
1971
1972         for_each_new_connector_in_state(state, connector, new_con_state, i) {
1973                 crtc_from_state = new_con_state->crtc;
1974
1975                 if (crtc_from_state == crtc)
1976                         return to_amdgpu_dm_connector(connector);
1977         }
1978
1979         return NULL;
1980 }
1981
1982 static void emulated_link_detect(struct dc_link *link)
1983 {
1984         struct dc_sink_init_data sink_init_data = { 0 };
1985         struct display_sink_capability sink_caps = { 0 };
1986         enum dc_edid_status edid_status;
1987         struct dc_context *dc_ctx = link->ctx;
1988         struct dc_sink *sink = NULL;
1989         struct dc_sink *prev_sink = NULL;
1990
1991         link->type = dc_connection_none;
1992         prev_sink = link->local_sink;
1993
1994         if (prev_sink)
1995                 dc_sink_release(prev_sink);
1996
1997         switch (link->connector_signal) {
1998         case SIGNAL_TYPE_HDMI_TYPE_A: {
1999                 sink_caps.transaction_type = DDC_TRANSACTION_TYPE_I2C;
2000                 sink_caps.signal = SIGNAL_TYPE_HDMI_TYPE_A;
2001                 break;
2002         }
2003
2004         case SIGNAL_TYPE_DVI_SINGLE_LINK: {
2005                 sink_caps.transaction_type = DDC_TRANSACTION_TYPE_I2C;
2006                 sink_caps.signal = SIGNAL_TYPE_DVI_SINGLE_LINK;
2007                 break;
2008         }
2009
2010         case SIGNAL_TYPE_DVI_DUAL_LINK: {
2011                 sink_caps.transaction_type = DDC_TRANSACTION_TYPE_I2C;
2012                 sink_caps.signal = SIGNAL_TYPE_DVI_DUAL_LINK;
2013                 break;
2014         }
2015
2016         case SIGNAL_TYPE_LVDS: {
2017                 sink_caps.transaction_type = DDC_TRANSACTION_TYPE_I2C;
2018                 sink_caps.signal = SIGNAL_TYPE_LVDS;
2019                 break;
2020         }
2021
2022         case SIGNAL_TYPE_EDP: {
2023                 sink_caps.transaction_type =
2024                         DDC_TRANSACTION_TYPE_I2C_OVER_AUX;
2025                 sink_caps.signal = SIGNAL_TYPE_EDP;
2026                 break;
2027         }
2028
2029         case SIGNAL_TYPE_DISPLAY_PORT: {
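                     /*
                      * Note: the emulated DP sink is reported with a virtual
                      * signal type rather than SIGNAL_TYPE_DISPLAY_PORT.
                      */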
2030                 sink_caps.transaction_type =
2031                         DDC_TRANSACTION_TYPE_I2C_OVER_AUX;
2032                 sink_caps.signal = SIGNAL_TYPE_VIRTUAL;
2033                 break;
2034         }
2035
2036         default:
2037                 DC_ERROR("Invalid connector type! signal:%d\n",
2038                         link->connector_signal);
2039                 return;
2040         }
2041
2042         sink_init_data.link = link;
2043         sink_init_data.sink_signal = sink_caps.signal;
2044
2045         sink = dc_sink_create(&sink_init_data);
2046         if (!sink) {
2047                 DC_ERROR("Failed to create sink!\n");
2048                 return;
2049         }
2050
2051         /* dc_sink_create returns a new reference */
2052         link->local_sink = sink;
2053
2054         edid_status = dm_helpers_read_local_edid(
2055                         link->ctx,
2056                         link,
2057                         sink);
2058
2059         if (edid_status != EDID_OK)
2060                 DC_ERROR("Failed to read EDID\n");
2061
2062 }
2063
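     /*
      * Re-commit the streams cached before a GPU reset, forcing a full
      * update on every plane so the re-initialized hardware is fully
      * re-programmed.
      */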
2064 static void dm_gpureset_commit_state(struct dc_state *dc_state,
2065                                      struct amdgpu_display_manager *dm)
2066 {
2067         struct {
2068                 struct dc_surface_update surface_updates[MAX_SURFACES];
2069                 struct dc_plane_info plane_infos[MAX_SURFACES];
2070                 struct dc_scaling_info scaling_infos[MAX_SURFACES];
2071                 struct dc_flip_addrs flip_addrs[MAX_SURFACES];
2072                 struct dc_stream_update stream_update;
2073         } *bundle;
2074         int k, m;
2075
2076         bundle = kzalloc(sizeof(*bundle), GFP_KERNEL);
2077
2078         if (!bundle) {
2079                 dm_error("Failed to allocate update bundle\n");
2080                 goto cleanup;
2081         }
2082
2083         for (k = 0; k < dc_state->stream_count; k++) {
2084                 bundle->stream_update.stream = dc_state->streams[k];
2085
2086                 for (m = 0; m < dc_state->stream_status[k].plane_count; m++) {
2087                         bundle->surface_updates[m].surface =
2088                                 dc_state->stream_status[k].plane_states[m];
2089                         bundle->surface_updates[m].surface->force_full_update =
2090                                 true;
2091                 }
2092                 dc_commit_updates_for_stream(
2093                         dm->dc, bundle->surface_updates,
2094                         dc_state->stream_status[k].plane_count,
2095                         dc_state->streams[k], &bundle->stream_update, dc_state);
2096         }
2097
2098 cleanup:
2099         kfree(bundle);
2100
2101         return;
2102 }
2103
2104 static void dm_set_dpms_off(struct dc_link *link)
2105 {
2106         struct dc_stream_state *stream_state;
2107         struct amdgpu_dm_connector *aconnector = link->priv;
2108         struct amdgpu_device *adev = drm_to_adev(aconnector->base.dev);
2109         struct dc_stream_update stream_update;
2110         bool dpms_off = true;
2111
2112         memset(&stream_update, 0, sizeof(stream_update));
2113         stream_update.dpms_off = &dpms_off;
2114
2115         mutex_lock(&adev->dm.dc_lock);
2116         stream_state = dc_stream_find_from_link(link);
2117
2118         if (stream_state == NULL) {
2119                 DRM_DEBUG_DRIVER("Error finding stream state associated with link!\n");
2120                 mutex_unlock(&adev->dm.dc_lock);
2121                 return;
2122         }
2123
2124         stream_update.stream = stream_state;
2125         dc_commit_updates_for_stream(stream_state->ctx->dc, NULL, 0,
2126                                      stream_state, &stream_update,
2127                                      stream_state->ctx->dc->current_state);
2128         mutex_unlock(&adev->dm.dc_lock);
2129 }
2130
2131 static int dm_resume(void *handle)
2132 {
2133         struct amdgpu_device *adev = handle;
2134         struct drm_device *ddev = adev_to_drm(adev);
2135         struct amdgpu_display_manager *dm = &adev->dm;
2136         struct amdgpu_dm_connector *aconnector;
2137         struct drm_connector *connector;
2138         struct drm_connector_list_iter iter;
2139         struct drm_crtc *crtc;
2140         struct drm_crtc_state *new_crtc_state;
2141         struct dm_crtc_state *dm_new_crtc_state;
2142         struct drm_plane *plane;
2143         struct drm_plane_state *new_plane_state;
2144         struct dm_plane_state *dm_new_plane_state;
2145         struct dm_atomic_state *dm_state = to_dm_atomic_state(dm->atomic_obj.state);
2146         enum dc_connection_type new_connection_type = dc_connection_none;
2147         struct dc_state *dc_state;
2148         int i, r, j;
2149
2150         if (amdgpu_in_reset(adev)) {
2151                 dc_state = dm->cached_dc_state;
2152
2153                 r = dm_dmub_hw_init(adev);
2154                 if (r)
2155                         DRM_ERROR("DMUB interface failed to initialize: status=%d\n", r);
2156
2157                 dc_set_power_state(dm->dc, DC_ACPI_CM_POWER_STATE_D0);
2158                 dc_resume(dm->dc);
2159
2160                 amdgpu_dm_irq_resume_early(adev);
2161
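                     /*
                      * Force a full update on every cached stream and plane;
                      * the hardware state was lost across the reset.
                      */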
2162                 for (i = 0; i < dc_state->stream_count; i++) {
2163                         dc_state->streams[i]->mode_changed = true;
2164                         for (j = 0; j < dc_state->stream_status[i].plane_count; j++) {
2165                                 dc_state->stream_status[i].plane_states[j]->update_flags.raw
2166                                         = 0xffffffff;
2167                         }
2168                 }
2169
2170                 WARN_ON(!dc_commit_state(dm->dc, dc_state));
2171
2172                 dm_gpureset_commit_state(dm->cached_dc_state, dm);
2173
2174                 dm_gpureset_toggle_interrupts(adev, dm->cached_dc_state, true);
2175
2176                 dc_release_state(dm->cached_dc_state);
2177                 dm->cached_dc_state = NULL;
2178
2179                 amdgpu_dm_irq_resume_late(adev);
2180
2181                 mutex_unlock(&dm->dc_lock);
2182
2183                 return 0;
2184         }
2185         /* Recreate dc_state - DC invalidates it when setting power state to S3. */
2186         dc_release_state(dm_state->context);
2187         dm_state->context = dc_create_state(dm->dc);
2188         /* TODO: Remove dc_state->dccg, use dc->dccg directly. */
2189         dc_resource_state_construct(dm->dc, dm_state->context);
2190
2191         /* Before powering on DC we need to re-initialize DMUB. */
2192         r = dm_dmub_hw_init(adev);
2193         if (r)
2194                 DRM_ERROR("DMUB interface failed to initialize: status=%d\n", r);
2195
2196         /* power on hardware */
2197         dc_set_power_state(dm->dc, DC_ACPI_CM_POWER_STATE_D0);
2198
2199         /* program HPD filter */
2200         dc_resume(dm->dc);
2201
2202         /*
2203          * Enable the HPD Rx IRQ early; this must be done before setting the
2204          * mode, since short-pulse interrupts are used for MST.
2205          */
2206         amdgpu_dm_irq_resume_early(adev);
2207
2208         /* On resume we need to rewrite the MSTM control bits to enable MST */
2209         s3_handle_mst(ddev, false);
2210
2211         /* Do detection */
2212         drm_connector_list_iter_begin(ddev, &iter);
2213         drm_for_each_connector_iter(connector, &iter) {
2214                 aconnector = to_amdgpu_dm_connector(connector);
2215
2216                 /*
2217                  * Skip already-created MST connectors encountered while
2218                  * traversing the list; MST manages them itself.
2219                  */
2220                 if (aconnector->mst_port)
2221                         continue;
2222
2223                 mutex_lock(&aconnector->hpd_lock);
2224                 if (!dc_link_detect_sink(aconnector->dc_link, &new_connection_type))
2225                         DRM_ERROR("KMS: Failed to detect connector\n");
2226
2227                 if (aconnector->base.force && new_connection_type == dc_connection_none)
2228                         emulated_link_detect(aconnector->dc_link);
2229                 else
2230                         dc_link_detect(aconnector->dc_link, DETECT_REASON_HPD);
2231
2232                 if (aconnector->fake_enable && aconnector->dc_link->local_sink)
2233                         aconnector->fake_enable = false;
2234
2235                 if (aconnector->dc_sink)
2236                         dc_sink_release(aconnector->dc_sink);
2237                 aconnector->dc_sink = NULL;
2238                 amdgpu_dm_update_connector_after_detect(aconnector);
2239                 mutex_unlock(&aconnector->hpd_lock);
2240         }
2241         drm_connector_list_iter_end(&iter);
2242
2243         /* Force mode set in atomic commit */
2244         for_each_new_crtc_in_state(dm->cached_state, crtc, new_crtc_state, i)
2245                 new_crtc_state->active_changed = true;
2246
2247         /*
2248          * atomic_check is expected to create the dc states. We need to release
2249          * them here, since they were duplicated as part of the suspend
2250          * procedure.
2251          */
2252         for_each_new_crtc_in_state(dm->cached_state, crtc, new_crtc_state, i) {
2253                 dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
2254                 if (dm_new_crtc_state->stream) {
2255                         WARN_ON(kref_read(&dm_new_crtc_state->stream->refcount) > 1);
2256                         dc_stream_release(dm_new_crtc_state->stream);
2257                         dm_new_crtc_state->stream = NULL;
2258                 }
2259         }
2260
2261         for_each_new_plane_in_state(dm->cached_state, plane, new_plane_state, i) {
2262                 dm_new_plane_state = to_dm_plane_state(new_plane_state);
2263                 if (dm_new_plane_state->dc_state) {
2264                         WARN_ON(kref_read(&dm_new_plane_state->dc_state->refcount) > 1);
2265                         dc_plane_state_release(dm_new_plane_state->dc_state);
2266                         dm_new_plane_state->dc_state = NULL;
2267                 }
2268         }
2269
2270         drm_atomic_helper_resume(ddev, dm->cached_state);
2271
2272         dm->cached_state = NULL;
2273
2274 #ifdef CONFIG_DRM_AMD_SECURE_DISPLAY
2275         amdgpu_dm_crtc_secure_display_resume(adev);
2276 #endif
2277
2278         amdgpu_dm_irq_resume_late(adev);
2279
2280         amdgpu_dm_smu_write_watermarks_table(adev);
2281
2282         return 0;
2283 }
2284
2285 /**
2286  * DOC: DM Lifecycle
2287  *
2288  * DM (and consequently DC) is registered in the amdgpu base driver as a IP
2289  * block. When CONFIG_DRM_AMD_DC is enabled, the DM device IP block is added to
2290  * the base driver's device list to be initialized and torn down accordingly.
2291  *
2292  * The functions to do so are provided as hooks in &struct amd_ip_funcs.
2293  */
2294
2295 static const struct amd_ip_funcs amdgpu_dm_funcs = {
2296         .name = "dm",
2297         .early_init = dm_early_init,
2298         .late_init = dm_late_init,
2299         .sw_init = dm_sw_init,
2300         .sw_fini = dm_sw_fini,
2301         .hw_init = dm_hw_init,
2302         .hw_fini = dm_hw_fini,
2303         .suspend = dm_suspend,
2304         .resume = dm_resume,
2305         .is_idle = dm_is_idle,
2306         .wait_for_idle = dm_wait_for_idle,
2307         .check_soft_reset = dm_check_soft_reset,
2308         .soft_reset = dm_soft_reset,
2309         .set_clockgating_state = dm_set_clockgating_state,
2310         .set_powergating_state = dm_set_powergating_state,
2311 };
2312
2313 const struct amdgpu_ip_block_version dm_ip_block =
2314 {
2315         .type = AMD_IP_BLOCK_TYPE_DCE,
2316         .major = 1,
2317         .minor = 0,
2318         .rev = 0,
2319         .funcs = &amdgpu_dm_funcs,
2320 };
2321
2322
2323 /**
2324  * DOC: atomic
2325  *
2326  * *WIP*
2327  */
2328
2329 static const struct drm_mode_config_funcs amdgpu_dm_mode_funcs = {
2330         .fb_create = amdgpu_display_user_framebuffer_create,
2331         .get_format_info = amd_get_format_info,
2332         .output_poll_changed = drm_fb_helper_output_poll_changed,
2333         .atomic_check = amdgpu_dm_atomic_check,
2334         .atomic_commit = drm_atomic_helper_commit,
2335 };
2336
2337 static struct drm_mode_config_helper_funcs amdgpu_dm_mode_config_helperfuncs = {
2338         .atomic_commit_tail = amdgpu_dm_atomic_commit_tail
2339 };
2340
2341 static void update_connector_ext_caps(struct amdgpu_dm_connector *aconnector)
2342 {
2343         u32 max_cll, min_cll, max, min, q, r;
2344         struct amdgpu_dm_backlight_caps *caps;
2345         struct amdgpu_display_manager *dm;
2346         struct drm_connector *conn_base;
2347         struct amdgpu_device *adev;
2348         struct dc_link *link = NULL;
2349         static const u8 pre_computed_values[] = {
2350                 50, 51, 52, 53, 55, 56, 57, 58, 59, 61, 62, 63, 65, 66, 68, 69,
2351                 71, 72, 74, 75, 77, 79, 81, 82, 84, 86, 88, 90, 92, 94, 96, 98};
2352
2353         if (!aconnector || !aconnector->dc_link)
2354                 return;
2355
2356         link = aconnector->dc_link;
2357         if (link->connector_signal != SIGNAL_TYPE_EDP)
2358                 return;
2359
2360         conn_base = &aconnector->base;
2361         adev = drm_to_adev(conn_base->dev);
2362         dm = &adev->dm;
2363         caps = &dm->backlight_caps;
2364         caps->ext_caps = &aconnector->dc_link->dpcd_sink_ext_caps;
2365         caps->aux_support = false;
2366         max_cll = conn_base->hdr_sink_metadata.hdmi_type1.max_cll;
2367         min_cll = conn_base->hdr_sink_metadata.hdmi_type1.min_cll;
2368
2369         if (caps->ext_caps->bits.oled == 1 ||
2370             caps->ext_caps->bits.sdr_aux_backlight_control == 1 ||
2371             caps->ext_caps->bits.hdr_aux_backlight_control == 1)
2372                 caps->aux_support = true;
2373
2374         if (amdgpu_backlight == 0)
2375                 caps->aux_support = false;
2376         else if (amdgpu_backlight == 1)
2377                 caps->aux_support = true;
2378
2379         /* From the specification (CTA-861-G), the maximum luminance is
2380          * calculated as:
2381          *      Luminance = 50*2**(CV/32)
2382          * where CV is a one-byte value.
2383          * Evaluating this expression would require floating-point precision;
2384          * to avoid that complexity, we take advantage of the fact that CV is
2385          * divided by a constant. By Euclid's division algorithm, CV can be
2386          * written as CV = 32*q + r. Substituting CV in the luminance
2387          * expression gives 50*(2**q)*(2**(r/32)), so we only need to
2388          * pre-compute the value of 50*2**(r/32). The values were pre-computed
2389          * with the following Ruby line:
2390          *      (0...32).each {|cv| puts (50*2**(cv/32.0)).round}
2391          * The results can be verified against pre_computed_values.
2392          */
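             /*
              * Worked example: max_cll = 33 gives q = 1, r = 1, so
              * max = (1 << 1) * pre_computed_values[1] = 2 * 51 = 102,
              * vs. the exact 50 * 2**(33/32) ~= 102.2.
              */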
2394         q = max_cll >> 5;
2395         r = max_cll % 32;
2396         max = (1 << q) * pre_computed_values[r];
2397
2398         /* min luminance: maxLum * (CV/255)^2 / 100 */
2399         q = DIV_ROUND_CLOSEST(min_cll, 255);
2400         min = max * DIV_ROUND_CLOSEST((q * q), 100);
2401
2402         caps->aux_max_input_signal = max;
2403         caps->aux_min_input_signal = min;
2404 }
2405
2406 void amdgpu_dm_update_connector_after_detect(
2407                 struct amdgpu_dm_connector *aconnector)
2408 {
2409         struct drm_connector *connector = &aconnector->base;
2410         struct drm_device *dev = connector->dev;
2411         struct dc_sink *sink;
2412
2413         /* MST handled by drm_mst framework */
2414         if (aconnector->mst_mgr.mst_state)
2415                 return;
2416
2417         sink = aconnector->dc_link->local_sink;
2418         if (sink)
2419                 dc_sink_retain(sink);
2420
2421         /*
2422          * An EDID-managed connector gets its first update only in the mode_valid
2423          * hook; then the connector sink is set to a fake or physical sink,
2424          * depending on the link status. Skip if already done during boot.
2425          */
2426         if (aconnector->base.force != DRM_FORCE_UNSPECIFIED
2427                         && aconnector->dc_em_sink) {
2428
2429                 /*
2430                  * For headless S3 resume, use the emulated sink (dc_em_sink) to
2431                  * fake a stream, because connector->sink is set to NULL on resume.
2432                  */
2433                 mutex_lock(&dev->mode_config.mutex);
2434
2435                 if (sink) {
2436                         if (aconnector->dc_sink) {
2437                                 amdgpu_dm_update_freesync_caps(connector, NULL);
2438                                 /*
2439                                  * The retain/release below bump the sink's refcount:
2440                                  * the link no longer points to it after disconnect,
2441                                  * so the next crtc-to-connector reshuffle by the UMD
2442                                  * would otherwise trigger an unwanted dc_sink release.
2443                                  */
2444                                 dc_sink_release(aconnector->dc_sink);
2445                         }
2446                         aconnector->dc_sink = sink;
2447                         dc_sink_retain(aconnector->dc_sink);
2448                         amdgpu_dm_update_freesync_caps(connector,
2449                                         aconnector->edid);
2450                 } else {
2451                         amdgpu_dm_update_freesync_caps(connector, NULL);
2452                         if (!aconnector->dc_sink) {
2453                                 aconnector->dc_sink = aconnector->dc_em_sink;
2454                                 dc_sink_retain(aconnector->dc_sink);
2455                         }
2456                 }
2457
2458                 mutex_unlock(&dev->mode_config.mutex);
2459
2460                 if (sink)
2461                         dc_sink_release(sink);
2462                 return;
2463         }
2464
2465         /*
2466          * TODO: temporary guard until a proper fix is found.
2467          * If this sink is an MST sink, we should not do anything.
2468          */
2469         if (sink && sink->sink_signal == SIGNAL_TYPE_DISPLAY_PORT_MST) {
2470                 dc_sink_release(sink);
2471                 return;
2472         }
2473
2474         if (aconnector->dc_sink == sink) {
2475                 /*
2476                  * We got a DP short pulse (Link Loss, DP CTS, etc...).
2477                  * Do nothing!!
2478                  */
2479                 DRM_DEBUG_DRIVER("DCHPD: connector_id=%d: dc_sink didn't change.\n",
2480                                 aconnector->connector_id);
2481                 if (sink)
2482                         dc_sink_release(sink);
2483                 return;
2484         }
2485
2486         DRM_DEBUG_DRIVER("DCHPD: connector_id=%d: Old sink=%p New sink=%p\n",
2487                 aconnector->connector_id, aconnector->dc_sink, sink);
2488
2489         mutex_lock(&dev->mode_config.mutex);
2490
2491         /*
2492          * 1. Update status of the drm connector
2493          * 2. Send an event and let userspace tell us what to do
2494          */
2495         if (sink) {
2496                 /*
2497                  * TODO: check if we still need the S3 mode update workaround.
2498                  * If yes, put it here.
2499                  */
2500                 if (aconnector->dc_sink) {
2501                         amdgpu_dm_update_freesync_caps(connector, NULL);
2502                         dc_sink_release(aconnector->dc_sink);
2503                 }
2504
2505                 aconnector->dc_sink = sink;
2506                 dc_sink_retain(aconnector->dc_sink);
2507                 if (sink->dc_edid.length == 0) {
2508                         aconnector->edid = NULL;
2509                         if (aconnector->dc_link->aux_mode) {
2510                                 drm_dp_cec_unset_edid(
2511                                         &aconnector->dm_dp_aux.aux);
2512                         }
2513                 } else {
2514                         aconnector->edid =
2515                                 (struct edid *)sink->dc_edid.raw_edid;
2516
2517                         drm_connector_update_edid_property(connector,
2518                                                            aconnector->edid);
2519                         if (aconnector->dc_link->aux_mode)
2520                                 drm_dp_cec_set_edid(&aconnector->dm_dp_aux.aux,
2521                                                     aconnector->edid);
2522                 }
2523
2524                 amdgpu_dm_update_freesync_caps(connector, aconnector->edid);
2525                 update_connector_ext_caps(aconnector);
2526         } else {
2527                 drm_dp_cec_unset_edid(&aconnector->dm_dp_aux.aux);
2528                 amdgpu_dm_update_freesync_caps(connector, NULL);
2529                 drm_connector_update_edid_property(connector, NULL);
2530                 aconnector->num_modes = 0;
2531                 dc_sink_release(aconnector->dc_sink);
2532                 aconnector->dc_sink = NULL;
2533                 aconnector->edid = NULL;
2534 #ifdef CONFIG_DRM_AMD_DC_HDCP
2535                 /* Set CP to DESIRED if it was ENABLED, so we can re-enable it again on hotplug */
2536                 if (connector->state->content_protection == DRM_MODE_CONTENT_PROTECTION_ENABLED)
2537                         connector->state->content_protection = DRM_MODE_CONTENT_PROTECTION_DESIRED;
2538 #endif
2539         }
2540
2541         mutex_unlock(&dev->mode_config.mutex);
2542
2543         update_subconnector_property(aconnector);
2544
2545         if (sink)
2546                 dc_sink_release(sink);
2547 }
2548
2549 static void handle_hpd_irq(void *param)
2550 {
2551         struct amdgpu_dm_connector *aconnector = (struct amdgpu_dm_connector *)param;
2552         struct drm_connector *connector = &aconnector->base;
2553         struct drm_device *dev = connector->dev;
2554         enum dc_connection_type new_connection_type = dc_connection_none;
2555 #ifdef CONFIG_DRM_AMD_DC_HDCP
2556         struct amdgpu_device *adev = drm_to_adev(dev);
2557         struct dm_connector_state *dm_con_state = to_dm_connector_state(connector->state);
2558 #endif
2559
2560         /*
2561          * On failure, or for MST, there is no need to update the connector
2562          * status or notify the OS, since MST does this in its own context.
2563          */
2564         mutex_lock(&aconnector->hpd_lock);
2565
2566 #ifdef CONFIG_DRM_AMD_DC_HDCP
2567         if (adev->dm.hdcp_workqueue) {
2568                 hdcp_reset_display(adev->dm.hdcp_workqueue, aconnector->dc_link->link_index);
2569                 dm_con_state->update_hdcp = true;
2570         }
2571 #endif
2572         if (aconnector->fake_enable)
2573                 aconnector->fake_enable = false;
2574
2575         if (!dc_link_detect_sink(aconnector->dc_link, &new_connection_type))
2576                 DRM_ERROR("KMS: Failed to detect connector\n");
2577
2578         if (aconnector->base.force && new_connection_type == dc_connection_none) {
2579                 emulated_link_detect(aconnector->dc_link);
2580
2581
2582                 drm_modeset_lock_all(dev);
2583                 dm_restore_drm_connector_state(dev, connector);
2584                 drm_modeset_unlock_all(dev);
2585
2586                 if (aconnector->base.force == DRM_FORCE_UNSPECIFIED)
2587                         drm_kms_helper_hotplug_event(dev);
2588
2589         } else if (dc_link_detect(aconnector->dc_link, DETECT_REASON_HPD)) {
2590                 if (new_connection_type == dc_connection_none &&
2591                     aconnector->dc_link->type == dc_connection_none)
2592                         dm_set_dpms_off(aconnector->dc_link);
2593
2594                 amdgpu_dm_update_connector_after_detect(aconnector);
2595
2596                 drm_modeset_lock_all(dev);
2597                 dm_restore_drm_connector_state(dev, connector);
2598                 drm_modeset_unlock_all(dev);
2599
2600                 if (aconnector->base.force == DRM_FORCE_UNSPECIFIED)
2601                         drm_kms_helper_hotplug_event(dev);
2602         }
2603         mutex_unlock(&aconnector->hpd_lock);
2604
2605 }
2606
2607 static void dm_handle_hpd_rx_irq(struct amdgpu_dm_connector *aconnector)
2608 {
2609         uint8_t esi[DP_PSR_ERROR_STATUS - DP_SINK_COUNT_ESI] = { 0 };
2610         uint8_t dret;
2611         bool new_irq_handled = false;
2612         int dpcd_addr;
2613         int dpcd_bytes_to_read;
2614
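             /* Bound the ESI read/ACK loop so a stuck sink cannot wedge the handler. */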
2615         const int max_process_count = 30;
2616         int process_count = 0;
2617
2618         const struct dc_link_status *link_status = dc_link_get_status(aconnector->dc_link);
2619
2620         if (link_status->dpcd_caps->dpcd_rev.raw < 0x12) {
2621                 dpcd_bytes_to_read = DP_LANE0_1_STATUS - DP_SINK_COUNT;
2622                 /* DPCD 0x200 - 0x201 for downstream IRQ */
2623                 dpcd_addr = DP_SINK_COUNT;
2624         } else {
2625                 dpcd_bytes_to_read = DP_PSR_ERROR_STATUS - DP_SINK_COUNT_ESI;
2626                 /* DPCD 0x2002 - 0x2005 for downstream IRQ */
2627                 dpcd_addr = DP_SINK_COUNT_ESI;
2628         }
2629
2630         dret = drm_dp_dpcd_read(
2631                 &aconnector->dm_dp_aux.aux,
2632                 dpcd_addr,
2633                 esi,
2634                 dpcd_bytes_to_read);
2635
2636         while (dret == dpcd_bytes_to_read &&
2637                 process_count < max_process_count) {
2638                 uint8_t retry;
2639                 dret = 0;
2640
2641                 process_count++;
2642
2643                 DRM_DEBUG_DRIVER("ESI %02x %02x %02x\n", esi[0], esi[1], esi[2]);
2644                 /* handle HPD short pulse irq */
2645                 if (aconnector->mst_mgr.mst_state)
2646                         drm_dp_mst_hpd_irq(
2647                                 &aconnector->mst_mgr,
2648                                 esi,
2649                                 &new_irq_handled);
2650
2651                 if (new_irq_handled) {
2652                         /* ACK via DPCD to notify downstream */
2653                         const int ack_dpcd_bytes_to_write =
2654                                 dpcd_bytes_to_read - 1;
2655
2656                         for (retry = 0; retry < 3; retry++) {
2657                                 uint8_t wret;
2658
2659                                 wret = drm_dp_dpcd_write(
2660                                         &aconnector->dm_dp_aux.aux,
2661                                         dpcd_addr + 1,
2662                                         &esi[1],
2663                                         ack_dpcd_bytes_to_write);
2664                                 if (wret == ack_dpcd_bytes_to_write)
2665                                         break;
2666                         }
2667
2668                         /* check if there is new irq to be handled */
2669                         dret = drm_dp_dpcd_read(
2670                                 &aconnector->dm_dp_aux.aux,
2671                                 dpcd_addr,
2672                                 esi,
2673                                 dpcd_bytes_to_read);
2674
2675                         new_irq_handled = false;
2676                 } else {
2677                         break;
2678                 }
2679         }
2680
2681         if (process_count == max_process_count)
2682                 DRM_DEBUG_DRIVER("Loop exceeded max iterations\n");
2683 }
2684
2685 static void handle_hpd_rx_irq(void *param)
2686 {
2687         struct amdgpu_dm_connector *aconnector = (struct amdgpu_dm_connector *)param;
2688         struct drm_connector *connector = &aconnector->base;
2689         struct drm_device *dev = connector->dev;
2690         struct dc_link *dc_link = aconnector->dc_link;
2691         bool is_mst_root_connector = aconnector->mst_mgr.mst_state;
2692         bool result = false;
2693         enum dc_connection_type new_connection_type = dc_connection_none;
2694         struct amdgpu_device *adev = drm_to_adev(dev);
2695         union hpd_irq_data hpd_irq_data;
2696
2697         memset(&hpd_irq_data, 0, sizeof(hpd_irq_data));
2698
2699         /*
2700          * TODO: Temporarily take a mutex so the HPD interrupt does not hit a
2701          * GPIO conflict; once the i2c helper is implemented, this mutex
2702          * should be retired.
2703          */
2704         if (dc_link->type != dc_connection_mst_branch)
2705                 mutex_lock(&aconnector->hpd_lock);
2706
2707         read_hpd_rx_irq_data(dc_link, &hpd_irq_data);
2708
2709         if ((dc_link->cur_link_settings.lane_count != LANE_COUNT_UNKNOWN) ||
2710                 (dc_link->type == dc_connection_mst_branch)) {
2711                 if (hpd_irq_data.bytes.device_service_irq.bits.UP_REQ_MSG_RDY) {
2712                         result = true;
2713                         dm_handle_hpd_rx_irq(aconnector);
2714                         goto out;
2715                 } else if (hpd_irq_data.bytes.device_service_irq.bits.DOWN_REP_MSG_RDY) {
2716                         result = false;
2717                         dm_handle_hpd_rx_irq(aconnector);
2718                         goto out;
2719                 }
2720         }
2721
2722         mutex_lock(&adev->dm.dc_lock);
2723 #ifdef CONFIG_DRM_AMD_DC_HDCP
2724         result = dc_link_handle_hpd_rx_irq(dc_link, &hpd_irq_data, NULL);
2725 #else
2726         result = dc_link_handle_hpd_rx_irq(dc_link, NULL, NULL);
2727 #endif
2728         mutex_unlock(&adev->dm.dc_lock);
2729
2730 out:
2731         if (result && !is_mst_root_connector) {
2732                 /* Downstream Port status changed. */
2733                 if (!dc_link_detect_sink(dc_link, &new_connection_type))
2734                         DRM_ERROR("KMS: Failed to detect connector\n");
2735
2736                 if (aconnector->base.force && new_connection_type == dc_connection_none) {
2737                         emulated_link_detect(dc_link);
2738
2739                         if (aconnector->fake_enable)
2740                                 aconnector->fake_enable = false;
2741
2742                         amdgpu_dm_update_connector_after_detect(aconnector);
2743
2744
2745                         drm_modeset_lock_all(dev);
2746                         dm_restore_drm_connector_state(dev, connector);
2747                         drm_modeset_unlock_all(dev);
2748
2749                         drm_kms_helper_hotplug_event(dev);
2750                 } else if (dc_link_detect(dc_link, DETECT_REASON_HPDRX)) {
2751
2752                         if (aconnector->fake_enable)
2753                                 aconnector->fake_enable = false;
2754
2755                         amdgpu_dm_update_connector_after_detect(aconnector);
2756
2757
2758                         drm_modeset_lock_all(dev);
2759                         dm_restore_drm_connector_state(dev, connector);
2760                         drm_modeset_unlock_all(dev);
2761
2762                         drm_kms_helper_hotplug_event(dev);
2763                 }
2764         }
2765 #ifdef CONFIG_DRM_AMD_DC_HDCP
2766         if (hpd_irq_data.bytes.device_service_irq.bits.CP_IRQ) {
2767                 if (adev->dm.hdcp_workqueue)
2768                         hdcp_handle_cpirq(adev->dm.hdcp_workqueue,  aconnector->base.index);
2769         }
2770 #endif
2771
2772         if (dc_link->type != dc_connection_mst_branch) {
2773                 drm_dp_cec_irq(&aconnector->dm_dp_aux.aux);
2774                 mutex_unlock(&aconnector->hpd_lock);
2775         }
2776 }
2777
2778 static void register_hpd_handlers(struct amdgpu_device *adev)
2779 {
2780         struct drm_device *dev = adev_to_drm(adev);
2781         struct drm_connector *connector;
2782         struct amdgpu_dm_connector *aconnector;
2783         const struct dc_link *dc_link;
2784         struct dc_interrupt_params int_params = {0};
2785
2786         int_params.requested_polarity = INTERRUPT_POLARITY_DEFAULT;
2787         int_params.current_polarity = INTERRUPT_POLARITY_DEFAULT;
2788
2789         list_for_each_entry(connector,
2790                         &dev->mode_config.connector_list, head) {
2791
2792                 aconnector = to_amdgpu_dm_connector(connector);
2793                 dc_link = aconnector->dc_link;
2794
2795                 if (DC_IRQ_SOURCE_INVALID != dc_link->irq_source_hpd) {
2796                         int_params.int_context = INTERRUPT_LOW_IRQ_CONTEXT;
2797                         int_params.irq_source = dc_link->irq_source_hpd;
2798
2799                         amdgpu_dm_irq_register_interrupt(adev, &int_params,
2800                                         handle_hpd_irq,
2801                                         (void *) aconnector);
2802                 }
2803
2804                 if (DC_IRQ_SOURCE_INVALID != dc_link->irq_source_hpd_rx) {
2805
2806                         /* Also register for DP short pulse (hpd_rx). */
2807                         int_params.int_context = INTERRUPT_LOW_IRQ_CONTEXT;
2808                         int_params.irq_source = dc_link->irq_source_hpd_rx;
2809
2810                         amdgpu_dm_irq_register_interrupt(adev, &int_params,
2811                                         handle_hpd_rx_irq,
2812                                         (void *) aconnector);
2813                 }
2814         }
2815 }
2816
2817 #if defined(CONFIG_DRM_AMD_DC_SI)
2818 /* Register IRQ sources and initialize IRQ callbacks */
2819 static int dce60_register_irq_handlers(struct amdgpu_device *adev)
2820 {
2821         struct dc *dc = adev->dm.dc;
2822         struct common_irq_params *c_irq_params;
2823         struct dc_interrupt_params int_params = {0};
2824         int r;
2825         int i;
2826         unsigned client_id = AMDGPU_IRQ_CLIENTID_LEGACY;
2827
2828         int_params.requested_polarity = INTERRUPT_POLARITY_DEFAULT;
2829         int_params.current_polarity = INTERRUPT_POLARITY_DEFAULT;
2830
2831         /*
2832          * Actions of amdgpu_irq_add_id():
2833          * 1. Register a set() function with base driver.
2834          *    Base driver will call set() function to enable/disable an
2835          *    interrupt in DC hardware.
2836          * 2. Register amdgpu_dm_irq_handler().
2837          *    Base driver will call amdgpu_dm_irq_handler() for ALL interrupts
2838          *    coming from DC hardware.
2839          *    amdgpu_dm_irq_handler() will re-direct the interrupt to DC
2840          *    for acknowledging and handling. */
2841
2842         /* Use VBLANK interrupt */
2843         for (i = 0; i < adev->mode_info.num_crtc; i++) {
2844                 r = amdgpu_irq_add_id(adev, client_id, i + 1, &adev->crtc_irq);
2845                 if (r) {
2846                         DRM_ERROR("Failed to add crtc irq id!\n");
2847                         return r;
2848                 }
2849
2850                 int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
2851                 int_params.irq_source =
2852                         dc_interrupt_to_irq_source(dc, i + 1, 0);
2853
2854                 c_irq_params = &adev->dm.vblank_params[int_params.irq_source - DC_IRQ_SOURCE_VBLANK1];
2855
2856                 c_irq_params->adev = adev;
2857                 c_irq_params->irq_src = int_params.irq_source;
2858
2859                 amdgpu_dm_irq_register_interrupt(adev, &int_params,
2860                                 dm_crtc_high_irq, c_irq_params);
2861         }
2862
2863         /* Use GRPH_PFLIP interrupt */
2864         for (i = VISLANDS30_IV_SRCID_D1_GRPH_PFLIP;
2865                         i <= VISLANDS30_IV_SRCID_D6_GRPH_PFLIP; i += 2) {
2866                 r = amdgpu_irq_add_id(adev, client_id, i, &adev->pageflip_irq);
2867                 if (r) {
2868                         DRM_ERROR("Failed to add page flip irq id!\n");
2869                         return r;
2870                 }
2871
2872                 int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
2873                 int_params.irq_source =
2874                         dc_interrupt_to_irq_source(dc, i, 0);
2875
2876                 c_irq_params = &adev->dm.pflip_params[int_params.irq_source - DC_IRQ_SOURCE_PFLIP_FIRST];
2877
2878                 c_irq_params->adev = adev;
2879                 c_irq_params->irq_src = int_params.irq_source;
2880
2881                 amdgpu_dm_irq_register_interrupt(adev, &int_params,
2882                                 dm_pflip_high_irq, c_irq_params);
2883
2884         }
2885
2886         /* HPD */
2887         r = amdgpu_irq_add_id(adev, client_id,
2888                         VISLANDS30_IV_SRCID_HOTPLUG_DETECT_A, &adev->hpd_irq);
2889         if (r) {
2890                 DRM_ERROR("Failed to add hpd irq id!\n");
2891                 return r;
2892         }
2893
2894         register_hpd_handlers(adev);
2895
2896         return 0;
2897 }
2898 #endif
2899
2900 /* Register IRQ sources and initialize IRQ callbacks */
2901 static int dce110_register_irq_handlers(struct amdgpu_device *adev)
2902 {
2903         struct dc *dc = adev->dm.dc;
2904         struct common_irq_params *c_irq_params;
2905         struct dc_interrupt_params int_params = {0};
2906         int r;
2907         int i;
2908         unsigned client_id = AMDGPU_IRQ_CLIENTID_LEGACY;
2909
2910         if (adev->asic_type >= CHIP_VEGA10)
2911                 client_id = SOC15_IH_CLIENTID_DCE;
2912
2913         int_params.requested_polarity = INTERRUPT_POLARITY_DEFAULT;
2914         int_params.current_polarity = INTERRUPT_POLARITY_DEFAULT;
2915
2916         /*
2917          * Actions of amdgpu_irq_add_id():
2918          * 1. Register a set() function with base driver.
2919          *    Base driver will call set() function to enable/disable an
2920          *    interrupt in DC hardware.
2921          * 2. Register amdgpu_dm_irq_handler().
2922          *    Base driver will call amdgpu_dm_irq_handler() for ALL interrupts
2923          *    coming from DC hardware.
2924          *    amdgpu_dm_irq_handler() will re-direct the interrupt to DC
2925          *    for acknowledging and handling. */
2926
2927         /* Use VBLANK interrupt */
2928         for (i = VISLANDS30_IV_SRCID_D1_VERTICAL_INTERRUPT0; i <= VISLANDS30_IV_SRCID_D6_VERTICAL_INTERRUPT0; i++) {
2929                 r = amdgpu_irq_add_id(adev, client_id, i, &adev->crtc_irq);
2930                 if (r) {
2931                         DRM_ERROR("Failed to add crtc irq id!\n");
2932                         return r;
2933                 }
2934
2935                 int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
2936                 int_params.irq_source =
2937                         dc_interrupt_to_irq_source(dc, i, 0);
2938
2939                 c_irq_params = &adev->dm.vblank_params[int_params.irq_source - DC_IRQ_SOURCE_VBLANK1];
2940
2941                 c_irq_params->adev = adev;
2942                 c_irq_params->irq_src = int_params.irq_source;
2943
2944                 amdgpu_dm_irq_register_interrupt(adev, &int_params,
2945                                 dm_crtc_high_irq, c_irq_params);
2946         }
2947
2948         /* Use VUPDATE interrupt */
2949         for (i = VISLANDS30_IV_SRCID_D1_V_UPDATE_INT; i <= VISLANDS30_IV_SRCID_D6_V_UPDATE_INT; i += 2) {
2950                 r = amdgpu_irq_add_id(adev, client_id, i, &adev->vupdate_irq);
2951                 if (r) {
2952                         DRM_ERROR("Failed to add vupdate irq id!\n");
2953                         return r;
2954                 }
2955
2956                 int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
2957                 int_params.irq_source =
2958                         dc_interrupt_to_irq_source(dc, i, 0);
2959
2960                 c_irq_params = &adev->dm.vupdate_params[int_params.irq_source - DC_IRQ_SOURCE_VUPDATE1];
2961
2962                 c_irq_params->adev = adev;
2963                 c_irq_params->irq_src = int_params.irq_source;
2964
2965                 amdgpu_dm_irq_register_interrupt(adev, &int_params,
2966                                 dm_vupdate_high_irq, c_irq_params);
2967         }
2968
2969         /* Use GRPH_PFLIP interrupt */
2970         for (i = VISLANDS30_IV_SRCID_D1_GRPH_PFLIP;
2971                         i <= VISLANDS30_IV_SRCID_D6_GRPH_PFLIP; i += 2) {
2972                 r = amdgpu_irq_add_id(adev, client_id, i, &adev->pageflip_irq);
2973                 if (r) {
2974                         DRM_ERROR("Failed to add page flip irq id!\n");
2975                         return r;
2976                 }
2977
2978                 int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
2979                 int_params.irq_source =
2980                         dc_interrupt_to_irq_source(dc, i, 0);
2981
2982                 c_irq_params = &adev->dm.pflip_params[int_params.irq_source - DC_IRQ_SOURCE_PFLIP_FIRST];
2983
2984                 c_irq_params->adev = adev;
2985                 c_irq_params->irq_src = int_params.irq_source;
2986
2987                 amdgpu_dm_irq_register_interrupt(adev, &int_params,
2988                                 dm_pflip_high_irq, c_irq_params);
2989
2990         }
2991
2992         /* HPD */
2993         r = amdgpu_irq_add_id(adev, client_id,
2994                         VISLANDS30_IV_SRCID_HOTPLUG_DETECT_A, &adev->hpd_irq);
2995         if (r) {
2996                 DRM_ERROR("Failed to add hpd irq id!\n");
2997                 return r;
2998         }
2999
3000         register_hpd_handlers(adev);
3001
3002         return 0;
3003 }
3004
3005 #if defined(CONFIG_DRM_AMD_DC_DCN)
3006 /* Register IRQ sources and initialize IRQ callbacks */
3007 static int dcn10_register_irq_handlers(struct amdgpu_device *adev)
3008 {
3009         struct dc *dc = adev->dm.dc;
3010         struct common_irq_params *c_irq_params;
3011         struct dc_interrupt_params int_params = {0};
3012         int r;
3013         int i;
3014 #if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
3015         static const unsigned int vrtl_int_srcid[] = {
3016                 DCN_1_0__SRCID__OTG1_VERTICAL_INTERRUPT0_CONTROL,
3017                 DCN_1_0__SRCID__OTG2_VERTICAL_INTERRUPT0_CONTROL,
3018                 DCN_1_0__SRCID__OTG3_VERTICAL_INTERRUPT0_CONTROL,
3019                 DCN_1_0__SRCID__OTG4_VERTICAL_INTERRUPT0_CONTROL,
3020                 DCN_1_0__SRCID__OTG5_VERTICAL_INTERRUPT0_CONTROL,
3021                 DCN_1_0__SRCID__OTG6_VERTICAL_INTERRUPT0_CONTROL
3022         };
3023 #endif
3024
3025         int_params.requested_polarity = INTERRUPT_POLARITY_DEFAULT;
3026         int_params.current_polarity = INTERRUPT_POLARITY_DEFAULT;
3027
3028         /*
3029          * Actions of amdgpu_irq_add_id():
3030          * 1. Register a set() function with base driver.
3031          *    Base driver will call set() function to enable/disable an
3032          *    interrupt in DC hardware.
3033          * 2. Register amdgpu_dm_irq_handler().
3034          *    Base driver will call amdgpu_dm_irq_handler() for ALL interrupts
3035          *    coming from DC hardware.
3036          *    amdgpu_dm_irq_handler() will re-direct the interrupt to DC
3037          *    for acknowledging and handling.
3038          */
3039
3040         /* Use VSTARTUP interrupt */
3041         for (i = DCN_1_0__SRCID__DC_D1_OTG_VSTARTUP;
3042                         i <= DCN_1_0__SRCID__DC_D1_OTG_VSTARTUP + adev->mode_info.num_crtc - 1;
3043                         i++) {
3044                 r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_DCE, i, &adev->crtc_irq);
3045
3046                 if (r) {
3047                         DRM_ERROR("Failed to add crtc irq id!\n");
3048                         return r;
3049                 }
3050
3051                 int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
3052                 int_params.irq_source =
3053                         dc_interrupt_to_irq_source(dc, i, 0);
3054
3055                 c_irq_params = &adev->dm.vblank_params[int_params.irq_source - DC_IRQ_SOURCE_VBLANK1];
3056
3057                 c_irq_params->adev = adev;
3058                 c_irq_params->irq_src = int_params.irq_source;
3059
3060                 amdgpu_dm_irq_register_interrupt(
3061                         adev, &int_params, dm_crtc_high_irq, c_irq_params);
3062         }
3063
3064         /* Use otg vertical line interrupt */
3065 #if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
3066         for (i = 0; i <= adev->mode_info.num_crtc - 1; i++) {
3067                 r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_DCE,
3068                                 vrtl_int_srcid[i], &adev->vline0_irq);
3069
3070                 if (r) {
3071                         DRM_ERROR("Failed to add vline0 irq id!\n");
3072                         return r;
3073                 }
3074
3075                 int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
3076                 int_params.irq_source =
3077                         dc_interrupt_to_irq_source(dc, vrtl_int_srcid[i], 0);
3078
3079                 if (int_params.irq_source == DC_IRQ_SOURCE_INVALID) {
3080                         DRM_ERROR("Failed to register vline0 irq %d!\n", vrtl_int_srcid[i]);
3081                         break;
3082                 }
3083
3084                 c_irq_params = &adev->dm.vline0_params[int_params.irq_source
3085                                         - DC_IRQ_SOURCE_DC1_VLINE0];
3086
3087                 c_irq_params->adev = adev;
3088                 c_irq_params->irq_src = int_params.irq_source;
3089
3090                 amdgpu_dm_irq_register_interrupt(adev, &int_params,
3091                                 dm_dcn_vertical_interrupt0_high_irq, c_irq_params);
3092         }
3093 #endif
3094
3095         /* Use VUPDATE_NO_LOCK interrupt on DCN, which seems to correspond to
3096          * the regular VUPDATE interrupt on DCE. We want DC_IRQ_SOURCE_VUPDATEx
3097          * to trigger at end of each vblank, regardless of state of the lock,
3098          * matching DCE behaviour.
3099          */
3100         for (i = DCN_1_0__SRCID__OTG0_IHC_V_UPDATE_NO_LOCK_INTERRUPT;
3101              i <= DCN_1_0__SRCID__OTG0_IHC_V_UPDATE_NO_LOCK_INTERRUPT + adev->mode_info.num_crtc - 1;
3102              i++) {
3103                 r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_DCE, i, &adev->vupdate_irq);
3104
3105                 if (r) {
3106                         DRM_ERROR("Failed to add vupdate irq id!\n");
3107                         return r;
3108                 }
3109
3110                 int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
3111                 int_params.irq_source =
3112                         dc_interrupt_to_irq_source(dc, i, 0);
3113
3114                 c_irq_params = &adev->dm.vupdate_params[int_params.irq_source - DC_IRQ_SOURCE_VUPDATE1];
3115
3116                 c_irq_params->adev = adev;
3117                 c_irq_params->irq_src = int_params.irq_source;
3118
3119                 amdgpu_dm_irq_register_interrupt(adev, &int_params,
3120                                 dm_vupdate_high_irq, c_irq_params);
3121         }
3122
3123         /* Use GRPH_PFLIP interrupt */
3124         for (i = DCN_1_0__SRCID__HUBP0_FLIP_INTERRUPT;
3125                         i <= DCN_1_0__SRCID__HUBP0_FLIP_INTERRUPT + adev->mode_info.num_crtc - 1;
3126                         i++) {
3127                 r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_DCE, i, &adev->pageflip_irq);
3128                 if (r) {
3129                         DRM_ERROR("Failed to add page flip irq id!\n");
3130                         return r;
3131                 }
3132
3133                 int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
3134                 int_params.irq_source =
3135                         dc_interrupt_to_irq_source(dc, i, 0);
3136
3137                 c_irq_params = &adev->dm.pflip_params[int_params.irq_source - DC_IRQ_SOURCE_PFLIP_FIRST];
3138
3139                 c_irq_params->adev = adev;
3140                 c_irq_params->irq_src = int_params.irq_source;
3141
3142                 amdgpu_dm_irq_register_interrupt(adev, &int_params,
3143                                 dm_pflip_high_irq, c_irq_params);
3144
3145         }
3146
3147         if (dc->ctx->dmub_srv) {
3148                 i = DCN_1_0__SRCID__DMCUB_OUTBOX_HIGH_PRIORITY_READY_INT;
3149                 r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_DCE, i, &adev->dmub_trace_irq);
3150
3151                 if (r) {
3152                         DRM_ERROR("Failed to add dmub trace irq id!\n");
3153                         return r;
3154                 }
3155
3156                 int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
3157                 int_params.irq_source =
3158                         dc_interrupt_to_irq_source(dc, i, 0);
3159
3160                 c_irq_params = &adev->dm.dmub_trace_params[0];
3161
3162                 c_irq_params->adev = adev;
3163                 c_irq_params->irq_src = int_params.irq_source;
3164
3165                 amdgpu_dm_irq_register_interrupt(adev, &int_params,
3166                                 dm_dmub_trace_high_irq, c_irq_params);
3167         }
3168
3169         /* HPD */
3170         r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_DCE, DCN_1_0__SRCID__DC_HPD1_INT,
3171                         &adev->hpd_irq);
3172         if (r) {
3173                 DRM_ERROR("Failed to add hpd irq id!\n");
3174                 return r;
3175         }
3176
3177         register_hpd_handlers(adev);
3178
3179         return 0;
3180 }
3181 #endif
3182
3183 /*
3184  * Acquires the lock for the atomic state object and returns
3185  * the new atomic state.
3186  *
3187  * This should only be called during atomic check.
3188  */
3189 static int dm_atomic_get_state(struct drm_atomic_state *state,
3190                                struct dm_atomic_state **dm_state)
3191 {
3192         struct drm_device *dev = state->dev;
3193         struct amdgpu_device *adev = drm_to_adev(dev);
3194         struct amdgpu_display_manager *dm = &adev->dm;
3195         struct drm_private_state *priv_state;
3196
3197         if (*dm_state)
3198                 return 0;
3199
3200         priv_state = drm_atomic_get_private_obj_state(state, &dm->atomic_obj);
3201         if (IS_ERR(priv_state))
3202                 return PTR_ERR(priv_state);
3203
3204         *dm_state = to_dm_atomic_state(priv_state);
3205
3206         return 0;
3207 }
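
/*
 * A minimal usage sketch for an atomic-check path (illustrative only, not a
 * quote from this file):
 *
 *	struct dm_atomic_state *dm_state = NULL;
 *	int ret = dm_atomic_get_state(state, &dm_state);
 *
 *	if (ret)
 *		return ret;
 *	... dm_state->context may now be modified under the acquired lock ...
 */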
3208
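/*
 * Return the dm_atomic_state already tracked in @state, or NULL if this
 * commit never touched the DM private object.
 */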
3209 static struct dm_atomic_state *
3210 dm_atomic_get_new_state(struct drm_atomic_state *state)
3211 {
3212         struct drm_device *dev = state->dev;
3213         struct amdgpu_device *adev = drm_to_adev(dev);
3214         struct amdgpu_display_manager *dm = &adev->dm;
3215         struct drm_private_obj *obj;
3216         struct drm_private_state *new_obj_state;
3217         int i;
3218
3219         for_each_new_private_obj_in_state(state, obj, new_obj_state, i) {
3220                 if (obj->funcs == dm->atomic_obj.funcs)
3221                         return to_dm_atomic_state(new_obj_state);
3222         }
3223
3224         return NULL;
3225 }
3226
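/*
 * Duplicate hook for the DM private object: deep-copies the current DC
 * state via dc_copy_state() so that atomic check can mutate a scratch
 * dc_state without disturbing the one currently committed to hardware.
 */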
3227 static struct drm_private_state *
3228 dm_atomic_duplicate_state(struct drm_private_obj *obj)
3229 {
3230         struct dm_atomic_state *old_state, *new_state;
3231
3232         new_state = kzalloc(sizeof(*new_state), GFP_KERNEL);
3233         if (!new_state)
3234                 return NULL;
3235
3236         __drm_atomic_helper_private_obj_duplicate_state(obj, &new_state->base);
3237
3238         old_state = to_dm_atomic_state(obj->state);
3239
3240         if (old_state && old_state->context)
3241                 new_state->context = dc_copy_state(old_state->context);
3242
3243         if (!new_state->context) {
3244                 kfree(new_state);
3245                 return NULL;
3246         }
3247
3248         return &new_state->base;
3249 }
3250
3251 static void dm_atomic_destroy_state(struct drm_private_obj *obj,
3252                                     struct drm_private_state *state)
3253 {
3254         struct dm_atomic_state *dm_state = to_dm_atomic_state(state);
3255
3256         if (dm_state && dm_state->context)
3257                 dc_release_state(dm_state->context);
3258
3259         kfree(dm_state);
3260 }
3261
3262 static struct drm_private_state_funcs dm_atomic_state_funcs = {
3263         .atomic_duplicate_state = dm_atomic_duplicate_state,
3264         .atomic_destroy_state = dm_atomic_destroy_state,
3265 };
3266
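/*
 * Set up the DRM mode_config limits for this device, seed the DM private
 * atomic object with a copy of DC's current resource state, and create the
 * modeset properties and audio component. Any failure unwinds the dc_state
 * allocated here.
 */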
3267 static int amdgpu_dm_mode_config_init(struct amdgpu_device *adev)
3268 {
3269         struct dm_atomic_state *state;
3270         int r;
3271
3272         adev->mode_info.mode_config_initialized = true;
3273
3274         adev_to_drm(adev)->mode_config.funcs = (void *)&amdgpu_dm_mode_funcs;
3275         adev_to_drm(adev)->mode_config.helper_private = &amdgpu_dm_mode_config_helperfuncs;
3276
3277         adev_to_drm(adev)->mode_config.max_width = 16384;
3278         adev_to_drm(adev)->mode_config.max_height = 16384;
3279
3280         adev_to_drm(adev)->mode_config.preferred_depth = 24;
3281         adev_to_drm(adev)->mode_config.prefer_shadow = 1;
3282         /* indicates support for immediate flip */
3283         adev_to_drm(adev)->mode_config.async_page_flip = true;
3284
3285         adev_to_drm(adev)->mode_config.fb_base = adev->gmc.aper_base;
3286
3287         state = kzalloc(sizeof(*state), GFP_KERNEL);
3288         if (!state)
3289                 return -ENOMEM;
3290
3291         state->context = dc_create_state(adev->dm.dc);
3292         if (!state->context) {
3293                 kfree(state);
3294                 return -ENOMEM;
3295         }
3296
3297         dc_resource_state_copy_construct_current(adev->dm.dc, state->context);
3298
3299         drm_atomic_private_obj_init(adev_to_drm(adev),
3300                                     &adev->dm.atomic_obj,
3301                                     &state->base,
3302                                     &dm_atomic_state_funcs);
3303
3304         r = amdgpu_display_modeset_create_props(adev);
3305         if (r) {
3306                 dc_release_state(state->context);
3307                 kfree(state);
3308                 return r;
3309         }
3310
3311         r = amdgpu_dm_audio_init(adev);
3312         if (r) {
3313                 dc_release_state(state->context);
3314                 kfree(state);
3315                 return r;
3316         }
3317
3318         return 0;
3319 }
3320
3321 #define AMDGPU_DM_DEFAULT_MIN_BACKLIGHT 12
3322 #define AMDGPU_DM_DEFAULT_MAX_BACKLIGHT 255
3323 #define AUX_BL_DEFAULT_TRANSITION_TIME_MS 50
3324
3325 #if defined(CONFIG_BACKLIGHT_CLASS_DEVICE) ||\
3326         defined(CONFIG_BACKLIGHT_CLASS_DEVICE_MODULE)
3327
3328 static void amdgpu_dm_update_backlight_caps(struct amdgpu_display_manager *dm)
3329 {
3330 #if defined(CONFIG_ACPI)
3331         struct amdgpu_dm_backlight_caps caps;
3332
3333         memset(&caps, 0, sizeof(caps));
3334
3335         if (dm->backlight_caps.caps_valid)
3336                 return;
3337
3338         amdgpu_acpi_get_backlight_caps(dm->adev, &caps);
3339         if (caps.caps_valid) {
3340                 dm->backlight_caps.caps_valid = true;
3341                 if (caps.aux_support)
3342                         return;
3343                 dm->backlight_caps.min_input_signal = caps.min_input_signal;
3344                 dm->backlight_caps.max_input_signal = caps.max_input_signal;
3345         } else {
3346                 dm->backlight_caps.min_input_signal =
3347                                 AMDGPU_DM_DEFAULT_MIN_BACKLIGHT;
3348                 dm->backlight_caps.max_input_signal =
3349                                 AMDGPU_DM_DEFAULT_MAX_BACKLIGHT;
3350         }
3351 #else
3352         if (dm->backlight_caps.aux_support)
3353                 return;
3354
3355         dm->backlight_caps.min_input_signal = AMDGPU_DM_DEFAULT_MIN_BACKLIGHT;
3356         dm->backlight_caps.max_input_signal = AMDGPU_DM_DEFAULT_MAX_BACKLIGHT;
3357 #endif
3358 }
3359
3360 static int get_brightness_range(const struct amdgpu_dm_backlight_caps *caps,
3361                                 unsigned *min, unsigned *max)
3362 {
3363         if (!caps)
3364                 return 0;
3365
3366         if (caps->aux_support) {
3367                 // Firmware limits are in nits, DC API wants millinits.
3368                 *max = 1000 * caps->aux_max_input_signal;
3369                 *min = 1000 * caps->aux_min_input_signal;
3370         } else {
3371                 // Firmware limits are 8-bit, PWM control is 16-bit.
3372                 *max = 0x101 * caps->max_input_signal;
3373                 *min = 0x101 * caps->min_input_signal;
3374         }
3375         return 1;
3376 }
3377
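/*
 * Worked example for the PWM path, assuming the ACPI defaults above
 * (min_input_signal = 12, max_input_signal = 255): the DC range becomes
 * min = 0x101 * 12 = 3084 and max = 0x101 * 255 = 65535, so a user
 * brightness of 128 maps to
 * 3084 + DIV_ROUND_CLOSEST((65535 - 3084) * 128, 255) = 34432.
 */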
3378 static u32 convert_brightness_from_user(const struct amdgpu_dm_backlight_caps *caps,
3379                                         uint32_t brightness)
3380 {
3381         unsigned min, max;
3382
3383         if (!get_brightness_range(caps, &min, &max))
3384                 return brightness;
3385
3386         // Rescale 0..255 to min..max
3387         return min + DIV_ROUND_CLOSEST((max - min) * brightness,
3388                                        AMDGPU_MAX_BL_LEVEL);
3389 }
3390
3391 static u32 convert_brightness_to_user(const struct amdgpu_dm_backlight_caps *caps,
3392                                       uint32_t brightness)
3393 {
3394         unsigned min, max;
3395
3396         if (!get_brightness_range(caps, &min, &max))
3397                 return brightness;
3398
3399         if (brightness < min)
3400                 return 0;
3401         // Rescale min..max to 0..255
3402         return DIV_ROUND_CLOSEST(AMDGPU_MAX_BL_LEVEL * (brightness - min),
3403                                  max - min);
3404 }
3405
3406 static int amdgpu_dm_backlight_update_status(struct backlight_device *bd)
3407 {
3408         struct amdgpu_display_manager *dm = bl_get_data(bd);
3409         struct amdgpu_dm_backlight_caps caps;
3410         struct dc_link *link = NULL;
3411         u32 brightness;
3412         bool rc;
3413
3414         amdgpu_dm_update_backlight_caps(dm);
3415         caps = dm->backlight_caps;
3416
3417         link = (struct dc_link *)dm->backlight_link;
3418
3419         brightness = convert_brightness_from_user(&caps, bd->props.brightness);
3420         // Change brightness based on AUX property
3421         if (caps.aux_support)
3422                 rc = dc_link_set_backlight_level_nits(link, true, brightness,
3423                                                       AUX_BL_DEFAULT_TRANSITION_TIME_MS);
3424         else
3425                 rc = dc_link_set_backlight_level(dm->backlight_link, brightness, 0);
3426
3427         return rc ? 0 : 1;
3428 }
3429
3430 static int amdgpu_dm_backlight_get_brightness(struct backlight_device *bd)
3431 {
3432         struct amdgpu_display_manager *dm = bl_get_data(bd);
3433         struct amdgpu_dm_backlight_caps caps;
3434
3435         amdgpu_dm_update_backlight_caps(dm);
3436         caps = dm->backlight_caps;
3437
3438         if (caps.aux_support) {
3439                 struct dc_link *link = (struct dc_link *)dm->backlight_link;
3440                 u32 avg, peak;
3441                 bool rc;
3442
3443                 rc = dc_link_get_backlight_level_nits(link, &avg, &peak);
3444                 if (!rc)
3445                         return bd->props.brightness;
3446                 return convert_brightness_to_user(&caps, avg);
3447         } else {
3448                 int ret = dc_link_get_backlight_level(dm->backlight_link);
3449
3450                 if (ret == DC_ERROR_UNEXPECTED)
3451                         return bd->props.brightness;
3452                 return convert_brightness_to_user(&caps, ret);
3453         }
3454 }
3455
3456 static const struct backlight_ops amdgpu_dm_backlight_ops = {
3457         .options = BL_CORE_SUSPENDRESUME,
3458         .get_brightness = amdgpu_dm_backlight_get_brightness,
3459         .update_status  = amdgpu_dm_backlight_update_status,
3460 };
3461
3462 static void
3463 amdgpu_dm_register_backlight_device(struct amdgpu_display_manager *dm)
3464 {
3465         char bl_name[16];
3466         struct backlight_properties props = { 0 };
3467
3468         amdgpu_dm_update_backlight_caps(dm);
3469
3470         props.max_brightness = AMDGPU_MAX_BL_LEVEL;
3471         props.brightness = AMDGPU_MAX_BL_LEVEL;
3472         props.type = BACKLIGHT_RAW;
3473
3474         snprintf(bl_name, sizeof(bl_name), "amdgpu_bl%d",
3475                  adev_to_drm(dm->adev)->primary->index);
3476
3477         dm->backlight_dev = backlight_device_register(bl_name,
3478                                                       adev_to_drm(dm->adev)->dev,
3479                                                       dm,
3480                                                       &amdgpu_dm_backlight_ops,
3481                                                       &props);
3482
3483         if (IS_ERR(dm->backlight_dev))
3484                 DRM_ERROR("DM: Backlight registration failed!\n");
3485         else
3486                 DRM_DEBUG_DRIVER("DM: Registered Backlight device: %s\n", bl_name);
3487 }
3488
3489 #endif
3490
3491 static int initialize_plane(struct amdgpu_display_manager *dm,
3492                             struct amdgpu_mode_info *mode_info, int plane_id,
3493                             enum drm_plane_type plane_type,
3494                             const struct dc_plane_cap *plane_cap)
3495 {
3496         struct drm_plane *plane;
3497         unsigned long possible_crtcs;
3498         int ret = 0;
3499
3500         plane = kzalloc(sizeof(struct drm_plane), GFP_KERNEL);
3501         if (!plane) {
3502                 DRM_ERROR("KMS: Failed to allocate plane\n");
3503                 return -ENOMEM;
3504         }
3505         plane->type = plane_type;
3506
3507         /*
3508          * HACK: IGT tests expect that the primary plane for a CRTC
3509          * can only have one possible CRTC. Only expose support for
3510          * any CRTC if they're not going to be used as a primary plane
3511          * for a CRTC - like overlay or underlay planes.
3512          */
3513         possible_crtcs = 1 << plane_id;
3514         if (plane_id >= dm->dc->caps.max_streams)
3515                 possible_crtcs = 0xff;
3516
3517         ret = amdgpu_dm_plane_init(dm, plane, possible_crtcs, plane_cap);
3518
3519         if (ret) {
3520                 DRM_ERROR("KMS: Failed to initialize plane\n");
3521                 kfree(plane);
3522                 return ret;
3523         }
3524
3525         if (mode_info)
3526                 mode_info->planes[plane_id] = plane;
3527
3528         return ret;
3529 }
3530
3531
3532 static void register_backlight_device(struct amdgpu_display_manager *dm,
3533                                       struct dc_link *link)
3534 {
3535 #if defined(CONFIG_BACKLIGHT_CLASS_DEVICE) ||\
3536         defined(CONFIG_BACKLIGHT_CLASS_DEVICE_MODULE)
3537
3538         if ((link->connector_signal & (SIGNAL_TYPE_EDP | SIGNAL_TYPE_LVDS)) &&
3539             link->type != dc_connection_none) {
3540                 /*
3541                  * Even if registration fails, we should continue with
3542                  * DM initialization because not having a backlight control
3543                  * is better than a black screen.
3544                  */
3545                 amdgpu_dm_register_backlight_device(dm);
3546
3547                 if (dm->backlight_dev)
3548                         dm->backlight_link = link;
3549         }
3550 #endif
3551 }
3552
3553
3554 /*
3555  * In this architecture, the association
3556  * connector -> encoder -> crtc
3557  * is not really required. The crtc and connector will hold the
3558  * display_index as an abstraction to use with the DAL component.
3559  *
3560  * Returns 0 on success
3561  */
3562 static int amdgpu_dm_initialize_drm_device(struct amdgpu_device *adev)
3563 {
3564         struct amdgpu_display_manager *dm = &adev->dm;
3565         int32_t i;
3566         struct amdgpu_dm_connector *aconnector = NULL;
3567         struct amdgpu_encoder *aencoder = NULL;
3568         struct amdgpu_mode_info *mode_info = &adev->mode_info;
3569         uint32_t link_cnt;
3570         int32_t primary_planes;
3571         enum dc_connection_type new_connection_type = dc_connection_none;
3572         const struct dc_plane_cap *plane;
3573
3574         dm->display_indexes_num = dm->dc->caps.max_streams;
3575         /* Update the actual number of CRTCs in use */
3576         adev->mode_info.num_crtc = adev->dm.display_indexes_num;
3577
3578         link_cnt = dm->dc->caps.max_links;
3579         if (amdgpu_dm_mode_config_init(dm->adev)) {
3580                 DRM_ERROR("DM: Failed to initialize mode config\n");
3581                 return -EINVAL;
3582         }
3583
3584         /* There is one primary plane per CRTC */
3585         primary_planes = dm->dc->caps.max_streams;
3586         ASSERT(primary_planes <= AMDGPU_MAX_PLANES);
3587
3588         /*
3589          * Initialize primary planes, implicit planes for legacy IOCTLS.
3590          * Order is reversed to match iteration order in atomic check.
3591          */
3592         for (i = (primary_planes - 1); i >= 0; i--) {
3593                 plane = &dm->dc->caps.planes[i];
3594
3595                 if (initialize_plane(dm, mode_info, i,
3596                                      DRM_PLANE_TYPE_PRIMARY, plane)) {
3597                         DRM_ERROR("KMS: Failed to initialize primary plane\n");
3598                         goto fail;
3599                 }
3600         }
3601
3602         /*
3603          * Initialize overlay planes, index starting after primary planes.
3604          * These planes have a higher DRM index than the primary planes since
3605          * they should be considered as having a higher z-order.
3606          * Order is reversed to match iteration order in atomic check.
3607          *
3608          * Only support DCN for now, and only expose one so we don't encourage
3609          * userspace to use up all the pipes.
3610          */
3611         for (i = 0; i < dm->dc->caps.max_planes; ++i) {
3612                 struct dc_plane_cap *plane = &dm->dc->caps.planes[i];
3613
3614                 if (plane->type != DC_PLANE_TYPE_DCN_UNIVERSAL)
3615                         continue;
3616
3617                 if (!plane->blends_with_above || !plane->blends_with_below)
3618                         continue;
3619
3620                 if (!plane->pixel_format_support.argb8888)
3621                         continue;
3622
3623                 if (initialize_plane(dm, NULL, primary_planes + i,
3624                                      DRM_PLANE_TYPE_OVERLAY, plane)) {
3625                         DRM_ERROR("KMS: Failed to initialize overlay plane\n");
3626                         goto fail;
3627                 }
3628
3629                 /* Only create one overlay plane. */
3630                 break;
3631         }
3632
3633         for (i = 0; i < dm->dc->caps.max_streams; i++)
3634                 if (amdgpu_dm_crtc_init(dm, mode_info->planes[i], i)) {
3635                         DRM_ERROR("KMS: Failed to initialize crtc\n");
3636                         goto fail;
3637                 }
3638
3639         /* loops over all connectors on the board */
3640         for (i = 0; i < link_cnt; i++) {
3641                 struct dc_link *link = NULL;
3642
3643                 if (i > AMDGPU_DM_MAX_DISPLAY_INDEX) {
3644                         DRM_ERROR(
3645                                 "KMS: Cannot support more than %d display indexes\n",
3646                                         AMDGPU_DM_MAX_DISPLAY_INDEX);
3647                         continue;
3648                 }
3649
3650                 aconnector = kzalloc(sizeof(*aconnector), GFP_KERNEL);
3651                 if (!aconnector)
3652                         goto fail;
3653
3654                 aencoder = kzalloc(sizeof(*aencoder), GFP_KERNEL);
3655                 if (!aencoder)
3656                         goto fail;
3657
3658                 if (amdgpu_dm_encoder_init(dm->ddev, aencoder, i)) {
3659                         DRM_ERROR("KMS: Failed to initialize encoder\n");
3660                         goto fail;
3661                 }
3662
3663                 if (amdgpu_dm_connector_init(dm, aconnector, i, aencoder)) {
3664                         DRM_ERROR("KMS: Failed to initialize connector\n");
3665                         goto fail;
3666                 }
3667
3668                 link = dc_get_link_at_index(dm->dc, i);
3669
3670                 if (!dc_link_detect_sink(link, &new_connection_type))
3671                         DRM_ERROR("KMS: Failed to detect connector\n");
3672
3673                 if (aconnector->base.force && new_connection_type == dc_connection_none) {
3674                         emulated_link_detect(link);
3675                         amdgpu_dm_update_connector_after_detect(aconnector);
3676
3677                 } else if (dc_link_detect(link, DETECT_REASON_BOOT)) {
3678                         amdgpu_dm_update_connector_after_detect(aconnector);
3679                         register_backlight_device(dm, link);
3680                         if (amdgpu_dc_feature_mask & DC_PSR_MASK)
3681                                 amdgpu_dm_set_psr_caps(link);
3682                 }
3683
3684
3685         }
3686
3687         /* Software is initialized. Now we can register interrupt handlers. */
3688         switch (adev->asic_type) {
3689 #if defined(CONFIG_DRM_AMD_DC_SI)
3690         case CHIP_TAHITI:
3691         case CHIP_PITCAIRN:
3692         case CHIP_VERDE:
3693         case CHIP_OLAND:
3694                 if (dce60_register_irq_handlers(dm->adev)) {
3695                         DRM_ERROR("DM: Failed to initialize IRQ\n");
3696                         goto fail;
3697                 }
3698                 break;
3699 #endif
3700         case CHIP_BONAIRE:
3701         case CHIP_HAWAII:
3702         case CHIP_KAVERI:
3703         case CHIP_KABINI:
3704         case CHIP_MULLINS:
3705         case CHIP_TONGA:
3706         case CHIP_FIJI:
3707         case CHIP_CARRIZO:
3708         case CHIP_STONEY:
3709         case CHIP_POLARIS11:
3710         case CHIP_POLARIS10:
3711         case CHIP_POLARIS12:
3712         case CHIP_VEGAM:
3713         case CHIP_VEGA10:
3714         case CHIP_VEGA12:
3715         case CHIP_VEGA20:
3716                 if (dce110_register_irq_handlers(dm->adev)) {
3717                         DRM_ERROR("DM: Failed to initialize IRQ\n");
3718                         goto fail;
3719                 }
3720                 break;
3721 #if defined(CONFIG_DRM_AMD_DC_DCN)
3722         case CHIP_RAVEN:
3723         case CHIP_NAVI12:
3724         case CHIP_NAVI10:
3725         case CHIP_NAVI14:
3726         case CHIP_RENOIR:
3727         case CHIP_SIENNA_CICHLID:
3728         case CHIP_NAVY_FLOUNDER:
3729         case CHIP_DIMGREY_CAVEFISH:
3730         case CHIP_VANGOGH:
3731                 if (dcn10_register_irq_handlers(dm->adev)) {
3732                         DRM_ERROR("DM: Failed to initialize IRQ\n");
3733                         goto fail;
3734                 }
3735                 break;
3736 #endif
3737         default:
3738                 DRM_ERROR("Unsupported ASIC type: 0x%X\n", adev->asic_type);
3739                 goto fail;
3740         }
3741
3742         return 0;
3743 fail:
3744         kfree(aencoder);
3745         kfree(aconnector);
3746
3747         return -EINVAL;
3748 }
3749
3750 static void amdgpu_dm_destroy_drm_device(struct amdgpu_display_manager *dm)
3751 {
3752         drm_mode_config_cleanup(dm->ddev);
3753         drm_atomic_private_obj_fini(&dm->atomic_obj);
3755 }
3756
3757 /******************************************************************************
3758  * amdgpu_display_funcs functions
3759  *****************************************************************************/
3760
3761 /*
3762  * dm_bandwidth_update - program display watermarks
3763  *
3764  * @adev: amdgpu_device pointer
3765  *
3766  * Calculate and program the display watermarks and line buffer allocation.
3767  */
3768 static void dm_bandwidth_update(struct amdgpu_device *adev)
3769 {
3770         /* TODO: implement later */
3771 }
3772
3773 static const struct amdgpu_display_funcs dm_display_funcs = {
3774         .bandwidth_update = dm_bandwidth_update, /* called unconditionally */
3775         .vblank_get_counter = dm_vblank_get_counter,/* called unconditionally */
3776         .backlight_set_level = NULL, /* never called for DC */
3777         .backlight_get_level = NULL, /* never called for DC */
3778         .hpd_sense = NULL,/* called unconditionally */
3779         .hpd_set_polarity = NULL, /* called unconditionally */
3780         .hpd_get_gpio_reg = NULL, /* VBIOS parsing. DAL does it. */
3781         .page_flip_get_scanoutpos =
3782                 dm_crtc_get_scanoutpos,/* called unconditionally */
3783         .add_encoder = NULL, /* VBIOS parsing. DAL does it. */
3784         .add_connector = NULL, /* VBIOS parsing. DAL does it. */
3785 };
3786
3787 #if defined(CONFIG_DEBUG_KERNEL_DC)
3788
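/*
 * Debug-only hook to fake an S3 cycle from user space. Illustrative usage
 * (the exact sysfs path depends on the PCI address of the GPU):
 *
 *	echo 0 > /sys/bus/pci/devices/<bdf>/s3_debug	# dm_suspend()
 *	echo 1 > /sys/bus/pci/devices/<bdf>/s3_debug	# dm_resume() + hotplug
 */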
3789 static ssize_t s3_debug_store(struct device *device,
3790                               struct device_attribute *attr,
3791                               const char *buf,
3792                               size_t count)
3793 {
3794         int ret;
3795         int s3_state;
3796         struct drm_device *drm_dev = dev_get_drvdata(device);
3797         struct amdgpu_device *adev = drm_to_adev(drm_dev);
3798
3799         ret = kstrtoint(buf, 0, &s3_state);
3800
3801         if (ret == 0) {
3802                 if (s3_state) {
3803                         dm_resume(adev);
3804                         drm_kms_helper_hotplug_event(adev_to_drm(adev));
3805                 } else
3806                         dm_suspend(adev);
3807         }
3808
3809         return ret == 0 ? count : 0;
3810 }
3811
3812 DEVICE_ATTR_WO(s3_debug);
3813
3814 #endif
3815
3816 static int dm_early_init(void *handle)
3817 {
3818         struct amdgpu_device *adev = (struct amdgpu_device *)handle;
3819
3820         switch (adev->asic_type) {
3821 #if defined(CONFIG_DRM_AMD_DC_SI)
3822         case CHIP_TAHITI:
3823         case CHIP_PITCAIRN:
3824         case CHIP_VERDE:
3825                 adev->mode_info.num_crtc = 6;
3826                 adev->mode_info.num_hpd = 6;
3827                 adev->mode_info.num_dig = 6;
3828                 break;
3829         case CHIP_OLAND:
3830                 adev->mode_info.num_crtc = 2;
3831                 adev->mode_info.num_hpd = 2;
3832                 adev->mode_info.num_dig = 2;
3833                 break;
3834 #endif
3835         case CHIP_BONAIRE:
3836         case CHIP_HAWAII:
3837                 adev->mode_info.num_crtc = 6;
3838                 adev->mode_info.num_hpd = 6;
3839                 adev->mode_info.num_dig = 6;
3840                 break;
3841         case CHIP_KAVERI:
3842                 adev->mode_info.num_crtc = 4;
3843                 adev->mode_info.num_hpd = 6;
3844                 adev->mode_info.num_dig = 7;
3845                 break;
3846         case CHIP_KABINI:
3847         case CHIP_MULLINS:
3848                 adev->mode_info.num_crtc = 2;
3849                 adev->mode_info.num_hpd = 6;
3850                 adev->mode_info.num_dig = 6;
3851                 break;
3852         case CHIP_FIJI:
3853         case CHIP_TONGA:
3854                 adev->mode_info.num_crtc = 6;
3855                 adev->mode_info.num_hpd = 6;
3856                 adev->mode_info.num_dig = 7;
3857                 break;
3858         case CHIP_CARRIZO:
3859                 adev->mode_info.num_crtc = 3;
3860                 adev->mode_info.num_hpd = 6;
3861                 adev->mode_info.num_dig = 9;
3862                 break;
3863         case CHIP_STONEY:
3864                 adev->mode_info.num_crtc = 2;
3865                 adev->mode_info.num_hpd = 6;
3866                 adev->mode_info.num_dig = 9;
3867                 break;
3868         case CHIP_POLARIS11:
3869         case CHIP_POLARIS12:
3870                 adev->mode_info.num_crtc = 5;
3871                 adev->mode_info.num_hpd = 5;
3872                 adev->mode_info.num_dig = 5;
3873                 break;
3874         case CHIP_POLARIS10:
3875         case CHIP_VEGAM:
3876                 adev->mode_info.num_crtc = 6;
3877                 adev->mode_info.num_hpd = 6;
3878                 adev->mode_info.num_dig = 6;
3879                 break;
3880         case CHIP_VEGA10:
3881         case CHIP_VEGA12:
3882         case CHIP_VEGA20:
3883                 adev->mode_info.num_crtc = 6;
3884                 adev->mode_info.num_hpd = 6;
3885                 adev->mode_info.num_dig = 6;
3886                 break;
3887 #if defined(CONFIG_DRM_AMD_DC_DCN)
3888         case CHIP_RAVEN:
3889         case CHIP_RENOIR:
3890         case CHIP_VANGOGH:
3891                 adev->mode_info.num_crtc = 4;
3892                 adev->mode_info.num_hpd = 4;
3893                 adev->mode_info.num_dig = 4;
3894                 break;
3895         case CHIP_NAVI10:
3896         case CHIP_NAVI12:
3897         case CHIP_SIENNA_CICHLID:
3898         case CHIP_NAVY_FLOUNDER:
3899                 adev->mode_info.num_crtc = 6;
3900                 adev->mode_info.num_hpd = 6;
3901                 adev->mode_info.num_dig = 6;
3902                 break;
3903         case CHIP_NAVI14:
3904         case CHIP_DIMGREY_CAVEFISH:
3905                 adev->mode_info.num_crtc = 5;
3906                 adev->mode_info.num_hpd = 5;
3907                 adev->mode_info.num_dig = 5;
3908                 break;
3909 #endif
3910         default:
3911                 DRM_ERROR("Unsupported ASIC type: 0x%X\n", adev->asic_type);
3912                 return -EINVAL;
3913         }
3914
3915         amdgpu_dm_set_irq_funcs(adev);
3916
3917         if (adev->mode_info.funcs == NULL)
3918                 adev->mode_info.funcs = &dm_display_funcs;
3919
3920         /*
3921          * Note: Do NOT change adev->audio_endpt_rreg and
3922          * adev->audio_endpt_wreg because they are initialised in
3923          * amdgpu_device_init()
3924          */
3925 #if defined(CONFIG_DEBUG_KERNEL_DC)
3926         device_create_file(
3927                 adev_to_drm(adev)->dev,
3928                 &dev_attr_s3_debug);
3929 #endif
3930
3931         return 0;
3932 }
3933
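/*
 * modeset_required(): the new CRTC state is active and DRM flagged a
 * modeset, so a stream must be (re)created. modereset_required(): the CRTC
 * ends up inactive, so its stream must be torn down. Note that
 * modeset_required() currently ignores its stream arguments.
 */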
3934 static bool modeset_required(struct drm_crtc_state *crtc_state,
3935                              struct dc_stream_state *new_stream,
3936                              struct dc_stream_state *old_stream)
3937 {
3938         return crtc_state->active && drm_atomic_crtc_needs_modeset(crtc_state);
3939 }
3940
3941 static bool modereset_required(struct drm_crtc_state *crtc_state)
3942 {
3943         return !crtc_state->active && drm_atomic_crtc_needs_modeset(crtc_state);
3944 }
3945
3946 static void amdgpu_dm_encoder_destroy(struct drm_encoder *encoder)
3947 {
3948         drm_encoder_cleanup(encoder);
3949         kfree(encoder);
3950 }
3951
3952 static const struct drm_encoder_funcs amdgpu_dm_encoder_funcs = {
3953         .destroy = amdgpu_dm_encoder_destroy,
3954 };
3955
3956
3957 static void get_min_max_dc_plane_scaling(struct drm_device *dev,
3958                                          struct drm_framebuffer *fb,
3959                                          int *min_downscale, int *max_upscale)
3960 {
3961         struct amdgpu_device *adev = drm_to_adev(dev);
3962         struct dc *dc = adev->dm.dc;
3963         /* Caps for all supported planes are the same on DCE and DCN 1 - 3 */
3964         struct dc_plane_cap *plane_cap = &dc->caps.planes[0];
3965
3966         switch (fb->format->format) {
3967         case DRM_FORMAT_P010:
3968         case DRM_FORMAT_NV12:
3969         case DRM_FORMAT_NV21:
3970                 *max_upscale = plane_cap->max_upscale_factor.nv12;
3971                 *min_downscale = plane_cap->max_downscale_factor.nv12;
3972                 break;
3973
3974         case DRM_FORMAT_XRGB16161616F:
3975         case DRM_FORMAT_ARGB16161616F:
3976         case DRM_FORMAT_XBGR16161616F:
3977         case DRM_FORMAT_ABGR16161616F:
3978                 *max_upscale = plane_cap->max_upscale_factor.fp16;
3979                 *min_downscale = plane_cap->max_downscale_factor.fp16;
3980                 break;
3981
3982         default:
3983                 *max_upscale = plane_cap->max_upscale_factor.argb8888;
3984                 *min_downscale = plane_cap->max_downscale_factor.argb8888;
3985                 break;
3986         }
3987
3988         /*
3989          * A factor of 1 in the plane_cap means to not allow scaling, i.e. use a
3990          * scaling factor of 1.0 == 1000 units.
3991          */
3992         if (*max_upscale == 1)
3993                 *max_upscale = 1000;
3994
3995         if (*min_downscale == 1)
3996                 *min_downscale = 1000;
3997 }
3998
3999
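/*
 * Example (illustrative numbers): a 1920x1080 source rect scanned out to a
 * 960x540 destination gives scale_w = 960 * 1000 / 1920 = 500 and
 * scale_h = 540 * 1000 / 1080 = 500, i.e. a 2:1 downscale, which passes as
 * long as the plane's minimum downscale cap is <= 500 (1000 == 1.0 here).
 */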
4000 static int fill_dc_scaling_info(const struct drm_plane_state *state,
4001                                 struct dc_scaling_info *scaling_info)
4002 {
4003         int scale_w, scale_h, min_downscale, max_upscale;
4004
4005         memset(scaling_info, 0, sizeof(*scaling_info));
4006
4007         /* Source is fixed 16.16 but we ignore mantissa for now... */
4008         scaling_info->src_rect.x = state->src_x >> 16;
4009         scaling_info->src_rect.y = state->src_y >> 16;
4010
4011         scaling_info->src_rect.width = state->src_w >> 16;
4012         if (scaling_info->src_rect.width == 0)
4013                 return -EINVAL;
4014
4015         scaling_info->src_rect.height = state->src_h >> 16;
4016         if (scaling_info->src_rect.height == 0)
4017                 return -EINVAL;
4018
4019         scaling_info->dst_rect.x = state->crtc_x;
4020         scaling_info->dst_rect.y = state->crtc_y;
4021
4022         if (state->crtc_w == 0)
4023                 return -EINVAL;
4024
4025         scaling_info->dst_rect.width = state->crtc_w;
4026
4027         if (state->crtc_h == 0)
4028                 return -EINVAL;
4029
4030         scaling_info->dst_rect.height = state->crtc_h;
4031
4032         /* DRM doesn't specify clipping on destination output. */
4033         scaling_info->clip_rect = scaling_info->dst_rect;
4034
4035         /* Validate scaling per-format with DC plane caps */
4036         if (state->plane && state->plane->dev && state->fb) {
4037                 get_min_max_dc_plane_scaling(state->plane->dev, state->fb,
4038                                              &min_downscale, &max_upscale);
4039         } else {
4040                 min_downscale = 250;
4041                 max_upscale = 16000;
4042         }
4043
4044         scale_w = scaling_info->dst_rect.width * 1000 /
4045                   scaling_info->src_rect.width;
4046
4047         if (scale_w < min_downscale || scale_w > max_upscale)
4048                 return -EINVAL;
4049
4050         scale_h = scaling_info->dst_rect.height * 1000 /
4051                   scaling_info->src_rect.height;
4052
4053         if (scale_h < min_downscale || scale_h > max_upscale)
4054                 return -EINVAL;
4055
4056         /*
4057          * The "scaling_quality" can be ignored for now, quality = 0 has DC
4058          * assume reasonable defaults based on the format.
4059          */
4060
4061         return 0;
4062 }
4063
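/*
 * Decode the legacy AMDGPU_TILING_* flags into DC's GFX8 tiling
 * description. GFX9 and newer parts derive this from the device config or
 * from format modifiers instead; see the gfx9 helpers below.
 */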
4064 static void
4065 fill_gfx8_tiling_info_from_flags(union dc_tiling_info *tiling_info,
4066                                  uint64_t tiling_flags)
4067 {
4068         /* Fill GFX8 params */
4069         if (AMDGPU_TILING_GET(tiling_flags, ARRAY_MODE) == DC_ARRAY_2D_TILED_THIN1) {
4070                 unsigned int bankw, bankh, mtaspect, tile_split, num_banks;
4071
4072                 bankw = AMDGPU_TILING_GET(tiling_flags, BANK_WIDTH);
4073                 bankh = AMDGPU_TILING_GET(tiling_flags, BANK_HEIGHT);
4074                 mtaspect = AMDGPU_TILING_GET(tiling_flags, MACRO_TILE_ASPECT);
4075                 tile_split = AMDGPU_TILING_GET(tiling_flags, TILE_SPLIT);
4076                 num_banks = AMDGPU_TILING_GET(tiling_flags, NUM_BANKS);
4077
4078                 /* XXX fix me for VI */
4079                 tiling_info->gfx8.num_banks = num_banks;
4080                 tiling_info->gfx8.array_mode =
4081                                 DC_ARRAY_2D_TILED_THIN1;
4082                 tiling_info->gfx8.tile_split = tile_split;
4083                 tiling_info->gfx8.bank_width = bankw;
4084                 tiling_info->gfx8.bank_height = bankh;
4085                 tiling_info->gfx8.tile_aspect = mtaspect;
4086                 tiling_info->gfx8.tile_mode =
4087                                 DC_ADDR_SURF_MICRO_TILING_DISPLAY;
4088         } else if (AMDGPU_TILING_GET(tiling_flags, ARRAY_MODE)
4089                         == DC_ARRAY_1D_TILED_THIN1) {
4090                 tiling_info->gfx8.array_mode = DC_ARRAY_1D_TILED_THIN1;
4091         }
4092
4093         tiling_info->gfx8.pipe_config =
4094                         AMDGPU_TILING_GET(tiling_flags, PIPE_CONFIG);
4095 }
4096
4097 static void
4098 fill_gfx9_tiling_info_from_device(const struct amdgpu_device *adev,
4099                                   union dc_tiling_info *tiling_info)
4100 {
4101         tiling_info->gfx9.num_pipes =
4102                 adev->gfx.config.gb_addr_config_fields.num_pipes;
4103         tiling_info->gfx9.num_banks =
4104                 adev->gfx.config.gb_addr_config_fields.num_banks;
4105         tiling_info->gfx9.pipe_interleave =
4106                 adev->gfx.config.gb_addr_config_fields.pipe_interleave_size;
4107         tiling_info->gfx9.num_shader_engines =
4108                 adev->gfx.config.gb_addr_config_fields.num_se;
4109         tiling_info->gfx9.max_compressed_frags =
4110                 adev->gfx.config.gb_addr_config_fields.max_compress_frags;
4111         tiling_info->gfx9.num_rb_per_se =
4112                 adev->gfx.config.gb_addr_config_fields.num_rb_per_se;
4113         tiling_info->gfx9.shaderEnable = 1;
4114         if (adev->asic_type == CHIP_SIENNA_CICHLID ||
4115             adev->asic_type == CHIP_NAVY_FLOUNDER ||
4116             adev->asic_type == CHIP_DIMGREY_CAVEFISH ||
4117             adev->asic_type == CHIP_VANGOGH)
4118                 tiling_info->gfx9.num_pkrs = adev->gfx.config.gb_addr_config_fields.num_pkrs;
4119 }
4120
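/*
 * Ask DC whether it can scan out the given DCC-compressed surface. Returns
 * 0 when DCC is disabled or supported for this format/size/swizzle
 * combination, -EINVAL when DC reports the surface as uncompressible.
 */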
4121 static int
4122 validate_dcc(struct amdgpu_device *adev,
4123              const enum surface_pixel_format format,
4124              const enum dc_rotation_angle rotation,
4125              const union dc_tiling_info *tiling_info,
4126              const struct dc_plane_dcc_param *dcc,
4127              const struct dc_plane_address *address,
4128              const struct plane_size *plane_size)
4129 {
4130         struct dc *dc = adev->dm.dc;
4131         struct dc_dcc_surface_param input;
4132         struct dc_surface_dcc_cap output;
4133
4134         memset(&input, 0, sizeof(input));
4135         memset(&output, 0, sizeof(output));
4136
4137         if (!dcc->enable)
4138                 return 0;
4139
4140         if (format >= SURFACE_PIXEL_FORMAT_VIDEO_BEGIN ||
4141             !dc->cap_funcs.get_dcc_compression_cap)
4142                 return -EINVAL;
4143
4144         input.format = format;
4145         input.surface_size.width = plane_size->surface_size.width;
4146         input.surface_size.height = plane_size->surface_size.height;
4147         input.swizzle_mode = tiling_info->gfx9.swizzle;
4148
4149         if (rotation == ROTATION_ANGLE_0 || rotation == ROTATION_ANGLE_180)
4150                 input.scan = SCAN_DIRECTION_HORIZONTAL;
4151         else if (rotation == ROTATION_ANGLE_90 || rotation == ROTATION_ANGLE_270)
4152                 input.scan = SCAN_DIRECTION_VERTICAL;
4153
4154         if (!dc->cap_funcs.get_dcc_compression_cap(dc, &input, &output))
4155                 return -EINVAL;
4156
4157         if (!output.capable)
4158                 return -EINVAL;
4159
4160         if (dcc->independent_64b_blks == 0 &&
4161             output.grph.rgb.independent_64b_blks != 0)
4162                 return -EINVAL;
4163
4164         return 0;
4165 }
4166
4167 static bool
4168 modifier_has_dcc(uint64_t modifier)
4169 {
4170         return IS_AMD_FMT_MOD(modifier) && AMD_FMT_MOD_GET(DCC, modifier);
4171 }
4172
4173 static unsigned
4174 modifier_gfx9_swizzle_mode(uint64_t modifier)
4175 {
4176         if (modifier == DRM_FORMAT_MOD_LINEAR)
4177                 return 0;
4178
4179         return AMD_FMT_MOD_GET(TILE, modifier);
4180 }
4181
4182 static const struct drm_format_info *
4183 amd_get_format_info(const struct drm_mode_fb_cmd2 *cmd)
4184 {
4185         return amdgpu_lookup_format_info(cmd->pixel_format, cmd->modifier[0]);
4186 }
4187
4188 static void
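/*
 * Example (illustrative): a modifier with PIPE_XOR_BITS == 3 yields
 * pipes_log2 = min(4, 3) = 3, so num_pipes = 8 and num_shader_engines =
 * 1 << (3 - 3) = 1; on GFX10+ parts PACKERS then sets num_pkrs the same way.
 */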
4189 fill_gfx9_tiling_info_from_modifier(const struct amdgpu_device *adev,
4190                                     union dc_tiling_info *tiling_info,
4191                                     uint64_t modifier)
4192 {
4193         unsigned int mod_bank_xor_bits = AMD_FMT_MOD_GET(BANK_XOR_BITS, modifier);
4194         unsigned int mod_pipe_xor_bits = AMD_FMT_MOD_GET(PIPE_XOR_BITS, modifier);
4195         unsigned int pkrs_log2 = AMD_FMT_MOD_GET(PACKERS, modifier);
4196         unsigned int pipes_log2 = min(4u, mod_pipe_xor_bits);
4197
4198         fill_gfx9_tiling_info_from_device(adev, tiling_info);
4199
4200         if (!IS_AMD_FMT_MOD(modifier))
4201                 return;
4202
4203         tiling_info->gfx9.num_pipes = 1u << pipes_log2;
4204         tiling_info->gfx9.num_shader_engines = 1u << (mod_pipe_xor_bits - pipes_log2);
4205
4206         if (adev->family >= AMDGPU_FAMILY_NV) {
4207                 tiling_info->gfx9.num_pkrs = 1u << pkrs_log2;
4208         } else {
4209                 tiling_info->gfx9.num_banks = 1u << mod_bank_xor_bits;
4210
4211                 /* for DCC we know it isn't rb aligned, so rb_per_se doesn't matter. */
4212         }
4213 }
4214
4215 enum dm_micro_swizzle {
4216         MICRO_SWIZZLE_Z = 0,
4217         MICRO_SWIZZLE_S = 1,
4218         MICRO_SWIZZLE_D = 2,
4219         MICRO_SWIZZLE_R = 3
4220 };
4221
4222 static bool dm_plane_format_mod_supported(struct drm_plane *plane,
4223                                           uint32_t format,
4224                                           uint64_t modifier)
4225 {
4226         struct amdgpu_device *adev = drm_to_adev(plane->dev);
4227         const struct drm_format_info *info = drm_format_info(format);
4228
4229         enum dm_micro_swizzle microtile = modifier_gfx9_swizzle_mode(modifier) & 3;
4230
4231         if (!info)
4232                 return false;
4233
4234         /*
4235          * We always have to allow this modifier, because core DRM still
4236          * checks LINEAR support if userspace does not provide modifiers.
4237          */
4238         if (modifier == DRM_FORMAT_MOD_LINEAR)
4239                 return true;
4240
4241         /*
4242          * The arbitrary tiling support for multiplane formats has not been hooked
4243          * up.
4244          */
4245         if (info->num_planes > 1)
4246                 return false;
4247
4248         /*
4249          * For D swizzle the canonical modifier depends on the bpp, so check
4250          * it here.
4251          */
4252         if (AMD_FMT_MOD_GET(TILE_VERSION, modifier) == AMD_FMT_MOD_TILE_VER_GFX9 &&
4253             adev->family >= AMDGPU_FAMILY_NV) {
4254                 if (microtile == MICRO_SWIZZLE_D && info->cpp[0] == 4)
4255                         return false;
4256         }
4257
4258         if (adev->family >= AMDGPU_FAMILY_RV && microtile == MICRO_SWIZZLE_D &&
4259             info->cpp[0] < 8)
4260                 return false;
4261
4262         if (modifier_has_dcc(modifier)) {
4263                 /* Per radeonsi comments, 16/64 bpp are more complicated. */
4264                 if (info->cpp[0] != 4)
4265                         return false;
4266         }
4267
4268         return true;
4269 }
4270
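/*
 * Append a modifier to a heap-allocated list, doubling the capacity when
 * full. On allocation failure the list is freed and *mods set to NULL,
 * which turns later calls into no-ops and lets the caller detect the
 * failure with a single NULL check at the end.
 */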
4271 static void
4272 add_modifier(uint64_t **mods, uint64_t *size, uint64_t *cap, uint64_t mod)
4273 {
4274         if (!*mods)
4275                 return;
4276
4277         if (*cap - *size < 1) {
4278                 uint64_t new_cap = *cap * 2;
4279                 uint64_t *new_mods = kmalloc(new_cap * sizeof(uint64_t), GFP_KERNEL);
4280
4281                 if (!new_mods) {
4282                         kfree(*mods);
4283                         *mods = NULL;
4284                         return;
4285                 }
4286
4287                 memcpy(new_mods, *mods, sizeof(uint64_t) * *size);
4288                 kfree(*mods);
4289                 *mods = new_mods;
4290                 *cap = new_cap;
4291         }
4292
4293         (*mods)[*size] = mod;
4294         *size += 1;
4295 }
4296
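/*
 * Build the modifier list for GFX9 (Vega/Raven) parts: DCC variants come
 * first, with constant-encode entries only where supported (Raven2 and
 * later) and retiled variants carrying the extra RB/PIPE layout bits,
 * followed by the plain S/D swizzles as fallbacks.
 */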
4297 static void
4298 add_gfx9_modifiers(const struct amdgpu_device *adev,
4299                    uint64_t **mods, uint64_t *size, uint64_t *capacity)
4300 {
4301         int pipes = ilog2(adev->gfx.config.gb_addr_config_fields.num_pipes);
4302         int pipe_xor_bits = min(8, pipes +
4303                                 ilog2(adev->gfx.config.gb_addr_config_fields.num_se));
4304         int bank_xor_bits = min(8 - pipe_xor_bits,
4305                                 ilog2(adev->gfx.config.gb_addr_config_fields.num_banks));
4306         int rb = ilog2(adev->gfx.config.gb_addr_config_fields.num_se) +
4307                  ilog2(adev->gfx.config.gb_addr_config_fields.num_rb_per_se);
4308
4310         if (adev->family == AMDGPU_FAMILY_RV) {
4311                 /* Raven2 and later */
4312                 bool has_constant_encode = adev->asic_type > CHIP_RAVEN || adev->external_rev_id >= 0x81;
4313
4314                 /*
4315                  * No _D DCC swizzles yet because we only allow 32bpp, which
4316                  * doesn't support _D on DCN
4317                  */
4318
4319                 if (has_constant_encode) {
4320                         add_modifier(mods, size, capacity, AMD_FMT_MOD |
4321                                     AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S_X) |
4322                                     AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9) |
4323                                     AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
4324                                     AMD_FMT_MOD_SET(BANK_XOR_BITS, bank_xor_bits) |
4325                                     AMD_FMT_MOD_SET(DCC, 1) |
4326                                     AMD_FMT_MOD_SET(DCC_INDEPENDENT_64B, 1) |
4327                                     AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_64B) |
4328                                     AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 1));
4329                 }
4330
4331                 add_modifier(mods, size, capacity, AMD_FMT_MOD |
4332                             AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S_X) |
4333                             AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9) |
4334                             AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
4335                             AMD_FMT_MOD_SET(BANK_XOR_BITS, bank_xor_bits) |
4336                             AMD_FMT_MOD_SET(DCC, 1) |
4337                             AMD_FMT_MOD_SET(DCC_INDEPENDENT_64B, 1) |
4338                             AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_64B) |
4339                             AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 0));
4340
4341                 if (has_constant_encode) {
4342                         add_modifier(mods, size, capacity, AMD_FMT_MOD |
4343                                     AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S_X) |
4344                                     AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9) |
4345                                     AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
4346                                     AMD_FMT_MOD_SET(BANK_XOR_BITS, bank_xor_bits) |
4347                                     AMD_FMT_MOD_SET(DCC, 1) |
4348                                     AMD_FMT_MOD_SET(DCC_RETILE, 1) |
4349                                     AMD_FMT_MOD_SET(DCC_INDEPENDENT_64B, 1) |
4350                                     AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_64B) |
4352                                     AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 1) |
4353                                     AMD_FMT_MOD_SET(RB, rb) |
4354                                     AMD_FMT_MOD_SET(PIPE, pipes));
4355                 }
4356
4357                 add_modifier(mods, size, capacity, AMD_FMT_MOD |
4358                             AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S_X) |
4359                             AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9) |
4360                             AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
4361                             AMD_FMT_MOD_SET(BANK_XOR_BITS, bank_xor_bits) |
4362                             AMD_FMT_MOD_SET(DCC, 1) |
4363                             AMD_FMT_MOD_SET(DCC_RETILE, 1) |
4364                             AMD_FMT_MOD_SET(DCC_INDEPENDENT_64B, 1) |
4365                             AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_64B) |
4366                             AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 0) |
4367                             AMD_FMT_MOD_SET(RB, rb) |
4368                             AMD_FMT_MOD_SET(PIPE, pipes));
4369         }
4370
4371         /*
4372          * Only supported for 64bpp on Raven; will be filtered on format in
4373          * dm_plane_format_mod_supported.
4374          */
4375         add_modifier(mods, size, capacity, AMD_FMT_MOD |
4376                     AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_D_X) |
4377                     AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9) |
4378                     AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
4379                     AMD_FMT_MOD_SET(BANK_XOR_BITS, bank_xor_bits));
4380
4381         if (adev->family == AMDGPU_FAMILY_RV) {
4382                 add_modifier(mods, size, capacity, AMD_FMT_MOD |
4383                             AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S_X) |
4384                             AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9) |
4385                             AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
4386                             AMD_FMT_MOD_SET(BANK_XOR_BITS, bank_xor_bits));
4387         }
4388
4389         /*
4390          * Only supported for 64bpp on Raven; will be filtered on format in
4391          * dm_plane_format_mod_supported.
4392          */
4393         add_modifier(mods, size, capacity, AMD_FMT_MOD |
4394                     AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_D) |
4395                     AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9));
4396
4397         if (adev->family == AMDGPU_FAMILY_RV) {
4398                 add_modifier(mods, size, capacity, AMD_FMT_MOD |
4399                             AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S) |
4400                             AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9));
4401         }
4402 }
4403
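/*
 * Build the modifier list for GFX10.1 (pre-Sienna Cichlid Navi) parts:
 * R_X swizzles with 64B independent-block DCC first, then uncompressed
 * R_X/S_X, and finally the GFX9-style D/S swizzles as fallbacks.
 */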
4404 static void
4405 add_gfx10_1_modifiers(const struct amdgpu_device *adev,
4406                       uint64_t **mods, uint64_t *size, uint64_t *capacity)
4407 {
4408         int pipe_xor_bits = ilog2(adev->gfx.config.gb_addr_config_fields.num_pipes);
4409
4410         add_modifier(mods, size, capacity, AMD_FMT_MOD |
4411                     AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_R_X) |
4412                     AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10) |
4413                     AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
4414                     AMD_FMT_MOD_SET(DCC, 1) |
4415                     AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 1) |
4416                     AMD_FMT_MOD_SET(DCC_INDEPENDENT_64B, 1) |
4417                     AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_64B));
4418
4419         add_modifier(mods, size, capacity, AMD_FMT_MOD |
4420                     AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_R_X) |
4421                     AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10) |
4422                     AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
4423                     AMD_FMT_MOD_SET(DCC, 1) |
4424                     AMD_FMT_MOD_SET(DCC_RETILE, 1) |
4425                     AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 1) |
4426                     AMD_FMT_MOD_SET(DCC_INDEPENDENT_64B, 1) |
4427                     AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_64B));
4428
4429         add_modifier(mods, size, capacity, AMD_FMT_MOD |
4430                     AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_R_X) |
4431                     AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10) |
4432                     AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits));
4433
4434         add_modifier(mods, size, capacity, AMD_FMT_MOD |
4435                     AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S_X) |
4436                     AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10) |
4437                     AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits));
4438
4440         /* Only supported for 64bpp; will be filtered in dm_plane_format_mod_supported */
4441         add_modifier(mods, size, capacity, AMD_FMT_MOD |
4442                     AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_D) |
4443                     AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9));
4444
4445         add_modifier(mods, size, capacity, AMD_FMT_MOD |
4446                     AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S) |
4447                     AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9));
4448 }
4449
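/*
 * Build the modifier list for GFX10.3 (Sienna Cichlid and newer) parts.
 * These additionally encode the packer count, and their DCC entries use
 * 128B max compressed blocks with both 64B and 128B independent blocks
 * set.
 */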
4450 static void
4451 add_gfx10_3_modifiers(const struct amdgpu_device *adev,
4452                       uint64_t **mods, uint64_t *size, uint64_t *capacity)
4453 {
4454         int pipe_xor_bits = ilog2(adev->gfx.config.gb_addr_config_fields.num_pipes);
4455         int pkrs = ilog2(adev->gfx.config.gb_addr_config_fields.num_pkrs);
4456
4457         add_modifier(mods, size, capacity, AMD_FMT_MOD |
4458                     AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_R_X) |
4459                     AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10_RBPLUS) |
4460                     AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
4461                     AMD_FMT_MOD_SET(PACKERS, pkrs) |
4462                     AMD_FMT_MOD_SET(DCC, 1) |
4463                     AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 1) |
4464                     AMD_FMT_MOD_SET(DCC_INDEPENDENT_64B, 1) |
4465                     AMD_FMT_MOD_SET(DCC_INDEPENDENT_128B, 1) |
4466                     AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_128B));
4467
4468         add_modifier(mods, size, capacity, AMD_FMT_MOD |
4469                     AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_R_X) |
4470                     AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10_RBPLUS) |
4471                     AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
4472                     AMD_FMT_MOD_SET(PACKERS, pkrs) |
4473                     AMD_FMT_MOD_SET(DCC, 1) |
4474                     AMD_FMT_MOD_SET(DCC_RETILE, 1) |
4475                     AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 1) |
4476                     AMD_FMT_MOD_SET(DCC_INDEPENDENT_64B, 1) |
4477                     AMD_FMT_MOD_SET(DCC_INDEPENDENT_128B, 1) |
4478                     AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_128B));
4479
4480         add_modifier(mods, size, capacity, AMD_FMT_MOD |
4481                     AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_R_X) |
4482                     AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10_RBPLUS) |
4483                     AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
4484                     AMD_FMT_MOD_SET(PACKERS, pkrs));
4485
4486         add_modifier(mods, size, capacity, AMD_FMT_MOD |
4487                     AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S_X) |
4488                     AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10_RBPLUS) |
4489                     AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
4490                     AMD_FMT_MOD_SET(PACKERS, pkrs));
4491
4492         /* Only supported for 64bpp; will be filtered in dm_plane_format_mod_supported */
4493         add_modifier(mods, size, capacity, AMD_FMT_MOD |
4494                     AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_D) |
4495                     AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9));
4496
4497         add_modifier(mods, size, capacity, AMD_FMT_MOD |
4498                     AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S) |
4499                     AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9));
4500 }
4501
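/*
 * Allocate and populate the modifier list advertised for a plane. Cursor
 * planes only get LINEAR; other planes get the family-specific list plus
 * LINEAR. DRM_FORMAT_MOD_INVALID terminates the list, and a NULL *mods at
 * the end means an allocation failed somewhere along the way.
 */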
4502 static int
4503 get_plane_modifiers(const struct amdgpu_device *adev, unsigned int plane_type, uint64_t **mods)
4504 {
4505         uint64_t size = 0, capacity = 128;
4506         *mods = NULL;
4507
4508         /* We have not hooked up any pre-GFX9 modifiers. */
4509         if (adev->family < AMDGPU_FAMILY_AI)
4510                 return 0;
4511
4512         *mods = kmalloc(capacity * sizeof(uint64_t), GFP_KERNEL);
4513
4514         if (plane_type == DRM_PLANE_TYPE_CURSOR) {
4515                 add_modifier(mods, &size, &capacity, DRM_FORMAT_MOD_LINEAR);
4516                 add_modifier(mods, &size, &capacity, DRM_FORMAT_MOD_INVALID);
4517                 return *mods ? 0 : -ENOMEM;
4518         }
4519
4520         switch (adev->family) {
4521         case AMDGPU_FAMILY_AI:
4522         case AMDGPU_FAMILY_RV:
4523                 add_gfx9_modifiers(adev, mods, &size, &capacity);
4524                 break;
4525         case AMDGPU_FAMILY_NV:
4526         case AMDGPU_FAMILY_VGH:
4527                 if (adev->asic_type >= CHIP_SIENNA_CICHLID)
4528                         add_gfx10_3_modifiers(adev, mods, &size, &capacity);
4529                 else
4530                         add_gfx10_1_modifiers(adev, mods, &size, &capacity);
4531                 break;
4532         }
4533
4534         add_modifier(mods, &size, &capacity, DRM_FORMAT_MOD_LINEAR);
4535
4536         /* INVALID marks the end of the list. */
4537         add_modifier(mods, &size, &capacity, DRM_FORMAT_MOD_INVALID);
4538
4539         if (!*mods)
4540                 return -ENOMEM;
4541
4542         return 0;
4543 }
4544
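/*
 * Translate a framebuffer's modifier into DC tiling and DCC parameters.
 * When the modifier carries DCC (and it isn't force-disabled), the
 * metadata plane is taken from fb plane 1 (offsets[1]/pitches[1]), and
 * the resulting configuration is checked with validate_dcc().
 */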
4545 static int
4546 fill_gfx9_plane_attributes_from_modifiers(struct amdgpu_device *adev,
4547                                           const struct amdgpu_framebuffer *afb,
4548                                           const enum surface_pixel_format format,
4549                                           const enum dc_rotation_angle rotation,
4550                                           const struct plane_size *plane_size,
4551                                           union dc_tiling_info *tiling_info,
4552                                           struct dc_plane_dcc_param *dcc,
4553                                           struct dc_plane_address *address,
4554                                           const bool force_disable_dcc)
4555 {
4556         const uint64_t modifier = afb->base.modifier;
4557         int ret;
4558
4559         fill_gfx9_tiling_info_from_modifier(adev, tiling_info, modifier);
4560         tiling_info->gfx9.swizzle = modifier_gfx9_swizzle_mode(modifier);
4561
4562         if (modifier_has_dcc(modifier) && !force_disable_dcc) {
4563                 uint64_t dcc_address = afb->address + afb->base.offsets[1];
4564
4565                 dcc->enable = 1;
4566                 dcc->meta_pitch = afb->base.pitches[1];
4567                 dcc->independent_64b_blks = AMD_FMT_MOD_GET(DCC_INDEPENDENT_64B, modifier);
4568
4569                 address->grph.meta_addr.low_part = lower_32_bits(dcc_address);
4570                 address->grph.meta_addr.high_part = upper_32_bits(dcc_address);
4571         }
4572
4573         ret = validate_dcc(adev, format, rotation, tiling_info, dcc, address, plane_size);
4574         if (ret)
4575                 return ret;
4576
4577         return 0;
4578 }
4579
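/*
 * Fill the DC surface size, pitch and address info for a framebuffer.
 * RGB formats use a single graphics address; video formats get separate
 * luma/chroma addresses, with the chroma plane currently hardcoded to
 * half the surface size (see the TODO below). GFX9+ then derives tiling
 * from the modifier, while older parts use the legacy tiling flags.
 */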
4580 static int
4581 fill_plane_buffer_attributes(struct amdgpu_device *adev,
4582                              const struct amdgpu_framebuffer *afb,
4583                              const enum surface_pixel_format format,
4584                              const enum dc_rotation_angle rotation,
4585                              const uint64_t tiling_flags,
4586                              union dc_tiling_info *tiling_info,
4587                              struct plane_size *plane_size,
4588                              struct dc_plane_dcc_param *dcc,
4589                              struct dc_plane_address *address,
4590                              bool tmz_surface,
4591                              bool force_disable_dcc)
4592 {
4593         const struct drm_framebuffer *fb = &afb->base;
4594         int ret;
4595
4596         memset(tiling_info, 0, sizeof(*tiling_info));
4597         memset(plane_size, 0, sizeof(*plane_size));
4598         memset(dcc, 0, sizeof(*dcc));
4599         memset(address, 0, sizeof(*address));
4600
4601         address->tmz_surface = tmz_surface;
4602
4603         if (format < SURFACE_PIXEL_FORMAT_VIDEO_BEGIN) {
4604                 uint64_t addr = afb->address + fb->offsets[0];
4605
4606                 plane_size->surface_size.x = 0;
4607                 plane_size->surface_size.y = 0;
4608                 plane_size->surface_size.width = fb->width;
4609                 plane_size->surface_size.height = fb->height;
4610                 plane_size->surface_pitch =
4611                         fb->pitches[0] / fb->format->cpp[0];
4612
4613                 address->type = PLN_ADDR_TYPE_GRAPHICS;
4614                 address->grph.addr.low_part = lower_32_bits(addr);
4615                 address->grph.addr.high_part = upper_32_bits(addr);
4616         } else if (format < SURFACE_PIXEL_FORMAT_INVALID) {
4617                 uint64_t luma_addr = afb->address + fb->offsets[0];
4618                 uint64_t chroma_addr = afb->address + fb->offsets[1];
4619
4620                 plane_size->surface_size.x = 0;
4621                 plane_size->surface_size.y = 0;
4622                 plane_size->surface_size.width = fb->width;
4623                 plane_size->surface_size.height = fb->height;
4624                 plane_size->surface_pitch =
4625                         fb->pitches[0] / fb->format->cpp[0];
4626
4627                 plane_size->chroma_size.x = 0;
4628                 plane_size->chroma_size.y = 0;
4629                 /* TODO: set these based on surface format */
4630                 plane_size->chroma_size.width = fb->width / 2;
4631                 plane_size->chroma_size.height = fb->height / 2;
4632
4633                 plane_size->chroma_pitch =
4634                         fb->pitches[1] / fb->format->cpp[1];
4635
4636                 address->type = PLN_ADDR_TYPE_VIDEO_PROGRESSIVE;
4637                 address->video_progressive.luma_addr.low_part =
4638                         lower_32_bits(luma_addr);
4639                 address->video_progressive.luma_addr.high_part =
4640                         upper_32_bits(luma_addr);
4641                 address->video_progressive.chroma_addr.low_part =
4642                         lower_32_bits(chroma_addr);
4643                 address->video_progressive.chroma_addr.high_part =
4644                         upper_32_bits(chroma_addr);
4645         }
4646
4647         if (adev->family >= AMDGPU_FAMILY_AI) {
4648                 ret = fill_gfx9_plane_attributes_from_modifiers(adev, afb, format,
4649                                                                 rotation, plane_size,
4650                                                                 tiling_info, dcc,
4651                                                                 address,
4652                                                                 force_disable_dcc);
4653                 if (ret)
4654                         return ret;
4655         } else {
4656                 fill_gfx8_tiling_info_from_flags(tiling_info, tiling_flags);
4657         }
4658
4659         return 0;
4660 }
4661
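/*
 * Derive DC blending settings from DRM plane state. Only overlay planes
 * participate: per-pixel alpha is enabled for premultiplied-alpha ARGB
 * formats, and the 16-bit DRM plane alpha is scaled down to the 8-bit
 * global alpha value DC expects.
 */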
4662 static void
4663 fill_blending_from_plane_state(const struct drm_plane_state *plane_state,
4664                                bool *per_pixel_alpha, bool *global_alpha,
4665                                int *global_alpha_value)
4666 {
4667         *per_pixel_alpha = false;
4668         *global_alpha = false;
4669         *global_alpha_value = 0xff;
4670
4671         if (plane_state->plane->type != DRM_PLANE_TYPE_OVERLAY)
4672                 return;
4673
4674         if (plane_state->pixel_blend_mode == DRM_MODE_BLEND_PREMULTI) {
4675                 static const uint32_t alpha_formats[] = {
4676                         DRM_FORMAT_ARGB8888,
4677                         DRM_FORMAT_RGBA8888,
4678                         DRM_FORMAT_ABGR8888,
4679                 };
4680                 uint32_t format = plane_state->fb->format->format;
4681                 unsigned int i;
4682
4683                 for (i = 0; i < ARRAY_SIZE(alpha_formats); ++i) {
4684                         if (format == alpha_formats[i]) {
4685                                 *per_pixel_alpha = true;
4686                                 break;
4687                         }
4688                 }
4689         }
4690
4691         if (plane_state->alpha < 0xffff) {
4692                 *global_alpha = true;
4693                 *global_alpha_value = plane_state->alpha >> 8;
4694         }
4695 }
4696
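/*
 * Map the DRM color encoding/range properties to a DC color space. RGB
 * formats always use sRGB; for YCbCr, BT.601 and BT.709 support both
 * ranges, while BT.2020 is only accepted as full range here.
 */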
4697 static int
4698 fill_plane_color_attributes(const struct drm_plane_state *plane_state,
4699                             const enum surface_pixel_format format,
4700                             enum dc_color_space *color_space)
4701 {
4702         bool full_range;
4703
4704         *color_space = COLOR_SPACE_SRGB;
4705
4706         /* DRM color properties only affect non-RGB formats. */
4707         if (format < SURFACE_PIXEL_FORMAT_VIDEO_BEGIN)
4708                 return 0;
4709
4710         full_range = (plane_state->color_range == DRM_COLOR_YCBCR_FULL_RANGE);
4711
4712         switch (plane_state->color_encoding) {
4713         case DRM_COLOR_YCBCR_BT601:
4714                 if (full_range)
4715                         *color_space = COLOR_SPACE_YCBCR601;
4716                 else
4717                         *color_space = COLOR_SPACE_YCBCR601_LIMITED;
4718                 break;
4719
4720         case DRM_COLOR_YCBCR_BT709:
4721                 if (full_range)
4722                         *color_space = COLOR_SPACE_YCBCR709;
4723                 else
4724                         *color_space = COLOR_SPACE_YCBCR709_LIMITED;
4725                 break;
4726
4727         case DRM_COLOR_YCBCR_BT2020:
4728                 if (full_range)
4729                         *color_space = COLOR_SPACE_2020_YCBCR;
4730                 else
4731                         return -EINVAL;
4732                 break;
4733
4734         default:
4735                 return -EINVAL;
4736         }
4737
4738         return 0;
4739 }
4740
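/*
 * Convert a DRM plane state into a DC plane_info: the fourcc is mapped
 * onto a DC surface format and the rotation translated, then the color,
 * buffer and blending helpers above fill in the rest.
 */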
4741 static int
4742 fill_dc_plane_info_and_addr(struct amdgpu_device *adev,
4743                             const struct drm_plane_state *plane_state,
4744                             const uint64_t tiling_flags,
4745                             struct dc_plane_info *plane_info,
4746                             struct dc_plane_address *address,
4747                             bool tmz_surface,
4748                             bool force_disable_dcc)
4749 {
4750         const struct drm_framebuffer *fb = plane_state->fb;
4751         const struct amdgpu_framebuffer *afb =
4752                 to_amdgpu_framebuffer(plane_state->fb);
4753         int ret;
4754
4755         memset(plane_info, 0, sizeof(*plane_info));
4756
4757         switch (fb->format->format) {
4758         case DRM_FORMAT_C8:
4759                 plane_info->format =
4760                         SURFACE_PIXEL_FORMAT_GRPH_PALETA_256_COLORS;
4761                 break;
4762         case DRM_FORMAT_RGB565:
4763                 plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_RGB565;
4764                 break;
4765         case DRM_FORMAT_XRGB8888:
4766         case DRM_FORMAT_ARGB8888:
4767                 plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ARGB8888;
4768                 break;
4769         case DRM_FORMAT_XRGB2101010:
4770         case DRM_FORMAT_ARGB2101010:
4771                 plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ARGB2101010;
4772                 break;
4773         case DRM_FORMAT_XBGR2101010:
4774         case DRM_FORMAT_ABGR2101010:
4775                 plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ABGR2101010;
4776                 break;
4777         case DRM_FORMAT_XBGR8888:
4778         case DRM_FORMAT_ABGR8888:
4779                 plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ABGR8888;
4780                 break;
4781         case DRM_FORMAT_NV21:
4782                 plane_info->format = SURFACE_PIXEL_FORMAT_VIDEO_420_YCbCr;
4783                 break;
4784         case DRM_FORMAT_NV12:
4785                 plane_info->format = SURFACE_PIXEL_FORMAT_VIDEO_420_YCrCb;
4786                 break;
4787         case DRM_FORMAT_P010:
4788                 plane_info->format = SURFACE_PIXEL_FORMAT_VIDEO_420_10bpc_YCrCb;
4789                 break;
4790         case DRM_FORMAT_XRGB16161616F:
4791         case DRM_FORMAT_ARGB16161616F:
4792                 plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ARGB16161616F;
4793                 break;
4794         case DRM_FORMAT_XBGR16161616F:
4795         case DRM_FORMAT_ABGR16161616F:
4796                 plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ABGR16161616F;
4797                 break;
4798         default:
4799                 DRM_ERROR(
4800                         "Unsupported screen format %p4cc\n",
4801                         &fb->format->format);
4802                 return -EINVAL;
4803         }
4804
4805         switch (plane_state->rotation & DRM_MODE_ROTATE_MASK) {
4806         case DRM_MODE_ROTATE_0:
4807                 plane_info->rotation = ROTATION_ANGLE_0;
4808                 break;
4809         case DRM_MODE_ROTATE_90:
4810                 plane_info->rotation = ROTATION_ANGLE_90;
4811                 break;
4812         case DRM_MODE_ROTATE_180:
4813                 plane_info->rotation = ROTATION_ANGLE_180;
4814                 break;
4815         case DRM_MODE_ROTATE_270:
4816                 plane_info->rotation = ROTATION_ANGLE_270;
4817                 break;
4818         default:
4819                 plane_info->rotation = ROTATION_ANGLE_0;
4820                 break;
4821         }
4822
4823         plane_info->visible = true;
4824         plane_info->stereo_format = PLANE_STEREO_FORMAT_NONE;
4825
4826         plane_info->layer_index = 0;
4827
4828         ret = fill_plane_color_attributes(plane_state, plane_info->format,
4829                                           &plane_info->color_space);
4830         if (ret)
4831                 return ret;
4832
4833         ret = fill_plane_buffer_attributes(adev, afb, plane_info->format,
4834                                            plane_info->rotation, tiling_flags,
4835                                            &plane_info->tiling_info,
4836                                            &plane_info->plane_size,
4837                                            &plane_info->dcc, address, tmz_surface,
4838                                            force_disable_dcc);
4839         if (ret)
4840                 return ret;
4841
4842         fill_blending_from_plane_state(
4843                 plane_state, &plane_info->per_pixel_alpha,
4844                 &plane_info->global_alpha, &plane_info->global_alpha_value);
4845
4846         return 0;
4847 }
4848
4849 static int fill_dc_plane_attributes(struct amdgpu_device *adev,
4850                                     struct dc_plane_state *dc_plane_state,
4851                                     struct drm_plane_state *plane_state,
4852                                     struct drm_crtc_state *crtc_state)
4853 {
4854         struct dm_crtc_state *dm_crtc_state = to_dm_crtc_state(crtc_state);
4855         struct amdgpu_framebuffer *afb = (struct amdgpu_framebuffer *)plane_state->fb;
4856         struct dc_scaling_info scaling_info;
4857         struct dc_plane_info plane_info;
4858         int ret;
4859         bool force_disable_dcc = false;
4860
4861         ret = fill_dc_scaling_info(plane_state, &scaling_info);
4862         if (ret)
4863                 return ret;
4864
4865         dc_plane_state->src_rect = scaling_info.src_rect;
4866         dc_plane_state->dst_rect = scaling_info.dst_rect;
4867         dc_plane_state->clip_rect = scaling_info.clip_rect;
4868         dc_plane_state->scaling_quality = scaling_info.scaling_quality;
4869
4870         force_disable_dcc = adev->asic_type == CHIP_RAVEN && adev->in_suspend;
4871         ret = fill_dc_plane_info_and_addr(adev, plane_state,
4872                                           afb->tiling_flags,
4873                                           &plane_info,
4874                                           &dc_plane_state->address,
4875                                           afb->tmz_surface,
4876                                           force_disable_dcc);
4877         if (ret)
4878                 return ret;
4879
4880         dc_plane_state->format = plane_info.format;
4881         dc_plane_state->color_space = plane_info.color_space;
4883         dc_plane_state->plane_size = plane_info.plane_size;
4884         dc_plane_state->rotation = plane_info.rotation;
4885         dc_plane_state->horizontal_mirror = plane_info.horizontal_mirror;
4886         dc_plane_state->stereo_format = plane_info.stereo_format;
4887         dc_plane_state->tiling_info = plane_info.tiling_info;
4888         dc_plane_state->visible = plane_info.visible;
4889         dc_plane_state->per_pixel_alpha = plane_info.per_pixel_alpha;
4890         dc_plane_state->global_alpha = plane_info.global_alpha;
4891         dc_plane_state->global_alpha_value = plane_info.global_alpha_value;
4892         dc_plane_state->dcc = plane_info.dcc;
4893         dc_plane_state->layer_index = plane_info.layer_index; /* always 0 for now */
4894         dc_plane_state->flip_int_enabled = true;
4895
4896         /*
4897          * Always set input transfer function, since plane state is refreshed
4898          * every time.
4899          */
4900         ret = amdgpu_dm_update_plane_color_mgmt(dm_crtc_state, dc_plane_state);
4901         if (ret)
4902                 return ret;
4903
4904         return 0;
4905 }
4906
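/*
 * Compute the stream src (viewport) and dst (addressable area) rects from
 * the connector's scaling mode: RMX_ASPECT/RMX_OFF letterbox to preserve
 * the aspect ratio, RMX_CENTER shows the mode unscaled, and underscan
 * borders shrink the destination further.
 */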
4907 static void update_stream_scaling_settings(const struct drm_display_mode *mode,
4908                                            const struct dm_connector_state *dm_state,
4909                                            struct dc_stream_state *stream)
4910 {
4911         enum amdgpu_rmx_type rmx_type;
4912
4913         struct rect src = { 0 }; /* viewport in composition space */
4914         struct rect dst = { 0 }; /* stream addressable area */
4915
4916         /* no mode. nothing to be done */
4917         if (!mode)
4918                 return;
4919
4920         /* Full screen scaling by default */
4921         src.width = mode->hdisplay;
4922         src.height = mode->vdisplay;
4923         dst.width = stream->timing.h_addressable;
4924         dst.height = stream->timing.v_addressable;
4925
4926         if (dm_state) {
4927                 rmx_type = dm_state->scaling;
4928                 if (rmx_type == RMX_ASPECT || rmx_type == RMX_OFF) {
4929                         if (src.width * dst.height <
4930                                         src.height * dst.width) {
4931                                 /* height needs less upscaling/more downscaling */
4932                                 dst.width = src.width *
4933                                                 dst.height / src.height;
4934                         } else {
4935                                 /* width needs less upscaling/more downscaling */
4936                                 dst.height = src.height *
4937                                                 dst.width / src.width;
4938                         }
4939                 } else if (rmx_type == RMX_CENTER) {
4940                         dst = src;
4941                 }
4942
4943                 dst.x = (stream->timing.h_addressable - dst.width) / 2;
4944                 dst.y = (stream->timing.v_addressable - dst.height) / 2;
4945
4946                 if (dm_state->underscan_enable) {
4947                         dst.x += dm_state->underscan_hborder / 2;
4948                         dst.y += dm_state->underscan_vborder / 2;
4949                         dst.width -= dm_state->underscan_hborder;
4950                         dst.height -= dm_state->underscan_vborder;
4951                 }
4952         }
4953
4954         stream->src = src;
4955         stream->dst = dst;
4956
4957         DRM_DEBUG_KMS("Destination Rectangle x:%d  y:%d  width:%d  height:%d\n",
4958                       dst.x, dst.y, dst.width, dst.height);
4960 }
4961
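/*
 * Work out the color depth for a stream. For YCbCr 4:2:0 the depth comes
 * from the HDMI HF-VSDB deep-color caps; otherwise the EDID bpc is used
 * (assuming 8 when unspecified). The result is then clamped to the
 * user-requested max bpc and rounded down to an even value.
 */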
4962 static enum dc_color_depth
4963 convert_color_depth_from_display_info(const struct drm_connector *connector,
4964                                       bool is_y420, int requested_bpc)
4965 {
4966         uint8_t bpc;
4967
4968         if (is_y420) {
4969                 bpc = 8;
4970
4971                 /* Cap display bpc based on HDMI 2.0 HF-VSDB */
4972                 if (connector->display_info.hdmi.y420_dc_modes & DRM_EDID_YCBCR420_DC_48)
4973                         bpc = 16;
4974                 else if (connector->display_info.hdmi.y420_dc_modes & DRM_EDID_YCBCR420_DC_36)
4975                         bpc = 12;
4976                 else if (connector->display_info.hdmi.y420_dc_modes & DRM_EDID_YCBCR420_DC_30)
4977                         bpc = 10;
4978         } else {
4979                 bpc = (uint8_t)connector->display_info.bpc;
4980                 /* Assume 8 bpc by default if no bpc is specified. */
4981                 bpc = bpc ? bpc : 8;
4982         }
4983
4984         if (requested_bpc > 0) {
4985                 /*
4986                  * Cap display bpc based on the user requested value.
4987                  *
4988                  * The value for state->max_bpc may not be correctly updated
4989                  * depending on when the connector gets added to the state
4990                  * or if this was called outside of atomic check, so it
4991                  * can't be used directly.
4992                  */
4993                 bpc = min_t(u8, bpc, requested_bpc);
4994
4995                 /* Round down to the nearest even number. */
4996                 bpc = bpc - (bpc & 1);
4997         }
4998
4999         switch (bpc) {
5000         case 0:
5001                 /*
5002                  * Temporary workaround: DRM doesn't parse color depth for
5003                  * EDID revisions before 1.4.
5004                  * TODO: Fix edid parsing
5005                  */
5006                 return COLOR_DEPTH_888;
5007         case 6:
5008                 return COLOR_DEPTH_666;
5009         case 8:
5010                 return COLOR_DEPTH_888;
5011         case 10:
5012                 return COLOR_DEPTH_101010;
5013         case 12:
5014                 return COLOR_DEPTH_121212;
5015         case 14:
5016                 return COLOR_DEPTH_141414;
5017         case 16:
5018                 return COLOR_DEPTH_161616;
5019         default:
5020                 return COLOR_DEPTH_UNDEFINED;
5021         }
5022 }
5023
5024 static enum dc_aspect_ratio
5025 get_aspect_ratio(const struct drm_display_mode *mode_in)
5026 {
5027         /* 1-1 mapping, since both enums follow the HDMI spec. */
5028         return (enum dc_aspect_ratio) mode_in->picture_aspect_ratio;
5029 }
5030
5031 static enum dc_color_space
5032 get_output_color_space(const struct dc_crtc_timing *dc_crtc_timing)
5033 {
5034         enum dc_color_space color_space = COLOR_SPACE_SRGB;
5035
5036         switch (dc_crtc_timing->pixel_encoding) {
5037         case PIXEL_ENCODING_YCBCR422:
5038         case PIXEL_ENCODING_YCBCR444:
5039         case PIXEL_ENCODING_YCBCR420:
5040         {
5041                 /*
5042                  * Per the HDMI spec, 27.03 MHz is the separation point
5043                  * between HDTV and SDTV, so use YCbCr709 above that pixel
5044                  * clock and YCbCr601 below it.
5045                  */
5046                 if (dc_crtc_timing->pix_clk_100hz > 270300) {
5047                         if (dc_crtc_timing->flags.Y_ONLY)
5048                                 color_space =
5049                                         COLOR_SPACE_YCBCR709_LIMITED;
5050                         else
5051                                 color_space = COLOR_SPACE_YCBCR709;
5052                 } else {
5053                         if (dc_crtc_timing->flags.Y_ONLY)
5054                                 color_space =
5055                                         COLOR_SPACE_YCBCR601_LIMITED;
5056                         else
5057                                 color_space = COLOR_SPACE_YCBCR601;
5058                 }
5059
5060         }
5061         break;
5062         case PIXEL_ENCODING_RGB:
5063                 color_space = COLOR_SPACE_SRGB;
5064                 break;
5065
5066         default:
5067                 WARN_ON(1);
5068                 break;
5069         }
5070
5071         return color_space;
5072 }
5073
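/*
 * Walk down from the current color depth until the normalized pixel clock
 * fits within the sink's max TMDS clock: 4:2:0 halves the clock and
 * deeper color scales it by bpp/24. Returns false if not even
 * COLOR_DEPTH_888 fits.
 */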
5074 static bool adjust_colour_depth_from_display_info(
5075         struct dc_crtc_timing *timing_out,
5076         const struct drm_display_info *info)
5077 {
5078         enum dc_color_depth depth = timing_out->display_color_depth;
5079         int normalized_clk;
5080         do {
5081                 normalized_clk = timing_out->pix_clk_100hz / 10;
5082                 /* YCbCr 4:2:0 requires additional adjustment of 1/2 */
5083                 if (timing_out->pixel_encoding == PIXEL_ENCODING_YCBCR420)
5084                         normalized_clk /= 2;
5085                 /* Adjusting pix clock following on HDMI spec based on colour depth */
5086                 switch (depth) {
5087                 case COLOR_DEPTH_888:
5088                         break;
5089                 case COLOR_DEPTH_101010:
5090                         normalized_clk = (normalized_clk * 30) / 24;
5091                         break;
5092                 case COLOR_DEPTH_121212:
5093                         normalized_clk = (normalized_clk * 36) / 24;
5094                         break;
5095                 case COLOR_DEPTH_161616:
5096                         normalized_clk = (normalized_clk * 48) / 24;
5097                         break;
5098                 default:
5099                         /* The above depths are the only ones valid for HDMI. */
5100                         return false;
5101                 }
5102                 if (normalized_clk <= info->max_tmds_clock) {
5103                         timing_out->display_color_depth = depth;
5104                         return true;
5105                 }
5106         } while (--depth > COLOR_DEPTH_666);
5107         return false;
5108 }
5109
5110 static void fill_stream_properties_from_drm_display_mode(
5111         struct dc_stream_state *stream,
5112         const struct drm_display_mode *mode_in,
5113         const struct drm_connector *connector,
5114         const struct drm_connector_state *connector_state,
5115         const struct dc_stream_state *old_stream,
5116         int requested_bpc)
5117 {
5118         struct dc_crtc_timing *timing_out = &stream->timing;
5119         const struct drm_display_info *info = &connector->display_info;
5120         struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
5121         struct hdmi_vendor_infoframe hv_frame;
5122         struct hdmi_avi_infoframe avi_frame;
5123
5124         memset(&hv_frame, 0, sizeof(hv_frame));
5125         memset(&avi_frame, 0, sizeof(avi_frame));
5126
5127         timing_out->h_border_left = 0;
5128         timing_out->h_border_right = 0;
5129         timing_out->v_border_top = 0;
5130         timing_out->v_border_bottom = 0;
5131         /* TODO: un-hardcode */
5132         if (drm_mode_is_420_only(info, mode_in)
5133                         && stream->signal == SIGNAL_TYPE_HDMI_TYPE_A)
5134                 timing_out->pixel_encoding = PIXEL_ENCODING_YCBCR420;
5135         else if (drm_mode_is_420_also(info, mode_in)
5136                         && aconnector->force_yuv420_output)
5137                 timing_out->pixel_encoding = PIXEL_ENCODING_YCBCR420;
5138         else if ((connector->display_info.color_formats & DRM_COLOR_FORMAT_YCRCB444)
5139                         && stream->signal == SIGNAL_TYPE_HDMI_TYPE_A)
5140                 timing_out->pixel_encoding = PIXEL_ENCODING_YCBCR444;
5141         else
5142                 timing_out->pixel_encoding = PIXEL_ENCODING_RGB;
5143
5144         timing_out->timing_3d_format = TIMING_3D_FORMAT_NONE;
5145         timing_out->display_color_depth = convert_color_depth_from_display_info(
5146                 connector,
5147                 (timing_out->pixel_encoding == PIXEL_ENCODING_YCBCR420),
5148                 requested_bpc);
5149         timing_out->scan_type = SCANNING_TYPE_NODATA;
5150         timing_out->hdmi_vic = 0;
5151
5152         if (old_stream) {
5153                 timing_out->vic = old_stream->timing.vic;
5154                 timing_out->flags.HSYNC_POSITIVE_POLARITY = old_stream->timing.flags.HSYNC_POSITIVE_POLARITY;
5155                 timing_out->flags.VSYNC_POSITIVE_POLARITY = old_stream->timing.flags.VSYNC_POSITIVE_POLARITY;
5156         } else {
5157                 timing_out->vic = drm_match_cea_mode(mode_in);
5158                 if (mode_in->flags & DRM_MODE_FLAG_PHSYNC)
5159                         timing_out->flags.HSYNC_POSITIVE_POLARITY = 1;
5160                 if (mode_in->flags & DRM_MODE_FLAG_PVSYNC)
5161                         timing_out->flags.VSYNC_POSITIVE_POLARITY = 1;
5162         }
5163
5164         if (stream->signal == SIGNAL_TYPE_HDMI_TYPE_A) {
5165                 drm_hdmi_avi_infoframe_from_display_mode(&avi_frame, (struct drm_connector *)connector, mode_in);
5166                 timing_out->vic = avi_frame.video_code;
5167                 drm_hdmi_vendor_infoframe_from_display_mode(&hv_frame, (struct drm_connector *)connector, mode_in);
5168                 timing_out->hdmi_vic = hv_frame.vic;
5169         }
5170
5171         if (is_freesync_video_mode(mode_in, aconnector)) {
5172                 timing_out->h_addressable = mode_in->hdisplay;
5173                 timing_out->h_total = mode_in->htotal;
5174                 timing_out->h_sync_width = mode_in->hsync_end - mode_in->hsync_start;
5175                 timing_out->h_front_porch = mode_in->hsync_start - mode_in->hdisplay;
5176                 timing_out->v_total = mode_in->vtotal;
5177                 timing_out->v_addressable = mode_in->vdisplay;
5178                 timing_out->v_front_porch = mode_in->vsync_start - mode_in->vdisplay;
5179                 timing_out->v_sync_width = mode_in->vsync_end - mode_in->vsync_start;
5180                 timing_out->pix_clk_100hz = mode_in->clock * 10;
5181         } else {
5182                 timing_out->h_addressable = mode_in->crtc_hdisplay;
5183                 timing_out->h_total = mode_in->crtc_htotal;
5184                 timing_out->h_sync_width = mode_in->crtc_hsync_end - mode_in->crtc_hsync_start;
5185                 timing_out->h_front_porch = mode_in->crtc_hsync_start - mode_in->crtc_hdisplay;
5186                 timing_out->v_total = mode_in->crtc_vtotal;
5187                 timing_out->v_addressable = mode_in->crtc_vdisplay;
5188                 timing_out->v_front_porch = mode_in->crtc_vsync_start - mode_in->crtc_vdisplay;
5189                 timing_out->v_sync_width = mode_in->crtc_vsync_end - mode_in->crtc_vsync_start;
5190                 timing_out->pix_clk_100hz = mode_in->crtc_clock * 10;
5191         }
5192
5193         timing_out->aspect_ratio = get_aspect_ratio(mode_in);
5194
5195         stream->output_color_space = get_output_color_space(timing_out);
5196
5197         stream->out_transfer_func->type = TF_TYPE_PREDEFINED;
5198         stream->out_transfer_func->tf = TRANSFER_FUNCTION_SRGB;
5199         if (stream->signal == SIGNAL_TYPE_HDMI_TYPE_A) {
5200                 if (!adjust_colour_depth_from_display_info(timing_out, info) &&
5201                     drm_mode_is_420_also(info, mode_in) &&
5202                     timing_out->pixel_encoding != PIXEL_ENCODING_YCBCR420) {
5203                         timing_out->pixel_encoding = PIXEL_ENCODING_YCBCR420;
5204                         adjust_colour_depth_from_display_info(timing_out, info);
5205                 }
5206         }
5207 }
5208
5209 static void fill_audio_info(struct audio_info *audio_info,
5210                             const struct drm_connector *drm_connector,
5211                             const struct dc_sink *dc_sink)
5212 {
5213         int i = 0;
5214         int cea_revision = 0;
5215         const struct dc_edid_caps *edid_caps = &dc_sink->edid_caps;
5216
5217         audio_info->manufacture_id = edid_caps->manufacturer_id;
5218         audio_info->product_id = edid_caps->product_id;
5219
5220         cea_revision = drm_connector->display_info.cea_rev;
5221
5222         strscpy(audio_info->display_name,
5223                 edid_caps->display_name,
5224                 AUDIO_INFO_DISPLAY_NAME_SIZE_IN_CHARS);
5225
5226         if (cea_revision >= 3) {
5227                 audio_info->mode_count = edid_caps->audio_mode_count;
5228
5229                 for (i = 0; i < audio_info->mode_count; ++i) {
5230                         audio_info->modes[i].format_code =
5231                                         (enum audio_format_code)
5232                                         (edid_caps->audio_modes[i].format_code);
5233                         audio_info->modes[i].channel_count =
5234                                         edid_caps->audio_modes[i].channel_count;
5235                         audio_info->modes[i].sample_rates.all =
5236                                         edid_caps->audio_modes[i].sample_rate;
5237                         audio_info->modes[i].sample_size =
5238                                         edid_caps->audio_modes[i].sample_size;
5239                 }
5240         }
5241
5242         audio_info->flags.all = edid_caps->speaker_flags;
5243
5244         /* TODO: We only check progressive mode; check interlaced mode too. */
5245         if (drm_connector->latency_present[0]) {
5246                 audio_info->video_latency = drm_connector->video_latency[0];
5247                 audio_info->audio_latency = drm_connector->audio_latency[0];
5248         }
5249
5250         /* TODO: For DP, video and audio latency should be calculated from DPCD caps */
5252 }
5253
5254 static void
5255 copy_crtc_timing_for_drm_display_mode(const struct drm_display_mode *src_mode,
5256                                       struct drm_display_mode *dst_mode)
5257 {
5258         dst_mode->crtc_hdisplay = src_mode->crtc_hdisplay;
5259         dst_mode->crtc_vdisplay = src_mode->crtc_vdisplay;
5260         dst_mode->crtc_clock = src_mode->crtc_clock;
5261         dst_mode->crtc_hblank_start = src_mode->crtc_hblank_start;
5262         dst_mode->crtc_hblank_end = src_mode->crtc_hblank_end;
5263         dst_mode->crtc_hsync_start =  src_mode->crtc_hsync_start;
5264         dst_mode->crtc_hsync_end = src_mode->crtc_hsync_end;
5265         dst_mode->crtc_htotal = src_mode->crtc_htotal;
5266         dst_mode->crtc_hskew = src_mode->crtc_hskew;
5267         dst_mode->crtc_vblank_start = src_mode->crtc_vblank_start;
5268         dst_mode->crtc_vblank_end = src_mode->crtc_vblank_end;
5269         dst_mode->crtc_vsync_start = src_mode->crtc_vsync_start;
5270         dst_mode->crtc_vsync_end = src_mode->crtc_vsync_end;
5271         dst_mode->crtc_vtotal = src_mode->crtc_vtotal;
5272 }
5273
5274 static void
5275 decide_crtc_timing_for_drm_display_mode(struct drm_display_mode *drm_mode,
5276                                         const struct drm_display_mode *native_mode,
5277                                         bool scale_enabled)
5278 {
5279         if (scale_enabled) {
5280                 copy_crtc_timing_for_drm_display_mode(native_mode, drm_mode);
5281         } else if (native_mode->clock == drm_mode->clock &&
5282                         native_mode->htotal == drm_mode->htotal &&
5283                         native_mode->vtotal == drm_mode->vtotal) {
5284                 copy_crtc_timing_for_drm_display_mode(native_mode, drm_mode);
5285         } else {
5286                 /* neither scaling nor an amdgpu-inserted mode; nothing to patch */
5287         }
5288 }
5289
5290 static struct dc_sink *
5291 create_fake_sink(struct amdgpu_dm_connector *aconnector)
5292 {
5293         struct dc_sink_init_data sink_init_data = { 0 };
5294         struct dc_sink *sink = NULL;
5295         sink_init_data.link = aconnector->dc_link;
5296         sink_init_data.sink_signal = aconnector->dc_link->connector_signal;
5297
5298         sink = dc_sink_create(&sink_init_data);
5299         if (!sink) {
5300                 DRM_ERROR("Failed to create sink!\n");
5301                 return NULL;
5302         }
5303         sink->sink_signal = SIGNAL_TYPE_VIRTUAL;
5304
5305         return sink;
5306 }
5307
5308 static void set_multisync_trigger_params(
5309                 struct dc_stream_state *stream)
5310 {
5311         struct dc_stream_state *master = NULL;
5312
5313         if (stream->triggered_crtc_reset.enabled) {
5314                 master = stream->triggered_crtc_reset.event_source;
5315                 stream->triggered_crtc_reset.event =
5316                         master->timing.flags.VSYNC_POSITIVE_POLARITY ?
5317                         CRTC_EVENT_VSYNC_RISING : CRTC_EVENT_VSYNC_FALLING;
5318                 stream->triggered_crtc_reset.delay = TRIGGER_DELAY_NEXT_PIXEL;
5319         }
5320 }
5321
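/*
 * Pick the stream with the highest refresh rate as the multisync master;
 * every synced stream's CRTC-reset trigger is then sourced from it.
 */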
5322 static void set_master_stream(struct dc_stream_state *stream_set[],
5323                               int stream_count)
5324 {
5325         int j, highest_rfr = 0, master_stream = 0;
5326
5327         for (j = 0;  j < stream_count; j++) {
5328                 if (stream_set[j] && stream_set[j]->triggered_crtc_reset.enabled) {
5329                         int refresh_rate = 0;
5330
5331                         refresh_rate = (stream_set[j]->timing.pix_clk_100hz*100)/
5332                                 (stream_set[j]->timing.h_total*stream_set[j]->timing.v_total);
5333                         if (refresh_rate > highest_rfr) {
5334                                 highest_rfr = refresh_rate;
5335                                 master_stream = j;
5336                         }
5337                 }
5338         }
5339         for (j = 0;  j < stream_count; j++) {
5340                 if (stream_set[j])
5341                         stream_set[j]->triggered_crtc_reset.event_source = stream_set[master_stream];
5342         }
5343 }
5344
5345 static void dm_enable_per_frame_crtc_master_sync(struct dc_state *context)
5346 {
5347         int i = 0;
5348         struct dc_stream_state *stream;
5349
5350         if (context->stream_count < 2)
5351                 return;
5352         for (i = 0; i < context->stream_count ; i++) {
5353                 if (!context->streams[i])
5354                         continue;
5355                 /*
5356                  * TODO: add a function to read AMD VSDB bits and set
5357                  * crtc_sync_master.multi_sync_enabled flag
5358                  * For now it's set to false
5359                  */
5360         }
5361
5362         set_master_stream(context->streams, context->stream_count);
5363
5364         for (i = 0; i < context->stream_count ; i++) {
5365                 stream = context->streams[i];
5366
5367                 if (!stream)
5368                         continue;
5369
5370                 set_multisync_trigger_params(stream);
5371         }
5372 }
5373
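/*
 * Find the mode with the highest refresh rate at the preferred mode's
 * resolution, searching either the probed or the current mode list. The
 * result is cached in aconnector->freesync_vid_base and serves as the
 * base timing for freesync video modes.
 */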
5374 static struct drm_display_mode *
5375 get_highest_refresh_rate_mode(struct amdgpu_dm_connector *aconnector,
5376                           bool use_probed_modes)
5377 {
5378         struct drm_display_mode *m, *m_pref = NULL;
5379         u16 current_refresh, highest_refresh;
5380         struct list_head *list_head = use_probed_modes ?
5381                                                     &aconnector->base.probed_modes :
5382                                                     &aconnector->base.modes;
5383
5384         if (aconnector->freesync_vid_base.clock != 0)
5385                 return &aconnector->freesync_vid_base;
5386
5387         /* Find the preferred mode */
5388         list_for_each_entry(m, list_head, head) {
5389                 if (m->type & DRM_MODE_TYPE_PREFERRED) {
5390                         m_pref = m;
5391                         break;
5392                 }
5393         }
5394
5395         if (!m_pref) {
5396                 /* Probably an EDID with no preferred mode; fall back to the first entry. */
5397                 m_pref = list_first_entry_or_null(
5398                         &aconnector->base.modes, struct drm_display_mode, head);
5399                 if (!m_pref) {
5400                         DRM_DEBUG_DRIVER("No preferred mode found in EDID\n");
5401                         return NULL;
5402                 }
5403         }
5404
5405         highest_refresh = drm_mode_vrefresh(m_pref);
5406
5407         /*
5408          * Find the mode with highest refresh rate with same resolution.
5409          * For some monitors, preferred mode is not the mode with highest
5410          * supported refresh rate.
5411          */
5412         list_for_each_entry(m, list_head, head) {
5413                 current_refresh = drm_mode_vrefresh(m);
5414
5415                 if (m->hdisplay == m_pref->hdisplay &&
5416                     m->vdisplay == m_pref->vdisplay &&
5417                     highest_refresh < current_refresh) {
5418                         highest_refresh = current_refresh;
5419                         m_pref = m;
5420                 }
5421         }
5422
5423         aconnector->freesync_vid_base = *m_pref;
5424         return m_pref;
5425 }
5426
5427 static bool is_freesync_video_mode(const struct drm_display_mode *mode,
5428                                    struct amdgpu_dm_connector *aconnector)
5429 {
5430         struct drm_display_mode *high_mode;
5431         int timing_diff;
5432
5433         high_mode = get_highest_refresh_rate_mode(aconnector, false);
5434         if (!high_mode || !mode)
5435                 return false;
5436
5437         timing_diff = high_mode->vtotal - mode->vtotal;
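        /*
         * A FreeSync video mode stretches only the vertical front porch of
         * the base mode, so every timing parameter must match except vtotal,
         * vsync_start and vsync_end, which all shift by the same delta.
         */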
5438
5439         if (high_mode->clock == 0 || high_mode->clock != mode->clock ||
5440             high_mode->hdisplay != mode->hdisplay ||
5441             high_mode->vdisplay != mode->vdisplay ||
5442             high_mode->hsync_start != mode->hsync_start ||
5443             high_mode->hsync_end != mode->hsync_end ||
5444             high_mode->htotal != mode->htotal ||
5445             high_mode->hskew != mode->hskew ||
5446             high_mode->vscan != mode->vscan ||
5447             high_mode->vsync_start - mode->vsync_start != timing_diff ||
5448             high_mode->vsync_end - mode->vsync_end != timing_diff)
5449                 return false;
5450         else
5451                 return true;
5452 }
5453
5454 static struct dc_stream_state *
5455 create_stream_for_sink(struct amdgpu_dm_connector *aconnector,
5456                        const struct drm_display_mode *drm_mode,
5457                        const struct dm_connector_state *dm_state,
5458                        const struct dc_stream_state *old_stream,
5459                        int requested_bpc)
5460 {
5461         struct drm_display_mode *preferred_mode = NULL;
5462         struct drm_connector *drm_connector;
5463         const struct drm_connector_state *con_state =
5464                 dm_state ? &dm_state->base : NULL;
5465         struct dc_stream_state *stream = NULL;
5466         struct drm_display_mode mode = *drm_mode;
5467         struct drm_display_mode saved_mode;
5468         struct drm_display_mode *freesync_mode = NULL;
5469         bool native_mode_found = false;
5470         bool recalculate_timing = dm_state ? (dm_state->scaling != RMX_OFF) : false;
5471         int mode_refresh;
5472         int preferred_refresh = 0;
5473 #if defined(CONFIG_DRM_AMD_DC_DCN)
5474         struct dsc_dec_dpcd_caps dsc_caps;
5475         uint32_t link_bandwidth_kbps;
5476 #endif
5477         struct dc_sink *sink = NULL;
5478
5479         memset(&saved_mode, 0, sizeof(saved_mode));
5480
5481         if (aconnector == NULL) {
5482                 DRM_ERROR("aconnector is NULL!\n");
5483                 return stream;
5484         }
5485
5486         drm_connector = &aconnector->base;
5487
5488         if (!aconnector->dc_sink) {
5489                 sink = create_fake_sink(aconnector);
5490                 if (!sink)
5491                         return stream;
5492         } else {
5493                 sink = aconnector->dc_sink;
5494                 dc_sink_retain(sink);
5495         }
5496
5497         stream = dc_create_stream_for_sink(sink);
5498
5499         if (stream == NULL) {
5500                 DRM_ERROR("Failed to create stream for sink!\n");
5501                 goto finish;
5502         }
5503
5504         stream->dm_stream_context = aconnector;
5505
5506         stream->timing.flags.LTE_340MCSC_SCRAMBLE =
5507                 drm_connector->display_info.hdmi.scdc.scrambling.low_rates;
5508
5509         list_for_each_entry(preferred_mode, &aconnector->base.modes, head) {
5510                 /* Search for preferred mode */
5511                 if (preferred_mode->type & DRM_MODE_TYPE_PREFERRED) {
5512                         native_mode_found = true;
5513                         break;
5514                 }
5515         }
5516         if (!native_mode_found)
5517                 preferred_mode = list_first_entry_or_null(
5518                                 &aconnector->base.modes,
5519                                 struct drm_display_mode,
5520                                 head);
5521
5522         mode_refresh = drm_mode_vrefresh(&mode);
5523
5524         if (preferred_mode == NULL) {
5525                 /*
5526                  * This may not be an error. The use case is when we have no
5527                  * usermode calls to reset and set the mode upon hotplug. In this
5528                  * case, we call set mode ourselves to restore the previous mode,
5529                  * and the mode list may not be filled in yet.
5530                  */
5531                 DRM_DEBUG_DRIVER("No preferred mode found\n");
5532         } else {
5533                 recalculate_timing |= amdgpu_freesync_vid_mode &&
5534                                  is_freesync_video_mode(&mode, aconnector);
5535                 if (recalculate_timing) {
5536                         freesync_mode = get_highest_refresh_rate_mode(aconnector, false);
5537                         saved_mode = mode;
5538                         mode = *freesync_mode;
5539                 } else {
5540                         decide_crtc_timing_for_drm_display_mode(
5541                                 &mode, preferred_mode,
5542                                 dm_state ? (dm_state->scaling != RMX_OFF) : false);
5543                 }
5544
5545                 preferred_refresh = drm_mode_vrefresh(preferred_mode);
5546         }
5547
5548         if (recalculate_timing)
5549                 drm_mode_set_crtcinfo(&saved_mode, 0);
5550         else if (!dm_state)
5551                 drm_mode_set_crtcinfo(&mode, 0);
5552
5553         /*
5554          * If scaling is enabled and the refresh rate didn't change,
5555          * we copy the VIC and polarities from the old timings.
5556          */
5557         if (!recalculate_timing || mode_refresh != preferred_refresh)
5558                 fill_stream_properties_from_drm_display_mode(
5559                         stream, &mode, &aconnector->base, con_state, NULL,
5560                         requested_bpc);
5561         else
5562                 fill_stream_properties_from_drm_display_mode(
5563                         stream, &mode, &aconnector->base, con_state, old_stream,
5564                         requested_bpc);
5565
5566         stream->timing.flags.DSC = 0;
5567
5568         if (aconnector->dc_link && sink->sink_signal == SIGNAL_TYPE_DISPLAY_PORT) {
5569 #if defined(CONFIG_DRM_AMD_DC_DCN)
5570                 dc_dsc_parse_dsc_dpcd(aconnector->dc_link->ctx->dc,
5571                                       aconnector->dc_link->dpcd_caps.dsc_caps.dsc_basic_caps.raw,
5572                                       aconnector->dc_link->dpcd_caps.dsc_caps.dsc_branch_decoder_caps.raw,
5573                                       &dsc_caps);
5574                 link_bandwidth_kbps = dc_link_bandwidth_kbps(aconnector->dc_link,
5575                                                              dc_link_get_link_cap(aconnector->dc_link));
5576
5577                 if (aconnector->dsc_settings.dsc_force_enable != DSC_CLK_FORCE_DISABLE && dsc_caps.is_dsc_supported) {
5578                         /* Set DSC policy according to dsc_clock_en */
5579                         dc_dsc_policy_set_enable_dsc_when_not_needed(
5580                                 aconnector->dsc_settings.dsc_force_enable == DSC_CLK_FORCE_ENABLE);
5581
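                        /*
                         * dc_dsc_compute_config() searches for a DSC
                         * configuration (slice layout, target bpp) that fits
                         * within the decoder caps and the available link
                         * bandwidth; DSC is enabled on the timing only if one
                         * is found.
                         */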
5582                         if (dc_dsc_compute_config(aconnector->dc_link->ctx->dc->res_pool->dscs[0],
5583                                                   &dsc_caps,
5584                                                   aconnector->dc_link->ctx->dc->debug.dsc_min_slice_height_override,
5585                                                   0,
5586                                                   link_bandwidth_kbps,
5587                                                   &stream->timing,
5588                                                   &stream->timing.dsc_cfg))
5589                                 stream->timing.flags.DSC = 1;
5590                         /* Overwrite the stream flag if DSC is enabled through debugfs */
5591                         if (aconnector->dsc_settings.dsc_force_enable == DSC_CLK_FORCE_ENABLE)
5592                                 stream->timing.flags.DSC = 1;
5593
5594                         if (stream->timing.flags.DSC && aconnector->dsc_settings.dsc_num_slices_h)
5595                                 stream->timing.dsc_cfg.num_slices_h = aconnector->dsc_settings.dsc_num_slices_h;
5596
5597                         if (stream->timing.flags.DSC && aconnector->dsc_settings.dsc_num_slices_v)
5598                                 stream->timing.dsc_cfg.num_slices_v = aconnector->dsc_settings.dsc_num_slices_v;
5599
5600                         if (stream->timing.flags.DSC && aconnector->dsc_settings.dsc_bits_per_pixel)
5601                                 stream->timing.dsc_cfg.bits_per_pixel = aconnector->dsc_settings.dsc_bits_per_pixel;
5602                 }
5603 #endif
5604         }
5605
5606         update_stream_scaling_settings(&mode, dm_state, stream);
5607
5608         fill_audio_info(
5609                 &stream->audio_info,
5610                 drm_connector,
5611                 sink);
5612
5613         update_stream_signal(stream, sink);
5614
5615         if (stream->signal == SIGNAL_TYPE_HDMI_TYPE_A)
5616                 mod_build_hf_vsif_infopacket(stream, &stream->vsp_infopacket);
5617
5618         if (stream->link->psr_settings.psr_feature_enabled) {
5619                 /*
5620                  * Decide whether the stream supports VSC SDP colorimetry
5621                  * before building the VSC info packet.
5622                  */
5623                 stream->use_vsc_sdp_for_colorimetry = false;
5624                 if (aconnector->dc_sink->sink_signal == SIGNAL_TYPE_DISPLAY_PORT_MST) {
5625                         stream->use_vsc_sdp_for_colorimetry =
5626                                 aconnector->dc_sink->is_vsc_sdp_colorimetry_supported;
5627                 } else {
5628                         if (stream->link->dpcd_caps.dprx_feature.bits.VSC_SDP_COLORIMETRY_SUPPORTED)
5629                                 stream->use_vsc_sdp_for_colorimetry = true;
5630                 }
5631                 mod_build_vsc_infopacket(stream, &stream->vsc_infopacket);
5632         }
5633 finish:
5634         dc_sink_release(sink);
5635
5636         return stream;
5637 }
5638
5639 static void amdgpu_dm_crtc_destroy(struct drm_crtc *crtc)
5640 {
5641         drm_crtc_cleanup(crtc);
5642         kfree(crtc);
5643 }
5644
5645 static void dm_crtc_destroy_state(struct drm_crtc *crtc,
5646                                   struct drm_crtc_state *state)
5647 {
5648         struct dm_crtc_state *cur = to_dm_crtc_state(state);
5649
5650         /* TODO: Destroy dc_stream objects once the stream object is flattened */
5651         if (cur->stream)
5652                 dc_stream_release(cur->stream);
5653
5654
5655         __drm_atomic_helper_crtc_destroy_state(state);
5656
5657
5658         kfree(state);
5659 }
5660
5661 static void dm_crtc_reset_state(struct drm_crtc *crtc)
5662 {
5663         struct dm_crtc_state *state;
5664
5665         if (crtc->state)
5666                 dm_crtc_destroy_state(crtc, crtc->state);
5667
5668         state = kzalloc(sizeof(*state), GFP_KERNEL);
5669         if (WARN_ON(!state))
5670                 return;
5671
5672         __drm_atomic_helper_crtc_reset(crtc, &state->base);
5673 }
5674
5675 static struct drm_crtc_state *
5676 dm_crtc_duplicate_state(struct drm_crtc *crtc)
5677 {
5678         struct dm_crtc_state *state, *cur;
5679
5680         if (WARN_ON(!crtc->state))
5681                 return NULL;
5682
5683         cur = to_dm_crtc_state(crtc->state);
5684
5685         state = kzalloc(sizeof(*state), GFP_KERNEL);
5686         if (!state)
5687                 return NULL;
5688
5689         __drm_atomic_helper_crtc_duplicate_state(crtc, &state->base);
5690
5691         if (cur->stream) {
5692                 state->stream = cur->stream;
5693                 dc_stream_retain(state->stream);
5694         }
5695
5696         state->active_planes = cur->active_planes;
5697         state->vrr_infopacket = cur->vrr_infopacket;
5698         state->abm_level = cur->abm_level;
5699         state->vrr_supported = cur->vrr_supported;
5700         state->freesync_config = cur->freesync_config;
5701         state->cm_has_degamma = cur->cm_has_degamma;
5702         state->cm_is_degamma_srgb = cur->cm_is_degamma_srgb;
5703         /* TODO: Duplicate the dc_stream once the stream object is flattened */
5704
5705         return &state->base;
5706 }
5707
5708 #ifdef CONFIG_DRM_AMD_SECURE_DISPLAY
5709 static int amdgpu_dm_crtc_late_register(struct drm_crtc *crtc)
5710 {
5711         crtc_debugfs_init(crtc);
5712
5713         return 0;
5714 }
5715 #endif
5716
5717 static inline int dm_set_vupdate_irq(struct drm_crtc *crtc, bool enable)
5718 {
5719         enum dc_irq_source irq_source;
5720         struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
5721         struct amdgpu_device *adev = drm_to_adev(crtc->dev);
5722         int rc;
5723
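        /*
         * The dc_irq_source enum lays out the per-OTG VUPDATE sources
         * consecutively, so adding the OTG instance to IRQ_TYPE_VUPDATE
         * selects the source belonging to this CRTC.
         */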
5724         irq_source = IRQ_TYPE_VUPDATE + acrtc->otg_inst;
5725
5726         rc = dc_interrupt_set(adev->dm.dc, irq_source, enable) ? 0 : -EBUSY;
5727
5728         DRM_DEBUG_VBL("crtc %d - vupdate irq %sabling: r=%d\n",
5729                       acrtc->crtc_id, enable ? "en" : "dis", rc);
5730         return rc;
5731 }
5732
5733 static inline int dm_set_vblank(struct drm_crtc *crtc, bool enable)
5734 {
5735         enum dc_irq_source irq_source;
5736         struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
5737         struct amdgpu_device *adev = drm_to_adev(crtc->dev);
5738         struct dm_crtc_state *acrtc_state = to_dm_crtc_state(crtc->state);
5739 #if defined(CONFIG_DRM_AMD_DC_DCN)
5740         struct amdgpu_display_manager *dm = &adev->dm;
5741         unsigned long flags;
5742 #endif
5743         int rc = 0;
5744
5745         if (enable) {
5746                 /* vblank irq on -> Only need vupdate irq in vrr mode */
5747                 if (amdgpu_dm_vrr_active(acrtc_state))
5748                         rc = dm_set_vupdate_irq(crtc, true);
5749         } else {
5750                 /* vblank irq off -> vupdate irq off */
5751                 rc = dm_set_vupdate_irq(crtc, false);
5752         }
5753
5754         if (rc)
5755                 return rc;
5756
5757         irq_source = IRQ_TYPE_VBLANK + acrtc->otg_inst;
5758
5759         if (!dc_interrupt_set(adev->dm.dc, irq_source, enable))
5760                 return -EBUSY;
5761
5762         if (amdgpu_in_reset(adev))
5763                 return 0;
5764
5765 #if defined(CONFIG_DRM_AMD_DC_DCN)
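        /*
         * MALL/stutter state follows vblank interrupt state, but updating it
         * involves DC/DMUB calls that cannot run safely in this context, so
         * the OTG instance and enable flag are handed to the deferred vblank
         * worker instead.
         */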
5766         spin_lock_irqsave(&dm->vblank_lock, flags);
5767         dm->vblank_workqueue->dm = dm;
5768         dm->vblank_workqueue->otg_inst = acrtc->otg_inst;
5769         dm->vblank_workqueue->enable = enable;
5770         spin_unlock_irqrestore(&dm->vblank_lock, flags);
5771         schedule_work(&dm->vblank_workqueue->mall_work);
5772 #endif
5773
5774         return 0;
5775 }
5776
5777 static int dm_enable_vblank(struct drm_crtc *crtc)
5778 {
5779         return dm_set_vblank(crtc, true);
5780 }
5781
5782 static void dm_disable_vblank(struct drm_crtc *crtc)
5783 {
5784         dm_set_vblank(crtc, false);
5785 }
5786
5787 /* Implemented only the options currently available for the driver */
5788 static const struct drm_crtc_funcs amdgpu_dm_crtc_funcs = {
5789         .reset = dm_crtc_reset_state,
5790         .destroy = amdgpu_dm_crtc_destroy,
5791         .set_config = drm_atomic_helper_set_config,
5792         .page_flip = drm_atomic_helper_page_flip,
5793         .atomic_duplicate_state = dm_crtc_duplicate_state,
5794         .atomic_destroy_state = dm_crtc_destroy_state,
5795         .set_crc_source = amdgpu_dm_crtc_set_crc_source,
5796         .verify_crc_source = amdgpu_dm_crtc_verify_crc_source,
5797         .get_crc_sources = amdgpu_dm_crtc_get_crc_sources,
5798         .get_vblank_counter = amdgpu_get_vblank_counter_kms,
5799         .enable_vblank = dm_enable_vblank,
5800         .disable_vblank = dm_disable_vblank,
5801         .get_vblank_timestamp = drm_crtc_vblank_helper_get_vblank_timestamp,
5802 #if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
5803         .late_register = amdgpu_dm_crtc_late_register,
5804 #endif
5805 };
5806
5807 static enum drm_connector_status
5808 amdgpu_dm_connector_detect(struct drm_connector *connector, bool force)
5809 {
5810         bool connected;
5811         struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
5812
5813         /*
5814          * Notes:
5815          * 1. This interface is NOT called in the context of an HPD irq.
5816          * 2. This interface *is* called in the context of a user-mode ioctl,
5817          *    which makes it a bad place for *any* MST-related activity.
5818          */
5819
5820         if (aconnector->base.force == DRM_FORCE_UNSPECIFIED &&
5821             !aconnector->fake_enable)
5822                 connected = (aconnector->dc_sink != NULL);
5823         else
5824                 connected = (aconnector->base.force == DRM_FORCE_ON);
5825
5826         update_subconnector_property(aconnector);
5827
5828         return (connected ? connector_status_connected :
5829                         connector_status_disconnected);
5830 }
5831
5832 int amdgpu_dm_connector_atomic_set_property(struct drm_connector *connector,
5833                                             struct drm_connector_state *connector_state,
5834                                             struct drm_property *property,
5835                                             uint64_t val)
5836 {
5837         struct drm_device *dev = connector->dev;
5838         struct amdgpu_device *adev = drm_to_adev(dev);
5839         struct dm_connector_state *dm_old_state =
5840                 to_dm_connector_state(connector->state);
5841         struct dm_connector_state *dm_new_state =
5842                 to_dm_connector_state(connector_state);
5843
5844         int ret = -EINVAL;
5845
5846         if (property == dev->mode_config.scaling_mode_property) {
5847                 enum amdgpu_rmx_type rmx_type;
5848
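                /*
                 * Map the generic DRM "scaling mode" enum onto amdgpu's RMX
                 * types. As a hypothetical example (not part of this driver),
                 * userspace could reach this path by setting the property,
                 * e.g. with modetest -w <connector_id>:"scaling mode":<value>.
                 */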
5849                 switch (val) {
5850                 case DRM_MODE_SCALE_CENTER:
5851                         rmx_type = RMX_CENTER;
5852                         break;
5853                 case DRM_MODE_SCALE_ASPECT:
5854                         rmx_type = RMX_ASPECT;
5855                         break;
5856                 case DRM_MODE_SCALE_FULLSCREEN:
5857                         rmx_type = RMX_FULL;
5858                         break;
5859                 case DRM_MODE_SCALE_NONE:
5860                 default:
5861                         rmx_type = RMX_OFF;
5862                         break;
5863                 }
5864
5865                 if (dm_old_state->scaling == rmx_type)
5866                         return 0;
5867
5868                 dm_new_state->scaling = rmx_type;
5869                 ret = 0;
5870         } else if (property == adev->mode_info.underscan_hborder_property) {
5871                 dm_new_state->underscan_hborder = val;
5872                 ret = 0;
5873         } else if (property == adev->mode_info.underscan_vborder_property) {
5874                 dm_new_state->underscan_vborder = val;
5875                 ret = 0;
5876         } else if (property == adev->mode_info.underscan_property) {
5877                 dm_new_state->underscan_enable = val;
5878                 ret = 0;
5879         } else if (property == adev->mode_info.abm_level_property) {
5880                 dm_new_state->abm_level = val;
5881                 ret = 0;
5882         }
5883
5884         return ret;
5885 }
5886
5887 int amdgpu_dm_connector_atomic_get_property(struct drm_connector *connector,
5888                                             const struct drm_connector_state *state,
5889                                             struct drm_property *property,
5890                                             uint64_t *val)
5891 {
5892         struct drm_device *dev = connector->dev;
5893         struct amdgpu_device *adev = drm_to_adev(dev);
5894         struct dm_connector_state *dm_state =
5895                 to_dm_connector_state(state);
5896         int ret = -EINVAL;
5897
5898         if (property == dev->mode_config.scaling_mode_property) {
5899                 switch (dm_state->scaling) {
5900                 case RMX_CENTER:
5901                         *val = DRM_MODE_SCALE_CENTER;
5902                         break;
5903                 case RMX_ASPECT:
5904                         *val = DRM_MODE_SCALE_ASPECT;
5905                         break;
5906                 case RMX_FULL:
5907                         *val = DRM_MODE_SCALE_FULLSCREEN;
5908                         break;
5909                 case RMX_OFF:
5910                 default:
5911                         *val = DRM_MODE_SCALE_NONE;
5912                         break;
5913                 }
5914                 ret = 0;
5915         } else if (property == adev->mode_info.underscan_hborder_property) {
5916                 *val = dm_state->underscan_hborder;
5917                 ret = 0;
5918         } else if (property == adev->mode_info.underscan_vborder_property) {
5919                 *val = dm_state->underscan_vborder;
5920                 ret = 0;
5921         } else if (property == adev->mode_info.underscan_property) {
5922                 *val = dm_state->underscan_enable;
5923                 ret = 0;
5924         } else if (property == adev->mode_info.abm_level_property) {
5925                 *val = dm_state->abm_level;
5926                 ret = 0;
5927         }
5928
5929         return ret;
5930 }
5931
5932 static void amdgpu_dm_connector_unregister(struct drm_connector *connector)
5933 {
5934         struct amdgpu_dm_connector *amdgpu_dm_connector = to_amdgpu_dm_connector(connector);
5935
5936         drm_dp_aux_unregister(&amdgpu_dm_connector->dm_dp_aux.aux);
5937 }
5938
5939 static void amdgpu_dm_connector_destroy(struct drm_connector *connector)
5940 {
5941         struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
5942         const struct dc_link *link = aconnector->dc_link;
5943         struct amdgpu_device *adev = drm_to_adev(connector->dev);
5944         struct amdgpu_display_manager *dm = &adev->dm;
5945
5946         /*
5947          * Call only if mst_mgr was initialized before, since it's not done
5948          * for all connector types.
5949          */
5950         if (aconnector->mst_mgr.dev)
5951                 drm_dp_mst_topology_mgr_destroy(&aconnector->mst_mgr);
5952
5953 #if defined(CONFIG_BACKLIGHT_CLASS_DEVICE) ||\
5954         defined(CONFIG_BACKLIGHT_CLASS_DEVICE_MODULE)
5955
5956         if ((link->connector_signal & (SIGNAL_TYPE_EDP | SIGNAL_TYPE_LVDS)) &&
5957             link->type != dc_connection_none &&
5958             dm->backlight_dev) {
5959                 backlight_device_unregister(dm->backlight_dev);
5960                 dm->backlight_dev = NULL;
5961         }
5962 #endif
5963
5964         if (aconnector->dc_em_sink)
5965                 dc_sink_release(aconnector->dc_em_sink);
5966         aconnector->dc_em_sink = NULL;
5967         if (aconnector->dc_sink)
5968                 dc_sink_release(aconnector->dc_sink);
5969         aconnector->dc_sink = NULL;
5970
5971         drm_dp_cec_unregister_connector(&aconnector->dm_dp_aux.aux);
5972         drm_connector_unregister(connector);
5973         drm_connector_cleanup(connector);
5974         if (aconnector->i2c) {
5975                 i2c_del_adapter(&aconnector->i2c->base);
5976                 kfree(aconnector->i2c);
5977         }
5978         kfree(aconnector->dm_dp_aux.aux.name);
5979
5980         kfree(connector);
5981 }
5982
5983 void amdgpu_dm_connector_funcs_reset(struct drm_connector *connector)
5984 {
5985         struct dm_connector_state *state =
5986                 to_dm_connector_state(connector->state);
5987
5988         if (connector->state)
5989                 __drm_atomic_helper_connector_destroy_state(connector->state);
5990
5991         kfree(state);
5992
5993         state = kzalloc(sizeof(*state), GFP_KERNEL);
5994
5995         if (state) {
5996                 state->scaling = RMX_OFF;
5997                 state->underscan_enable = false;
5998                 state->underscan_hborder = 0;
5999                 state->underscan_vborder = 0;
6000                 state->base.max_requested_bpc = 8;
6001                 state->vcpi_slots = 0;
6002                 state->pbn = 0;
6003                 if (connector->connector_type == DRM_MODE_CONNECTOR_eDP)
6004                         state->abm_level = amdgpu_dm_abm_level;
6005
6006                 __drm_atomic_helper_connector_reset(connector, &state->base);
6007         }
6008 }
6009
6010 struct drm_connector_state *
6011 amdgpu_dm_connector_atomic_duplicate_state(struct drm_connector *connector)
6012 {
6013         struct dm_connector_state *state =
6014                 to_dm_connector_state(connector->state);
6015
6016         struct dm_connector_state *new_state =
6017                         kmemdup(state, sizeof(*state), GFP_KERNEL);
6018
6019         if (!new_state)
6020                 return NULL;
6021
6022         __drm_atomic_helper_connector_duplicate_state(connector, &new_state->base);
6023
6024         new_state->freesync_capable = state->freesync_capable;
6025         new_state->abm_level = state->abm_level;
6026         new_state->scaling = state->scaling;
6027         new_state->underscan_enable = state->underscan_enable;
6028         new_state->underscan_hborder = state->underscan_hborder;
6029         new_state->underscan_vborder = state->underscan_vborder;
6030         new_state->vcpi_slots = state->vcpi_slots;
6031         new_state->pbn = state->pbn;
6032         return &new_state->base;
6033 }
6034
6035 static int
6036 amdgpu_dm_connector_late_register(struct drm_connector *connector)
6037 {
6038         struct amdgpu_dm_connector *amdgpu_dm_connector =
6039                 to_amdgpu_dm_connector(connector);
6040         int r;
6041
6042         if ((connector->connector_type == DRM_MODE_CONNECTOR_DisplayPort) ||
6043             (connector->connector_type == DRM_MODE_CONNECTOR_eDP)) {
6044                 amdgpu_dm_connector->dm_dp_aux.aux.dev = connector->kdev;
6045                 r = drm_dp_aux_register(&amdgpu_dm_connector->dm_dp_aux.aux);
6046                 if (r)
6047                         return r;
6048         }
6049
6050 #if defined(CONFIG_DEBUG_FS)
6051         connector_debugfs_init(amdgpu_dm_connector);
6052 #endif
6053
6054         return 0;
6055 }
6056
6057 static const struct drm_connector_funcs amdgpu_dm_connector_funcs = {
6058         .reset = amdgpu_dm_connector_funcs_reset,
6059         .detect = amdgpu_dm_connector_detect,
6060         .fill_modes = drm_helper_probe_single_connector_modes,
6061         .destroy = amdgpu_dm_connector_destroy,
6062         .atomic_duplicate_state = amdgpu_dm_connector_atomic_duplicate_state,
6063         .atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
6064         .atomic_set_property = amdgpu_dm_connector_atomic_set_property,
6065         .atomic_get_property = amdgpu_dm_connector_atomic_get_property,
6066         .late_register = amdgpu_dm_connector_late_register,
6067         .early_unregister = amdgpu_dm_connector_unregister
6068 };
6069
6070 static int get_modes(struct drm_connector *connector)
6071 {
6072         return amdgpu_dm_connector_get_modes(connector);
6073 }
6074
6075 static void create_eml_sink(struct amdgpu_dm_connector *aconnector)
6076 {
6077         struct dc_sink_init_data init_params = {
6078                         .link = aconnector->dc_link,
6079                         .sink_signal = SIGNAL_TYPE_VIRTUAL
6080         };
6081         struct edid *edid;
6082
6083         if (!aconnector->base.edid_blob_ptr) {
6084                 DRM_ERROR("No EDID firmware found on connector: %s, forcing to OFF!\n",
6085                                 aconnector->base.name);
6086
6087                 aconnector->base.force = DRM_FORCE_OFF;
6088                 aconnector->base.override_edid = false;
6089                 return;
6090         }
6091
6092         edid = (struct edid *) aconnector->base.edid_blob_ptr->data;
6093
6094         aconnector->edid = edid;
6095
6096         aconnector->dc_em_sink = dc_link_add_remote_sink(
6097                 aconnector->dc_link,
6098                 (uint8_t *)edid,
6099                 (edid->extensions + 1) * EDID_LENGTH,
6100                 &init_params);
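        /*
         * The EDID base block is EDID_LENGTH (128) bytes and edid->extensions
         * counts the additional 128-byte extension blocks, hence the total
         * size of (edid->extensions + 1) * EDID_LENGTH passed above.
         */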
6101
6102         if (aconnector->base.force == DRM_FORCE_ON) {
6103                 aconnector->dc_sink = aconnector->dc_link->local_sink ?
6104                 aconnector->dc_link->local_sink :
6105                 aconnector->dc_em_sink;
6106                 dc_sink_retain(aconnector->dc_sink);
6107         }
6108 }
6109
6110 static void handle_edid_mgmt(struct amdgpu_dm_connector *aconnector)
6111 {
6112         struct dc_link *link = (struct dc_link *)aconnector->dc_link;
6113
6114         /*
6115          * In case of headless boot with force on for a DP managed connector,
6116          * those settings have to be != 0 to get an initial modeset
6117          */
6118         if (link->connector_signal == SIGNAL_TYPE_DISPLAY_PORT) {
6119                 link->verified_link_cap.lane_count = LANE_COUNT_FOUR;
6120                 link->verified_link_cap.link_rate = LINK_RATE_HIGH2;
6121         }
6122
6123
6124         aconnector->base.override_edid = true;
6125         create_eml_sink(aconnector);
6126 }
6127
6128 static struct dc_stream_state *
6129 create_validate_stream_for_sink(struct amdgpu_dm_connector *aconnector,
6130                                 const struct drm_display_mode *drm_mode,
6131                                 const struct dm_connector_state *dm_state,
6132                                 const struct dc_stream_state *old_stream)
6133 {
6134         struct drm_connector *connector = &aconnector->base;
6135         struct amdgpu_device *adev = drm_to_adev(connector->dev);
6136         struct dc_stream_state *stream;
6137         const struct drm_connector_state *drm_state = dm_state ? &dm_state->base : NULL;
6138         int requested_bpc = drm_state ? drm_state->max_requested_bpc : 8;
6139         enum dc_status dc_result = DC_OK;
6140
6141         do {
6142                 stream = create_stream_for_sink(aconnector, drm_mode,
6143                                                 dm_state, old_stream,
6144                                                 requested_bpc);
6145                 if (stream == NULL) {
6146                         DRM_ERROR("Failed to create stream for sink!\n");
6147                         break;
6148                 }
6149
6150                 dc_result = dc_validate_stream(adev->dm.dc, stream);
6151
6152                 if (dc_result != DC_OK) {
6153                         DRM_DEBUG_KMS("Mode %dx%d (clk %d) failed DC validation with error %d (%s)\n",
6154                                       drm_mode->hdisplay,
6155                                       drm_mode->vdisplay,
6156                                       drm_mode->clock,
6157                                       dc_result,
6158                                       dc_status_to_str(dc_result));
6159
6160                         dc_stream_release(stream);
6161                         stream = NULL;
6162                         requested_bpc -= 2; /* lower bpc to retry validation */
6163                 }
6164
6165         } while (stream == NULL && requested_bpc >= 6);
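        /*
         * E.g. with max_requested_bpc == 10 the retry ladder above is
         * 10 -> 8 -> 6 bpc before validation gives up.
         */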
6166
6167         if (dc_result == DC_FAIL_ENC_VALIDATE && !aconnector->force_yuv420_output) {
6168                 DRM_DEBUG_KMS("Retry forcing YCbCr420 encoding\n");
6169
6170                 aconnector->force_yuv420_output = true;
6171                 stream = create_validate_stream_for_sink(aconnector, drm_mode,
6172                                                 dm_state, old_stream);
6173                 aconnector->force_yuv420_output = false;
6174         }
6175
6176         return stream;
6177 }
6178
6179 enum drm_mode_status amdgpu_dm_connector_mode_valid(struct drm_connector *connector,
6180                                    struct drm_display_mode *mode)
6181 {
6182         int result = MODE_ERROR;
6183         struct dc_sink *dc_sink;
6184         /* TODO: Unhardcode stream count */
6185         struct dc_stream_state *stream;
6186         struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
6187
6188         if ((mode->flags & DRM_MODE_FLAG_INTERLACE) ||
6189                         (mode->flags & DRM_MODE_FLAG_DBLSCAN))
6190                 return result;
6191
6192         /*
6193          * Only run this the first time mode_valid is called to initialize
6194          * EDID mgmt
6195          */
6196         if (aconnector->base.force != DRM_FORCE_UNSPECIFIED &&
6197                 !aconnector->dc_em_sink)
6198                 handle_edid_mgmt(aconnector);
6199
6200         dc_sink = to_amdgpu_dm_connector(connector)->dc_sink;
6201
6202         if (dc_sink == NULL && aconnector->base.force != DRM_FORCE_ON_DIGITAL &&
6203                                 aconnector->base.force != DRM_FORCE_ON) {
6204                 DRM_ERROR("dc_sink is NULL!\n");
6205                 goto fail;
6206         }
6207
6208         stream = create_validate_stream_for_sink(aconnector, mode, NULL, NULL);
6209         if (stream) {
6210                 dc_stream_release(stream);
6211                 result = MODE_OK;
6212         }
6213
6214 fail:
6215         /* TODO: error handling */
6216         return result;
6217 }
6218
6219 static int fill_hdr_info_packet(const struct drm_connector_state *state,
6220                                 struct dc_info_packet *out)
6221 {
6222         struct hdmi_drm_infoframe frame;
6223         unsigned char buf[30]; /* 26 + 4 */
6224         ssize_t len;
6225         int ret, i;
6226
6227         memset(out, 0, sizeof(*out));
6228
6229         if (!state->hdr_output_metadata)
6230                 return 0;
6231
6232         ret = drm_hdmi_infoframe_set_hdr_metadata(&frame, state);
6233         if (ret)
6234                 return ret;
6235
6236         len = hdmi_drm_infoframe_pack_only(&frame, buf, sizeof(buf));
6237         if (len < 0)
6238                 return (int)len;
6239
6240         /* Static metadata is a fixed 26 bytes + 4 byte header. */
6241         if (len != 30)
6242                 return -EINVAL;
6243
6244         /* Prepare the infopacket for DC. */
6245         switch (state->connector->connector_type) {
6246         case DRM_MODE_CONNECTOR_HDMIA:
6247                 out->hb0 = 0x87; /* type */
6248                 out->hb1 = 0x01; /* version */
6249                 out->hb2 = 0x1A; /* length */
6250                 out->sb[0] = buf[3]; /* checksum */
6251                 i = 1;
6252                 break;
6253
6254         case DRM_MODE_CONNECTOR_DisplayPort:
6255         case DRM_MODE_CONNECTOR_eDP:
6256                 out->hb0 = 0x00; /* sdp id, zero */
6257                 out->hb1 = 0x87; /* type */
6258                 out->hb2 = 0x1D; /* payload len - 1 */
6259                 out->hb3 = (0x13 << 2); /* sdp version */
6260                 out->sb[0] = 0x01; /* version */
6261                 out->sb[1] = 0x1A; /* length */
6262                 i = 2;
6263                 break;
6264
6265         default:
6266                 return -EINVAL;
6267         }
6268
6269         memcpy(&out->sb[i], &buf[4], 26);
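        /*
         * sb[] now holds the 26-byte CTA-861-G static metadata payload (EOTF,
         * primaries, white point, luminance levels) following the header
         * bytes filled in above.
         */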
6270         out->valid = true;
6271
6272         print_hex_dump(KERN_DEBUG, "HDR SB:", DUMP_PREFIX_NONE, 16, 1, out->sb,
6273                        sizeof(out->sb), false);
6274
6275         return 0;
6276 }
6277
6278 static int
6279 amdgpu_dm_connector_atomic_check(struct drm_connector *conn,
6280                                  struct drm_atomic_state *state)
6281 {
6282         struct drm_connector_state *new_con_state =
6283                 drm_atomic_get_new_connector_state(state, conn);
6284         struct drm_connector_state *old_con_state =
6285                 drm_atomic_get_old_connector_state(state, conn);
6286         struct drm_crtc *crtc = new_con_state->crtc;
6287         struct drm_crtc_state *new_crtc_state;
6288         int ret;
6289
6290         trace_amdgpu_dm_connector_atomic_check(new_con_state);
6291
6292         if (!crtc)
6293                 return 0;
6294
6295         if (!drm_connector_atomic_hdr_metadata_equal(old_con_state, new_con_state)) {
6296                 struct dc_info_packet hdr_infopacket;
6297
6298                 ret = fill_hdr_info_packet(new_con_state, &hdr_infopacket);
6299                 if (ret)
6300                         return ret;
6301
6302                 new_crtc_state = drm_atomic_get_crtc_state(state, crtc);
6303                 if (IS_ERR(new_crtc_state))
6304                         return PTR_ERR(new_crtc_state);
6305
6306                 /*
6307                  * DC considers the stream backends changed if the
6308                  * static metadata changes. Forcing the modeset also
6309                  * gives a simple way for userspace to switch from
6310                  * 8bpc to 10bpc when setting the metadata to enter
6311                  * or exit HDR.
6312                  *
6313                  * Changing the static metadata after it's been
6314                  * set is permissible, however. So only force a
6315                  * modeset if we're entering or exiting HDR.
6316                  */
6317                 new_crtc_state->mode_changed =
6318                         !old_con_state->hdr_output_metadata ||
6319                         !new_con_state->hdr_output_metadata;
6320         }
6321
6322         return 0;
6323 }
6324
6325 static const struct drm_connector_helper_funcs
6326 amdgpu_dm_connector_helper_funcs = {
6327         /*
6328          * If hotplugging a second, bigger display in FB console mode, bigger
6329          * resolution modes will be filtered by drm_mode_validate_size(), and those
6330          * modes are missing after the user starts lightdm. So we need to renew the
6331          * modes list in the get_modes callback, not just return the modes count
6332          */
6333         .get_modes = get_modes,
6334         .mode_valid = amdgpu_dm_connector_mode_valid,
6335         .atomic_check = amdgpu_dm_connector_atomic_check,
6336 };
6337
6338 static void dm_crtc_helper_disable(struct drm_crtc *crtc)
6339 {
6340 }
6341
6342 static int count_crtc_active_planes(struct drm_crtc_state *new_crtc_state)
6343 {
6344         struct drm_atomic_state *state = new_crtc_state->state;
6345         struct drm_plane *plane;
6346         int num_active = 0;
6347
6348         drm_for_each_plane_mask(plane, state->dev, new_crtc_state->plane_mask) {
6349                 struct drm_plane_state *new_plane_state;
6350
6351                 /* Cursor planes are "fake". */
6352                 if (plane->type == DRM_PLANE_TYPE_CURSOR)
6353                         continue;
6354
6355                 new_plane_state = drm_atomic_get_new_plane_state(state, plane);
6356
6357                 if (!new_plane_state) {
6358                         /*
6359                          * The plane is enabled on the CRTC and hasn't changed
6360                          * state. This means that it previously passed
6361                          * validation and is therefore enabled.
6362                          */
6363                         num_active += 1;
6364                         continue;
6365                 }
6366
6367                 /* We need a framebuffer to be considered enabled. */
6368                 num_active += (new_plane_state->fb != NULL);
6369         }
6370
6371         return num_active;
6372 }
6373
6374 static void dm_update_crtc_active_planes(struct drm_crtc *crtc,
6375                                          struct drm_crtc_state *new_crtc_state)
6376 {
6377         struct dm_crtc_state *dm_new_crtc_state =
6378                 to_dm_crtc_state(new_crtc_state);
6379
6380         dm_new_crtc_state->active_planes = 0;
6381
6382         if (!dm_new_crtc_state->stream)
6383                 return;
6384
6385         dm_new_crtc_state->active_planes =
6386                 count_crtc_active_planes(new_crtc_state);
6387 }
6388
6389 static int dm_crtc_helper_atomic_check(struct drm_crtc *crtc,
6390                                        struct drm_atomic_state *state)
6391 {
6392         struct drm_crtc_state *crtc_state = drm_atomic_get_new_crtc_state(state,
6393                                                                           crtc);
6394         struct amdgpu_device *adev = drm_to_adev(crtc->dev);
6395         struct dc *dc = adev->dm.dc;
6396         struct dm_crtc_state *dm_crtc_state = to_dm_crtc_state(crtc_state);
6397         int ret = -EINVAL;
6398
6399         trace_amdgpu_dm_crtc_atomic_check(crtc_state);
6400
6401         dm_update_crtc_active_planes(crtc, crtc_state);
6402
6403         if (unlikely(!dm_crtc_state->stream &&
6404                      modeset_required(crtc_state, NULL, dm_crtc_state->stream))) {
6405                 WARN_ON(1);
6406                 return ret;
6407         }
6408
6409         /*
6410          * We require the primary plane to be enabled whenever the CRTC is, otherwise
6411          * drm_mode_cursor_universal may end up trying to enable the cursor plane while all other
6412          * planes are disabled, which is not supported by the hardware. And there is legacy
6413          * userspace which stops using the HW cursor altogether in response to the resulting EINVAL.
6414          */
6415         if (crtc_state->enable &&
6416             !(crtc_state->plane_mask & drm_plane_mask(crtc->primary))) {
6417                 DRM_DEBUG_ATOMIC("Can't enable a CRTC without enabling the primary plane\n");
6418                 return -EINVAL;
6419         }
6420
6421         /* In some use cases, like reset, no stream is attached */
6422         if (!dm_crtc_state->stream)
6423                 return 0;
6424
6425         if (dc_validate_stream(dc, dm_crtc_state->stream) == DC_OK)
6426                 return 0;
6427
6428         DRM_DEBUG_ATOMIC("Failed DC stream validation\n");
6429         return ret;
6430 }
6431
6432 static bool dm_crtc_helper_mode_fixup(struct drm_crtc *crtc,
6433                                       const struct drm_display_mode *mode,
6434                                       struct drm_display_mode *adjusted_mode)
6435 {
6436         return true;
6437 }
6438
6439 static const struct drm_crtc_helper_funcs amdgpu_dm_crtc_helper_funcs = {
6440         .disable = dm_crtc_helper_disable,
6441         .atomic_check = dm_crtc_helper_atomic_check,
6442         .mode_fixup = dm_crtc_helper_mode_fixup,
6443         .get_scanout_position = amdgpu_crtc_get_scanout_position,
6444 };
6445
6446 static void dm_encoder_helper_disable(struct drm_encoder *encoder)
6447 {
6448
6449 }
6450
6451 static int convert_dc_color_depth_into_bpc(enum dc_color_depth display_color_depth)
6452 {
6453         switch (display_color_depth) {
6454         case COLOR_DEPTH_666:
6455                 return 6;
6456         case COLOR_DEPTH_888:
6457                 return 8;
6458         case COLOR_DEPTH_101010:
6459                 return 10;
6460         case COLOR_DEPTH_121212:
6461                 return 12;
6462         case COLOR_DEPTH_141414:
6463                 return 14;
6464         case COLOR_DEPTH_161616:
6465                 return 16;
6466         default:
6467                 break;
6468         }
6469         return 0;
6470 }
6471
6472 static int dm_encoder_helper_atomic_check(struct drm_encoder *encoder,
6473                                           struct drm_crtc_state *crtc_state,
6474                                           struct drm_connector_state *conn_state)
6475 {
6476         struct drm_atomic_state *state = crtc_state->state;
6477         struct drm_connector *connector = conn_state->connector;
6478         struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
6479         struct dm_connector_state *dm_new_connector_state = to_dm_connector_state(conn_state);
6480         const struct drm_display_mode *adjusted_mode = &crtc_state->adjusted_mode;
6481         struct drm_dp_mst_topology_mgr *mst_mgr;
6482         struct drm_dp_mst_port *mst_port;
6483         enum dc_color_depth color_depth;
6484         int clock, bpp = 0;
6485         bool is_y420 = false;
6486
6487         if (!aconnector->port || !aconnector->dc_sink)
6488                 return 0;
6489
6490         mst_port = aconnector->port;
6491         mst_mgr = &aconnector->mst_port->mst_mgr;
6492
6493         if (!crtc_state->connectors_changed && !crtc_state->mode_changed)
6494                 return 0;
6495
6496         if (!state->duplicated) {
6497                 int max_bpc = conn_state->max_requested_bpc;
6498                 is_y420 = drm_mode_is_420_also(&connector->display_info, adjusted_mode) &&
6499                                 aconnector->force_yuv420_output;
6500                 color_depth = convert_color_depth_from_display_info(connector,
6501                                                                     is_y420,
6502                                                                     max_bpc);
6503                 bpp = convert_dc_color_depth_into_bpc(color_depth) * 3;
6504                 clock = adjusted_mode->clock;
6505                 dm_new_connector_state->pbn = drm_dp_calc_pbn_mode(clock, bpp, false);
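                /*
                 * drm_dp_calc_pbn_mode() converts the stream rate into DP MST
                 * payload bandwidth number units; e.g. a 297 MHz, 24 bpp
                 * stream works out to a PBN of roughly 1063.
                 */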
6506         }
6507         dm_new_connector_state->vcpi_slots = drm_dp_atomic_find_vcpi_slots(state,
6508                                                                            mst_mgr,
6509                                                                            mst_port,
6510                                                                            dm_new_connector_state->pbn,
6511                                                                            dm_mst_get_pbn_divider(aconnector->dc_link));
6512         if (dm_new_connector_state->vcpi_slots < 0) {
6513                 DRM_DEBUG_ATOMIC("failed finding vcpi slots: %d\n", (int)dm_new_connector_state->vcpi_slots);
6514                 return dm_new_connector_state->vcpi_slots;
6515         }
6516         return 0;
6517 }
6518
6519 const struct drm_encoder_helper_funcs amdgpu_dm_encoder_helper_funcs = {
6520         .disable = dm_encoder_helper_disable,
6521         .atomic_check = dm_encoder_helper_atomic_check
6522 };
6523
6524 #if defined(CONFIG_DRM_AMD_DC_DCN)
6525 static int dm_update_mst_vcpi_slots_for_dsc(struct drm_atomic_state *state,
6526                                             struct dc_state *dc_state)
6527 {
6528         struct dc_stream_state *stream = NULL;
6529         struct drm_connector *connector;
6530         struct drm_connector_state *new_con_state, *old_con_state;
6531         struct amdgpu_dm_connector *aconnector;
6532         struct dm_connector_state *dm_conn_state;
6533         int i, j, clock, bpp;
6534         int vcpi, pbn_div, pbn = 0;
6535
6536         for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) {
6537
6538                 aconnector = to_amdgpu_dm_connector(connector);
6539
6540                 if (!aconnector->port)
6541                         continue;
6542
6543                 if (!new_con_state || !new_con_state->crtc)
6544                         continue;
6545
6546                 dm_conn_state = to_dm_connector_state(new_con_state);
6547
6548                 for (j = 0; j < dc_state->stream_count; j++) {
6549                         stream = dc_state->streams[j];
6550                         if (!stream)
6551                                 continue;
6552
6553                         if ((struct amdgpu_dm_connector *)stream->dm_stream_context == aconnector)
6554                                 break;
6555
6556                         stream = NULL;
6557                 }
6558
6559                 if (!stream)
6560                         continue;
6561
6562                 if (stream->timing.flags.DSC != 1) {
6563                         drm_dp_mst_atomic_enable_dsc(state,
6564                                                      aconnector->port,
6565                                                      dm_conn_state->pbn,
6566                                                      0,
6567                                                      false);
6568                         continue;
6569                 }
6570
6571                 pbn_div = dm_mst_get_pbn_divider(stream->link);
6572                 bpp = stream->timing.dsc_cfg.bits_per_pixel;
6573                 clock = stream->timing.pix_clk_100hz / 10;
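                /*
                 * pix_clk_100hz is in 100 Hz units, so the division above
                 * yields the kHz that drm_dp_calc_pbn_mode() expects; with
                 * the DSC flag set, that helper also treats bpp in units of
                 * 1/16 bpp, matching dsc_cfg.bits_per_pixel.
                 */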
6574                 pbn = drm_dp_calc_pbn_mode(clock, bpp, true);
6575                 vcpi = drm_dp_mst_atomic_enable_dsc(state,
6576                                                     aconnector->port,
6577                                                     pbn, pbn_div,
6578                                                     true);
6579                 if (vcpi < 0)
6580                         return vcpi;
6581
6582                 dm_conn_state->pbn = pbn;
6583                 dm_conn_state->vcpi_slots = vcpi;
6584         }
6585         return 0;
6586 }
6587 #endif
6588
6589 static void dm_drm_plane_reset(struct drm_plane *plane)
6590 {
6591         struct dm_plane_state *amdgpu_state = NULL;
6592
6593         if (plane->state)
6594                 plane->funcs->atomic_destroy_state(plane, plane->state);
6595
6596         amdgpu_state = kzalloc(sizeof(*amdgpu_state), GFP_KERNEL);
6597         WARN_ON(amdgpu_state == NULL);
6598
6599         if (amdgpu_state)
6600                 __drm_atomic_helper_plane_reset(plane, &amdgpu_state->base);
6601 }
6602
6603 static struct drm_plane_state *
6604 dm_drm_plane_duplicate_state(struct drm_plane *plane)
6605 {
6606         struct dm_plane_state *dm_plane_state, *old_dm_plane_state;
6607
6608         old_dm_plane_state = to_dm_plane_state(plane->state);
6609         dm_plane_state = kzalloc(sizeof(*dm_plane_state), GFP_KERNEL);
6610         if (!dm_plane_state)
6611                 return NULL;
6612
6613         __drm_atomic_helper_plane_duplicate_state(plane, &dm_plane_state->base);
6614
6615         if (old_dm_plane_state->dc_state) {
6616                 dm_plane_state->dc_state = old_dm_plane_state->dc_state;
6617                 dc_plane_state_retain(dm_plane_state->dc_state);
6618         }
6619
6620         return &dm_plane_state->base;
6621 }
6622
6623 static void dm_drm_plane_destroy_state(struct drm_plane *plane,
6624                                 struct drm_plane_state *state)
6625 {
6626         struct dm_plane_state *dm_plane_state = to_dm_plane_state(state);
6627
6628         if (dm_plane_state->dc_state)
6629                 dc_plane_state_release(dm_plane_state->dc_state);
6630
6631         drm_atomic_helper_plane_destroy_state(plane, state);
6632 }
6633
6634 static const struct drm_plane_funcs dm_plane_funcs = {
6635         .update_plane   = drm_atomic_helper_update_plane,
6636         .disable_plane  = drm_atomic_helper_disable_plane,
6637         .destroy        = drm_primary_helper_destroy,
6638         .reset = dm_drm_plane_reset,
6639         .atomic_duplicate_state = dm_drm_plane_duplicate_state,
6640         .atomic_destroy_state = dm_drm_plane_destroy_state,
6641         .format_mod_supported = dm_plane_format_mod_supported,
6642 };
6643
6644 static int dm_plane_helper_prepare_fb(struct drm_plane *plane,
6645                                       struct drm_plane_state *new_state)
6646 {
6647         struct amdgpu_framebuffer *afb;
6648         struct drm_gem_object *obj;
6649         struct amdgpu_device *adev;
6650         struct amdgpu_bo *rbo;
6651         struct dm_plane_state *dm_plane_state_new, *dm_plane_state_old;
6652         struct list_head list;
6653         struct ttm_validate_buffer tv;
6654         struct ww_acquire_ctx ticket;
6655         uint32_t domain;
6656         int r;
6657
6658         if (!new_state->fb) {
6659                 DRM_DEBUG_KMS("No FB bound\n");
6660                 return 0;
6661         }
6662
6663         afb = to_amdgpu_framebuffer(new_state->fb);
6664         obj = new_state->fb->obj[0];
6665         rbo = gem_to_amdgpu_bo(obj);
6666         adev = amdgpu_ttm_adev(rbo->tbo.bdev);
6667         INIT_LIST_HEAD(&list);
6668
6669         tv.bo = &rbo->tbo;
6670         tv.num_shared = 1;
6671         list_add(&tv.head, &list);
6672
6673         r = ttm_eu_reserve_buffers(&ticket, &list, false, NULL);
6674         if (r) {
6675                 dev_err(adev->dev, "failed to reserve bo (%d)\n", r);
6676                 return r;
6677         }
6678
6679         if (plane->type != DRM_PLANE_TYPE_CURSOR)
6680                 domain = amdgpu_display_supported_domains(adev, rbo->flags);
6681         else
6682                 domain = AMDGPU_GEM_DOMAIN_VRAM;
6683
6684         r = amdgpu_bo_pin(rbo, domain);
6685         if (unlikely(r != 0)) {
6686                 if (r != -ERESTARTSYS)
6687                         DRM_ERROR("Failed to pin framebuffer with error %d\n", r);
6688                 ttm_eu_backoff_reservation(&ticket, &list);
6689                 return r;
6690         }
6691
6692         r = amdgpu_ttm_alloc_gart(&rbo->tbo);
6693         if (unlikely(r != 0)) {
6694                 amdgpu_bo_unpin(rbo);
6695                 ttm_eu_backoff_reservation(&ticket, &list);
6696                 DRM_ERROR("%p bind failed\n", rbo);
6697                 return r;
6698         }
6699
6700         ttm_eu_backoff_reservation(&ticket, &list);
6701
6702         afb->address = amdgpu_bo_gpu_offset(rbo);
6703
6704         amdgpu_bo_ref(rbo);
6705
6706         /*
6707          * We don't do surface updates on planes that have been newly created,
6708          * but we also don't have the afb->address during atomic check.
6709          *
6710          * Fill in buffer attributes depending on the address here, but only on
6711          * newly created planes since they're not being used by DC yet and this
6712          * won't modify global state.
6713          */
6714         dm_plane_state_old = to_dm_plane_state(plane->state);
6715         dm_plane_state_new = to_dm_plane_state(new_state);
6716
6717         if (dm_plane_state_new->dc_state &&
6718             dm_plane_state_old->dc_state != dm_plane_state_new->dc_state) {
6719                 struct dc_plane_state *plane_state =
6720                         dm_plane_state_new->dc_state;
6721                 bool force_disable_dcc = !plane_state->dcc.enable;
6722
6723                 fill_plane_buffer_attributes(
6724                         adev, afb, plane_state->format, plane_state->rotation,
6725                         afb->tiling_flags,
6726                         &plane_state->tiling_info, &plane_state->plane_size,
6727                         &plane_state->dcc, &plane_state->address,
6728                         afb->tmz_surface, force_disable_dcc);
6729         }
6730
6731         return 0;
6732 }
6733
6734 static void dm_plane_helper_cleanup_fb(struct drm_plane *plane,
6735                                        struct drm_plane_state *old_state)
6736 {
6737         struct amdgpu_bo *rbo;
6738         int r;
6739
6740         if (!old_state->fb)
6741                 return;
6742
6743         rbo = gem_to_amdgpu_bo(old_state->fb->obj[0]);
6744         r = amdgpu_bo_reserve(rbo, false);
6745         if (unlikely(r)) {
6746                 DRM_ERROR("failed to reserve rbo before unpin\n");
6747                 return;
6748         }
6749
6750         amdgpu_bo_unpin(rbo);
6751         amdgpu_bo_unreserve(rbo);
6752         amdgpu_bo_unref(&rbo);
6753 }
6754
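/*
 * Validate the plane's placement and scaling against the new CRTC state.
 *
 * For illustration of the scale conversion performed below: if DC reported
 * max_upscale = 4000 (4.0x, since 1.0 == 1000 in DC's convention), then
 * min_scale = (1000 << 16) / 4000 = 0x4000, i.e. 0.25 in DRM's 16.16
 * fixed-point src/dst convention.
 */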
6755 static int dm_plane_helper_check_state(struct drm_plane_state *state,
6756                                        struct drm_crtc_state *new_crtc_state)
6757 {
6758         struct drm_framebuffer *fb = state->fb;
6759         int min_downscale, max_upscale;
6760         int min_scale = 0;
6761         int max_scale = INT_MAX;
6762
6763         /* Plane enabled? Validate viewport and get scaling factors from plane caps. */
6764         if (fb && state->crtc) {
6765                 /* Validate viewport to cover the case when only the position changes */
6766                 if (state->plane->type != DRM_PLANE_TYPE_CURSOR) {
6767                         int viewport_width = state->crtc_w;
6768                         int viewport_height = state->crtc_h;
6769
6770                         if (state->crtc_x < 0)
6771                                 viewport_width += state->crtc_x;
6772                         else if (state->crtc_x + state->crtc_w > new_crtc_state->mode.crtc_hdisplay)
6773                                 viewport_width = new_crtc_state->mode.crtc_hdisplay - state->crtc_x;
6774
6775                         if (state->crtc_y < 0)
6776                                 viewport_height += state->crtc_y;
6777                         else if (state->crtc_y + state->crtc_h > new_crtc_state->mode.crtc_vdisplay)
6778                                 viewport_height = new_crtc_state->mode.crtc_vdisplay - state->crtc_y;
6779
6780                         if (viewport_width < 0 || viewport_height < 0) {
6781                                 DRM_DEBUG_ATOMIC("Plane completely outside of screen\n");
6782                                 return -EINVAL;
6783                         } else if (viewport_width < MIN_VIEWPORT_SIZE*2) { /* x2 for width is because of pipe-split. */
6784                                 DRM_DEBUG_ATOMIC("Viewport width %d smaller than %d\n", viewport_width, MIN_VIEWPORT_SIZE*2);
6785                                 return -EINVAL;
6786                         } else if (viewport_height < MIN_VIEWPORT_SIZE) {
6787                                 DRM_DEBUG_ATOMIC("Viewport height %d smaller than %d\n", viewport_height, MIN_VIEWPORT_SIZE);
6788                                 return -EINVAL;
6789                         }
6790
6791                 }
6792
6793                 /* Get min/max allowed scaling factors from plane caps. */
6794                 get_min_max_dc_plane_scaling(state->crtc->dev, fb,
6795                                              &min_downscale, &max_upscale);
6796                 /*
6797                  * Convert to drm convention: 16.16 fixed point, instead of dc's
6798                  * 1.0 == 1000. Also drm scaling is src/dst instead of dc's
6799                  * dst/src, so min_scale = 1.0 / max_upscale, etc.
6800                  */
6801                 min_scale = (1000 << 16) / max_upscale;
6802                 max_scale = (1000 << 16) / min_downscale;
6803         }
6804
6805         return drm_atomic_helper_check_plane_state(
6806                 state, new_crtc_state, min_scale, max_scale, true, true);
6807 }
6808
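/*
 * Full (non-async) atomic check for a plane: validate placement and scaling
 * against the CRTC, build the DC scaling info, then have DC validate the
 * resulting plane state itself.
 */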
6809 static int dm_plane_atomic_check(struct drm_plane *plane,
6810                                  struct drm_atomic_state *state)
6811 {
6812         struct drm_plane_state *new_plane_state = drm_atomic_get_new_plane_state(state,
6813                                                                                  plane);
6814         struct amdgpu_device *adev = drm_to_adev(plane->dev);
6815         struct dc *dc = adev->dm.dc;
6816         struct dm_plane_state *dm_plane_state;
6817         struct dc_scaling_info scaling_info;
6818         struct drm_crtc_state *new_crtc_state;
6819         int ret;
6820
6821         trace_amdgpu_dm_plane_atomic_check(new_plane_state);
6822
6823         dm_plane_state = to_dm_plane_state(new_plane_state);
6824
6825         if (!dm_plane_state->dc_state)
6826                 return 0;
6827
6828         new_crtc_state =
6829                 drm_atomic_get_new_crtc_state(state,
6830                                               new_plane_state->crtc);
6831         if (!new_crtc_state)
6832                 return -EINVAL;
6833
6834         ret = dm_plane_helper_check_state(new_plane_state, new_crtc_state);
6835         if (ret)
6836                 return ret;
6837
6838         ret = fill_dc_scaling_info(new_plane_state, &scaling_info);
6839         if (ret)
6840                 return ret;
6841
6842         if (dc_validate_plane(dc, dm_plane_state->dc_state) == DC_OK)
6843                 return 0;
6844
6845         return -EINVAL;
6846 }
6847
6848 static int dm_plane_atomic_async_check(struct drm_plane *plane,
6849                                        struct drm_atomic_state *state)
6850 {
6851         /* Only support async updates on cursor planes. */
6852         if (plane->type != DRM_PLANE_TYPE_CURSOR)
6853                 return -EINVAL;
6854
6855         return 0;
6856 }
6857
6858 static void dm_plane_atomic_async_update(struct drm_plane *plane,
6859                                          struct drm_atomic_state *state)
6860 {
6861         struct drm_plane_state *new_state = drm_atomic_get_new_plane_state(state,
6862                                                                            plane);
6863         struct drm_plane_state *old_state =
6864                 drm_atomic_get_old_plane_state(state, plane);
6865
6866         trace_amdgpu_dm_atomic_update_cursor(new_state);
6867
6868         swap(plane->state->fb, new_state->fb);
6869
6870         plane->state->src_x = new_state->src_x;
6871         plane->state->src_y = new_state->src_y;
6872         plane->state->src_w = new_state->src_w;
6873         plane->state->src_h = new_state->src_h;
6874         plane->state->crtc_x = new_state->crtc_x;
6875         plane->state->crtc_y = new_state->crtc_y;
6876         plane->state->crtc_w = new_state->crtc_w;
6877         plane->state->crtc_h = new_state->crtc_h;
6878
6879         handle_cursor_update(plane, old_state);
6880 }
6881
6882 static const struct drm_plane_helper_funcs dm_plane_helper_funcs = {
6883         .prepare_fb = dm_plane_helper_prepare_fb,
6884         .cleanup_fb = dm_plane_helper_cleanup_fb,
6885         .atomic_check = dm_plane_atomic_check,
6886         .atomic_async_check = dm_plane_atomic_async_check,
6887         .atomic_async_update = dm_plane_atomic_async_update
6888 };
6889
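/*
 * These helpers are attached with drm_plane_helper_add() in
 * amdgpu_dm_plane_init() below; the async hooks restrict async updates to
 * cursor planes.
 */
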
6890 /*
6891  * TODO: these are currently initialized to rgb formats only.
6892  * For future use cases we should either initialize them dynamically based on
6893  * plane capabilities, or initialize this array to all formats, so the
6894  * internal drm check will succeed, and let DC implement the proper check.
6895  */
6896 static const uint32_t rgb_formats[] = {
6897         DRM_FORMAT_XRGB8888,
6898         DRM_FORMAT_ARGB8888,
6899         DRM_FORMAT_RGBA8888,
6900         DRM_FORMAT_XRGB2101010,
6901         DRM_FORMAT_XBGR2101010,
6902         DRM_FORMAT_ARGB2101010,
6903         DRM_FORMAT_ABGR2101010,
6904         DRM_FORMAT_XBGR8888,
6905         DRM_FORMAT_ABGR8888,
6906         DRM_FORMAT_RGB565,
6907 };
6908
6909 static const uint32_t overlay_formats[] = {
6910         DRM_FORMAT_XRGB8888,
6911         DRM_FORMAT_ARGB8888,
6912         DRM_FORMAT_RGBA8888,
6913         DRM_FORMAT_XBGR8888,
6914         DRM_FORMAT_ABGR8888,
6915         DRM_FORMAT_RGB565
6916 };
6917
6918 static const u32 cursor_formats[] = {
6919         DRM_FORMAT_ARGB8888
6920 };
6921
6922 static int get_plane_formats(const struct drm_plane *plane,
6923                              const struct dc_plane_cap *plane_cap,
6924                              uint32_t *formats, int max_formats)
6925 {
6926         int i, num_formats = 0;
6927
6928         /*
6929          * TODO: Query support for each group of formats directly from
6930          * DC plane caps. This will require adding more formats to the
6931          * caps list.
6932          */
6933
6934         switch (plane->type) {
6935         case DRM_PLANE_TYPE_PRIMARY:
6936                 for (i = 0; i < ARRAY_SIZE(rgb_formats); ++i) {
6937                         if (num_formats >= max_formats)
6938                                 break;
6939
6940                         formats[num_formats++] = rgb_formats[i];
6941                 }
6942
6943                 if (plane_cap && plane_cap->pixel_format_support.nv12)
6944                         formats[num_formats++] = DRM_FORMAT_NV12;
6945                 if (plane_cap && plane_cap->pixel_format_support.p010)
6946                         formats[num_formats++] = DRM_FORMAT_P010;
6947                 if (plane_cap && plane_cap->pixel_format_support.fp16) {
6948                         formats[num_formats++] = DRM_FORMAT_XRGB16161616F;
6949                         formats[num_formats++] = DRM_FORMAT_ARGB16161616F;
6950                         formats[num_formats++] = DRM_FORMAT_XBGR16161616F;
6951                         formats[num_formats++] = DRM_FORMAT_ABGR16161616F;
6952                 }
6953                 break;
6954
6955         case DRM_PLANE_TYPE_OVERLAY:
6956                 for (i = 0; i < ARRAY_SIZE(overlay_formats); ++i) {
6957                         if (num_formats >= max_formats)
6958                                 break;
6959
6960                         formats[num_formats++] = overlay_formats[i];
6961                 }
6962                 break;
6963
6964         case DRM_PLANE_TYPE_CURSOR:
6965                 for (i = 0; i < ARRAY_SIZE(cursor_formats); ++i) {
6966                         if (num_formats >= max_formats)
6967                                 break;
6968
6969                         formats[num_formats++] = cursor_formats[i];
6970                 }
6971                 break;
6972         }
6973
6974         return num_formats;
6975 }
6976
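/*
 * Create a DRM plane for a DC plane: query the supported formats and
 * modifiers first, then attach blending, color-space and rotation
 * properties according to the reported plane capabilities.
 */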
6977 static int amdgpu_dm_plane_init(struct amdgpu_display_manager *dm,
6978                                 struct drm_plane *plane,
6979                                 unsigned long possible_crtcs,
6980                                 const struct dc_plane_cap *plane_cap)
6981 {
6982         uint32_t formats[32];
6983         int num_formats;
6984         int res = -EPERM;
6985         unsigned int supported_rotations;
6986         uint64_t *modifiers = NULL;
6987
6988         num_formats = get_plane_formats(plane, plane_cap, formats,
6989                                         ARRAY_SIZE(formats));
6990
6991         res = get_plane_modifiers(dm->adev, plane->type, &modifiers);
6992         if (res)
6993                 return res;
6994
6995         res = drm_universal_plane_init(adev_to_drm(dm->adev), plane, possible_crtcs,
6996                                        &dm_plane_funcs, formats, num_formats,
6997                                        modifiers, plane->type, NULL);
6998         kfree(modifiers);
6999         if (res)
7000                 return res;
7001
7002         if (plane->type == DRM_PLANE_TYPE_OVERLAY &&
7003             plane_cap && plane_cap->per_pixel_alpha) {
7004                 unsigned int blend_caps = BIT(DRM_MODE_BLEND_PIXEL_NONE) |
7005                                           BIT(DRM_MODE_BLEND_PREMULTI);
7006
7007                 drm_plane_create_alpha_property(plane);
7008                 drm_plane_create_blend_mode_property(plane, blend_caps);
7009         }
7010
7011         if (plane->type == DRM_PLANE_TYPE_PRIMARY &&
7012             plane_cap &&
7013             (plane_cap->pixel_format_support.nv12 ||
7014              plane_cap->pixel_format_support.p010)) {
7015                 /* This only affects YUV formats. */
7016                 drm_plane_create_color_properties(
7017                         plane,
7018                         BIT(DRM_COLOR_YCBCR_BT601) |
7019                         BIT(DRM_COLOR_YCBCR_BT709) |
7020                         BIT(DRM_COLOR_YCBCR_BT2020),
7021                         BIT(DRM_COLOR_YCBCR_LIMITED_RANGE) |
7022                         BIT(DRM_COLOR_YCBCR_FULL_RANGE),
7023                         DRM_COLOR_YCBCR_BT709, DRM_COLOR_YCBCR_LIMITED_RANGE);
7024         }
7025
7026         supported_rotations =
7027                 DRM_MODE_ROTATE_0 | DRM_MODE_ROTATE_90 |
7028                 DRM_MODE_ROTATE_180 | DRM_MODE_ROTATE_270;
7029
7030         if (dm->adev->asic_type >= CHIP_BONAIRE &&
7031             plane->type != DRM_PLANE_TYPE_CURSOR)
7032                 drm_plane_create_rotation_property(plane, DRM_MODE_ROTATE_0,
7033                                                    supported_rotations);
7034
7035         drm_plane_helper_add(plane, &dm_plane_helper_funcs);
7036
7037         /* Create (reset) the plane state */
7038         if (plane->funcs->reset)
7039                 plane->funcs->reset(plane);
7040
7041         return 0;
7042 }
7043
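/*
 * Create a CRTC together with its dedicated cursor plane; the primary
 * plane has already been created by the caller and is passed in.
 */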
7044 static int amdgpu_dm_crtc_init(struct amdgpu_display_manager *dm,
7045                                struct drm_plane *plane,
7046                                uint32_t crtc_index)
7047 {
7048         struct amdgpu_crtc *acrtc = NULL;
7049         struct drm_plane *cursor_plane;
7050
7051         int res = -ENOMEM;
7052
7053         cursor_plane = kzalloc(sizeof(*cursor_plane), GFP_KERNEL);
7054         if (!cursor_plane)
7055                 goto fail;
7056
7057         cursor_plane->type = DRM_PLANE_TYPE_CURSOR;
7058         res = amdgpu_dm_plane_init(dm, cursor_plane, 0, NULL);
        if (res)
                goto fail;
7059
7060         acrtc = kzalloc(sizeof(struct amdgpu_crtc), GFP_KERNEL);
7061         if (!acrtc)
7062                 goto fail;
7063
7064         res = drm_crtc_init_with_planes(
7065                         dm->ddev,
7066                         &acrtc->base,
7067                         plane,
7068                         cursor_plane,
7069                         &amdgpu_dm_crtc_funcs, NULL);
7070
7071         if (res)
7072                 goto fail;
7073
7074         drm_crtc_helper_add(&acrtc->base, &amdgpu_dm_crtc_helper_funcs);
7075
7076         /* Create (reset) the crtc state */
7077         if (acrtc->base.funcs->reset)
7078                 acrtc->base.funcs->reset(&acrtc->base);
7079
7080         acrtc->max_cursor_width = dm->adev->dm.dc->caps.max_cursor_size;
7081         acrtc->max_cursor_height = dm->adev->dm.dc->caps.max_cursor_size;
7082
7083         acrtc->crtc_id = crtc_index;
7084         acrtc->base.enabled = false;
7085         acrtc->otg_inst = -1;
7086
7087         dm->adev->mode_info.crtcs[crtc_index] = acrtc;
7088         drm_crtc_enable_color_mgmt(&acrtc->base, MAX_COLOR_LUT_ENTRIES,
7089                                    true, MAX_COLOR_LUT_ENTRIES);
7090         drm_mode_crtc_set_gamma_size(&acrtc->base, MAX_COLOR_LEGACY_LUT_ENTRIES);
7091
7092         return 0;
7093
7094 fail:
7095         kfree(acrtc);
7096         kfree(cursor_plane);
7097         return res;
7098 }
7099
7101 static int to_drm_connector_type(enum signal_type st)
7102 {
7103         switch (st) {
7104         case SIGNAL_TYPE_HDMI_TYPE_A:
7105                 return DRM_MODE_CONNECTOR_HDMIA;
7106         case SIGNAL_TYPE_EDP:
7107                 return DRM_MODE_CONNECTOR_eDP;
7108         case SIGNAL_TYPE_LVDS:
7109                 return DRM_MODE_CONNECTOR_LVDS;
7110         case SIGNAL_TYPE_RGB:
7111                 return DRM_MODE_CONNECTOR_VGA;
7112         case SIGNAL_TYPE_DISPLAY_PORT:
7113         case SIGNAL_TYPE_DISPLAY_PORT_MST:
7114                 return DRM_MODE_CONNECTOR_DisplayPort;
7115         case SIGNAL_TYPE_DVI_DUAL_LINK:
7116         case SIGNAL_TYPE_DVI_SINGLE_LINK:
7117                 return DRM_MODE_CONNECTOR_DVID;
7118         case SIGNAL_TYPE_VIRTUAL:
7119                 return DRM_MODE_CONNECTOR_VIRTUAL;
7120
7121         default:
7122                 return DRM_MODE_CONNECTOR_Unknown;
7123         }
7124 }
7125
7126 static struct drm_encoder *amdgpu_dm_connector_to_encoder(struct drm_connector *connector)
7127 {
7128         struct drm_encoder *encoder;
7129
7130         /* There is only one encoder per connector */
7131         drm_connector_for_each_possible_encoder(connector, encoder)
7132                 return encoder;
7133
7134         return NULL;
7135 }
7136
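/*
 * Cache the connector's preferred mode as the encoder's native mode. Only
 * the first probed mode is examined, so the probed list is expected to be
 * sorted beforehand (see amdgpu_dm_connector_ddc_get_modes() below).
 */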
7137 static void amdgpu_dm_get_native_mode(struct drm_connector *connector)
7138 {
7139         struct drm_encoder *encoder;
7140         struct amdgpu_encoder *amdgpu_encoder;
7141
7142         encoder = amdgpu_dm_connector_to_encoder(connector);
7143
7144         if (encoder == NULL)
7145                 return;
7146
7147         amdgpu_encoder = to_amdgpu_encoder(encoder);
7148
7149         amdgpu_encoder->native_mode.clock = 0;
7150
7151         if (!list_empty(&connector->probed_modes)) {
7152                 struct drm_display_mode *preferred_mode = NULL;
7153
7154                 list_for_each_entry(preferred_mode,
7155                                     &connector->probed_modes,
7156                                     head) {
7157                         if (preferred_mode->type & DRM_MODE_TYPE_PREFERRED)
7158                                 amdgpu_encoder->native_mode = *preferred_mode;
7159
7160                         break;
7161                 }
7163         }
7164 }
7165
7166 static struct drm_display_mode *
7167 amdgpu_dm_create_common_mode(struct drm_encoder *encoder,
7168                              char *name,
7169                              int hdisplay, int vdisplay)
7170 {
7171         struct drm_device *dev = encoder->dev;
7172         struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
7173         struct drm_display_mode *mode = NULL;
7174         struct drm_display_mode *native_mode = &amdgpu_encoder->native_mode;
7175
7176         mode = drm_mode_duplicate(dev, native_mode);
7177
7178         if (mode == NULL)
7179                 return NULL;
7180
7181         mode->hdisplay = hdisplay;
7182         mode->vdisplay = vdisplay;
7183         mode->type &= ~DRM_MODE_TYPE_PREFERRED;
7184         strscpy(mode->name, name, DRM_DISPLAY_MODE_LEN);
7185
7186         return mode;
7188 }
7189
7190 static void amdgpu_dm_connector_add_common_modes(struct drm_encoder *encoder,
7191                                                  struct drm_connector *connector)
7192 {
7193         struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
7194         struct drm_display_mode *mode = NULL;
7195         struct drm_display_mode *native_mode = &amdgpu_encoder->native_mode;
7196         struct amdgpu_dm_connector *amdgpu_dm_connector =
7197                                 to_amdgpu_dm_connector(connector);
7198         int i;
7199         int n;
7200         struct mode_size {
7201                 char name[DRM_DISPLAY_MODE_LEN];
7202                 int w;
7203                 int h;
7204         } common_modes[] = {
7205                 {  "640x480",  640,  480},
7206                 {  "800x600",  800,  600},
7207                 { "1024x768", 1024,  768},
7208                 { "1280x720", 1280,  720},
7209                 { "1280x800", 1280,  800},
7210                 {"1280x1024", 1280, 1024},
7211                 { "1440x900", 1440,  900},
7212                 {"1680x1050", 1680, 1050},
7213                 {"1600x1200", 1600, 1200},
7214                 {"1920x1080", 1920, 1080},
7215                 {"1920x1200", 1920, 1200}
7216         };
7217
7218         n = ARRAY_SIZE(common_modes);
7219
7220         for (i = 0; i < n; i++) {
7221                 struct drm_display_mode *curmode = NULL;
7222                 bool mode_existed = false;
7223
7224                 if (common_modes[i].w > native_mode->hdisplay ||
7225                     common_modes[i].h > native_mode->vdisplay ||
7226                    (common_modes[i].w == native_mode->hdisplay &&
7227                     common_modes[i].h == native_mode->vdisplay))
7228                         continue;
7229
7230                 list_for_each_entry(curmode, &connector->probed_modes, head) {
7231                         if (common_modes[i].w == curmode->hdisplay &&
7232                             common_modes[i].h == curmode->vdisplay) {
7233                                 mode_existed = true;
7234                                 break;
7235                         }
7236                 }
7237
7238                 if (mode_existed)
7239                         continue;
7240
7241                 mode = amdgpu_dm_create_common_mode(encoder,
7242                                 common_modes[i].name, common_modes[i].w,
7243                                 common_modes[i].h);
                if (!mode)
                        continue;
7244                 drm_mode_probed_add(connector, mode);
7245                 amdgpu_dm_connector->num_modes++;
7246         }
7247 }
7248
7249 static void amdgpu_dm_connector_ddc_get_modes(struct drm_connector *connector,
7250                                               struct edid *edid)
7251 {
7252         struct amdgpu_dm_connector *amdgpu_dm_connector =
7253                         to_amdgpu_dm_connector(connector);
7254
7255         if (edid) {
7256                 /* empty probed_modes */
7257                 INIT_LIST_HEAD(&connector->probed_modes);
7258                 amdgpu_dm_connector->num_modes =
7259                                 drm_add_edid_modes(connector, edid);
7260
7261                 /* Sort the probed modes before calling
7262                  * amdgpu_dm_get_native_mode(), since an EDID can have
7263                  * more than one preferred mode. Modes later in the
7264                  * probed mode list may have a higher, preferred
7265                  * resolution: for example, 3840x2160 in the base EDID
7266                  * preferred timing and 4096x2160 as the preferred
7267                  * resolution in a later DID extension block.
7268                  */
7269                 drm_mode_sort(&connector->probed_modes);
7270                 amdgpu_dm_get_native_mode(connector);
7271
7272                 /* Freesync capabilities are reset by calling
7273                  * drm_add_edid_modes() and need to be
7274                  * restored here.
7275                  */
7276                 amdgpu_dm_update_freesync_caps(connector, edid);
7277         } else {
7278                 amdgpu_dm_connector->num_modes = 0;
7279         }
7280 }
7281
7282 static bool is_duplicate_mode(struct amdgpu_dm_connector *aconnector,
7283                               struct drm_display_mode *mode)
7284 {
7285         struct drm_display_mode *m;
7286
7287         list_for_each_entry(m, &aconnector->base.probed_modes, head) {
7288                 if (drm_mode_equal(m, mode))
7289                         return true;
7290         }
7291
7292         return false;
7293 }
7294
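/*
 * Synthesize fixed-refresh "FreeSync video" modes by stretching the
 * vertical total of the highest-refresh probed mode, one mode per common
 * video rate that falls inside the panel's VRR range.
 *
 * For illustration: a 1920x1080@60 mode with clock 148500 kHz, htotal 2200
 * and vtotal 1125, retargeted to 48 Hz, gives
 * target_vtotal = 148500 * 1000 * 1000 / (48000 * 2200) ~= 1406,
 * for an effective 148500000 / (2200 * 1406) ~= 48.01 Hz refresh rate.
 */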
7295 static uint add_fs_modes(struct amdgpu_dm_connector *aconnector)
7296 {
7297         const struct drm_display_mode *m;
7298         struct drm_display_mode *new_mode;
7299         uint i;
7300         uint32_t new_modes_count = 0;
7301
7302         /* Standard FPS values
7303          *
7304          * 23.976   - TV/NTSC
7305          * 24       - Cinema
7306          * 25       - TV/PAL
7307          * 29.97    - TV/NTSC
7308          * 30       - TV/NTSC
7309          * 48       - Cinema HFR
7310          * 50       - TV/PAL
7311          * 60       - Commonly used
7312          * 48,72,96 - Multiples of 24
7313          */
7314         const uint32_t common_rates[] = { 23976, 24000, 25000, 29970, 30000,
7315                                          48000, 50000, 60000, 72000, 96000 };
7316
7317         /*
7318          * Find the mode with the highest refresh rate at the same resolution
7319          * as the preferred mode. Some monitors report a preferred mode with a
7320          * lower refresh rate than the highest one the panel supports.
7321          */
7322
7323         m = get_highest_refresh_rate_mode(aconnector, true);
7324         if (!m)
7325                 return 0;
7326
7327         for (i = 0; i < ARRAY_SIZE(common_rates); i++) {
7328                 uint64_t target_vtotal, target_vtotal_diff;
7329                 uint64_t num, den;
7330
7331                 if (drm_mode_vrefresh(m) * 1000 < common_rates[i])
7332                         continue;
7333
7334                 if (common_rates[i] < aconnector->min_vfreq * 1000 ||
7335                     common_rates[i] > aconnector->max_vfreq * 1000)
7336                         continue;
7337
7338                 num = (unsigned long long)m->clock * 1000 * 1000;
7339                 den = common_rates[i] * (unsigned long long)m->htotal;
7340                 target_vtotal = div_u64(num, den);
7341                 target_vtotal_diff = target_vtotal - m->vtotal;
7342
7343                 /* Check for illegal modes */
7344                 if (m->vsync_start + target_vtotal_diff < m->vdisplay ||
7345                     m->vsync_end + target_vtotal_diff < m->vsync_start ||
7346                     m->vtotal + target_vtotal_diff < m->vsync_end)
7347                         continue;
7348
7349                 new_mode = drm_mode_duplicate(aconnector->base.dev, m);
7350                 if (!new_mode)
7351                         goto out;
7352
7353                 new_mode->vtotal += (u16)target_vtotal_diff;
7354                 new_mode->vsync_start += (u16)target_vtotal_diff;
7355                 new_mode->vsync_end += (u16)target_vtotal_diff;
7356                 new_mode->type &= ~DRM_MODE_TYPE_PREFERRED;
7357                 new_mode->type |= DRM_MODE_TYPE_DRIVER;
7358
7359                 if (!is_duplicate_mode(aconnector, new_mode)) {
7360                         drm_mode_probed_add(&aconnector->base, new_mode);
7361                         new_modes_count += 1;
7362                 } else {
7363                         drm_mode_destroy(aconnector->base.dev, new_mode);
                }
7364         }
7365  out:
7366         return new_modes_count;
7367 }
7368
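/*
 * FreeSync video modes are only added when the amdgpu_freesync_vid_mode
 * module option is set, a valid EDID is present, and the panel's VRR range
 * spans more than 10 Hz; presumably a narrower range leaves too little
 * headroom for the stretched modes added above.
 */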
7369 static void amdgpu_dm_connector_add_freesync_modes(struct drm_connector *connector,
7370                                                    struct edid *edid)
7371 {
7372         struct amdgpu_dm_connector *amdgpu_dm_connector =
7373                 to_amdgpu_dm_connector(connector);
7374
7375         if (!(amdgpu_freesync_vid_mode && edid))
7376                 return;
7377
7378         if (amdgpu_dm_connector->max_vfreq - amdgpu_dm_connector->min_vfreq > 10)
7379                 amdgpu_dm_connector->num_modes +=
7380                         add_fs_modes(amdgpu_dm_connector);
7381 }
7382
7383 static int amdgpu_dm_connector_get_modes(struct drm_connector *connector)
7384 {
7385         struct amdgpu_dm_connector *amdgpu_dm_connector =
7386                         to_amdgpu_dm_connector(connector);
7387         struct drm_encoder *encoder;
7388         struct edid *edid = amdgpu_dm_connector->edid;
7389
7390         encoder = amdgpu_dm_connector_to_encoder(connector);
7391
7392         if (!drm_edid_is_valid(edid)) {
7393                 amdgpu_dm_connector->num_modes =
7394                                 drm_add_modes_noedid(connector, 640, 480);
7395         } else {
7396                 amdgpu_dm_connector_ddc_get_modes(connector, edid);
7397                 amdgpu_dm_connector_add_common_modes(encoder, connector);
7398                 amdgpu_dm_connector_add_freesync_modes(connector, edid);
7399         }
7400         amdgpu_dm_fbc_init(connector);
7401
7402         return amdgpu_dm_connector->num_modes;
7403 }
7404
7405 void amdgpu_dm_connector_init_helper(struct amdgpu_display_manager *dm,
7406                                      struct amdgpu_dm_connector *aconnector,
7407                                      int connector_type,
7408                                      struct dc_link *link,
7409                                      int link_index)
7410 {
7411         struct amdgpu_device *adev = drm_to_adev(dm->ddev);
7412
7413         /*
7414          * Some of the properties below require access to state, like bpc.
7415          * Allocate some default initial connector state with our reset helper.
7416          */
7417         if (aconnector->base.funcs->reset)
7418                 aconnector->base.funcs->reset(&aconnector->base);
7419
7420         aconnector->connector_id = link_index;
7421         aconnector->dc_link = link;
7422         aconnector->base.interlace_allowed = false;
7423         aconnector->base.doublescan_allowed = false;
7424         aconnector->base.stereo_allowed = false;
7425         aconnector->base.dpms = DRM_MODE_DPMS_OFF;
7426         aconnector->hpd.hpd = AMDGPU_HPD_NONE; /* not used */
7427         aconnector->audio_inst = -1;
7428         mutex_init(&aconnector->hpd_lock);
7429
7430         /*
7431          * Configure HPD hot-plug support: connector->polled defaults to 0,
7432          * which means HPD hot plug is not supported.
7433          */
7434         switch (connector_type) {
7435         case DRM_MODE_CONNECTOR_HDMIA:
7436                 aconnector->base.polled = DRM_CONNECTOR_POLL_HPD;
7437                 aconnector->base.ycbcr_420_allowed =
7438                         link->link_enc->features.hdmi_ycbcr420_supported;
7439                 break;
7440         case DRM_MODE_CONNECTOR_DisplayPort:
7441                 aconnector->base.polled = DRM_CONNECTOR_POLL_HPD;
7442                 aconnector->base.ycbcr_420_allowed =
7443                         link->link_enc->features.dp_ycbcr420_supported;
7444                 break;
7445         case DRM_MODE_CONNECTOR_DVID:
7446                 aconnector->base.polled = DRM_CONNECTOR_POLL_HPD;
7447                 break;
7448         default:
7449                 break;
7450         }
7451
7452         drm_object_attach_property(&aconnector->base.base,
7453                                 dm->ddev->mode_config.scaling_mode_property,
7454                                 DRM_MODE_SCALE_NONE);
7455
7456         drm_object_attach_property(&aconnector->base.base,
7457                                 adev->mode_info.underscan_property,
7458                                 UNDERSCAN_OFF);
7459         drm_object_attach_property(&aconnector->base.base,
7460                                 adev->mode_info.underscan_hborder_property,
7461                                 0);
7462         drm_object_attach_property(&aconnector->base.base,
7463                                 adev->mode_info.underscan_vborder_property,
7464                                 0);
7465
7466         if (!aconnector->mst_port)
7467                 drm_connector_attach_max_bpc_property(&aconnector->base, 8, 16);
7468
7469         /* This defaults to the max in the range, but we want 8bpc for non-edp. */
7470         aconnector->base.state->max_bpc = (connector_type == DRM_MODE_CONNECTOR_eDP) ? 16 : 8;
7471         aconnector->base.state->max_requested_bpc = aconnector->base.state->max_bpc;
7472
7473         if (connector_type == DRM_MODE_CONNECTOR_eDP &&
7474             (dc_is_dmcu_initialized(adev->dm.dc) || adev->dm.dc->ctx->dmub_srv)) {
7475                 drm_object_attach_property(&aconnector->base.base,
7476                                 adev->mode_info.abm_level_property, 0);
7477         }
7478
7479         if (connector_type == DRM_MODE_CONNECTOR_HDMIA ||
7480             connector_type == DRM_MODE_CONNECTOR_DisplayPort ||
7481             connector_type == DRM_MODE_CONNECTOR_eDP) {
7482                 drm_connector_attach_hdr_output_metadata_property(&aconnector->base);
7483
7484                 if (!aconnector->mst_port)
7485                         drm_connector_attach_vrr_capable_property(&aconnector->base);
7486
7487 #ifdef CONFIG_DRM_AMD_DC_HDCP
7488                 if (adev->dm.hdcp_workqueue)
7489                         drm_connector_attach_content_protection_property(&aconnector->base, true);
7490 #endif
7491         }
7492 }
7493
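/*
 * i2c_algorithm.master_xfer hook: translate each struct i2c_msg into a DC
 * i2c_payload and submit the whole transaction via dc_submit_i2c(). Per the
 * i2c core contract, return the number of messages transferred on success
 * or a negative errno on failure.
 */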
7494 static int amdgpu_dm_i2c_xfer(struct i2c_adapter *i2c_adap,
7495                               struct i2c_msg *msgs, int num)
7496 {
7497         struct amdgpu_i2c_adapter *i2c = i2c_get_adapdata(i2c_adap);
7498         struct ddc_service *ddc_service = i2c->ddc_service;
7499         struct i2c_command cmd;
7500         int i;
7501         int result = -EIO;
7502
7503         cmd.payloads = kcalloc(num, sizeof(struct i2c_payload), GFP_KERNEL);
7504
7505         if (!cmd.payloads)
7506                 return result;
7507
7508         cmd.number_of_payloads = num;
7509         cmd.engine = I2C_COMMAND_ENGINE_DEFAULT;
7510         cmd.speed = 100;
7511
7512         for (i = 0; i < num; i++) {
7513                 cmd.payloads[i].write = !(msgs[i].flags & I2C_M_RD);
7514                 cmd.payloads[i].address = msgs[i].addr;
7515                 cmd.payloads[i].length = msgs[i].len;
7516                 cmd.payloads[i].data = msgs[i].buf;
7517         }
7518
7519         if (dc_submit_i2c(
7520                         ddc_service->ctx->dc,
7521                         ddc_service->ddc_pin->hw_info.ddc_channel,
7522                         &cmd))
7523                 result = num;
7524
7525         kfree(cmd.payloads);
7526         return result;
7527 }
7528
7529 static u32 amdgpu_dm_i2c_func(struct i2c_adapter *adap)
7530 {
7531         return I2C_FUNC_I2C | I2C_FUNC_SMBUS_EMUL;
7532 }
7533
7534 static const struct i2c_algorithm amdgpu_dm_i2c_algo = {
7535         .master_xfer = amdgpu_dm_i2c_xfer,
7536         .functionality = amdgpu_dm_i2c_func,
7537 };
7538
7539 static struct amdgpu_i2c_adapter *
7540 create_i2c(struct ddc_service *ddc_service,
7541            int link_index,
7542            int *res)
7543 {
7544         struct amdgpu_device *adev = ddc_service->ctx->driver_context;
7545         struct amdgpu_i2c_adapter *i2c;
7546
7547         i2c = kzalloc(sizeof(struct amdgpu_i2c_adapter), GFP_KERNEL);
7548         if (!i2c)
7549                 return NULL;
7550         i2c->base.owner = THIS_MODULE;
7551         i2c->base.class = I2C_CLASS_DDC;
7552         i2c->base.dev.parent = &adev->pdev->dev;
7553         i2c->base.algo = &amdgpu_dm_i2c_algo;
7554         snprintf(i2c->base.name, sizeof(i2c->base.name), "AMDGPU DM i2c hw bus %d", link_index);
7555         i2c_set_adapdata(&i2c->base, i2c);
7556         i2c->ddc_service = ddc_service;
7557         i2c->ddc_service->ddc_pin->hw_info.ddc_channel = link_index;
7558
7559         return i2c;
7560 }
7561
7563 /*
7564  * Note: this function assumes that dc_link_detect() was called for the
7565  * dc_link which will be represented by this aconnector.
7566  */
7567 static int amdgpu_dm_connector_init(struct amdgpu_display_manager *dm,
7568                                     struct amdgpu_dm_connector *aconnector,
7569                                     uint32_t link_index,
7570                                     struct amdgpu_encoder *aencoder)
7571 {
7572         int res = 0;
7573         int connector_type;
7574         struct dc *dc = dm->dc;
7575         struct dc_link *link = dc_get_link_at_index(dc, link_index);
7576         struct amdgpu_i2c_adapter *i2c;
7577
7578         link->priv = aconnector;
7579
7580         DRM_DEBUG_DRIVER("%s()\n", __func__);
7581
7582         i2c = create_i2c(link->ddc, link->link_index, &res);
7583         if (!i2c) {
7584                 DRM_ERROR("Failed to create i2c adapter data\n");
7585                 return -ENOMEM;
7586         }
7587
7588         aconnector->i2c = i2c;
7589         res = i2c_add_adapter(&i2c->base);
7590
7591         if (res) {
7592                 DRM_ERROR("Failed to register hw i2c %d\n", link->link_index);
7593                 goto out_free;
7594         }
7595
7596         connector_type = to_drm_connector_type(link->connector_signal);
7597
7598         res = drm_connector_init_with_ddc(
7599                         dm->ddev,
7600                         &aconnector->base,
7601                         &amdgpu_dm_connector_funcs,
7602                         connector_type,
7603                         &i2c->base);
7604
7605         if (res) {
7606                 DRM_ERROR("connector_init failed\n");
7607                 aconnector->connector_id = -1;
7608                 goto out_free;
7609         }
7610
7611         drm_connector_helper_add(
7612                         &aconnector->base,
7613                         &amdgpu_dm_connector_helper_funcs);
7614
7615         amdgpu_dm_connector_init_helper(
7616                 dm,
7617                 aconnector,
7618                 connector_type,
7619                 link,
7620                 link_index);
7621
7622         drm_connector_attach_encoder(
7623                 &aconnector->base, &aencoder->base);
7624
7625         if (connector_type == DRM_MODE_CONNECTOR_DisplayPort
7626                 || connector_type == DRM_MODE_CONNECTOR_eDP)
7627                 amdgpu_dm_initialize_dp_connector(dm, aconnector, link->link_index);
7628
7629 out_free:
7630         if (res) {
7631                 kfree(i2c);
7632                 aconnector->i2c = NULL;
7633         }
7634         return res;
7635 }
7636
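/*
 * Build the possible_crtcs mask for an encoder: with n CRTCs this is simply
 * the low n bits set (e.g. 4 CRTCs -> 0xf), capped at 6 CRTCs.
 */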
7637 int amdgpu_dm_get_encoder_crtc_mask(struct amdgpu_device *adev)
7638 {
7639         switch (adev->mode_info.num_crtc) {
7640         case 1:
7641                 return 0x1;
7642         case 2:
7643                 return 0x3;
7644         case 3:
7645                 return 0x7;
7646         case 4:
7647                 return 0xf;
7648         case 5:
7649                 return 0x1f;
7650         case 6:
7651         default:
7652                 return 0x3f;
7653         }
7654 }
7655
7656 static int amdgpu_dm_encoder_init(struct drm_device *dev,
7657                                   struct amdgpu_encoder *aencoder,
7658                                   uint32_t link_index)
7659 {
7660         struct amdgpu_device *adev = drm_to_adev(dev);
7661
7662         int res = drm_encoder_init(dev,
7663                                    &aencoder->base,
7664                                    &amdgpu_dm_encoder_funcs,
7665                                    DRM_MODE_ENCODER_TMDS,
7666                                    NULL);
7667
7668         aencoder->base.possible_crtcs = amdgpu_dm_get_encoder_crtc_mask(adev);
7669
7670         if (!res)
7671                 aencoder->encoder_id = link_index;
7672         else
7673                 aencoder->encoder_id = -1;
7674
7675         drm_encoder_helper_add(&aencoder->base, &amdgpu_dm_encoder_helper_funcs);
7676
7677         return res;
7678 }
7679
7680 static void manage_dm_interrupts(struct amdgpu_device *adev,
7681                                  struct amdgpu_crtc *acrtc,
7682                                  bool enable)
7683 {
7684         /*
7685          * We have no guarantee that the frontend index maps to the same
7686          * backend index - some even map to more than one.
7687          *
7688          * TODO: Use a different interrupt or check DC itself for the mapping.
7689          */
7690         int irq_type =
7691                 amdgpu_display_crtc_idx_to_irq_type(
7692                         adev,
7693                         acrtc->crtc_id);
7694
7695         if (enable) {
7696                 drm_crtc_vblank_on(&acrtc->base);
7697                 amdgpu_irq_get(
7698                         adev,
7699                         &adev->pageflip_irq,
7700                         irq_type);
7701 #if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
7702                 amdgpu_irq_get(
7703                         adev,
7704                         &adev->vline0_irq,
7705                         irq_type);
7706 #endif
7707         } else {
7708 #if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
7709                 amdgpu_irq_put(
7710                         adev,
7711                         &adev->vline0_irq,
7712                         irq_type);
7713 #endif
7714                 amdgpu_irq_put(
7715                         adev,
7716                         &adev->pageflip_irq,
7717                         irq_type);
7718                 drm_crtc_vblank_off(&acrtc->base);
7719         }
7720 }
7721
7722 static void dm_update_pflip_irq_state(struct amdgpu_device *adev,
7723                                       struct amdgpu_crtc *acrtc)
7724 {
7725         int irq_type =
7726                 amdgpu_display_crtc_idx_to_irq_type(adev, acrtc->crtc_id);
7727
7728         /*
7729          * Read the current state for the IRQ and force-reapply the setting
7730          * to hardware.
7731          */
7732         amdgpu_irq_update(adev, &adev->pageflip_irq, irq_type);
7733 }
7734
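/*
 * Report whether scaling-related connector state changed in a way that
 * requires a stream update; toggling underscan only counts when non-zero
 * borders were (or will be) in use.
 */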
7735 static bool
7736 is_scaling_state_different(const struct dm_connector_state *dm_state,
7737                            const struct dm_connector_state *old_dm_state)
7738 {
7739         if (dm_state->scaling != old_dm_state->scaling)
7740                 return true;
7741         if (!dm_state->underscan_enable && old_dm_state->underscan_enable) {
7742                 if (old_dm_state->underscan_hborder != 0 && old_dm_state->underscan_vborder != 0)
7743                         return true;
7744         } else if (dm_state->underscan_enable && !old_dm_state->underscan_enable) {
7745                 if (dm_state->underscan_hborder != 0 && dm_state->underscan_vborder != 0)
7746                         return true;
7747         } else if (dm_state->underscan_hborder != old_dm_state->underscan_hborder ||
7748                    dm_state->underscan_vborder != old_dm_state->underscan_vborder)
7749                 return true;
7750         return false;
7751 }
7752
7753 #ifdef CONFIG_DRM_AMD_DC_HDCP
7754 static bool is_content_protection_different(struct drm_connector_state *state,
7755                                             const struct drm_connector_state *old_state,
7756                                             const struct drm_connector *connector, struct hdcp_workqueue *hdcp_w)
7757 {
7758         struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
7759         struct dm_connector_state *dm_con_state = to_dm_connector_state(connector->state);
7760
7761         /* Handle: Type0/1 change */
7762         if (old_state->hdcp_content_type != state->hdcp_content_type &&
7763             state->content_protection != DRM_MODE_CONTENT_PROTECTION_UNDESIRED) {
7764                 state->content_protection = DRM_MODE_CONTENT_PROTECTION_DESIRED;
7765                 return true;
7766         }
7767
7768         /* CP is being re-enabled; ignore this transition.
7769          *
7770          * Handles:     ENABLED -> DESIRED
7771          */
7772         if (old_state->content_protection == DRM_MODE_CONTENT_PROTECTION_ENABLED &&
7773             state->content_protection == DRM_MODE_CONTENT_PROTECTION_DESIRED) {
7774                 state->content_protection = DRM_MODE_CONTENT_PROTECTION_ENABLED;
7775                 return false;
7776         }
7777
7778         /* S3 resume case: the old state will always be 0 (UNDESIRED) and the restored state will be ENABLED.
7779          *
7780          * Handles:     UNDESIRED -> ENABLED
7781          */
7782         if (old_state->content_protection == DRM_MODE_CONTENT_PROTECTION_UNDESIRED &&
7783             state->content_protection == DRM_MODE_CONTENT_PROTECTION_ENABLED)
7784                 state->content_protection = DRM_MODE_CONTENT_PROTECTION_DESIRED;
7785
7786         /* Check that something is connected and enabled; otherwise we would
7787          * start HDCP with nothing connected (hot-plug, headless S3, DPMS).
7788          *
7789          * Handles:     DESIRED -> DESIRED (Special case)
7790          */
7791         if (dm_con_state->update_hdcp && state->content_protection == DRM_MODE_CONTENT_PROTECTION_DESIRED &&
7792             connector->dpms == DRM_MODE_DPMS_ON && aconnector->dc_sink != NULL) {
7793                 dm_con_state->update_hdcp = false;
7794                 return true;
7795         }
7796
7797         /*
7798          * Handles:     UNDESIRED -> UNDESIRED
7799          *              DESIRED -> DESIRED
7800          *              ENABLED -> ENABLED
7801          */
7802         if (old_state->content_protection == state->content_protection)
7803                 return false;
7804
7805         /*
7806          * Handles:     UNDESIRED -> DESIRED
7807          *              DESIRED -> UNDESIRED
7808          *              ENABLED -> UNDESIRED
7809          */
7810         if (state->content_protection != DRM_MODE_CONTENT_PROTECTION_ENABLED)
7811                 return true;
7812
7813         /*
7814          * Handles:     DESIRED -> ENABLED
7815          */
7816         return false;
7817 }
7818
7819 #endif
7820 static void remove_stream(struct amdgpu_device *adev,
7821                           struct amdgpu_crtc *acrtc,
7822                           struct dc_stream_state *stream)
7823 {
7824         /* this is the update mode case */
7825
7826         acrtc->otg_inst = -1;
7827         acrtc->enabled = false;
7828 }
7829
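/*
 * Translate the cursor plane state into a DC cursor position. A cursor
 * hanging off the top or left edge (negative crtc_x/y) is expressed as a
 * zero position plus a non-zero hotspot, keeping the visible portion
 * unchanged.
 */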
7830 static int get_cursor_position(struct drm_plane *plane, struct drm_crtc *crtc,
7831                                struct dc_cursor_position *position)
7832 {
7833         struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
7834         int x, y;
7835         int xorigin = 0, yorigin = 0;
7836
7837         if (!crtc || !plane->state->fb)
7838                 return 0;
7839
7840         if ((plane->state->crtc_w > amdgpu_crtc->max_cursor_width) ||
7841             (plane->state->crtc_h > amdgpu_crtc->max_cursor_height)) {
7842                 DRM_ERROR("%s: bad cursor width or height %d x %d\n",
7843                           __func__,
7844                           plane->state->crtc_w,
7845                           plane->state->crtc_h);
7846                 return -EINVAL;
7847         }
7848
7849         x = plane->state->crtc_x;
7850         y = plane->state->crtc_y;
7851
7852         if (x <= -amdgpu_crtc->max_cursor_width ||
7853             y <= -amdgpu_crtc->max_cursor_height)
7854                 return 0;
7855
7856         if (x < 0) {
7857                 xorigin = min(-x, amdgpu_crtc->max_cursor_width - 1);
7858                 x = 0;
7859         }
7860         if (y < 0) {
7861                 yorigin = min(-y, amdgpu_crtc->max_cursor_height - 1);
7862                 y = 0;
7863         }
7864         position->enable = true;
7865         position->translate_by_source = true;
7866         position->x = x;
7867         position->y = y;
7868         position->x_hotspot = xorigin;
7869         position->y_hotspot = yorigin;
7870
7871         return 0;
7872 }
7873
7874 static void handle_cursor_update(struct drm_plane *plane,
7875                                  struct drm_plane_state *old_plane_state)
7876 {
7877         struct amdgpu_device *adev = drm_to_adev(plane->dev);
7878         struct amdgpu_framebuffer *afb = to_amdgpu_framebuffer(plane->state->fb);
7879         struct drm_crtc *crtc = afb ? plane->state->crtc : old_plane_state->crtc;
7880         struct dm_crtc_state *crtc_state = crtc ? to_dm_crtc_state(crtc->state) : NULL;
7881         struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
7882         uint64_t address = afb ? afb->address : 0;
7883         struct dc_cursor_position position = {0};
7884         struct dc_cursor_attributes attributes;
7885         int ret;
7886
7887         if (!plane->state->fb && !old_plane_state->fb)
7888                 return;
7889
7890         DC_LOG_CURSOR("%s: crtc_id=%d with cursor size %dx%d\n",
7891                       __func__,
7892                       amdgpu_crtc->crtc_id,
7893                       plane->state->crtc_w,
7894                       plane->state->crtc_h);
7895
7896         ret = get_cursor_position(plane, crtc, &position);
7897         if (ret)
7898                 return;
7899
7900         if (!position.enable) {
7901                 /* turn off cursor */
7902                 if (crtc_state && crtc_state->stream) {
7903                         mutex_lock(&adev->dm.dc_lock);
7904                         dc_stream_set_cursor_position(crtc_state->stream,
7905                                                       &position);
7906                         mutex_unlock(&adev->dm.dc_lock);
7907                 }
7908                 return;
7909         }
7910
7911         amdgpu_crtc->cursor_width = plane->state->crtc_w;
7912         amdgpu_crtc->cursor_height = plane->state->crtc_h;
7913
7914         memset(&attributes, 0, sizeof(attributes));
7915         attributes.address.high_part = upper_32_bits(address);
7916         attributes.address.low_part  = lower_32_bits(address);
7917         attributes.width             = plane->state->crtc_w;
7918         attributes.height            = plane->state->crtc_h;
7919         attributes.color_format      = CURSOR_MODE_COLOR_PRE_MULTIPLIED_ALPHA;
7920         attributes.rotation_angle    = 0;
7921         attributes.attribute_flags.value = 0;
7922
7923         attributes.pitch = afb->base.pitches[0] / afb->base.format->cpp[0];
7924
7925         if (crtc_state->stream) {
7926                 mutex_lock(&adev->dm.dc_lock);
7927                 if (!dc_stream_set_cursor_attributes(crtc_state->stream,
7928                                                          &attributes))
7929                         DRM_ERROR("DC failed to set cursor attributes\n");
7930
7931                 if (!dc_stream_set_cursor_position(crtc_state->stream,
7932                                                    &position))
7933                         DRM_ERROR("DC failed to set cursor position\n");
7934                 mutex_unlock(&adev->dm.dc_lock);
7935         }
7936 }
7937
7938 static void prepare_flip_isr(struct amdgpu_crtc *acrtc)
7939 {
7941         assert_spin_locked(&acrtc->base.dev->event_lock);
7942         WARN_ON(acrtc->event);
7943
7944         acrtc->event = acrtc->base.state->event;
7945
7946         /* Set the flip status */
7947         acrtc->pflip_status = AMDGPU_FLIP_SUBMITTED;
7948
7949         /* Mark this event as consumed */
7950         acrtc->base.state->event = NULL;
7951
7952         DC_LOG_PFLIP("crtc:%d, pflip_stat:AMDGPU_FLIP_SUBMITTED\n",
7953                      acrtc->crtc_id);
7954 }
7955
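/*
 * Refresh VRR state on a stream as part of the flip path: rebuild the VRR
 * infopacket and, on pre-AMDGPU_FAMILY_AI hardware with VRR active, also
 * handle v_update and adjust vmin/vmax here, before the frame ends.
 */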
7956 static void update_freesync_state_on_stream(
7957         struct amdgpu_display_manager *dm,
7958         struct dm_crtc_state *new_crtc_state,
7959         struct dc_stream_state *new_stream,
7960         struct dc_plane_state *surface,
7961         u32 flip_timestamp_in_us)
7962 {
7963         struct mod_vrr_params vrr_params;
7964         struct dc_info_packet vrr_infopacket = {0};
7965         struct amdgpu_device *adev = dm->adev;
7966         struct amdgpu_crtc *acrtc = to_amdgpu_crtc(new_crtc_state->base.crtc);
7967         unsigned long flags;
7968         bool pack_sdp_v1_3 = false;
7969
7970         if (!new_stream)
7971                 return;
7972
7973         /*
7974          * TODO: Determine why min/max totals and vrefresh can be 0 here.
7975          * For now it's sufficient to just guard against these conditions.
7976          */
7977
7978         if (!new_stream->timing.h_total || !new_stream->timing.v_total)
7979                 return;
7980
7981         spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);
7982         vrr_params = acrtc->dm_irq_params.vrr_params;
7983
7984         if (surface) {
7985                 mod_freesync_handle_preflip(
7986                         dm->freesync_module,
7987                         surface,
7988                         new_stream,
7989                         flip_timestamp_in_us,
7990                         &vrr_params);
7991
7992                 if (adev->family < AMDGPU_FAMILY_AI &&
7993                     amdgpu_dm_vrr_active(new_crtc_state)) {
7994                         mod_freesync_handle_v_update(dm->freesync_module,
7995                                                      new_stream, &vrr_params);
7996
7997                         /* Need to call this before the frame ends. */
7998                         dc_stream_adjust_vmin_vmax(dm->dc,
7999                                                    new_crtc_state->stream,
8000                                                    &vrr_params.adjust);
8001                 }
8002         }
8003
8004         mod_freesync_build_vrr_infopacket(
8005                 dm->freesync_module,
8006                 new_stream,
8007                 &vrr_params,
8008                 PACKET_TYPE_VRR,
8009                 TRANSFER_FUNC_UNKNOWN,
8010                 &vrr_infopacket,
8011                 pack_sdp_v1_3);
8012
8013         new_crtc_state->freesync_timing_changed |=
8014                 (memcmp(&acrtc->dm_irq_params.vrr_params.adjust,
8015                         &vrr_params.adjust,
8016                         sizeof(vrr_params.adjust)) != 0);
8017
8018         new_crtc_state->freesync_vrr_info_changed |=
8019                 (memcmp(&new_crtc_state->vrr_infopacket,
8020                         &vrr_infopacket,
8021                         sizeof(vrr_infopacket)) != 0);
8022
8023         acrtc->dm_irq_params.vrr_params = vrr_params;
8024         new_crtc_state->vrr_infopacket = vrr_infopacket;
8025
8026         new_stream->adjust = acrtc->dm_irq_params.vrr_params.adjust;
8027         new_stream->vrr_infopacket = vrr_infopacket;
8028
8029         if (new_crtc_state->freesync_vrr_info_changed)
8030                 DRM_DEBUG_KMS("VRR packet update: crtc=%u enabled=%d state=%d\n",
8031                               new_crtc_state->base.crtc->base.id,
8032                               (int)new_crtc_state->base.vrr_enabled,
8033                               (int)vrr_params.state);
8034
8035         spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
8036 }
8037
8038 static void update_stream_irq_parameters(
8039         struct amdgpu_display_manager *dm,
8040         struct dm_crtc_state *new_crtc_state)
8041 {
8042         struct dc_stream_state *new_stream = new_crtc_state->stream;
8043         struct mod_vrr_params vrr_params;
8044         struct mod_freesync_config config = new_crtc_state->freesync_config;
8045         struct amdgpu_device *adev = dm->adev;
8046         struct amdgpu_crtc *acrtc = to_amdgpu_crtc(new_crtc_state->base.crtc);
8047         unsigned long flags;
8048
8049         if (!new_stream)
8050                 return;
8051
8052         /*
8053          * TODO: Determine why min/max totals and vrefresh can be 0 here.
8054          * For now it's sufficient to just guard against these conditions.
8055          */
8056         if (!new_stream->timing.h_total || !new_stream->timing.v_total)
8057                 return;
8058
8059         spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);
8060         vrr_params = acrtc->dm_irq_params.vrr_params;
8061
8062         if (new_crtc_state->vrr_supported &&
8063             config.min_refresh_in_uhz &&
8064             config.max_refresh_in_uhz) {
8065                 /*
8066                  * if freesync compatible mode was set, config.state will be set
8067                  * in atomic check
8068                  */
8069                 if (config.state == VRR_STATE_ACTIVE_FIXED && config.fixed_refresh_in_uhz &&
8070                     (!drm_atomic_crtc_needs_modeset(&new_crtc_state->base) ||
8071                      new_crtc_state->freesync_config.state == VRR_STATE_ACTIVE_FIXED)) {
8072                         vrr_params.max_refresh_in_uhz = config.max_refresh_in_uhz;
8073                         vrr_params.min_refresh_in_uhz = config.min_refresh_in_uhz;
8074                         vrr_params.fixed_refresh_in_uhz = config.fixed_refresh_in_uhz;
8075                         vrr_params.state = VRR_STATE_ACTIVE_FIXED;
8076                 } else {
8077                         config.state = new_crtc_state->base.vrr_enabled ?
8078                                                      VRR_STATE_ACTIVE_VARIABLE :
8079                                                      VRR_STATE_INACTIVE;
8080                 }
8081         } else {
8082                 config.state = VRR_STATE_UNSUPPORTED;
8083         }
8084
8085         mod_freesync_build_vrr_params(dm->freesync_module,
8086                                       new_stream,
8087                                       &config, &vrr_params);
8088
8089         new_crtc_state->freesync_timing_changed |=
8090                 (memcmp(&acrtc->dm_irq_params.vrr_params.adjust,
8091                         &vrr_params.adjust, sizeof(vrr_params.adjust)) != 0);
8092
8093         new_crtc_state->freesync_config = config;
8094         /* Copy state for access from DM IRQ handler */
8095         acrtc->dm_irq_params.freesync_config = config;
8096         acrtc->dm_irq_params.active_planes = new_crtc_state->active_planes;
8097         acrtc->dm_irq_params.vrr_params = vrr_params;
8098         spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
8099 }
8100
8101 static void amdgpu_dm_handle_vrr_transition(struct dm_crtc_state *old_state,
8102                                             struct dm_crtc_state *new_state)
8103 {
8104         bool old_vrr_active = amdgpu_dm_vrr_active(old_state);
8105         bool new_vrr_active = amdgpu_dm_vrr_active(new_state);
8106
8107         if (!old_vrr_active && new_vrr_active) {
8108                 /* Transition VRR inactive -> active:
8109                  * While VRR is active, we must not disable vblank irq, as a
8110                  * reenable after disable would compute bogus vblank/pflip
8111                  * timestamps if the disable happened inside the display front porch.
8112                  *
8113                  * We also need vupdate irq for the actual core vblank handling
8114                  * at end of vblank.
8115                  */
8116                 dm_set_vupdate_irq(new_state->base.crtc, true);
8117                 drm_crtc_vblank_get(new_state->base.crtc);
8118                 DRM_DEBUG_DRIVER("%s: crtc=%u VRR off->on: Get vblank ref\n",
8119                                  __func__, new_state->base.crtc->base.id);
8120         } else if (old_vrr_active && !new_vrr_active) {
8121                 /* Transition VRR active -> inactive:
8122                  * Allow vblank irq disable again for fixed refresh rate.
8123                  */
8124                 dm_set_vupdate_irq(new_state->base.crtc, false);
8125                 drm_crtc_vblank_put(new_state->base.crtc);
8126                 DRM_DEBUG_DRIVER("%s: crtc=%u VRR on->off: Drop vblank ref\n",
8127                                  __func__, new_state->base.crtc->base.id);
8128         }
8129 }
8130
8131 static void amdgpu_dm_commit_cursors(struct drm_atomic_state *state)
8132 {
8133         struct drm_plane *plane;
8134         struct drm_plane_state *old_plane_state, *new_plane_state;
8135         int i;
8136
8137         /*
8138          * TODO: Make this per-stream so we don't issue redundant updates for
8139          * commits with multiple streams.
8140          */
8141         for_each_oldnew_plane_in_state(state, plane, old_plane_state,
8142                                        new_plane_state, i)
8143                 if (plane->type == DRM_PLANE_TYPE_CURSOR)
8144                         handle_cursor_update(plane, old_plane_state);
8145 }
8146
8147 static void amdgpu_dm_commit_planes(struct drm_atomic_state *state,
8148                                     struct dc_state *dc_state,
8149                                     struct drm_device *dev,
8150                                     struct amdgpu_display_manager *dm,
8151                                     struct drm_crtc *pcrtc,
8152                                     bool wait_for_vblank)
8153 {
8154         uint32_t i;
8155         uint64_t timestamp_ns;
8156         struct drm_plane *plane;
8157         struct drm_plane_state *old_plane_state, *new_plane_state;
8158         struct amdgpu_crtc *acrtc_attach = to_amdgpu_crtc(pcrtc);
8159         struct drm_crtc_state *new_pcrtc_state =
8160                         drm_atomic_get_new_crtc_state(state, pcrtc);
8161         struct dm_crtc_state *acrtc_state = to_dm_crtc_state(new_pcrtc_state);
8162         struct dm_crtc_state *dm_old_crtc_state =
8163                         to_dm_crtc_state(drm_atomic_get_old_crtc_state(state, pcrtc));
8164         int planes_count = 0, vpos, hpos;
8165         long r;
8166         unsigned long flags;
8167         struct amdgpu_bo *abo;
8168         uint32_t target_vblank, last_flip_vblank;
8169         bool vrr_active = amdgpu_dm_vrr_active(acrtc_state);
8170         bool pflip_present = false;
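             /*
              * The update bundle is too large to live on the stack, so it is
              * allocated on the heap below and freed in the cleanup path.
              */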
8171         struct {
8172                 struct dc_surface_update surface_updates[MAX_SURFACES];
8173                 struct dc_plane_info plane_infos[MAX_SURFACES];
8174                 struct dc_scaling_info scaling_infos[MAX_SURFACES];
8175                 struct dc_flip_addrs flip_addrs[MAX_SURFACES];
8176                 struct dc_stream_update stream_update;
8177         } *bundle;
8178
8179         bundle = kzalloc(sizeof(*bundle), GFP_KERNEL);
8180
8181         if (!bundle) {
8182                 dm_error("Failed to allocate update bundle\n");
8183                 goto cleanup;
8184         }
8185
8186         /*
8187          * Disable the cursor first if we're disabling all the planes.
8188          * It'll remain on the screen after the planes are re-enabled
8189          * if we don't.
8190          */
8191         if (acrtc_state->active_planes == 0)
8192                 amdgpu_dm_commit_cursors(state);
8193
8194         /* update planes when needed */
8195         for_each_oldnew_plane_in_state(state, plane, old_plane_state, new_plane_state, i) {
8196                 struct drm_crtc *crtc = new_plane_state->crtc;
8197                 struct drm_crtc_state *new_crtc_state;
8198                 struct drm_framebuffer *fb = new_plane_state->fb;
8199                 struct amdgpu_framebuffer *afb = (struct amdgpu_framebuffer *)fb;
8200                 bool plane_needs_flip;
8201                 struct dc_plane_state *dc_plane;
8202                 struct dm_plane_state *dm_new_plane_state = to_dm_plane_state(new_plane_state);
8203
8204                 /* Cursor plane is handled after stream updates */
8205                 if (plane->type == DRM_PLANE_TYPE_CURSOR)
8206                         continue;
8207
8208                 if (!fb || !crtc || pcrtc != crtc)
8209                         continue;
8210
8211                 new_crtc_state = drm_atomic_get_new_crtc_state(state, crtc);
8212                 if (!new_crtc_state->active)
8213                         continue;
8214
8215                 dc_plane = dm_new_plane_state->dc_state;
8216
8217                 bundle->surface_updates[planes_count].surface = dc_plane;
8218                 if (new_pcrtc_state->color_mgmt_changed) {
8219                         bundle->surface_updates[planes_count].gamma = dc_plane->gamma_correction;
8220                         bundle->surface_updates[planes_count].in_transfer_func = dc_plane->in_transfer_func;
8221                         bundle->surface_updates[planes_count].gamut_remap_matrix = &dc_plane->gamut_remap_matrix;
8222                 }
8223
8224                 fill_dc_scaling_info(new_plane_state,
8225                                      &bundle->scaling_infos[planes_count]);
8226
8227                 bundle->surface_updates[planes_count].scaling_info =
8228                         &bundle->scaling_infos[planes_count];
8229
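                     /*
                      * A page flip is only needed when both the old and new
                      * state carry a framebuffer; otherwise the plane is just
                      * being enabled or disabled.
                      */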
8230                 plane_needs_flip = old_plane_state->fb && new_plane_state->fb;
8231
8232                 pflip_present = pflip_present || plane_needs_flip;
8233
8234                 if (!plane_needs_flip) {
8235                         planes_count += 1;
8236                         continue;
8237                 }
8238
8239                 abo = gem_to_amdgpu_bo(fb->obj[0]);
8240
8241                 /*
8242                  * Wait for all fences on this FB. Use a bounded wait to avoid
8243                  * deadlocking during GPU reset, when the fence may never signal
8244                  * while we hold the reservation lock for the BO.
8245                  */
8246                 r = dma_resv_wait_timeout_rcu(abo->tbo.base.resv, true,
8247                                                         false,
8248                                                         msecs_to_jiffies(5000));
8249                 if (unlikely(r <= 0))
8250                         DRM_ERROR("Waiting for fences timed out!\n");
8251
8252                 fill_dc_plane_info_and_addr(
8253                         dm->adev, new_plane_state,
8254                         afb->tiling_flags,
8255                         &bundle->plane_infos[planes_count],
8256                         &bundle->flip_addrs[planes_count].address,
8257                         afb->tmz_surface, false);
8258
8259                 DRM_DEBUG_ATOMIC("plane: id=%d dcc_en=%d\n",
8260                                  new_plane_state->plane->index,
8261                                  bundle->plane_infos[planes_count].dcc.enable);
8262
8263                 bundle->surface_updates[planes_count].plane_info =
8264                         &bundle->plane_infos[planes_count];
8265
8266                 /*
8267                  * Only allow immediate flips for fast updates that don't
8268                  * change FB pitch, DCC state, rotation or mirroring.
8269                  */
8270                 bundle->flip_addrs[planes_count].flip_immediate =
8271                         crtc->state->async_flip &&
8272                         acrtc_state->update_type == UPDATE_TYPE_FAST;
8273
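                     /* ktime_get_ns() is in ns; DC expects the flip timestamp in us. */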
8274                 timestamp_ns = ktime_get_ns();
8275                 bundle->flip_addrs[planes_count].flip_timestamp_in_us = div_u64(timestamp_ns, 1000);
8276                 bundle->surface_updates[planes_count].flip_addr = &bundle->flip_addrs[planes_count];
8277                 bundle->surface_updates[planes_count].surface = dc_plane;
8278
8279                 if (!bundle->surface_updates[planes_count].surface) {
8280                         DRM_ERROR("No surface for CRTC: id=%d\n",
8281                                         acrtc_attach->crtc_id);
8282                         continue;
8283                 }
8284
8285                 if (plane == pcrtc->primary)
8286                         update_freesync_state_on_stream(
8287                                 dm,
8288                                 acrtc_state,
8289                                 acrtc_state->stream,
8290                                 dc_plane,
8291                                 bundle->flip_addrs[planes_count].flip_timestamp_in_us);
8292
8293                 DRM_DEBUG_ATOMIC("%s Flipping to hi: 0x%x, low: 0x%x\n",
8294                                  __func__,
8295                                  bundle->flip_addrs[planes_count].address.grph.addr.high_part,
8296                                  bundle->flip_addrs[planes_count].address.grph.addr.low_part);
8297
8298                 planes_count += 1;
8300         }
8301
8302         if (pflip_present) {
8303                 if (!vrr_active) {
8304                         /* Use old throttling in non-vrr fixed refresh rate mode
8305                          * to keep flip scheduling based on target vblank counts
8306                          * working in a backwards compatible way, e.g., for
8307                          * clients using the GLX_OML_sync_control extension or
8308                          * DRI3/Present extension with defined target_msc.
8309                          */
8310                         last_flip_vblank = amdgpu_get_vblank_counter_kms(pcrtc);
8311                 } else {
8313                         /* For variable refresh rate mode only:
8314                          * Get vblank of last completed flip to avoid > 1 vrr
8315                          * flips per video frame by use of throttling, but allow
8316                          * flip programming anywhere in the possibly large
8317                          * variable vrr vblank interval for fine-grained flip
8318                          * timing control and more opportunity to avoid stutter
8319                          * on late submission of flips.
8320                          */
8321                         spin_lock_irqsave(&pcrtc->dev->event_lock, flags);
8322                         last_flip_vblank = acrtc_attach->dm_irq_params.last_flip_vblank;
8323                         spin_unlock_irqrestore(&pcrtc->dev->event_lock, flags);
8324                 }
8325
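                     /*
                      * wait_for_vblank contributes 0 or 1 here: async flips
                      * target the vblank of the last flip, everything else
                      * waits for one vblank past it.
                      */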
8326                 target_vblank = last_flip_vblank + wait_for_vblank;
8327
8328                 /*
8329                  * Wait until we're out of the vertical blank period before the one
8330                  * targeted by the flip
8331                  */
8332                 while ((acrtc_attach->enabled &&
8333                         (amdgpu_display_get_crtc_scanoutpos(dm->ddev, acrtc_attach->crtc_id,
8334                                                             0, &vpos, &hpos, NULL,
8335                                                             NULL, &pcrtc->hwmode)
8336                          & (DRM_SCANOUTPOS_VALID | DRM_SCANOUTPOS_IN_VBLANK)) ==
8337                         (DRM_SCANOUTPOS_VALID | DRM_SCANOUTPOS_IN_VBLANK) &&
8338                         (int)(target_vblank -
8339                           amdgpu_get_vblank_counter_kms(pcrtc)) > 0)) {
8340                         usleep_range(1000, 1100);
8341                 }
8342
8343                 /*
8344                  * Prepare the flip event for the pageflip interrupt to handle.
8345                  *
8346                  * This only works in the case where we've already turned on the
8347                  * appropriate hardware blocks (e.g. HUBP), so in the transition case
8348                  * from 0 -> n planes we have to skip a hardware generated event
8349                  * and rely on sending it from software.
8350                  */
8351                 if (acrtc_attach->base.state->event &&
8352                     acrtc_state->active_planes > 0) {
8353                         drm_crtc_vblank_get(pcrtc);
8354
8355                         spin_lock_irqsave(&pcrtc->dev->event_lock, flags);
8356
8357                         WARN_ON(acrtc_attach->pflip_status != AMDGPU_FLIP_NONE);
8358                         prepare_flip_isr(acrtc_attach);
8359
8360                         spin_unlock_irqrestore(&pcrtc->dev->event_lock, flags);
8361                 }
8362
8363                 if (acrtc_state->stream) {
8364                         if (acrtc_state->freesync_vrr_info_changed)
8365                                 bundle->stream_update.vrr_infopacket =
8366                                         &acrtc_state->stream->vrr_infopacket;
8367                 }
8368         }
8369
8370         /* Update the planes if changed or disable if we don't have any. */
8371         if ((planes_count || acrtc_state->active_planes == 0) &&
8372                 acrtc_state->stream) {
8373                 bundle->stream_update.stream = acrtc_state->stream;
8374                 if (new_pcrtc_state->mode_changed) {
8375                         bundle->stream_update.src = acrtc_state->stream->src;
8376                         bundle->stream_update.dst = acrtc_state->stream->dst;
8377                 }
8378
8379                 if (new_pcrtc_state->color_mgmt_changed) {
8380                         /*
8381                          * TODO: This isn't fully correct since we've actually
8382                          * already modified the stream in place.
8383                          */
8384                         bundle->stream_update.gamut_remap =
8385                                 &acrtc_state->stream->gamut_remap_matrix;
8386                         bundle->stream_update.output_csc_transform =
8387                                 &acrtc_state->stream->csc_color_matrix;
8388                         bundle->stream_update.out_transfer_func =
8389                                 acrtc_state->stream->out_transfer_func;
8390                 }
8391
8392                 acrtc_state->stream->abm_level = acrtc_state->abm_level;
8393                 if (acrtc_state->abm_level != dm_old_crtc_state->abm_level)
8394                         bundle->stream_update.abm_level = &acrtc_state->abm_level;
8395
8396                 /*
8397                  * If FreeSync state on the stream has changed then we need to
8398                  * re-adjust the min/max bounds now that DC doesn't handle this
8399                  * as part of commit.
8400                  */
8401                 if (is_dc_timing_adjust_needed(dm_old_crtc_state, acrtc_state)) {
8402                         spin_lock_irqsave(&pcrtc->dev->event_lock, flags);
8403                         dc_stream_adjust_vmin_vmax(
8404                                 dm->dc, acrtc_state->stream,
8405                                 &acrtc_attach->dm_irq_params.vrr_params.adjust);
8406                         spin_unlock_irqrestore(&pcrtc->dev->event_lock, flags);
8407                 }
8408                 mutex_lock(&dm->dc_lock);
8409                 if ((acrtc_state->update_type > UPDATE_TYPE_FAST) &&
8410                                 acrtc_state->stream->link->psr_settings.psr_allow_active)
8411                         amdgpu_dm_psr_disable(acrtc_state->stream);
8412
8413                 dc_commit_updates_for_stream(dm->dc,
8414                                                      bundle->surface_updates,
8415                                                      planes_count,
8416                                                      acrtc_state->stream,
8417                                                      &bundle->stream_update,
8418                                                      dc_state);
8419
8420                 /*
8421                  * Enable or disable the interrupts on the backend.
8422                  *
8423                  * Most pipes are put into power gating when unused.
8424                  *
8425                  * When power gating is enabled on a pipe we lose the
8426                  * interrupt enablement state when power gating is disabled.
8427                  *
8428                  * So we need to update the IRQ control state in hardware
8429                  * whenever the pipe turns on (since it could be previously
8430                  * power gated) or off (since some pipes can't be power gated
8431                  * on some ASICs).
8432                  */
8433                 if (dm_old_crtc_state->active_planes != acrtc_state->active_planes)
8434                         dm_update_pflip_irq_state(drm_to_adev(dev),
8435                                                   acrtc_attach);
8436
8437                 if ((acrtc_state->update_type > UPDATE_TYPE_FAST) &&
8438                                 acrtc_state->stream->link->psr_settings.psr_version != DC_PSR_VERSION_UNSUPPORTED &&
8439                                 !acrtc_state->stream->link->psr_settings.psr_feature_enabled)
8440                         amdgpu_dm_link_setup_psr(acrtc_state->stream);
8441                 else if ((acrtc_state->update_type == UPDATE_TYPE_FAST) &&
8442                                 acrtc_state->stream->link->psr_settings.psr_feature_enabled &&
8443                                 !acrtc_state->stream->link->psr_settings.psr_allow_active) {
8444                         amdgpu_dm_psr_enable(acrtc_state->stream);
8445                 }
8446
8447                 mutex_unlock(&dm->dc_lock);
8448         }
8449
8450         /*
8451          * Update cursor state *after* programming all the planes.
8452          * This avoids redundant programming in the case where we're going
8453          * to be disabling a single plane - those pipes are being disabled.
8454          */
8455         if (acrtc_state->active_planes)
8456                 amdgpu_dm_commit_cursors(state);
8457
8458 cleanup:
8459         kfree(bundle);
8460 }
8461
8462 static void amdgpu_dm_commit_audio(struct drm_device *dev,
8463                                    struct drm_atomic_state *state)
8464 {
8465         struct amdgpu_device *adev = drm_to_adev(dev);
8466         struct amdgpu_dm_connector *aconnector;
8467         struct drm_connector *connector;
8468         struct drm_connector_state *old_con_state, *new_con_state;
8469         struct drm_crtc_state *new_crtc_state;
8470         struct dm_crtc_state *new_dm_crtc_state;
8471         const struct dc_stream_status *status;
8472         int i, inst;
8473
8474         /* Notify audio device removals. */
8475         for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) {
8476                 if (old_con_state->crtc != new_con_state->crtc) {
8477                         /* CRTC changes require notification. */
8478                         goto notify;
8479                 }
8480
8481                 if (!new_con_state->crtc)
8482                         continue;
8483
8484                 new_crtc_state = drm_atomic_get_new_crtc_state(
8485                         state, new_con_state->crtc);
8486
8487                 if (!new_crtc_state)
8488                         continue;
8489
8490                 if (!drm_atomic_crtc_needs_modeset(new_crtc_state))
8491                         continue;
8492
8493         notify:
8494                 aconnector = to_amdgpu_dm_connector(connector);
8495
8496                 mutex_lock(&adev->dm.audio_lock);
8497                 inst = aconnector->audio_inst;
8498                 aconnector->audio_inst = -1;
8499                 mutex_unlock(&adev->dm.audio_lock);
8500
8501                 amdgpu_dm_audio_eld_notify(adev, inst);
8502         }
8503
8504         /* Notify audio device additions. */
8505         for_each_new_connector_in_state(state, connector, new_con_state, i) {
8506                 if (!new_con_state->crtc)
8507                         continue;
8508
8509                 new_crtc_state = drm_atomic_get_new_crtc_state(
8510                         state, new_con_state->crtc);
8511
8512                 if (!new_crtc_state)
8513                         continue;
8514
8515                 if (!drm_atomic_crtc_needs_modeset(new_crtc_state))
8516                         continue;
8517
8518                 new_dm_crtc_state = to_dm_crtc_state(new_crtc_state);
8519                 if (!new_dm_crtc_state->stream)
8520                         continue;
8521
8522                 status = dc_stream_get_status(new_dm_crtc_state->stream);
8523                 if (!status)
8524                         continue;
8525
8526                 aconnector = to_amdgpu_dm_connector(connector);
8527
8528                 mutex_lock(&adev->dm.audio_lock);
8529                 inst = status->audio_inst;
8530                 aconnector->audio_inst = inst;
8531                 mutex_unlock(&adev->dm.audio_lock);
8532
8533                 amdgpu_dm_audio_eld_notify(adev, inst);
8534         }
8535 }
8536
8537 /**
8538  * amdgpu_dm_crtc_copy_transient_flags - copy mirrored flags from DRM to DC
8539  * @crtc_state: the DRM CRTC state
8540  * @stream_state: the DC stream state.
8541  *
8542  * Copy the mirrored transient state flags from DRM to DC. It is used to bring
8543  * a dc_stream_state's flags in sync with a drm_crtc_state's flags.
8544  */
8545 static void amdgpu_dm_crtc_copy_transient_flags(struct drm_crtc_state *crtc_state,
8546                                                 struct dc_stream_state *stream_state)
8547 {
8548         stream_state->mode_changed = drm_atomic_crtc_needs_modeset(crtc_state);
8549 }
8550
8551 /**
8552  * amdgpu_dm_atomic_commit_tail() - AMDgpu DM's commit tail implementation.
8553  * @state: The atomic state to commit
8554  *
8555  * This will tell DC to commit the constructed DC state from atomic_check,
8556  * programming the hardware. Any failure here implies a hardware failure, since
8557  * atomic check should have filtered anything non-kosher.
8558  */
8559 static void amdgpu_dm_atomic_commit_tail(struct drm_atomic_state *state)
8560 {
8561         struct drm_device *dev = state->dev;
8562         struct amdgpu_device *adev = drm_to_adev(dev);
8563         struct amdgpu_display_manager *dm = &adev->dm;
8564         struct dm_atomic_state *dm_state;
8565         struct dc_state *dc_state = NULL, *dc_state_temp = NULL;
8566         uint32_t i, j;
8567         struct drm_crtc *crtc;
8568         struct drm_crtc_state *old_crtc_state, *new_crtc_state;
8569         unsigned long flags;
8570         bool wait_for_vblank = true;
8571         struct drm_connector *connector;
8572         struct drm_connector_state *old_con_state, *new_con_state;
8573         struct dm_crtc_state *dm_old_crtc_state, *dm_new_crtc_state;
8574         int crtc_disable_count = 0;
8575         bool mode_set_reset_required = false;
8576
8577         trace_amdgpu_dm_atomic_commit_tail_begin(state);
8578
8579         drm_atomic_helper_update_legacy_modeset_state(dev, state);
8580
8581         dm_state = dm_atomic_get_new_state(state);
8582         if (dm_state && dm_state->context) {
8583                 dc_state = dm_state->context;
8584         } else {
8585                 /* No state changes, retain current state. */
8586                 dc_state_temp = dc_create_state(dm->dc);
8587                 ASSERT(dc_state_temp);
8588                 dc_state = dc_state_temp;
8589                 dc_resource_state_copy_construct_current(dm->dc, dc_state);
8590         }
8591
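         /*
          * Disable interrupts and drop the stream reference for every CRTC
          * that is being disabled or needs a modeset, before programming
          * the new state.
          */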
8592         for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state,
8593                                        new_crtc_state, i) {
8594                 struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
8595
8596                 dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
8597
8598                 if (old_crtc_state->active &&
8599                     (!new_crtc_state->active ||
8600                      drm_atomic_crtc_needs_modeset(new_crtc_state))) {
8601                         manage_dm_interrupts(adev, acrtc, false);
8602                         dc_stream_release(dm_old_crtc_state->stream);
8603                 }
8604         }
8605
8606         drm_atomic_helper_calc_timestamping_constants(state);
8607
8608         /* update changed items */
8609         for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
8610                 struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
8611
8612                 dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
8613                 dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
8614
8615                 DRM_DEBUG_ATOMIC(
8616                         "amdgpu_crtc id:%d crtc_state_flags: enable:%d, active:%d, "
8617                         "planes_changed:%d, mode_changed:%d, active_changed:%d, "
8618                         "connectors_changed:%d\n",
8619                         acrtc->crtc_id,
8620                         new_crtc_state->enable,
8621                         new_crtc_state->active,
8622                         new_crtc_state->planes_changed,
8623                         new_crtc_state->mode_changed,
8624                         new_crtc_state->active_changed,
8625                         new_crtc_state->connectors_changed);
8626
8627                 /* Disable cursor if disabling crtc */
8628                 if (old_crtc_state->active && !new_crtc_state->active) {
8629                         struct dc_cursor_position position;
8630
8631                         memset(&position, 0, sizeof(position));
8632                         mutex_lock(&dm->dc_lock);
8633                         dc_stream_set_cursor_position(dm_old_crtc_state->stream, &position);
8634                         mutex_unlock(&dm->dc_lock);
8635                 }
8636
8637                 /* Copy all transient state flags into dc state */
8638                 if (dm_new_crtc_state->stream) {
8639                         amdgpu_dm_crtc_copy_transient_flags(&dm_new_crtc_state->base,
8640                                                             dm_new_crtc_state->stream);
8641                 }
8642
8643                 /* Handle the headless hotplug case, updating new_state and
8644                  * aconnector as needed.
8645                  */
8646
8647                 if (modeset_required(new_crtc_state, dm_new_crtc_state->stream, dm_old_crtc_state->stream)) {
8648
8649                         DRM_DEBUG_ATOMIC("Atomic commit: SET crtc id %d: [%p]\n", acrtc->crtc_id, acrtc);
8650
8651                         if (!dm_new_crtc_state->stream) {
8652                                 /*
8653                                  * This can happen when userspace notification
8654                                  * delivery fails and userspace tries to set a
8655                                  * mode on a display that is in fact already
8656                                  * disconnected; dc_sink is NULL on the
8657                                  * aconnector in that case, and we expect a
8658                                  * mode reset to follow shortly.
8659                                  *
8660                                  * It can also happen when an unplug occurs
8661                                  * while the resume sequence is still running.
8662                                  *
8663                                  * In both cases we want to pretend we still
8664                                  * have a sink, to keep the pipe running so that
8665                                  * the hw state stays consistent with the sw state.
8666                                  */
8667                                 DRM_DEBUG_DRIVER("%s: Failed to create new stream for crtc %d\n",
8668                                                 __func__, acrtc->base.base.id);
8669                                 continue;
8670                         }
8671
8672                         if (dm_old_crtc_state->stream)
8673                                 remove_stream(adev, acrtc, dm_old_crtc_state->stream);
8674
8675                         pm_runtime_get_noresume(dev->dev);
8676
8677                         acrtc->enabled = true;
8678                         acrtc->hw_mode = new_crtc_state->mode;
8679                         crtc->hwmode = new_crtc_state->mode;
8680                         mode_set_reset_required = true;
8681                 } else if (modereset_required(new_crtc_state)) {
8682                         DRM_DEBUG_ATOMIC("Atomic commit: RESET. crtc id %d:[%p]\n", acrtc->crtc_id, acrtc);
8683                         /* i.e. reset mode */
8684                         if (dm_old_crtc_state->stream)
8685                                 remove_stream(adev, acrtc, dm_old_crtc_state->stream);
8686
8687                         mode_set_reset_required = true;
8688                 }
8689         } /* for_each_crtc_in_state() */
8690
8691         if (dc_state) {
8692                 /* If there is a mode set or reset, disable eDP PSR */
8693                 if (mode_set_reset_required)
8694                         amdgpu_dm_psr_disable_all(dm);
8695
8696                 dm_enable_per_frame_crtc_master_sync(dc_state);
8697                 mutex_lock(&dm->dc_lock);
8698                 WARN_ON(!dc_commit_state(dm->dc, dc_state));
8699 #if defined(CONFIG_DRM_AMD_DC_DCN)
8700                 /* Allow idle optimizations when vblank count is 0 for display off */
8701                 if (dm->active_vblank_irq_count == 0)
8702                         dc_allow_idle_optimizations(dm->dc, true);
8703 #endif
8704                 mutex_unlock(&dm->dc_lock);
8705         }
8706
8707         for_each_new_crtc_in_state(state, crtc, new_crtc_state, i) {
8708                 struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
8709
8710                 dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
8711
8712                 if (dm_new_crtc_state->stream != NULL) {
8713                         const struct dc_stream_status *status =
8714                                         dc_stream_get_status(dm_new_crtc_state->stream);
8715
8716                         if (!status)
8717                                 status = dc_stream_get_status_from_state(dc_state,
8718                                                                          dm_new_crtc_state->stream);
8719                         if (!status)
8720                                 DC_ERR("got no status for stream %p on acrtc%p\n", dm_new_crtc_state->stream, acrtc);
8721                         else
8722                                 acrtc->otg_inst = status->primary_otg_inst;
8723                 }
8724         }
8725 #ifdef CONFIG_DRM_AMD_DC_HDCP
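         /*
          * Propagate content protection state: reset HDCP when a protected
          * stream is being torn down, and update it when the desired
          * protection state changed on a connector.
          */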
8726         for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) {
8727                 struct dm_connector_state *dm_new_con_state = to_dm_connector_state(new_con_state);
8728                 struct amdgpu_crtc *acrtc = to_amdgpu_crtc(dm_new_con_state->base.crtc);
8729                 struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
8730
8731                 new_crtc_state = NULL;
8732
8733                 if (acrtc)
8734                         new_crtc_state = drm_atomic_get_new_crtc_state(state, &acrtc->base);
8735
8736                 dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
8737
8738                 if (dm_new_crtc_state && dm_new_crtc_state->stream == NULL &&
8739                     connector->state->content_protection == DRM_MODE_CONTENT_PROTECTION_ENABLED) {
8740                         hdcp_reset_display(adev->dm.hdcp_workqueue, aconnector->dc_link->link_index);
8741                         new_con_state->content_protection = DRM_MODE_CONTENT_PROTECTION_DESIRED;
8742                         dm_new_con_state->update_hdcp = true;
8743                         continue;
8744                 }
8745
8746                 if (is_content_protection_different(new_con_state, old_con_state, connector, adev->dm.hdcp_workqueue))
8747                         hdcp_update_display(
8748                                 adev->dm.hdcp_workqueue, aconnector->dc_link->link_index, aconnector,
8749                                 new_con_state->hdcp_content_type,
8750                                 new_con_state->content_protection == DRM_MODE_CONTENT_PROTECTION_DESIRED);
8751         }
8752 #endif
8753
8754         /* Handle connector state changes */
8755         for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) {
8756                 struct dm_connector_state *dm_new_con_state = to_dm_connector_state(new_con_state);
8757                 struct dm_connector_state *dm_old_con_state = to_dm_connector_state(old_con_state);
8758                 struct amdgpu_crtc *acrtc = to_amdgpu_crtc(dm_new_con_state->base.crtc);
8759                 struct dc_surface_update dummy_updates[MAX_SURFACES];
8760                 struct dc_stream_update stream_update;
8761                 struct dc_info_packet hdr_packet;
8762                 struct dc_stream_status *status = NULL;
8763                 bool abm_changed, hdr_changed, scaling_changed;
8764
8765                 memset(&dummy_updates, 0, sizeof(dummy_updates));
8766                 memset(&stream_update, 0, sizeof(stream_update));
8767
8768                 if (acrtc) {
8769                         new_crtc_state = drm_atomic_get_new_crtc_state(state, &acrtc->base);
8770                         old_crtc_state = drm_atomic_get_old_crtc_state(state, &acrtc->base);
8771                 }
8772
8773                 /* Skip any modesets/resets */
8774                 if (!acrtc || drm_atomic_crtc_needs_modeset(new_crtc_state))
8775                         continue;
8776
8777                 dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
8778                 dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
8779
8780                 scaling_changed = is_scaling_state_different(dm_new_con_state,
8781                                                              dm_old_con_state);
8782
8783                 abm_changed = dm_new_crtc_state->abm_level !=
8784                               dm_old_crtc_state->abm_level;
8785
8786                 hdr_changed =
8787                         !drm_connector_atomic_hdr_metadata_equal(old_con_state, new_con_state);
8788
8789                 if (!scaling_changed && !abm_changed && !hdr_changed)
8790                         continue;
8791
8792                 stream_update.stream = dm_new_crtc_state->stream;
8793                 if (scaling_changed) {
8794                         update_stream_scaling_settings(&dm_new_con_state->base.crtc->mode,
8795                                         dm_new_con_state, dm_new_crtc_state->stream);
8796
8797                         stream_update.src = dm_new_crtc_state->stream->src;
8798                         stream_update.dst = dm_new_crtc_state->stream->dst;
8799                 }
8800
8801                 if (abm_changed) {
8802                         dm_new_crtc_state->stream->abm_level = dm_new_crtc_state->abm_level;
8803
8804                         stream_update.abm_level = &dm_new_crtc_state->abm_level;
8805                 }
8806
8807                 if (hdr_changed) {
8808                         fill_hdr_info_packet(new_con_state, &hdr_packet);
8809                         stream_update.hdr_static_metadata = &hdr_packet;
8810                 }
8811
8812                 status = dc_stream_get_status(dm_new_crtc_state->stream);
8813                 if (WARN_ON(!status))
                             continue;
8814                 WARN_ON(!status->plane_count);
8815
8816                 /*
8817                  * TODO: DC refuses to perform stream updates without a dc_surface_update.
8818                  * Here we create an empty update on each plane.
8819                  * To fix this, DC should permit updating only stream properties.
8820                  */
8821                 for (j = 0; j < status->plane_count; j++)
8822                         dummy_updates[j].surface = status->plane_states[0];
8823
8825                 mutex_lock(&dm->dc_lock);
8826                 dc_commit_updates_for_stream(dm->dc,
8827                                                      dummy_updates,
8828                                                      status->plane_count,
8829                                                      dm_new_crtc_state->stream,
8830                                                      &stream_update,
8831                                                      dc_state);
8832                 mutex_unlock(&dm->dc_lock);
8833         }
8834
8835         /* Count number of newly disabled CRTCs for dropping PM refs later. */
8836         for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state,
8837                                       new_crtc_state, i) {
8838                 if (old_crtc_state->active && !new_crtc_state->active)
8839                         crtc_disable_count++;
8840
8841                 dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
8842                 dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
8843
8844                 /* Update the freesync config on the CRTC state and the params used by the IRQ handler */
8845                 update_stream_irq_parameters(dm, dm_new_crtc_state);
8846
8847                 /* Handle VRR on->off / off->on transitions */
8848                 amdgpu_dm_handle_vrr_transition(dm_old_crtc_state,
8849                                                 dm_new_crtc_state);
8850         }
8851
8852         /*
8853          * Enable interrupts for CRTCs that are newly enabled or went through
8854          * a modeset. This is intentionally deferred until after the front-end
8855          * state has been modified, so that the OTG is on and the IRQ
8856          * handlers never access stale or invalid state.
8857          */
8858         for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
8859                 struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
8860 #ifdef CONFIG_DEBUG_FS
8861                 bool configure_crc = false;
8862                 enum amdgpu_dm_pipe_crc_source cur_crc_src;
8863 #endif
8864                 dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
8865
8866                 if (new_crtc_state->active &&
8867                     (!old_crtc_state->active ||
8868                      drm_atomic_crtc_needs_modeset(new_crtc_state))) {
8869                         dc_stream_retain(dm_new_crtc_state->stream);
8870                         acrtc->dm_irq_params.stream = dm_new_crtc_state->stream;
8871                         manage_dm_interrupts(adev, acrtc, true);
8872
8873 #ifdef CONFIG_DEBUG_FS
8874                         /*
8875                          * The frontend may have changed, so reapply the CRC
8876                          * capture settings for the stream.
8877                          */
8878                         dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
8879                         spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);
8880                         cur_crc_src = acrtc->dm_irq_params.crc_src;
8881                         spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
8882
8883                         if (amdgpu_dm_is_valid_crc_source(cur_crc_src)) {
8884                                 configure_crc = true;
8885 #if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
8886                                 if (amdgpu_dm_crc_window_is_activated(crtc))
8887                                         configure_crc = false;
8888 #endif
8889                         }
8890
8891                         if (configure_crc)
8892                                 amdgpu_dm_crtc_configure_crc_source(
8893                                         crtc, dm_new_crtc_state, cur_crc_src);
8894 #endif
8895                 }
8896         }
8897
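         /* If any CRTC requested an async flip, don't throttle flips to the vblank. */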
8898         for_each_new_crtc_in_state(state, crtc, new_crtc_state, j)
8899                 if (new_crtc_state->async_flip)
8900                         wait_for_vblank = false;
8901
8902         /* Update planes when needed, per CRTC */
8903         for_each_new_crtc_in_state(state, crtc, new_crtc_state, j) {
8904                 dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
8905
8906                 if (dm_new_crtc_state->stream)
8907                         amdgpu_dm_commit_planes(state, dc_state, dev,
8908                                                 dm, crtc, wait_for_vblank);
8909         }
8910
8911         /* Update audio instances for each connector. */
8912         amdgpu_dm_commit_audio(dev, state);
8913
8914         /*
8915          * Send a vblank event for every event not handled in a flip, and
8916          * mark the event as consumed for drm_atomic_helper_commit_hw_done.
8917          */
8918         spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);
8919         for_each_new_crtc_in_state(state, crtc, new_crtc_state, i) {
8920
8921                 if (new_crtc_state->event)
8922                         drm_send_event_locked(dev, &new_crtc_state->event->base);
8923
8924                 new_crtc_state->event = NULL;
8925         }
8926         spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
8927
8928         /* Signal HW programming completion */
8929         drm_atomic_helper_commit_hw_done(state);
8930
8931         if (wait_for_vblank)
8932                 drm_atomic_helper_wait_for_flip_done(dev, state);
8933
8934         drm_atomic_helper_cleanup_planes(dev, state);
8935
8936         /* Return the stolen VGA memory back to VRAM */
8937         if (!adev->mman.keep_stolen_vga_memory)
8938                 amdgpu_bo_free_kernel(&adev->mman.stolen_vga_memory, NULL, NULL);
8939         amdgpu_bo_free_kernel(&adev->mman.stolen_extended_memory, NULL, NULL);
8940
8941         /*
8942          * Finally, drop a runtime PM reference for each newly disabled CRTC,
8943          * so we can put the GPU into runtime suspend if we're not driving any
8944          * displays anymore
8945          */
8946         for (i = 0; i < crtc_disable_count; i++)
8947                 pm_runtime_put_autosuspend(dev->dev);
8948         pm_runtime_mark_last_busy(dev->dev);
8949
8950         if (dc_state_temp)
8951                 dc_release_state(dc_state_temp);
8952 }
8953
8954
8955 static int dm_force_atomic_commit(struct drm_connector *connector)
8956 {
8957         int ret = 0;
8958         struct drm_device *ddev = connector->dev;
8959         struct drm_atomic_state *state = drm_atomic_state_alloc(ddev);
8960         struct amdgpu_crtc *disconnected_acrtc = to_amdgpu_crtc(connector->encoder->crtc);
8961         struct drm_plane *plane = disconnected_acrtc->base.primary;
8962         struct drm_connector_state *conn_state;
8963         struct drm_crtc_state *crtc_state;
8964         struct drm_plane_state *plane_state;
8965
8966         if (!state)
8967                 return -ENOMEM;
8968
8969         state->acquire_ctx = ddev->mode_config.acquire_ctx;
8970
8971         /* Construct an atomic state to restore previous display setting */
8972
8973         /*
8974          * Attach connectors to drm_atomic_state
8975          */
8976         conn_state = drm_atomic_get_connector_state(state, connector);
8977
8978         ret = PTR_ERR_OR_ZERO(conn_state);
8979         if (ret)
8980                 goto out;
8981
8982         /* Attach crtc to drm_atomic_state*/
8983         crtc_state = drm_atomic_get_crtc_state(state, &disconnected_acrtc->base);
8984
8985         ret = PTR_ERR_OR_ZERO(crtc_state);
8986         if (ret)
8987                 goto out;
8988
8989         /* force a restore */
8990         crtc_state->mode_changed = true;
8991
8992         /* Attach plane to drm_atomic_state */
8993         plane_state = drm_atomic_get_plane_state(state, plane);
8994
8995         ret = PTR_ERR_OR_ZERO(plane_state);
8996         if (ret)
8997                 goto out;
8998
8999         /* Call commit internally with the state we just constructed */
9000         ret = drm_atomic_commit(state);
9001
9002 out:
9003         drm_atomic_state_put(state);
9004         if (ret)
9005                 DRM_ERROR("Restoring old state failed with %i\n", ret);
9006
9007         return ret;
9008 }
9009
9010 /*
9011  * This function handles all cases where a set mode does not come upon hotplug.
9012  * This includes when a display is unplugged and then plugged back into the
9013  * same port, and when running without usermode desktop manager support.
9014  */
9015 void dm_restore_drm_connector_state(struct drm_device *dev,
9016                                     struct drm_connector *connector)
9017 {
9018         struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
9019         struct amdgpu_crtc *disconnected_acrtc;
9020         struct dm_crtc_state *acrtc_state;
9021
9022         if (!aconnector->dc_sink || !connector->state || !connector->encoder)
9023                 return;
9024
9025         disconnected_acrtc = to_amdgpu_crtc(connector->encoder->crtc);
9026         if (!disconnected_acrtc)
9027                 return;
9028
9029         acrtc_state = to_dm_crtc_state(disconnected_acrtc->base.state);
9030         if (!acrtc_state->stream)
9031                 return;
9032
9033         /*
9034          * If the previous sink was not released and differs from the current
9035          * one, we deduce that we cannot rely on a usermode call to turn on
9036          * the display, so we do it here.
9037          */
9038         if (acrtc_state->stream->sink != aconnector->dc_sink)
9039                 dm_force_atomic_commit(&aconnector->base);
9040 }
9041
9042 /*
9043  * Grabs all modesetting locks to serialize against any blocking commits, and
9044  * waits for completion of all non-blocking commits.
9045  */
9046 static int do_aquire_global_lock(struct drm_device *dev,
9047                                  struct drm_atomic_state *state)
9048 {
9049         struct drm_crtc *crtc;
9050         struct drm_crtc_commit *commit;
9051         long ret;
9052
9053         /*
9054          * Adding all modeset locks to acquire_ctx will
9055          * ensure that when the framework releases it, the
9056          * extra locks we are taking here will get released too.
9057          */
9058         ret = drm_modeset_lock_all_ctx(dev, state->acquire_ctx);
9059         if (ret)
9060                 return ret;
9061
9062         list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
9063                 spin_lock(&crtc->commit_lock);
9064                 commit = list_first_entry_or_null(&crtc->commit_list,
9065                                 struct drm_crtc_commit, commit_entry);
9066                 if (commit)
9067                         drm_crtc_commit_get(commit);
9068                 spin_unlock(&crtc->commit_lock);
9069
9070                 if (!commit)
9071                         continue;
9072
9073                 /*
9074                  * Make sure all pending HW programming has completed and
9075                  * all page flips are done.
9076                  */
9077                 ret = wait_for_completion_interruptible_timeout(&commit->hw_done, 10*HZ);
9078
9079                 if (ret > 0)
9080                         ret = wait_for_completion_interruptible_timeout(
9081                                         &commit->flip_done, 10*HZ);
9082
9083                 if (ret == 0)
9084                         DRM_ERROR("[CRTC:%d:%s] hw_done or flip_done "
9085                                   "timed out\n", crtc->base.id, crtc->name);
9086
9087                 drm_crtc_commit_put(commit);
9088         }
9089
9090         return ret < 0 ? ret : 0;
9091 }
9092
9093 static void get_freesync_config_for_crtc(
9094         struct dm_crtc_state *new_crtc_state,
9095         struct dm_connector_state *new_con_state)
9096 {
9097         struct mod_freesync_config config = {0};
9098         struct amdgpu_dm_connector *aconnector =
9099                         to_amdgpu_dm_connector(new_con_state->base.connector);
9100         struct drm_display_mode *mode = &new_crtc_state->base.mode;
9101         int vrefresh = drm_mode_vrefresh(mode);
9102         bool fs_vid_mode = false;
9103
9104         new_crtc_state->vrr_supported = new_con_state->freesync_capable &&
9105                                         vrefresh >= aconnector->min_vfreq &&
9106                                         vrefresh <= aconnector->max_vfreq;
9107
9108         if (new_crtc_state->vrr_supported) {
9109                 new_crtc_state->stream->ignore_msa_timing_param = true;
9110                 fs_vid_mode = new_crtc_state->freesync_config.state == VRR_STATE_ACTIVE_FIXED;
9111
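                     /* min/max_vfreq are in Hz; mod_freesync works in uHz. */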
9112                 config.min_refresh_in_uhz = aconnector->min_vfreq * 1000000;
9113                 config.max_refresh_in_uhz = aconnector->max_vfreq * 1000000;
9114                 config.vsif_supported = true;
9115                 config.btr = true;
9116
9117                 if (fs_vid_mode) {
9118                         config.state = VRR_STATE_ACTIVE_FIXED;
9119                         config.fixed_refresh_in_uhz = new_crtc_state->freesync_config.fixed_refresh_in_uhz;
9120                         goto out;
9121                 } else if (new_crtc_state->base.vrr_enabled) {
9122                         config.state = VRR_STATE_ACTIVE_VARIABLE;
9123                 } else {
9124                         config.state = VRR_STATE_INACTIVE;
9125                 }
9126         }
9127 out:
9128         new_crtc_state->freesync_config = config;
9129 }
9130
9131 static void reset_freesync_config_for_crtc(
9132         struct dm_crtc_state *new_crtc_state)
9133 {
9134         new_crtc_state->vrr_supported = false;
9135
9136         memset(&new_crtc_state->vrr_infopacket, 0,
9137                sizeof(new_crtc_state->vrr_infopacket));
9138 }
9139
9140 static bool
9141 is_timing_unchanged_for_freesync(struct drm_crtc_state *old_crtc_state,
9142                                  struct drm_crtc_state *new_crtc_state)
9143 {
9144         struct drm_display_mode old_mode, new_mode;
9145
9146         if (!old_crtc_state || !new_crtc_state)
9147                 return false;
9148
9149         old_mode = old_crtc_state->mode;
9150         new_mode = new_crtc_state->mode;
9151
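             /*
              * Equal horizontal timing and vsync width, combined with a
              * different vtotal/vsync position, means the two modes differ
              * only in their vertical blanking (the stretched front porch),
              * which is how freesync video modes are derived.
              */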
9152         if (old_mode.clock       == new_mode.clock &&
9153             old_mode.hdisplay    == new_mode.hdisplay &&
9154             old_mode.vdisplay    == new_mode.vdisplay &&
9155             old_mode.htotal      == new_mode.htotal &&
9156             old_mode.vtotal      != new_mode.vtotal &&
9157             old_mode.hsync_start == new_mode.hsync_start &&
9158             old_mode.vsync_start != new_mode.vsync_start &&
9159             old_mode.hsync_end   == new_mode.hsync_end &&
9160             old_mode.vsync_end   != new_mode.vsync_end &&
9161             old_mode.hskew       == new_mode.hskew &&
9162             old_mode.vscan       == new_mode.vscan &&
9163             (old_mode.vsync_end - old_mode.vsync_start) ==
9164             (new_mode.vsync_end - new_mode.vsync_start))
9165                 return true;
9166
9167         return false;
9168 }
9169
9170 static void set_freesync_fixed_config(struct dm_crtc_state *dm_new_crtc_state)
{
9171         uint64_t num, den, res;
9172         struct drm_crtc_state *new_crtc_state = &dm_new_crtc_state->base;
9173
9174         dm_new_crtc_state->freesync_config.state = VRR_STATE_ACTIVE_FIXED;
9175
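             /*
              * mode.clock is in kHz, so the nominal refresh rate in uHz is
              * clock * 1000 * 1000000 / (htotal * vtotal). For example, a
              * 148500 kHz mode with 2200x1125 total pixels works out to
              * 60,000,000 uHz, i.e. 60 Hz.
              */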
9176         num = (unsigned long long)new_crtc_state->mode.clock * 1000 * 1000000;
9177         den = (unsigned long long)new_crtc_state->mode.htotal *
9178               (unsigned long long)new_crtc_state->mode.vtotal;
9179
9180         res = div_u64(num, den);
9181         dm_new_crtc_state->freesync_config.fixed_refresh_in_uhz = res;
9182 }
9183
9184 static int dm_update_crtc_state(struct amdgpu_display_manager *dm,
9185                                 struct drm_atomic_state *state,
9186                                 struct drm_crtc *crtc,
9187                                 struct drm_crtc_state *old_crtc_state,
9188                                 struct drm_crtc_state *new_crtc_state,
9189                                 bool enable,
9190                                 bool *lock_and_validation_needed)
9191 {
9192         struct dm_atomic_state *dm_state = NULL;
9193         struct dm_crtc_state *dm_old_crtc_state, *dm_new_crtc_state;
9194         struct dc_stream_state *new_stream;
9195         int ret = 0;
9196
9197         /*
9198          * TODO Move this code into dm_crtc_atomic_check once we get rid of dc_validation_set
9199          * update changed items
9200          */
9201         struct amdgpu_crtc *acrtc = NULL;
9202         struct amdgpu_dm_connector *aconnector = NULL;
9203         struct drm_connector_state *drm_new_conn_state = NULL, *drm_old_conn_state = NULL;
9204         struct dm_connector_state *dm_new_conn_state = NULL, *dm_old_conn_state = NULL;
9205
9206         new_stream = NULL;
9207
9208         dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
9209         dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
9210         acrtc = to_amdgpu_crtc(crtc);
9211         aconnector = amdgpu_dm_find_first_crtc_matching_connector(state, crtc);
9212
9213         /* TODO This hack should go away */
9214         if (aconnector && enable) {
9215                 /* Make sure fake sink is created in plug-in scenario */
9216                 drm_new_conn_state = drm_atomic_get_new_connector_state(state,
9217                                                             &aconnector->base);
9218                 drm_old_conn_state = drm_atomic_get_old_connector_state(state,
9219                                                             &aconnector->base);
9220
9221                 if (IS_ERR(drm_new_conn_state)) {
9222                         ret = PTR_ERR_OR_ZERO(drm_new_conn_state);
9223                         goto fail;
9224                 }
9225
9226                 dm_new_conn_state = to_dm_connector_state(drm_new_conn_state);
9227                 dm_old_conn_state = to_dm_connector_state(drm_old_conn_state);
9228
9229                 if (!drm_atomic_crtc_needs_modeset(new_crtc_state))
9230                         goto skip_modeset;
9231
9232                 new_stream = create_validate_stream_for_sink(aconnector,
9233                                                              &new_crtc_state->mode,
9234                                                              dm_new_conn_state,
9235                                                              dm_old_crtc_state->stream);
9236
9237                 /*
9238                  * We can have no stream on ACTION_SET if a display
9239                  * was disconnected during S3; in this case it is not an
9240                  * error, the OS will be updated after detection and
9241                  * will do the right thing on the next atomic commit.
9242                  */
9243
9244                 if (!new_stream) {
9245                         DRM_DEBUG_DRIVER("%s: Failed to create new stream for crtc %d\n",
9246                                         __func__, acrtc->base.base.id);
9247                         ret = -ENOMEM;
9248                         goto fail;
9249                 }
9250
9251                 /*
9252                  * TODO: Check VSDB bits to decide whether this should
9253                  * be enabled or not.
9254                  */
9255                 new_stream->triggered_crtc_reset.enabled =
9256                         dm->force_timing_sync;
9257
9258                 dm_new_crtc_state->abm_level = dm_new_conn_state->abm_level;
9259
9260                 ret = fill_hdr_info_packet(drm_new_conn_state,
9261                                            &new_stream->hdr_static_metadata);
9262                 if (ret)
9263                         goto fail;
9264
9265                 /*
9266                  * If we already removed the old stream from the context
9267                  * (and set the new stream to NULL) then we can't reuse
9268                  * the old stream even if the stream and scaling are unchanged.
9269                  * We'll hit the BUG_ON and black screen.
9270                  *
9271                  * TODO: Refactor this function to allow this check to work
9272                  * in all conditions.
9273                  */
9274                 if (amdgpu_freesync_vid_mode &&
9275                     dm_new_crtc_state->stream &&
9276                     is_timing_unchanged_for_freesync(new_crtc_state, old_crtc_state))
9277                         goto skip_modeset;
9278
9279                 if (dm_new_crtc_state->stream &&
9280                     dc_is_stream_unchanged(new_stream, dm_old_crtc_state->stream) &&
9281                     dc_is_stream_scaling_unchanged(new_stream, dm_old_crtc_state->stream)) {
9282                         new_crtc_state->mode_changed = false;
9283                         DRM_DEBUG_DRIVER("Mode change not required, setting mode_changed to %d\n",
9284                                          new_crtc_state->mode_changed);
9285                 }
9286         }
9287
9288         /* mode_changed flag may get updated above, need to check again */
9289         if (!drm_atomic_crtc_needs_modeset(new_crtc_state))
9290                 goto skip_modeset;
9291
9292         DRM_DEBUG_ATOMIC(
9293                 "amdgpu_crtc id:%d crtc_state_flags: enable:%d, active:%d, "
9294                 "planes_changed:%d, mode_changed:%d, active_changed:%d, "
9295                 "connectors_changed:%d\n",
9296                 acrtc->crtc_id,
9297                 new_crtc_state->enable,
9298                 new_crtc_state->active,
9299                 new_crtc_state->planes_changed,
9300                 new_crtc_state->mode_changed,
9301                 new_crtc_state->active_changed,
9302                 new_crtc_state->connectors_changed);
9303
9304         /* Remove stream for any changed/disabled CRTC */
9305         if (!enable) {
9306
9307                 if (!dm_old_crtc_state->stream)
9308                         goto skip_modeset;
9309
9310                 if (amdgpu_freesync_vid_mode && dm_new_crtc_state->stream &&
9311                     is_timing_unchanged_for_freesync(new_crtc_state,
9312                                                      old_crtc_state)) {
9313                         new_crtc_state->mode_changed = false;
9314                         DRM_DEBUG_DRIVER(
9315                                 "Mode change not required for front porch change, "
9316                                 "setting mode_changed to %d\n",
9317                                 new_crtc_state->mode_changed);
9318
9319                         set_freesync_fixed_config(dm_new_crtc_state);
9320
9321                         goto skip_modeset;
9322                 } else if (amdgpu_freesync_vid_mode && aconnector &&
9323                            is_freesync_video_mode(&new_crtc_state->mode,
9324                                                   aconnector)) {
9325                         set_freesync_fixed_config(dm_new_crtc_state);
9326                 }
9327
9328                 ret = dm_atomic_get_state(state, &dm_state);
9329                 if (ret)
9330                         goto fail;
9331
9332                 DRM_DEBUG_DRIVER("Disabling DRM crtc: %d\n",
9333                                 crtc->base.id);
9334
9335                 /* i.e. reset mode */
9336                 if (dc_remove_stream_from_ctx(
9337                                 dm->dc,
9338                                 dm_state->context,
9339                                 dm_old_crtc_state->stream) != DC_OK) {
9340                         ret = -EINVAL;
9341                         goto fail;
9342                 }
9343
9344                 dc_stream_release(dm_old_crtc_state->stream);
9345                 dm_new_crtc_state->stream = NULL;
9346
9347                 reset_freesync_config_for_crtc(dm_new_crtc_state);
9348
9349                 *lock_and_validation_needed = true;
9350
9351         } else { /* Add stream for any updated/enabled CRTC */
9352                 /*
9353                  * Quick fix to prevent a NULL pointer dereference on new_stream
9354                  * when MST connectors added in chained mode are not found in the
9355                  * existing crtc_state. TODO: dig out the root cause of this.
9356                  */
9357                 if (!aconnector || (!aconnector->dc_sink && aconnector->mst_port))
9358                         goto skip_modeset;
9359
9360                 if (modereset_required(new_crtc_state))
9361                         goto skip_modeset;
9362
9363                 if (modeset_required(new_crtc_state, new_stream,
9364                                      dm_old_crtc_state->stream)) {
9365
9366                         WARN_ON(dm_new_crtc_state->stream);
9367
9368                         ret = dm_atomic_get_state(state, &dm_state);
9369                         if (ret)
9370                                 goto fail;
9371
9372                         dm_new_crtc_state->stream = new_stream;
9373
9374                         dc_stream_retain(new_stream);
9375
9376                         DRM_DEBUG_ATOMIC("Enabling DRM crtc: %d\n",
9377                                          crtc->base.id);
9378
9379                         if (dc_add_stream_to_ctx(
9380                                         dm->dc,
9381                                         dm_state->context,
9382                                         dm_new_crtc_state->stream) != DC_OK) {
9383                                 ret = -EINVAL;
9384                                 goto fail;
9385                         }
9386
9387                         *lock_and_validation_needed = true;
9388                 }
9389         }
9390
9391 skip_modeset:
9392         /* Release extra reference */
9393         if (new_stream)
9394                 dc_stream_release(new_stream);
9395
9396         /*
9397          * We want to do dc stream updates that do not require a
9398          * full modeset below.
9399          */
9400         if (!(enable && aconnector && new_crtc_state->active))
9401                 return 0;
9402         /*
9403          * Given the above conditions, the dc state cannot be NULL because:
9404          * 1. We're in the process of enabling CRTCs (the stream has just
9405          *    been added to the dc context, or is already on the context),
9406          * 2. the CRTC has a valid connector attached, and
9407          * 3. the CRTC is currently active and enabled.
9408          * => The dc stream state currently exists.
9409          */
9410         BUG_ON(dm_new_crtc_state->stream == NULL);
9411
9412         /* Scaling or underscan settings */
9413         if (is_scaling_state_different(dm_old_conn_state, dm_new_conn_state))
9414                 update_stream_scaling_settings(
9415                         &new_crtc_state->mode, dm_new_conn_state, dm_new_crtc_state->stream);
9416
9417         /* ABM settings */
9418         dm_new_crtc_state->abm_level = dm_new_conn_state->abm_level;
9419
9420         /*
9421          * Color management settings. We also update color properties
9422          * when a modeset is needed, to ensure it gets reprogrammed.
9423          */
9424         if (dm_new_crtc_state->base.color_mgmt_changed ||
9425             drm_atomic_crtc_needs_modeset(new_crtc_state)) {
9426                 ret = amdgpu_dm_update_crtc_color_mgmt(dm_new_crtc_state);
9427                 if (ret)
9428                         goto fail;
9429         }
9430
9431         /* Update Freesync settings. */
9432         get_freesync_config_for_crtc(dm_new_crtc_state,
9433                                      dm_new_conn_state);
9434
9435         return ret;
9436
9437 fail:
9438         if (new_stream)
9439                 dc_stream_release(new_stream);
9440         return ret;
9441 }
9442
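/*
 * should_reset_plane() - decide whether a plane and its siblings must be
 * removed from the DC context and recreated for this atomic state.
 *
 * Returns true when the plane is being added or removed, when its CRTC
 * needs a modeset or a color management update, or when a change on any
 * plane sharing the CRTC (z-order, scaling, rotation, blending, alpha,
 * colorspace, pixel format, tiling or DCC) could affect pipe acquisition
 * or bandwidth.
 */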
9443 static bool should_reset_plane(struct drm_atomic_state *state,
9444                                struct drm_plane *plane,
9445                                struct drm_plane_state *old_plane_state,
9446                                struct drm_plane_state *new_plane_state)
9447 {
9448         struct drm_plane *other;
9449         struct drm_plane_state *old_other_state, *new_other_state;
9450         struct drm_crtc_state *new_crtc_state;
9451         int i;
9452
9453         /*
9454          * TODO: Remove this hack once the checks below are sufficient
9455          * to determine when we need to reset all the planes on
9456          * the stream.
9457          */
9458         if (state->allow_modeset)
9459                 return true;
9460
9461         /* Exit early if we know that we're adding or removing the plane. */
9462         if (old_plane_state->crtc != new_plane_state->crtc)
9463                 return true;
9464
9465         /* old crtc == new_crtc == NULL, plane not in context. */
9466         if (!new_plane_state->crtc)
9467                 return false;
9468
9469         new_crtc_state =
9470                 drm_atomic_get_new_crtc_state(state, new_plane_state->crtc);
9471
9472         if (!new_crtc_state)
9473                 return true;
9474
9475         /* CRTC Degamma changes currently require us to recreate planes. */
9476         if (new_crtc_state->color_mgmt_changed)
9477                 return true;
9478
9479         if (drm_atomic_crtc_needs_modeset(new_crtc_state))
9480                 return true;
9481
9482         /*
9483          * If there are any new primary or overlay planes being added or
9484          * removed then the z-order can potentially change. To ensure
9485          * correct z-order and pipe acquisition the current DC architecture
9486          * requires us to remove and recreate all existing planes.
9487          *
9488          * TODO: Come up with a more elegant solution for this.
9489          */
9490         for_each_oldnew_plane_in_state(state, other, old_other_state, new_other_state, i) {
9491                 struct amdgpu_framebuffer *old_afb, *new_afb;
9492                 if (other->type == DRM_PLANE_TYPE_CURSOR)
9493                         continue;
9494
9495                 if (old_other_state->crtc != new_plane_state->crtc &&
9496                     new_other_state->crtc != new_plane_state->crtc)
9497                         continue;
9498
9499                 if (old_other_state->crtc != new_other_state->crtc)
9500                         return true;
9501
9502                 /* Src/dst size and scaling updates. */
9503                 if (old_other_state->src_w != new_other_state->src_w ||
9504                     old_other_state->src_h != new_other_state->src_h ||
9505                     old_other_state->crtc_w != new_other_state->crtc_w ||
9506                     old_other_state->crtc_h != new_other_state->crtc_h)
9507                         return true;
9508
9509                 /* Rotation / mirroring updates. */
9510                 if (old_other_state->rotation != new_other_state->rotation)
9511                         return true;
9512
9513                 /* Blending updates. */
9514                 if (old_other_state->pixel_blend_mode !=
9515                     new_other_state->pixel_blend_mode)
9516                         return true;
9517
9518                 /* Alpha updates. */
9519                 if (old_other_state->alpha != new_other_state->alpha)
9520                         return true;
9521
9522                 /* Colorspace changes. */
9523                 if (old_other_state->color_range != new_other_state->color_range ||
9524                     old_other_state->color_encoding != new_other_state->color_encoding)
9525                         return true;
9526
9527                 /* Framebuffer checks fall at the end. */
9528                 if (!old_other_state->fb || !new_other_state->fb)
9529                         continue;
9530
9531                 /* Pixel format changes can require bandwidth updates. */
9532                 if (old_other_state->fb->format != new_other_state->fb->format)
9533                         return true;
9534
9535                 old_afb = (struct amdgpu_framebuffer *)old_other_state->fb;
9536                 new_afb = (struct amdgpu_framebuffer *)new_other_state->fb;
9537
9538                 /* Tiling and DCC changes also require bandwidth updates. */
9539                 if (old_afb->tiling_flags != new_afb->tiling_flags ||
9540                     old_afb->base.modifier != new_afb->base.modifier)
9541                         return true;
9542         }
9543
9544         return false;
9545 }
9546
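/*
 * dm_check_cursor_fb() - validate a framebuffer bound to the cursor plane:
 * the FB must fit within the hardware cursor limits, must not be cropped,
 * must have a pitch of 64, 128 or 256 pixels that matches its width, and
 * must be linear when no format modifier is supplied.
 */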
9547 static int dm_check_cursor_fb(struct amdgpu_crtc *new_acrtc,
9548                               struct drm_plane_state *new_plane_state,
9549                               struct drm_framebuffer *fb)
9550 {
9551         struct amdgpu_device *adev = drm_to_adev(new_acrtc->base.dev);
9552         struct amdgpu_framebuffer *afb = to_amdgpu_framebuffer(fb);
9553         unsigned int pitch;
9554         bool linear;
9555
9556         if (fb->width > new_acrtc->max_cursor_width ||
9557             fb->height > new_acrtc->max_cursor_height) {
9558                 DRM_DEBUG_ATOMIC("Bad cursor FB size %dx%d\n",
9559                                  new_plane_state->fb->width,
9560                                  new_plane_state->fb->height);
9561                 return -EINVAL;
9562         }
9563         if (new_plane_state->src_w != fb->width << 16 ||
9564             new_plane_state->src_h != fb->height << 16) {
9565                 DRM_DEBUG_ATOMIC("Cropping not supported for cursor plane\n");
9566                 return -EINVAL;
9567         }
9568
9569         /* Pitch in pixels */
9570         pitch = fb->pitches[0] / fb->format->cpp[0];
9571
9572         if (fb->width != pitch) {
9573                 DRM_DEBUG_ATOMIC("Cursor FB width %d doesn't match pitch %d\n",
9574                                  fb->width, pitch);
9575                 return -EINVAL;
9576         }
9577
9578         switch (pitch) {
9579         case 64:
9580         case 128:
9581         case 256:
9582                 /* FB pitch is supported by cursor plane */
9583                 break;
9584         default:
9585                 DRM_DEBUG_ATOMIC("Bad cursor FB pitch %d px\n", pitch);
9586                 return -EINVAL;
9587         }
9588
9589         /* Core DRM takes care of checking FB modifiers, so we only need to
9590          * check tiling flags when the FB doesn't have a modifier. */
9591         if (!(fb->flags & DRM_MODE_FB_MODIFIERS)) {
9592                 if (adev->family < AMDGPU_FAMILY_AI) {
9593                         linear = AMDGPU_TILING_GET(afb->tiling_flags, ARRAY_MODE) != DC_ARRAY_2D_TILED_THIN1 &&
9594                                  AMDGPU_TILING_GET(afb->tiling_flags, ARRAY_MODE) != DC_ARRAY_1D_TILED_THIN1 &&
9595                                  AMDGPU_TILING_GET(afb->tiling_flags, MICRO_TILE_MODE) == 0;
9596                 } else {
9597                         linear = AMDGPU_TILING_GET(afb->tiling_flags, SWIZZLE_MODE) == 0;
9598                 }
9599                 if (!linear) {
9600                         DRM_DEBUG_ATOMIC("Cursor FB not linear\n");
9601                         return -EINVAL;
9602                 }
9603         }
9604
9605         return 0;
9606 }
9607
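/*
 * dm_update_plane_state() - sync the DC plane state with the DRM plane
 * state. With @enable false, the old dc_plane_state is removed from the DC
 * context; with @enable true, a new dc_plane_state is created, filled from
 * the DRM state and added to the context. Sets *lock_and_validation_needed
 * when DC global validation is required.
 */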
9608 static int dm_update_plane_state(struct dc *dc,
9609                                  struct drm_atomic_state *state,
9610                                  struct drm_plane *plane,
9611                                  struct drm_plane_state *old_plane_state,
9612                                  struct drm_plane_state *new_plane_state,
9613                                  bool enable,
9614                                  bool *lock_and_validation_needed)
9615 {
9616
9617         struct dm_atomic_state *dm_state = NULL;
9618         struct drm_crtc *new_plane_crtc, *old_plane_crtc;
9619         struct drm_crtc_state *old_crtc_state, *new_crtc_state;
9620         struct dm_crtc_state *dm_new_crtc_state, *dm_old_crtc_state;
9621         struct dm_plane_state *dm_new_plane_state, *dm_old_plane_state;
9622         struct amdgpu_crtc *new_acrtc;
9623         bool needs_reset;
9624         int ret = 0;
9625
9627         new_plane_crtc = new_plane_state->crtc;
9628         old_plane_crtc = old_plane_state->crtc;
9629         dm_new_plane_state = to_dm_plane_state(new_plane_state);
9630         dm_old_plane_state = to_dm_plane_state(old_plane_state);
9631
9632         if (plane->type == DRM_PLANE_TYPE_CURSOR) {
9633                 if (!enable || !new_plane_crtc ||
9634                         drm_atomic_plane_disabling(plane->state, new_plane_state))
9635                         return 0;
9636
9637                 new_acrtc = to_amdgpu_crtc(new_plane_crtc);
9638
9639                 if (new_plane_state->src_x != 0 || new_plane_state->src_y != 0) {
9640                         DRM_DEBUG_ATOMIC("Cropping not supported for cursor plane\n");
9641                         return -EINVAL;
9642                 }
9643
9644                 if (new_plane_state->fb) {
9645                         ret = dm_check_cursor_fb(new_acrtc, new_plane_state,
9646                                                  new_plane_state->fb);
9647                         if (ret)
9648                                 return ret;
9649                 }
9650
9651                 return 0;
9652         }
9653
9654         needs_reset = should_reset_plane(state, plane, old_plane_state,
9655                                          new_plane_state);
9656
9657         /* Remove any changed/removed planes */
9658         if (!enable) {
9659                 if (!needs_reset)
9660                         return 0;
9661
9662                 if (!old_plane_crtc)
9663                         return 0;
9664
9665                 old_crtc_state = drm_atomic_get_old_crtc_state(
9666                                 state, old_plane_crtc);
9667                 dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
9668
9669                 if (!dm_old_crtc_state->stream)
9670                         return 0;
9671
9672                 DRM_DEBUG_ATOMIC("Disabling DRM plane: %d on DRM crtc %d\n",
9673                                 plane->base.id, old_plane_crtc->base.id);
9674
9675                 ret = dm_atomic_get_state(state, &dm_state);
9676                 if (ret)
9677                         return ret;
9678
9679                 if (!dc_remove_plane_from_context(
9680                                 dc,
9681                                 dm_old_crtc_state->stream,
9682                                 dm_old_plane_state->dc_state,
9683                                 dm_state->context)) {
9684
9685                         return -EINVAL;
9686                 }
9687
9689                 dc_plane_state_release(dm_old_plane_state->dc_state);
9690                 dm_new_plane_state->dc_state = NULL;
9691
9692                 *lock_and_validation_needed = true;
9693
9694         } else { /* Add new planes */
9695                 struct dc_plane_state *dc_new_plane_state;
9696
9697                 if (drm_atomic_plane_disabling(plane->state, new_plane_state))
9698                         return 0;
9699
9700                 if (!new_plane_crtc)
9701                         return 0;
9702
9703                 new_crtc_state = drm_atomic_get_new_crtc_state(state, new_plane_crtc);
9704                 dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
9705
9706                 if (!dm_new_crtc_state->stream)
9707                         return 0;
9708
9709                 if (!needs_reset)
9710                         return 0;
9711
9712                 ret = dm_plane_helper_check_state(new_plane_state, new_crtc_state);
9713                 if (ret)
9714                         return ret;
9715
9716                 WARN_ON(dm_new_plane_state->dc_state);
9717
9718                 dc_new_plane_state = dc_create_plane_state(dc);
9719                 if (!dc_new_plane_state)
9720                         return -ENOMEM;
9721
9722                 DRM_DEBUG_ATOMIC("Enabling DRM plane: %d on DRM crtc %d\n",
9723                                  plane->base.id, new_plane_crtc->base.id);
9724
9725                 ret = fill_dc_plane_attributes(
9726                         drm_to_adev(new_plane_crtc->dev),
9727                         dc_new_plane_state,
9728                         new_plane_state,
9729                         new_crtc_state);
9730                 if (ret) {
9731                         dc_plane_state_release(dc_new_plane_state);
9732                         return ret;
9733                 }
9734
9735                 ret = dm_atomic_get_state(state, &dm_state);
9736                 if (ret) {
9737                         dc_plane_state_release(dc_new_plane_state);
9738                         return ret;
9739                 }
9740
9741                 /*
9742                  * Any atomic check errors that occur after this will
9743                  * not need a release. The plane state will be attached
9744                  * to the stream, and therefore part of the atomic
9745                  * state. It'll be released when the atomic state is
9746                  * cleaned.
9747                  */
9748                 if (!dc_add_plane_to_context(
9749                                 dc,
9750                                 dm_new_crtc_state->stream,
9751                                 dc_new_plane_state,
9752                                 dm_state->context)) {
9753
9754                         dc_plane_state_release(dc_new_plane_state);
9755                         return -EINVAL;
9756                 }
9757
9758                 dm_new_plane_state->dc_state = dc_new_plane_state;
9759
9760                 /* Tell DC to do a full surface update every time there
9761                  * is a plane change. Inefficient, but works for now.
9762                  */
9763                 dm_new_plane_state->dc_state->update_flags.bits.full_update = 1;
9764
9765                 *lock_and_validation_needed = true;
9766         }
9767
9769         return ret;
9770 }
9771
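/*
 * dm_check_crtc_cursor() - reject commits in which the cursor plane's
 * scaling differs from the primary plane's, since the hardware cursor
 * inherits the underlying pipe's scaling.
 */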
9772 static int dm_check_crtc_cursor(struct drm_atomic_state *state,
9773                                 struct drm_crtc *crtc,
9774                                 struct drm_crtc_state *new_crtc_state)
9775 {
9776         struct drm_plane_state *new_cursor_state, *new_primary_state;
9777         int cursor_scale_w, cursor_scale_h, primary_scale_w, primary_scale_h;
9778
9779         /* On DCE and DCN there is no dedicated hardware cursor plane. We get a
9780          * cursor per pipe but it's going to inherit the scaling and
9781          * positioning from the underlying pipe. Check that the cursor plane's
9782          * scaling matches the primary plane's. */
9783
9784         new_cursor_state = drm_atomic_get_new_plane_state(state, crtc->cursor);
9785         new_primary_state = drm_atomic_get_new_plane_state(state, crtc->primary);
9786         if (!new_cursor_state || !new_primary_state ||
9787             !new_cursor_state->fb || !new_primary_state->fb) {
9788                 return 0;
9789         }
9790
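        /* Scale factors in units of 0.001: dst size * 1000 / src size,
         * where src_w/src_h are 16.16 fixed point. */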
9791         cursor_scale_w = new_cursor_state->crtc_w * 1000 /
9792                          (new_cursor_state->src_w >> 16);
9793         cursor_scale_h = new_cursor_state->crtc_h * 1000 /
9794                          (new_cursor_state->src_h >> 16);
9795
9796         primary_scale_w = new_primary_state->crtc_w * 1000 /
9797                          (new_primary_state->src_w >> 16);
9798         primary_scale_h = new_primary_state->crtc_h * 1000 /
9799                          (new_primary_state->src_h >> 16);
9800
9801         if (cursor_scale_w != primary_scale_w ||
9802             cursor_scale_h != primary_scale_h) {
9803                 DRM_DEBUG_ATOMIC("Cursor plane scaling doesn't match primary plane\n");
9804                 return -EINVAL;
9805         }
9806
9807         return 0;
9808 }
9809
9810 #if defined(CONFIG_DRM_AMD_DC_DCN)
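/*
 * add_affected_mst_dsc_crtcs() - when @crtc is driven by an MST connector,
 * add every CRTC on the same MST topology whose DSC configuration may be
 * affected to the atomic state.
 */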
9811 static int add_affected_mst_dsc_crtcs(struct drm_atomic_state *state, struct drm_crtc *crtc)
9812 {
9813         struct drm_connector *connector;
9814         struct drm_connector_state *conn_state;
9815         struct amdgpu_dm_connector *aconnector = NULL;
9816         int i;
9817         for_each_new_connector_in_state(state, connector, conn_state, i) {
9818                 if (conn_state->crtc != crtc)
9819                         continue;
9820
9821                 aconnector = to_amdgpu_dm_connector(connector);
9822                 if (!aconnector->port || !aconnector->mst_port)
9823                         aconnector = NULL;
9824                 else
9825                         break;
9826         }
9827
9828         if (!aconnector)
9829                 return 0;
9830
9831         return drm_dp_mst_add_affected_dsc_crtcs(state, &aconnector->mst_port->mst_mgr);
9832 }
9833 #endif
9834
9835 /**
9836  * amdgpu_dm_atomic_check() - Atomic check implementation for AMDgpu DM.
9837  * @dev: The DRM device
9838  * @state: The atomic state to commit
9839  *
9840  * Validate that the given atomic state is programmable by DC into hardware.
9841  * This involves constructing a &struct dc_state reflecting the new hardware
9842  * state we wish to commit, then querying DC to see if it is programmable. It's
9843  * important not to modify the existing DC state. Otherwise, atomic_check
9844  * may unexpectedly commit hardware changes.
9845  *
9846  * When validating the DC state, it's important that the right locks are
9847  * acquired. For the full update case, which removes/adds/updates streams on
9848  * one CRTC while flipping on another, acquiring the global lock guarantees
9849  * that any such full update commit will wait for completion of any
9850  * outstanding flip using DRM's synchronization events.
9851  *
9852  * Note that DM adds the affected connectors for all CRTCs in state, even when
9853  * that might not seem necessary. This is because DC stream creation requires
9854  * the DC sink, which is tied to the DRM connector state. Cleaning this up
9855  * should be possible, but is non-trivial - a possible TODO item.
9856  *
9857  * Return: 0 on success, negative error code on failure.
9858  */
9859 static int amdgpu_dm_atomic_check(struct drm_device *dev,
9860                                   struct drm_atomic_state *state)
9861 {
9862         struct amdgpu_device *adev = drm_to_adev(dev);
9863         struct dm_atomic_state *dm_state = NULL;
9864         struct dc *dc = adev->dm.dc;
9865         struct drm_connector *connector;
9866         struct drm_connector_state *old_con_state, *new_con_state;
9867         struct drm_crtc *crtc;
9868         struct drm_crtc_state *old_crtc_state, *new_crtc_state;
9869         struct drm_plane *plane;
9870         struct drm_plane_state *old_plane_state, *new_plane_state;
9871         enum dc_status status;
9872         int ret, i;
9873         bool lock_and_validation_needed = false;
9874         struct dm_crtc_state *dm_old_crtc_state;
9875
9876         trace_amdgpu_dm_atomic_check_begin(state);
9877
9878         ret = drm_atomic_helper_check_modeset(dev, state);
9879         if (ret)
9880                 goto fail;
9881
9882         /* Check connector changes */
9883         for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) {
9884                 struct dm_connector_state *dm_old_con_state = to_dm_connector_state(old_con_state);
9885                 struct dm_connector_state *dm_new_con_state = to_dm_connector_state(new_con_state);
9886
9887                 /* Skip connectors that are disabled or part of modeset already. */
9888                 if (!old_con_state->crtc && !new_con_state->crtc)
9889                         continue;
9890
9891                 if (!new_con_state->crtc)
9892                         continue;
9893
9894                 new_crtc_state = drm_atomic_get_crtc_state(state, new_con_state->crtc);
9895                 if (IS_ERR(new_crtc_state)) {
9896                         ret = PTR_ERR(new_crtc_state);
9897                         goto fail;
9898                 }
9899
9900                 if (dm_old_con_state->abm_level !=
9901                     dm_new_con_state->abm_level)
9902                         new_crtc_state->connectors_changed = true;
9903         }
9904
9905 #if defined(CONFIG_DRM_AMD_DC_DCN)
9906         if (dc_resource_is_dsc_encoding_supported(dc)) {
9907                 for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
9908                         if (drm_atomic_crtc_needs_modeset(new_crtc_state)) {
9909                                 ret = add_affected_mst_dsc_crtcs(state, crtc);
9910                                 if (ret)
9911                                         goto fail;
9912                         }
9913                 }
9914         }
9915 #endif
9916         for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
9917                 dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
9918
9919                 if (!drm_atomic_crtc_needs_modeset(new_crtc_state) &&
9920                     !new_crtc_state->color_mgmt_changed &&
9921                     old_crtc_state->vrr_enabled == new_crtc_state->vrr_enabled &&
9922                     !dm_old_crtc_state->dsc_force_changed)
9923                         continue;
9924
9925                 if (!new_crtc_state->enable)
9926                         continue;
9927
9928                 ret = drm_atomic_add_affected_connectors(state, crtc);
9929                 if (ret)
9930                         goto fail;
9931
9932                 ret = drm_atomic_add_affected_planes(state, crtc);
9933                 if (ret)
9934                         goto fail;
9935
9936                 if (dm_old_crtc_state->dsc_force_changed)
9937                         new_crtc_state->mode_changed = true;
9938         }
9939
9940         /*
9941          * Add all primary and overlay planes on the CRTC to the state
9942          * whenever a plane is enabled to maintain correct z-ordering
9943          * and to enable fast surface updates.
9944          */
9945         drm_for_each_crtc(crtc, dev) {
9946                 bool modified = false;
9947
9948                 for_each_oldnew_plane_in_state(state, plane, old_plane_state, new_plane_state, i) {
9949                         if (plane->type == DRM_PLANE_TYPE_CURSOR)
9950                                 continue;
9951
9952                         if (new_plane_state->crtc == crtc ||
9953                             old_plane_state->crtc == crtc) {
9954                                 modified = true;
9955                                 break;
9956                         }
9957                 }
9958
9959                 if (!modified)
9960                         continue;
9961
9962                 drm_for_each_plane_mask(plane, state->dev, crtc->state->plane_mask) {
9963                         if (plane->type == DRM_PLANE_TYPE_CURSOR)
9964                                 continue;
9965
9966                         new_plane_state =
9967                                 drm_atomic_get_plane_state(state, plane);
9968
9969                         if (IS_ERR(new_plane_state)) {
9970                                 ret = PTR_ERR(new_plane_state);
9971                                 goto fail;
9972                         }
9973                 }
9974         }
9975
9976         /* Remove existing planes if they are modified */
9977         for_each_oldnew_plane_in_state_reverse(state, plane, old_plane_state, new_plane_state, i) {
9978                 ret = dm_update_plane_state(dc, state, plane,
9979                                             old_plane_state,
9980                                             new_plane_state,
9981                                             false,
9982                                             &lock_and_validation_needed);
9983                 if (ret)
9984                         goto fail;
9985         }
9986
9987         /* Disable all crtcs which require disable */
9988         for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
9989                 ret = dm_update_crtc_state(&adev->dm, state, crtc,
9990                                            old_crtc_state,
9991                                            new_crtc_state,
9992                                            false,
9993                                            &lock_and_validation_needed);
9994                 if (ret)
9995                         goto fail;
9996         }
9997
9998         /* Enable all crtcs which require enable */
9999         for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
10000                 ret = dm_update_crtc_state(&adev->dm, state, crtc,
10001                                            old_crtc_state,
10002                                            new_crtc_state,
10003                                            true,
10004                                            &lock_and_validation_needed);
10005                 if (ret)
10006                         goto fail;
10007         }
10008
10009         /* Add new/modified planes */
10010         for_each_oldnew_plane_in_state_reverse(state, plane, old_plane_state, new_plane_state, i) {
10011                 ret = dm_update_plane_state(dc, state, plane,
10012                                             old_plane_state,
10013                                             new_plane_state,
10014                                             true,
10015                                             &lock_and_validation_needed);
10016                 if (ret)
10017                         goto fail;
10018         }
10019
10020         /* Run this here since we want to validate the streams we created */
10021         ret = drm_atomic_helper_check_planes(dev, state);
10022         if (ret)
10023                 goto fail;
10024
10025         /* Check cursor planes scaling */
10026         for_each_new_crtc_in_state(state, crtc, new_crtc_state, i) {
10027                 ret = dm_check_crtc_cursor(state, crtc, new_crtc_state);
10028                 if (ret)
10029                         goto fail;
10030         }
10031
10032         if (state->legacy_cursor_update) {
10033                 /*
10034                  * This is a fast cursor update coming from the plane update
10035                  * helper, check if it can be done asynchronously for better
10036                  * performance.
10037                  */
10038                 state->async_update =
10039                         !drm_atomic_helper_async_check(dev, state);
10040
10041                 /*
10042                  * Skip the remaining global validation if this is an async
10043                  * update. Cursor updates can be done without affecting
10044                  * state or bandwidth calcs and this avoids the performance
10045                  * penalty of locking the private state object and
10046                  * allocating a new dc_state.
10047                  */
10048                 if (state->async_update)
10049                         return 0;
10050         }
10051
10052         /* Check scaling and underscan changes */
10053         /* TODO Removed scaling changes validation due to inability to commit
10054          * new stream into context w/o causing full reset. Need to
10055          * decide how to handle.
10056          */
10057         for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) {
10058                 struct dm_connector_state *dm_old_con_state = to_dm_connector_state(old_con_state);
10059                 struct dm_connector_state *dm_new_con_state = to_dm_connector_state(new_con_state);
10060                 struct amdgpu_crtc *acrtc = to_amdgpu_crtc(dm_new_con_state->base.crtc);
10061
10062                 /* Skip any modesets/resets */
10063                 if (!acrtc || drm_atomic_crtc_needs_modeset(
10064                                 drm_atomic_get_new_crtc_state(state, &acrtc->base)))
10065                         continue;
10066
10067                 /* Skip anything that is not a scaling or underscan change */
10068                 if (!is_scaling_state_different(dm_new_con_state, dm_old_con_state))
10069                         continue;
10070
10071                 lock_and_validation_needed = true;
10072         }
10073
10074         /*
10075          * Streams and planes are reset when there are changes that affect
10076          * bandwidth. Anything that affects bandwidth needs to go through
10077          * DC global validation to ensure that the configuration can be applied
10078          * to hardware.
10079          *
10080          * We have to currently stall out here in atomic_check for outstanding
10081          * commits to finish in this case because our IRQ handlers reference
10082          * DRM state directly - we can end up disabling interrupts too early
10083          * if we don't.
10084          *
10085          * TODO: Remove this stall and drop DM state private objects.
10086          */
10087         if (lock_and_validation_needed) {
10088                 ret = dm_atomic_get_state(state, &dm_state);
10089                 if (ret)
10090                         goto fail;
10091
10092                 ret = do_aquire_global_lock(dev, state);
10093                 if (ret)
10094                         goto fail;
10095
10096 #if defined(CONFIG_DRM_AMD_DC_DCN)
10097                 if (!compute_mst_dsc_configs_for_state(state, dm_state->context)) {
                              ret = -EINVAL;
10098                         goto fail;
                      }
10099
10100                 ret = dm_update_mst_vcpi_slots_for_dsc(state, dm_state->context);
10101                 if (ret)
10102                         goto fail;
10103 #endif
10104
10105                 /*
10106                  * Perform validation of MST topology in the state:
10107                  * We need to perform MST atomic check before calling
10108                  * dc_validate_global_state(), or there is a chance
10109                  * to get stuck in an infinite loop and hang eventually.
10110                  */
10111                 ret = drm_dp_mst_atomic_check(state);
10112                 if (ret)
10113                         goto fail;
10114                 status = dc_validate_global_state(dc, dm_state->context, false);
10115                 if (status != DC_OK) {
10116                         DC_LOG_WARNING("DC global validation failure: %s (%d)",
10117                                        dc_status_to_str(status), status);
10118                         ret = -EINVAL;
10119                         goto fail;
10120                 }
10121         } else {
10122                 /*
10123                  * The commit is a fast update. Fast updates shouldn't change
10124                  * the DC context or affect global validation, and can have their
10125                  * commit work done in parallel with other commits not touching
10126                  * the same resource. If we have a new DC context as part of
10127                  * the DM atomic state from validation we need to free it and
10128                  * retain the existing one instead.
10129                  *
10130                  * Furthermore, since the DM atomic state only contains the DC
10131                  * context and can safely be annulled, we can free the state
10132                  * and clear the associated private object now to free
10133                  * some memory and avoid a possible use-after-free later.
10134                  */
10135
10136                 for (i = 0; i < state->num_private_objs; i++) {
10137                         struct drm_private_obj *obj = state->private_objs[i].ptr;
10138
10139                         if (obj->funcs == adev->dm.atomic_obj.funcs) {
10140                                 int j = state->num_private_objs - 1;
10141
10142                                 dm_atomic_destroy_state(obj,
10143                                                 state->private_objs[i].state);
10144
10145                                 /* If i is not at the end of the array then the
10146                                  * last element needs to be moved to where i was
10147                                  * before the array can safely be truncated.
10148                                  */
10149                                 if (i != j)
10150                                         state->private_objs[i] =
10151                                                 state->private_objs[j];
10152
10153                                 state->private_objs[j].ptr = NULL;
10154                                 state->private_objs[j].state = NULL;
10155                                 state->private_objs[j].old_state = NULL;
10156                                 state->private_objs[j].new_state = NULL;
10157
10158                                 state->num_private_objs = j;
10159                                 break;
10160                         }
10161                 }
10162         }
10163
10164         /* Store the overall update type for use later in atomic check. */
10165         for_each_new_crtc_in_state(state, crtc, new_crtc_state, i) {
10166                 struct dm_crtc_state *dm_new_crtc_state =
10167                         to_dm_crtc_state(new_crtc_state);
10168
10169                 dm_new_crtc_state->update_type = lock_and_validation_needed ?
10170                                                          UPDATE_TYPE_FULL :
10171                                                          UPDATE_TYPE_FAST;
10172         }
10173
10174         /* ret should be 0 (success) at this point */
10175         WARN_ON(ret);
10176
10177         trace_amdgpu_dm_atomic_check_finish(state, ret);
10178
10179         return ret;
10180
10181 fail:
10182         if (ret == -EDEADLK)
10183                 DRM_DEBUG_DRIVER("Atomic check stopped to avoid deadlock.\n");
10184         else if (ret == -EINTR || ret == -EAGAIN || ret == -ERESTARTSYS)
10185                 DRM_DEBUG_DRIVER("Atomic check stopped due to signal.\n");
10186         else
10187                 DRM_DEBUG_DRIVER("Atomic check failed with err: %d\n", ret);
10188
10189         trace_amdgpu_dm_atomic_check_finish(state, ret);
10190
10191         return ret;
10192 }
10193
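/*
 * is_dp_capable_without_timing_msa() - read DP_DOWN_STREAM_PORT_COUNT from
 * the sink's DPCD and report whether the sink can ignore the MSA timing
 * parameters, which gates the FreeSync EDID check for DP/eDP sinks.
 */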
10194 static bool is_dp_capable_without_timing_msa(struct dc *dc,
10195                                              struct amdgpu_dm_connector *amdgpu_dm_connector)
10196 {
10197         uint8_t dpcd_data;
10198         bool capable = false;
10199
10200         if (amdgpu_dm_connector->dc_link &&
10201                 dm_helpers_dp_read_dpcd(
10202                                 NULL,
10203                                 amdgpu_dm_connector->dc_link,
10204                                 DP_DOWN_STREAM_PORT_COUNT,
10205                                 &dpcd_data,
10206                                 sizeof(dpcd_data))) {
10207                 capable = (dpcd_data & DP_MSA_TIMING_PAR_IGNORED) ? true : false;
10208         }
10209
10210         return capable;
10211 }
10212
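/*
 * parse_edid_cea() - feed a CEA extension block to the DC EDID parser
 * eight bytes at a time, waiting for an ack after each chunk, and collect
 * the AMD VSDB FreeSync range once the whole block has been sent. Returns
 * true only if an AMD VSDB was found.
 */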
10213 static bool parse_edid_cea(struct amdgpu_dm_connector *aconnector,
10214                 uint8_t *edid_ext, int len,
10215                 struct amdgpu_hdmi_vsdb_info *vsdb_info)
10216 {
10217         int i;
10218         struct amdgpu_device *adev = drm_to_adev(aconnector->base.dev);
10219         struct dc *dc = adev->dm.dc;
10220
10221         /* send extension block to DMCU for parsing */
10222         for (i = 0; i < len; i += 8) {
10223                 bool res;
10224                 int offset;
10225
10226                 /* send 8 bytes at a time */
10227                 if (!dc_edid_parser_send_cea(dc, i, len, &edid_ext[i], 8))
10228                         return false;
10229
10230                 if (i + 8 == len) {
10231                         /* EDID block completely sent, expect the result */
10232                         int version, min_rate, max_rate;
10233
10234                         res = dc_edid_parser_recv_amd_vsdb(dc, &version, &min_rate, &max_rate);
10235                         if (res) {
10236                                 /* amd vsdb found */
10237                                 vsdb_info->freesync_supported = 1;
10238                                 vsdb_info->amd_vsdb_version = version;
10239                                 vsdb_info->min_refresh_rate_hz = min_rate;
10240                                 vsdb_info->max_refresh_rate_hz = max_rate;
10241                                 return true;
10242                         }
10243                         /* not amd vsdb */
10244                         return false;
10245                 }
10246
10247                 /* check for ack */
10248                 res = dc_edid_parser_recv_cea_ack(dc, &offset);
10249                 if (!res)
10250                         return false;
10251         }
10252
10253         return false;
10254 }
10255
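/*
 * parse_hdmi_amd_vsdb() - locate the CEA extension block in @edid and hand
 * it to parse_edid_cea(). Returns the extension index on success, or
 * -ENODEV if there is no CEA extension or no AMD VSDB.
 */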
10256 static int parse_hdmi_amd_vsdb(struct amdgpu_dm_connector *aconnector,
10257                 struct edid *edid, struct amdgpu_hdmi_vsdb_info *vsdb_info)
10258 {
10259         uint8_t *edid_ext = NULL;
10260         int i;
10261         bool valid_vsdb_found = false;
10262
10263         /*----- drm_find_cea_extension() -----*/
10264         /* No EDID or EDID extensions */
10265         if (edid == NULL || edid->extensions == 0)
10266                 return -ENODEV;
10267
10268         /* Find CEA extension */
10269         for (i = 0; i < edid->extensions; i++) {
10270                 edid_ext = (uint8_t *)edid + EDID_LENGTH * (i + 1);
10271                 if (edid_ext[0] == CEA_EXT)
10272                         break;
10273         }
10274
10275         if (i == edid->extensions)
10276                 return -ENODEV;
10277
10278         /*----- cea_db_offsets() -----*/
10279         if (edid_ext[0] != CEA_EXT)
10280                 return -ENODEV;
10281
10282         valid_vsdb_found = parse_edid_cea(aconnector, edid_ext, EDID_LENGTH, vsdb_info);
10283
10284         return valid_vsdb_found ? i : -ENODEV;
10285 }
10286
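/**
 * amdgpu_dm_update_freesync_caps - update a connector's FreeSync state
 * @connector: DRM connector to update
 * @edid: EDID of the attached sink, or NULL on disconnect
 *
 * Derive the supported refresh rate range from the EDID monitor range
 * descriptor (DP/eDP sinks) or from the AMD HDMI VSDB (HDMI sinks), mark
 * the connector FreeSync capable when that range spans more than 10 Hz,
 * and mirror the result into the vrr_capable property.
 */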
10287 void amdgpu_dm_update_freesync_caps(struct drm_connector *connector,
10288                                         struct edid *edid)
10289 {
10290         int i = 0;
10291         struct detailed_timing *timing;
10292         struct detailed_non_pixel *data;
10293         struct detailed_data_monitor_range *range;
10294         struct amdgpu_dm_connector *amdgpu_dm_connector =
10295                         to_amdgpu_dm_connector(connector);
10296         struct dm_connector_state *dm_con_state = NULL;
10297
10298         struct drm_device *dev = connector->dev;
10299         struct amdgpu_device *adev = drm_to_adev(dev);
10300         bool freesync_capable = false;
10301         struct amdgpu_hdmi_vsdb_info vsdb_info = {0};
10302
10303         if (!connector->state) {
10304                 DRM_ERROR("%s - Connector has no state\n", __func__);
10305                 goto update;
10306         }
10307
10308         if (!edid) {
10309                 dm_con_state = to_dm_connector_state(connector->state);
10310
10311                 amdgpu_dm_connector->min_vfreq = 0;
10312                 amdgpu_dm_connector->max_vfreq = 0;
10313                 amdgpu_dm_connector->pixel_clock_mhz = 0;
10314
10315                 goto update;
10316         }
10317
10318         dm_con_state = to_dm_connector_state(connector->state);
10319
10320         if (!amdgpu_dm_connector->dc_sink) {
10321                 DRM_ERROR("dc_sink NULL, could not add free_sync module.\n");
10322                 goto update;
10323         }
10324         if (!adev->dm.freesync_module)
10325                 goto update;
10326
10328         if (amdgpu_dm_connector->dc_sink->sink_signal == SIGNAL_TYPE_DISPLAY_PORT
10329                 || amdgpu_dm_connector->dc_sink->sink_signal == SIGNAL_TYPE_EDP) {
10330                 bool edid_check_required = false;
10331
10332                 if (edid) {
10333                         edid_check_required = is_dp_capable_without_timing_msa(
10334                                                 adev->dm.dc,
10335                                                 amdgpu_dm_connector);
10336                 }
10337
10338                 if (edid_check_required && (edid->version > 1 ||
10339                     (edid->version == 1 && edid->revision > 1))) {
10340                         for (i = 0; i < 4; i++) {
10341
10342                                 timing  = &edid->detailed_timings[i];
10343                                 data    = &timing->data.other_data;
10344                                 range   = &data->data.range;
10345                                 /*
10346                                  * Check if monitor has continuous frequency mode
10347                                  */
10348                                 if (data->type != EDID_DETAIL_MONITOR_RANGE)
10349                                         continue;
10350                                 /*
10351                                  * Check for flag range limits only. If flag == 1 then
10352                                  * no additional timing information is provided.
10353                                  * Default GTF, GTF secondary curve and CVT are not
10354                                  * supported.
10355                                  */
10356                                 if (range->flags != 1)
10357                                         continue;
10358
10359                                 amdgpu_dm_connector->min_vfreq = range->min_vfreq;
10360                                 amdgpu_dm_connector->max_vfreq = range->max_vfreq;
10361                                 amdgpu_dm_connector->pixel_clock_mhz =
10362                                         range->pixel_clock_mhz * 10;
10363
10364                                 connector->display_info.monitor_range.min_vfreq = range->min_vfreq;
10365                                 connector->display_info.monitor_range.max_vfreq = range->max_vfreq;
10366
10367                                 break;
10368                         }
10369
10370                         if (amdgpu_dm_connector->max_vfreq -
10371                             amdgpu_dm_connector->min_vfreq > 10) {
10372
10373                                 freesync_capable = true;
10374                         }
10375                 }
10376         } else if (edid && amdgpu_dm_connector->dc_sink->sink_signal == SIGNAL_TYPE_HDMI_TYPE_A) {
10377                 i = parse_hdmi_amd_vsdb(amdgpu_dm_connector, edid, &vsdb_info);
10378                 if (i >= 0 && vsdb_info.freesync_supported) {
10379                         timing  = &edid->detailed_timings[i];
10380                         data    = &timing->data.other_data;
10381
10382                         amdgpu_dm_connector->min_vfreq = vsdb_info.min_refresh_rate_hz;
10383                         amdgpu_dm_connector->max_vfreq = vsdb_info.max_refresh_rate_hz;
10384                         if (amdgpu_dm_connector->max_vfreq - amdgpu_dm_connector->min_vfreq > 10)
10385                                 freesync_capable = true;
10386
10387                         connector->display_info.monitor_range.min_vfreq = vsdb_info.min_refresh_rate_hz;
10388                         connector->display_info.monitor_range.max_vfreq = vsdb_info.max_refresh_rate_hz;
10389                 }
10390         }
10391
10392 update:
10393         if (dm_con_state)
10394                 dm_con_state->freesync_capable = freesync_capable;
10395
10396         if (connector->vrr_capable_property)
10397                 drm_connector_set_vrr_capable_property(connector,
10398                                                        freesync_capable);
10399 }
10400
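/*
 * amdgpu_dm_set_psr_caps() - read DP_PSR_SUPPORT from an eDP sink's DPCD
 * and record in the link whether PSR version 1 is supported.
 */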
10401 static void amdgpu_dm_set_psr_caps(struct dc_link *link)
10402 {
10403         uint8_t dpcd_data[EDP_PSR_RECEIVER_CAP_SIZE];
10404
10405         if (!(link->connector_signal & SIGNAL_TYPE_EDP))
10406                 return;
10407         if (link->type == dc_connection_none)
10408                 return;
10409         if (dm_helpers_dp_read_dpcd(NULL, link, DP_PSR_SUPPORT,
10410                                         dpcd_data, sizeof(dpcd_data))) {
10411                 link->dpcd_caps.psr_caps.psr_version = dpcd_data[0];
10412
10413                 if (dpcd_data[0] == 0) {
10414                         link->psr_settings.psr_version = DC_PSR_VERSION_UNSUPPORTED;
10415                         link->psr_settings.psr_feature_enabled = false;
10416                 } else {
10417                         link->psr_settings.psr_version = DC_PSR_VERSION_1;
10418                         link->psr_settings.psr_feature_enabled = true;
10419                 }
10420
10421                 DRM_INFO("PSR support:%d\n", link->psr_settings.psr_feature_enabled);
10422         }
10423 }
10424
10425 /*
10426  * amdgpu_dm_link_setup_psr() - configure psr link
10427  * @stream: stream state
10428  *
10429  * Return: true on success
10430  */
10431 static bool amdgpu_dm_link_setup_psr(struct dc_stream_state *stream)
10432 {
10433         struct dc_link *link = NULL;
10434         struct psr_config psr_config = {0};
10435         struct psr_context psr_context = {0};
10436         bool ret = false;
10437
10438         if (stream == NULL)
10439                 return false;
10440
10441         link = stream->link;
10442
10443         psr_config.psr_version = link->dpcd_caps.psr_caps.psr_version;
10444
10445         if (psr_config.psr_version > 0) {
10446                 psr_config.psr_exit_link_training_required = 0x1;
10447                 psr_config.psr_frame_capture_indication_req = 0;
10448                 psr_config.psr_rfb_setup_time = 0x37;
10449                 psr_config.psr_sdp_transmit_line_num_deadline = 0x20;
10450                 psr_config.allow_smu_optimizations = 0x0;
10451
10452                 ret = dc_link_setup_psr(link, stream, &psr_config, &psr_context);
10453
10454         }
10455         DRM_DEBUG_DRIVER("PSR link: %d\n", link->psr_settings.psr_feature_enabled);
10456
10457         return ret;
10458 }
10459
10460 /*
10461  * amdgpu_dm_psr_enable() - enable psr f/w
10462  * @stream: stream state
10463  *
10464  * Return: true on success
10465  */
10466 bool amdgpu_dm_psr_enable(struct dc_stream_state *stream)
10467 {
10468         struct dc_link *link = stream->link;
10469         unsigned int vsync_rate_hz = 0;
10470         struct dc_static_screen_params params = {0};
10471         /* Calculate number of static frames before generating interrupt to
10472          * enter PSR.
10473          * Initialize with a fail-safe of 2 static frames.
10474          */
10475         unsigned int num_frames_static = 2;
10476
10477         DRM_DEBUG_DRIVER("Enabling psr...\n");
10478
10479         vsync_rate_hz = div64_u64(div64_u64((
10480                         stream->timing.pix_clk_100hz * 100),
10481                         stream->timing.v_total),
10482                         stream->timing.h_total);
10483
10484         /*
10485          * Calculate the number of frames such that at least 30 ms of time
10486          * has passed, rounding up.
10487          */
10488         if (vsync_rate_hz != 0) {
10489                 unsigned int frame_time_microsec = 1000000 / vsync_rate_hz;
10490                 num_frames_static = (30000 / frame_time_microsec) + 1;
10491         }
10492
10493         params.triggers.cursor_update = true;
10494         params.triggers.overlay_update = true;
10495         params.triggers.surface_update = true;
10496         params.num_frames = num_frames_static;
10497
10498         dc_stream_set_static_screen_params(link->ctx->dc,
10499                                            &stream, 1,
10500                                            &params);
10501
10502         return dc_link_set_psr_allow_active(link, true, false, false);
10503 }
10504
10505 /*
10506  * amdgpu_dm_psr_disable() - disable psr f/w
10507  * @stream: stream state
10508  *
10509  * Return: true on success
10510  */
10511 static bool amdgpu_dm_psr_disable(struct dc_stream_state *stream)
10512 {
10513
10514         DRM_DEBUG_DRIVER("Disabling psr...\n");
10515
10516         return dc_link_set_psr_allow_active(stream->link, false, true, false);
10517 }
10518
10519 /*
10520  * amdgpu_dm_psr_disable_all() - disable psr f/w
10521  * if psr is enabled on any stream
10522  *
10523  * Return: true on success
10524  */
10525 static bool amdgpu_dm_psr_disable_all(struct amdgpu_display_manager *dm)
10526 {
10527         DRM_DEBUG_DRIVER("Disabling psr if psr is enabled on any stream\n");
10528         return dc_set_psr_allow_active(dm->dc, false);
10529 }
10530
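/*
 * amdgpu_dm_trigger_timing_sync() - apply the current force_timing_sync
 * setting to every stream in the DC state and retrigger per-frame CRTC
 * master synchronization, all under the DC lock.
 */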
10531 void amdgpu_dm_trigger_timing_sync(struct drm_device *dev)
10532 {
10533         struct amdgpu_device *adev = drm_to_adev(dev);
10534         struct dc *dc = adev->dm.dc;
10535         int i;
10536
10537         mutex_lock(&adev->dm.dc_lock);
10538         if (dc->current_state) {
10539                 for (i = 0; i < dc->current_state->stream_count; ++i)
10540                         dc->current_state->streams[i]
10541                                 ->triggered_crtc_reset.enabled =
10542                                 adev->dm.force_timing_sync;
10543
10544                 dm_enable_per_frame_crtc_master_sync(dc->current_state);
10545                 dc_trigger_sync(dc, dc->current_state);
10546         }
10547         mutex_unlock(&adev->dm.dc_lock);
10548 }
10549
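/*
 * dm_write_reg_func() / dm_read_reg_func() - register accessors used by
 * DC. Both forward to the CGS helpers and emit register trace events;
 * with DM_CHECK_ADDR_0 defined they also reject accesses to address 0,
 * and reads bail out while a DMUB register gather is in progress.
 */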
10550 void dm_write_reg_func(const struct dc_context *ctx, uint32_t address,
10551                        uint32_t value, const char *func_name)
10552 {
10553 #ifdef DM_CHECK_ADDR_0
10554         if (address == 0) {
10555                 DC_ERR("invalid register write; address = 0\n");
10556                 return;
10557         }
10558 #endif
10559         cgs_write_register(ctx->cgs_device, address, value);
10560         trace_amdgpu_dc_wreg(&ctx->perf_trace->write_count, address, value);
10561 }
10562
10563 uint32_t dm_read_reg_func(const struct dc_context *ctx, uint32_t address,
10564                           const char *func_name)
10565 {
10566         uint32_t value;
10567 #ifdef DM_CHECK_ADDR_0
10568         if (address == 0) {
10569                 DC_ERR("invalid register read; address = 0\n");
10570                 return 0;
10571         }
10572 #endif
10573
10574         if (ctx->dmub_srv &&
10575             ctx->dmub_srv->reg_helper_offload.gather_in_progress &&
10576             !ctx->dmub_srv->reg_helper_offload.should_burst_write) {
10577                 ASSERT(false);
10578                 return 0;
10579         }
10580
10581         value = cgs_read_register(ctx->cgs_device, address);
10582
10583         trace_amdgpu_dc_rreg(&ctx->perf_trace->read_count, address, value);
10584
10585         return value;
10586 }