drm/amd/display: Fix wrong format specifier in amdgpu_dm.c
1 /*
2  * Copyright 2015 Advanced Micro Devices, Inc.
3  *
4  * Permission is hereby granted, free of charge, to any person obtaining a
5  * copy of this software and associated documentation files (the "Software"),
6  * to deal in the Software without restriction, including without limitation
7  * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8  * and/or sell copies of the Software, and to permit persons to whom the
9  * Software is furnished to do so, subject to the following conditions:
10  *
11  * The above copyright notice and this permission notice shall be included in
12  * all copies or substantial portions of the Software.
13  *
14  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
17  * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18  * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19  * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20  * OTHER DEALINGS IN THE SOFTWARE.
21  *
22  * Authors: AMD
23  *
24  */
25
26 /* The caprices of the preprocessor require that this be declared right here */
27 #define CREATE_TRACE_POINTS
28
29 #include "dm_services_types.h"
30 #include "dc.h"
31 #include "dc_link_dp.h"
32 #include "dc/inc/core_types.h"
33 #include "dal_asic_id.h"
34 #include "dmub/dmub_srv.h"
35 #include "dc/inc/hw/dmcu.h"
36 #include "dc/inc/hw/abm.h"
37 #include "dc/dc_dmub_srv.h"
38 #include "dc/dc_edid_parser.h"
39 #include "dc/dc_stat.h"
40 #include "amdgpu_dm_trace.h"
41
42 #include "vid.h"
43 #include "amdgpu.h"
44 #include "amdgpu_display.h"
45 #include "amdgpu_ucode.h"
46 #include "atom.h"
47 #include "amdgpu_dm.h"
48 #ifdef CONFIG_DRM_AMD_DC_HDCP
49 #include "amdgpu_dm_hdcp.h"
50 #include <drm/drm_hdcp.h>
51 #endif
52 #include "amdgpu_pm.h"
53
54 #include "amd_shared.h"
55 #include "amdgpu_dm_irq.h"
56 #include "dm_helpers.h"
57 #include "amdgpu_dm_mst_types.h"
58 #if defined(CONFIG_DEBUG_FS)
59 #include "amdgpu_dm_debugfs.h"
60 #endif
61 #include "amdgpu_dm_psr.h"
62
63 #include "ivsrcid/ivsrcid_vislands30.h"
64
65 #include "i2caux_interface.h"
66 #include <linux/module.h>
67 #include <linux/moduleparam.h>
68 #include <linux/types.h>
69 #include <linux/pm_runtime.h>
70 #include <linux/pci.h>
71 #include <linux/firmware.h>
72 #include <linux/component.h>
73 #include <linux/dmi.h>
74
75 #include <drm/drm_atomic.h>
76 #include <drm/drm_atomic_uapi.h>
77 #include <drm/drm_atomic_helper.h>
78 #include <drm/drm_dp_mst_helper.h>
79 #include <drm/drm_fb_helper.h>
80 #include <drm/drm_fourcc.h>
81 #include <drm/drm_edid.h>
82 #include <drm/drm_vblank.h>
83 #include <drm/drm_audio_component.h>
84
85 #if defined(CONFIG_DRM_AMD_DC_DCN)
86 #include "ivsrcid/dcn/irqsrcs_dcn_1_0.h"
87
88 #include "dcn/dcn_1_0_offset.h"
89 #include "dcn/dcn_1_0_sh_mask.h"
90 #include "soc15_hw_ip.h"
91 #include "vega10_ip_offset.h"
92
93 #include "soc15_common.h"
94 #endif
95
96 #include "modules/inc/mod_freesync.h"
97 #include "modules/power/power_helpers.h"
98 #include "modules/inc/mod_info_packet.h"
99
100 #define FIRMWARE_RENOIR_DMUB "amdgpu/renoir_dmcub.bin"
101 MODULE_FIRMWARE(FIRMWARE_RENOIR_DMUB);
102 #define FIRMWARE_SIENNA_CICHLID_DMUB "amdgpu/sienna_cichlid_dmcub.bin"
103 MODULE_FIRMWARE(FIRMWARE_SIENNA_CICHLID_DMUB);
104 #define FIRMWARE_NAVY_FLOUNDER_DMUB "amdgpu/navy_flounder_dmcub.bin"
105 MODULE_FIRMWARE(FIRMWARE_NAVY_FLOUNDER_DMUB);
106 #define FIRMWARE_GREEN_SARDINE_DMUB "amdgpu/green_sardine_dmcub.bin"
107 MODULE_FIRMWARE(FIRMWARE_GREEN_SARDINE_DMUB);
108 #define FIRMWARE_VANGOGH_DMUB "amdgpu/vangogh_dmcub.bin"
109 MODULE_FIRMWARE(FIRMWARE_VANGOGH_DMUB);
110 #define FIRMWARE_DIMGREY_CAVEFISH_DMUB "amdgpu/dimgrey_cavefish_dmcub.bin"
111 MODULE_FIRMWARE(FIRMWARE_DIMGREY_CAVEFISH_DMUB);
112 #define FIRMWARE_BEIGE_GOBY_DMUB "amdgpu/beige_goby_dmcub.bin"
113 MODULE_FIRMWARE(FIRMWARE_BEIGE_GOBY_DMUB);
114 #define FIRMWARE_YELLOW_CARP_DMUB "amdgpu/yellow_carp_dmcub.bin"
115 MODULE_FIRMWARE(FIRMWARE_YELLOW_CARP_DMUB);
116
117 #define FIRMWARE_RAVEN_DMCU             "amdgpu/raven_dmcu.bin"
118 MODULE_FIRMWARE(FIRMWARE_RAVEN_DMCU);
119
120 #define FIRMWARE_NAVI12_DMCU            "amdgpu/navi12_dmcu.bin"
121 MODULE_FIRMWARE(FIRMWARE_NAVI12_DMCU);
122
123 /* Number of bytes in PSP header for firmware. */
124 #define PSP_HEADER_BYTES 0x100
125
126 /* Number of bytes in PSP footer for firmware. */
127 #define PSP_FOOTER_BYTES 0x100
128
129 /**
130  * DOC: overview
131  *
132  * The AMDgpu display manager, **amdgpu_dm** (or even simpler,
133  * **dm**) sits between DRM and DC. It acts as a liaison, converting DRM
134  * requests into DC requests, and DC responses into DRM responses.
135  *
136  * The root control structure is &struct amdgpu_display_manager.
137  */
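/*
 * Editor's sketch of the overall flow this file implements; the exact DC
 * entry points invoked from the commit path vary by version, so treat this
 * as illustrative only:
 *
 *   userspace atomic ioctl
 *     -> DRM atomic helpers
 *          -> amdgpu_dm_atomic_check()        validate the request via DC
 *          -> amdgpu_dm_atomic_commit_tail()  program the hardware via DC
 */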
138
139 /* basic init/fini API */
140 static int amdgpu_dm_init(struct amdgpu_device *adev);
141 static void amdgpu_dm_fini(struct amdgpu_device *adev);
142 static bool is_freesync_video_mode(const struct drm_display_mode *mode, struct amdgpu_dm_connector *aconnector);
143
144 static enum drm_mode_subconnector get_subconnector_type(struct dc_link *link)
145 {
146         switch (link->dpcd_caps.dongle_type) {
147         case DISPLAY_DONGLE_NONE:
148                 return DRM_MODE_SUBCONNECTOR_Native;
149         case DISPLAY_DONGLE_DP_VGA_CONVERTER:
150                 return DRM_MODE_SUBCONNECTOR_VGA;
151         case DISPLAY_DONGLE_DP_DVI_CONVERTER:
152         case DISPLAY_DONGLE_DP_DVI_DONGLE:
153                 return DRM_MODE_SUBCONNECTOR_DVID;
154         case DISPLAY_DONGLE_DP_HDMI_CONVERTER:
155         case DISPLAY_DONGLE_DP_HDMI_DONGLE:
156                 return DRM_MODE_SUBCONNECTOR_HDMIA;
157         case DISPLAY_DONGLE_DP_HDMI_MISMATCHED_DONGLE:
158         default:
159                 return DRM_MODE_SUBCONNECTOR_Unknown;
160         }
161 }
162
163 static void update_subconnector_property(struct amdgpu_dm_connector *aconnector)
164 {
165         struct dc_link *link = aconnector->dc_link;
166         struct drm_connector *connector = &aconnector->base;
167         enum drm_mode_subconnector subconnector = DRM_MODE_SUBCONNECTOR_Unknown;
168
169         if (connector->connector_type != DRM_MODE_CONNECTOR_DisplayPort)
170                 return;
171
172         if (aconnector->dc_sink)
173                 subconnector = get_subconnector_type(link);
174
175         drm_object_property_set_value(&connector->base,
176                         connector->dev->mode_config.dp_subconnector_property,
177                         subconnector);
178 }
179
180 /*
181  * initializes drm_device display related structures, based on the information
182  * provided by DAL. The drm structures are: drm_crtc, drm_connector,
183  * drm_encoder, drm_mode_config
184  *
185  * Returns 0 on success
186  */
187 static int amdgpu_dm_initialize_drm_device(struct amdgpu_device *adev);
188 /* Removes and deallocates the drm structures created by the above function. */
189 static void amdgpu_dm_destroy_drm_device(struct amdgpu_display_manager *dm);
190
191 static int amdgpu_dm_plane_init(struct amdgpu_display_manager *dm,
192                                 struct drm_plane *plane,
193                                 unsigned long possible_crtcs,
194                                 const struct dc_plane_cap *plane_cap);
195 static int amdgpu_dm_crtc_init(struct amdgpu_display_manager *dm,
196                                struct drm_plane *plane,
197                                uint32_t link_index);
198 static int amdgpu_dm_connector_init(struct amdgpu_display_manager *dm,
199                                     struct amdgpu_dm_connector *amdgpu_dm_connector,
200                                     uint32_t link_index,
201                                     struct amdgpu_encoder *amdgpu_encoder);
202 static int amdgpu_dm_encoder_init(struct drm_device *dev,
203                                   struct amdgpu_encoder *aencoder,
204                                   uint32_t link_index);
205
206 static int amdgpu_dm_connector_get_modes(struct drm_connector *connector);
207
208 static void amdgpu_dm_atomic_commit_tail(struct drm_atomic_state *state);
209
210 static int amdgpu_dm_atomic_check(struct drm_device *dev,
211                                   struct drm_atomic_state *state);
212
213 static void handle_cursor_update(struct drm_plane *plane,
214                                  struct drm_plane_state *old_plane_state);
215
216 static const struct drm_format_info *
217 amd_get_format_info(const struct drm_mode_fb_cmd2 *cmd);
218
219 static void handle_hpd_irq_helper(struct amdgpu_dm_connector *aconnector);
220
221 static bool
222 is_timing_unchanged_for_freesync(struct drm_crtc_state *old_crtc_state,
223                                  struct drm_crtc_state *new_crtc_state);
224 /*
225  * dm_vblank_get_counter
226  *
227  * @brief
228  * Get counter for number of vertical blanks
229  *
230  * @param
231  * struct amdgpu_device *adev - [in] desired amdgpu device
232  * int disp_idx - [in] which CRTC to get the counter from
233  *
234  * @return
235  * Counter for vertical blanks
236  */
237 static u32 dm_vblank_get_counter(struct amdgpu_device *adev, int crtc)
238 {
239         if (crtc >= adev->mode_info.num_crtc)
240                 return 0;
241         else {
242                 struct amdgpu_crtc *acrtc = adev->mode_info.crtcs[crtc];
243
244                 if (acrtc->dm_irq_params.stream == NULL) {
245                         DRM_ERROR("dc_stream_state is NULL for crtc '%d'!\n",
246                                   crtc);
247                         return 0;
248                 }
249
250                 return dc_stream_get_vblank_counter(acrtc->dm_irq_params.stream);
251         }
252 }
253
254 static int dm_crtc_get_scanoutpos(struct amdgpu_device *adev, int crtc,
255                                   u32 *vbl, u32 *position)
256 {
257         uint32_t v_blank_start, v_blank_end, h_position, v_position;
258
259         if ((crtc < 0) || (crtc >= adev->mode_info.num_crtc))
260                 return -EINVAL;
261         else {
262                 struct amdgpu_crtc *acrtc = adev->mode_info.crtcs[crtc];
263
264                 if (acrtc->dm_irq_params.stream == NULL) {
265                         DRM_ERROR("dc_stream_state is NULL for crtc '%d'!\n",
266                                   crtc);
267                         return 0;
268                 }
269
270                 /*
271                  * TODO rework base driver to use values directly.
272                  * for now parse it back into reg-format
273                  */
274                 dc_stream_get_scanoutpos(acrtc->dm_irq_params.stream,
275                                          &v_blank_start,
276                                          &v_blank_end,
277                                          &h_position,
278                                          &v_position);
279
280                 *position = v_position | (h_position << 16);
281                 *vbl = v_blank_start | (v_blank_end << 16);
282         }
283
284         return 0;
285 }
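/*
 * Sketch (editor's illustration) of how a caller can unpack the two packed
 * values produced above; the variable names are hypothetical:
 *
 *	u32 vbl, position;
 *
 *	if (!dm_crtc_get_scanoutpos(adev, crtc, &vbl, &position)) {
 *		u32 v_position    = position & 0xffff;  // low 16 bits
 *		u32 h_position    = position >> 16;     // high 16 bits
 *		u32 v_blank_start = vbl & 0xffff;
 *		u32 v_blank_end   = vbl >> 16;
 *	}
 */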
286
287 static bool dm_is_idle(void *handle)
288 {
289         /* XXX todo */
290         return true;
291 }
292
293 static int dm_wait_for_idle(void *handle)
294 {
295         /* XXX todo */
296         return 0;
297 }
298
299 static bool dm_check_soft_reset(void *handle)
300 {
301         return false;
302 }
303
304 static int dm_soft_reset(void *handle)
305 {
306         /* XXX todo */
307         return 0;
308 }
309
310 static struct amdgpu_crtc *
311 get_crtc_by_otg_inst(struct amdgpu_device *adev,
312                      int otg_inst)
313 {
314         struct drm_device *dev = adev_to_drm(adev);
315         struct drm_crtc *crtc;
316         struct amdgpu_crtc *amdgpu_crtc;
317
318         if (WARN_ON(otg_inst == -1))
319                 return adev->mode_info.crtcs[0];
320
321         list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
322                 amdgpu_crtc = to_amdgpu_crtc(crtc);
323
324                 if (amdgpu_crtc->otg_inst == otg_inst)
325                         return amdgpu_crtc;
326         }
327
328         return NULL;
329 }
330
331 static inline bool amdgpu_dm_vrr_active_irq(struct amdgpu_crtc *acrtc)
332 {
333         return acrtc->dm_irq_params.freesync_config.state ==
334                        VRR_STATE_ACTIVE_VARIABLE ||
335                acrtc->dm_irq_params.freesync_config.state ==
336                        VRR_STATE_ACTIVE_FIXED;
337 }
338
339 static inline bool amdgpu_dm_vrr_active(struct dm_crtc_state *dm_state)
340 {
341         return dm_state->freesync_config.state == VRR_STATE_ACTIVE_VARIABLE ||
342                dm_state->freesync_config.state == VRR_STATE_ACTIVE_FIXED;
343 }
344
345 static inline bool is_dc_timing_adjust_needed(struct dm_crtc_state *old_state,
346                                               struct dm_crtc_state *new_state)
347 {
348         if (new_state->freesync_config.state ==  VRR_STATE_ACTIVE_FIXED)
349                 return true;
350         else if (amdgpu_dm_vrr_active(old_state) != amdgpu_dm_vrr_active(new_state))
351                 return true;
352         else
353                 return false;
354 }
355
356 /**
357  * dm_pflip_high_irq() - Handle pageflip interrupt
358  * @interrupt_params: ignored
359  *
360  * Handles the pageflip interrupt by notifying all interested parties
361  * that the pageflip has been completed.
362  */
363 static void dm_pflip_high_irq(void *interrupt_params)
364 {
365         struct amdgpu_crtc *amdgpu_crtc;
366         struct common_irq_params *irq_params = interrupt_params;
367         struct amdgpu_device *adev = irq_params->adev;
368         unsigned long flags;
369         struct drm_pending_vblank_event *e;
370         uint32_t vpos, hpos, v_blank_start, v_blank_end;
371         bool vrr_active;
372
373         amdgpu_crtc = get_crtc_by_otg_inst(adev, irq_params->irq_src - IRQ_TYPE_PFLIP);
374
375         /* IRQ could occur when in initial stage */
376         /* TODO work and BO cleanup */
377         if (amdgpu_crtc == NULL) {
378                 DC_LOG_PFLIP("CRTC is null, returning.\n");
379                 return;
380         }
381
382         spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);
383
384         if (amdgpu_crtc->pflip_status != AMDGPU_FLIP_SUBMITTED) {
385                 DC_LOG_PFLIP("amdgpu_crtc->pflip_status = %d != AMDGPU_FLIP_SUBMITTED(%d) on crtc:%d[%p]\n",
386                                                  amdgpu_crtc->pflip_status,
387                                                  AMDGPU_FLIP_SUBMITTED,
388                                                  amdgpu_crtc->crtc_id,
389                                                  amdgpu_crtc);
390                 spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
391                 return;
392         }
393
394         /* page flip completed. */
395         e = amdgpu_crtc->event;
396         amdgpu_crtc->event = NULL;
397
398         WARN_ON(!e);
399
400         vrr_active = amdgpu_dm_vrr_active_irq(amdgpu_crtc);
401
402         /* Fixed refresh rate, or VRR scanout position outside front-porch? */
403         if (!vrr_active ||
404             !dc_stream_get_scanoutpos(amdgpu_crtc->dm_irq_params.stream, &v_blank_start,
405                                       &v_blank_end, &hpos, &vpos) ||
406             (vpos < v_blank_start)) {
407                 /* Update to correct count and vblank timestamp if racing with
408                  * vblank irq. This also updates to the correct vblank timestamp
409                  * even in VRR mode, as scanout is past the front-porch atm.
410                  */
411                 drm_crtc_accurate_vblank_count(&amdgpu_crtc->base);
412
413                 /* Wake up userspace by sending the pageflip event with proper
414                  * count and timestamp of vblank of flip completion.
415                  */
416                 if (e) {
417                         drm_crtc_send_vblank_event(&amdgpu_crtc->base, e);
418
419                         /* Event sent, so done with vblank for this flip */
420                         drm_crtc_vblank_put(&amdgpu_crtc->base);
421                 }
422         } else if (e) {
423                 /* VRR active and inside front-porch: vblank count and
424                  * timestamp for pageflip event will only be up to date after
425                  * drm_crtc_handle_vblank() has been executed from late vblank
426                  * irq handler after start of back-porch (vline 0). We queue the
427                  * pageflip event for send-out by drm_crtc_handle_vblank() with
428                  * updated timestamp and count, once it runs after us.
429                  *
430                  * We need to open-code this instead of using the helper
431                  * drm_crtc_arm_vblank_event(), as that helper would
432                  * call drm_crtc_accurate_vblank_count(), which we must
433                  * not call in VRR mode while we are in front-porch!
434                  */
435
436                 /* sequence will be replaced by real count during send-out. */
437                 e->sequence = drm_crtc_vblank_count(&amdgpu_crtc->base);
438                 e->pipe = amdgpu_crtc->crtc_id;
439
440                 list_add_tail(&e->base.link, &adev_to_drm(adev)->vblank_event_list);
441                 e = NULL;
442         }
443
444         /* Keep track of vblank of this flip for flip throttling. We use the
445          * cooked hw counter, as that one is incremented at the start of this
446          * vblank of pageflip completion, so last_flip_vblank is the forbidden count
447          * for queueing new pageflips if vsync + VRR is enabled.
448          */
449         amdgpu_crtc->dm_irq_params.last_flip_vblank =
450                 amdgpu_get_vblank_counter_kms(&amdgpu_crtc->base);
451
452         amdgpu_crtc->pflip_status = AMDGPU_FLIP_NONE;
453         spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
454
455         DC_LOG_PFLIP("crtc:%d[%p], pflip_stat:AMDGPU_FLIP_NONE, vrr[%d]-fp %d\n",
456                      amdgpu_crtc->crtc_id, amdgpu_crtc,
457                      vrr_active, (int) !e);
458 }
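/*
 * Editor's summary of the completion paths above:
 *
 *   not vrr_active, or the scanout query failed, or vpos < v_blank_start
 *   (scanout already past the front-porch): send the event immediately.
 *
 *   vrr_active and still inside the front-porch: queue the event; it is
 *   sent later from drm_crtc_handle_vblank() with an up-to-date count
 *   and timestamp.
 */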
459
460 static void dm_vupdate_high_irq(void *interrupt_params)
461 {
462         struct common_irq_params *irq_params = interrupt_params;
463         struct amdgpu_device *adev = irq_params->adev;
464         struct amdgpu_crtc *acrtc;
465         struct drm_device *drm_dev;
466         struct drm_vblank_crtc *vblank;
467         ktime_t frame_duration_ns, previous_timestamp;
468         unsigned long flags;
469         int vrr_active;
470
471         acrtc = get_crtc_by_otg_inst(adev, irq_params->irq_src - IRQ_TYPE_VUPDATE);
472
473         if (acrtc) {
474                 vrr_active = amdgpu_dm_vrr_active_irq(acrtc);
475                 drm_dev = acrtc->base.dev;
476                 vblank = &drm_dev->vblank[acrtc->base.index];
477                 previous_timestamp = atomic64_read(&irq_params->previous_timestamp);
478                 frame_duration_ns = vblank->time - previous_timestamp;
479
480                 if (frame_duration_ns > 0) {
481                         trace_amdgpu_refresh_rate_track(acrtc->base.index,
482                                                 frame_duration_ns,
483                                                 ktime_divns(NSEC_PER_SEC, frame_duration_ns));
484                         atomic64_set(&irq_params->previous_timestamp, vblank->time);
485                 }
486
487                 DC_LOG_VBLANK("crtc:%d, vupdate-vrr:%d\n",
488                               acrtc->crtc_id,
489                               vrr_active);
490
491                 /* Core vblank handling is done here after the end of front-porch in
492                  * vrr mode, as vblank timestamping gives valid results only while
493                  * scanout is past the front-porch. This will also deliver
494                  * page-flip completion events that have been queued to us
495                  * if a pageflip happened inside front-porch.
496                  */
497                 if (vrr_active) {
498                         drm_crtc_handle_vblank(&acrtc->base);
499
500                         /* BTR processing for pre-DCE12 ASICs */
501                         if (acrtc->dm_irq_params.stream &&
502                             adev->family < AMDGPU_FAMILY_AI) {
503                                 spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);
504                                 mod_freesync_handle_v_update(
505                                     adev->dm.freesync_module,
506                                     acrtc->dm_irq_params.stream,
507                                     &acrtc->dm_irq_params.vrr_params);
508
509                                 dc_stream_adjust_vmin_vmax(
510                                     adev->dm.dc,
511                                     acrtc->dm_irq_params.stream,
512                                     &acrtc->dm_irq_params.vrr_params.adjust);
513                                 spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
514                         }
515                 }
516         }
517 }
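/*
 * Worked example for the tracepoint above (editor's note): a measured
 * frame_duration_ns of 8,333,333 gives
 * ktime_divns(NSEC_PER_SEC, 8333333) = 120, i.e. a 120 Hz refresh rate.
 */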
518
519 /**
520  * dm_crtc_high_irq() - Handles CRTC interrupt
521  * @interrupt_params: used for determining the CRTC instance
522  *
523  * Handles the CRTC/VSYNC interrupt by notifying DRM's VBLANK
524  * event handler.
525  */
526 static void dm_crtc_high_irq(void *interrupt_params)
527 {
528         struct common_irq_params *irq_params = interrupt_params;
529         struct amdgpu_device *adev = irq_params->adev;
530         struct amdgpu_crtc *acrtc;
531         unsigned long flags;
532         int vrr_active;
533
534         acrtc = get_crtc_by_otg_inst(adev, irq_params->irq_src - IRQ_TYPE_VBLANK);
535         if (!acrtc)
536                 return;
537
538         vrr_active = amdgpu_dm_vrr_active_irq(acrtc);
539
540         DC_LOG_VBLANK("crtc:%d, vupdate-vrr:%d, planes:%d\n", acrtc->crtc_id,
541                       vrr_active, acrtc->dm_irq_params.active_planes);
542
543         /*
544          * Core vblank handling at start of front-porch is only possible
545          * in non-vrr mode, as only then does vblank timestamping give
546          * valid results while scanout is in the front-porch. Otherwise
547          * defer it to dm_vupdate_high_irq after end of front-porch.
548          */
549         if (!vrr_active)
550                 drm_crtc_handle_vblank(&acrtc->base);
551
552         /*
553          * Following stuff must happen at start of vblank, for crc
554          * computation and below-the-range btr support in vrr mode.
555          */
556         amdgpu_dm_crtc_handle_crc_irq(&acrtc->base);
557
558         /* BTR updates need to happen before VUPDATE on Vega and above. */
559         if (adev->family < AMDGPU_FAMILY_AI)
560                 return;
561
562         spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);
563
564         if (acrtc->dm_irq_params.stream &&
565             acrtc->dm_irq_params.vrr_params.supported &&
566             acrtc->dm_irq_params.freesync_config.state ==
567                     VRR_STATE_ACTIVE_VARIABLE) {
568                 mod_freesync_handle_v_update(adev->dm.freesync_module,
569                                              acrtc->dm_irq_params.stream,
570                                              &acrtc->dm_irq_params.vrr_params);
571
572                 dc_stream_adjust_vmin_vmax(adev->dm.dc, acrtc->dm_irq_params.stream,
573                                            &acrtc->dm_irq_params.vrr_params.adjust);
574         }
575
576         /*
577          * If there aren't any active_planes then DCH HUBP may be clock-gated.
578          * In that case, pageflip completion interrupts won't fire and pageflip
579          * completion events won't get delivered. Prevent this by sending
580          * pending pageflip events from here if a flip is still pending.
581          *
582          * If any planes are enabled, use dm_pflip_high_irq() instead, to
583          * avoid race conditions between flip programming and completion,
584          * which could cause too early flip completion events.
585          */
586         if (adev->family >= AMDGPU_FAMILY_RV &&
587             acrtc->pflip_status == AMDGPU_FLIP_SUBMITTED &&
588             acrtc->dm_irq_params.active_planes == 0) {
589                 if (acrtc->event) {
590                         drm_crtc_send_vblank_event(&acrtc->base, acrtc->event);
591                         acrtc->event = NULL;
592                         drm_crtc_vblank_put(&acrtc->base);
593                 }
594                 acrtc->pflip_status = AMDGPU_FLIP_NONE;
595         }
596
597         spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
598 }
599
600 #if defined(CONFIG_DRM_AMD_DC_DCN)
601 #if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
602 /**
603  * dm_dcn_vertical_interrupt0_high_irq() - Handles OTG Vertical interrupt0 for
604  * DCN generation ASICs
605  * @interrupt_params: interrupt parameters
606  *
607  * Used to set crc window/read out crc value at vertical line 0 position
608  */
609 static void dm_dcn_vertical_interrupt0_high_irq(void *interrupt_params)
610 {
611         struct common_irq_params *irq_params = interrupt_params;
612         struct amdgpu_device *adev = irq_params->adev;
613         struct amdgpu_crtc *acrtc;
614
615         acrtc = get_crtc_by_otg_inst(adev, irq_params->irq_src - IRQ_TYPE_VLINE0);
616
617         if (!acrtc)
618                 return;
619
620         amdgpu_dm_crtc_handle_crc_window_irq(&acrtc->base);
621 }
622 #endif
623
624 /**
625  * dmub_aux_setconfig_callback - Callback for AUX or SET_CONFIG command.
626  * @adev: amdgpu_device pointer
627  * @notify: dmub notification structure
628  *
629  * Dmub AUX or SET_CONFIG command completion processing callback.
630  * Copies the dmub notification to DM, to be read by the AUX command
631  * issuing thread, and signals the event to wake up that thread.
632  */
633 void dmub_aux_setconfig_callback(struct amdgpu_device *adev, struct dmub_notification *notify)
634 {
635         if (adev->dm.dmub_notify)
636                 memcpy(adev->dm.dmub_notify, notify, sizeof(struct dmub_notification));
637         if (notify->type == DMUB_NOTIFICATION_AUX_REPLY)
638                 complete(&adev->dm.dmub_aux_transfer_done);
639 }
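/*
 * Editor's sketch of the waiting side of this handshake: the AUX issuing
 * thread blocks on dmub_aux_transfer_done until the handler above calls
 * complete(). The exact caller and timeout below are assumptions.
 *
 *	if (!wait_for_completion_timeout(&adev->dm.dmub_aux_transfer_done,
 *					 msecs_to_jiffies(10000)))
 *		return -ETIMEDOUT;
 *	// the reply payload is now available in adev->dm.dmub_notify
 */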
640
641 /**
642  * dmub_hpd_callback - DMUB HPD interrupt processing callback.
643  * @adev: amdgpu_device pointer
644  * @notify: dmub notification structure
645  *
646  * Dmub Hpd interrupt processing callback. Gets the display index through the
647  * link index and calls a helper to do the processing.
648  */
649 void dmub_hpd_callback(struct amdgpu_device *adev, struct dmub_notification *notify)
650 {
651         struct amdgpu_dm_connector *aconnector;
652         struct drm_connector *connector;
653         struct drm_connector_list_iter iter;
654         struct dc_link *link;
655         uint8_t link_index = 0;
656         struct drm_device *dev;
657
658         if (adev == NULL)
659                 return;
660
661         if (notify == NULL) {
662                 DRM_ERROR("DMUB HPD callback notification was NULL");
663                 return;
664         }
665
666         if (notify->link_index >= adev->dm.dc->link_count) {
667                 DRM_ERROR("DMUB HPD index (%u) is abnormal", notify->link_index);
668                 return;
669         }
670
671         link_index = notify->link_index;
672         link = adev->dm.dc->links[link_index];
673         dev = adev->dm.ddev;
674
675         drm_connector_list_iter_begin(dev, &iter);
676         drm_for_each_connector_iter(connector, &iter) {
677                 aconnector = to_amdgpu_dm_connector(connector);
678                 if (link && aconnector->dc_link == link) {
679                         DRM_INFO("DMUB HPD callback: link_index=%u\n", link_index);
680                         handle_hpd_irq_helper(aconnector);
681                         break;
682                 }
683         }
684         drm_connector_list_iter_end(&iter);
685
686 }
687
688 /**
689  * register_dmub_notify_callback - Sets callback for DMUB notify
690  * @adev: amdgpu_device pointer
691  * @type: Type of dmub notification
692  * @callback: Dmub interrupt callback function
693  * @dmub_int_thread_offload: offload indicator
694  *
695  * API to register a dmub callback handler for a dmub notification.
696  * Also sets an indicator of whether the callback is to be offloaded
697  * to the dmub interrupt handling thread.
698  * Return: true if registered, false if the callback is NULL or the type is out of range
699  */
700 bool register_dmub_notify_callback(struct amdgpu_device *adev, enum dmub_notification_type type,
701         dmub_notify_interrupt_callback_t callback, bool dmub_int_thread_offload)
702 {
703         if (callback != NULL && type < ARRAY_SIZE(adev->dm.dmub_thread_offload)) {
704                 adev->dm.dmub_callback[type] = callback;
705                 adev->dm.dmub_thread_offload[type] = dmub_int_thread_offload;
706         } else
707                 return false;
708
709         return true;
710 }
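/*
 * Example registration (editor's sketch): driver init code is expected to
 * wire up the handlers defined above roughly like this; the exact call
 * site is an assumption.
 *
 *	if (!register_dmub_notify_callback(adev, DMUB_NOTIFICATION_AUX_REPLY,
 *					   dmub_aux_setconfig_callback, false))
 *		DRM_ERROR("amdgpu: fail to register dmub aux callback");
 *	if (!register_dmub_notify_callback(adev, DMUB_NOTIFICATION_HPD,
 *					   dmub_hpd_callback, true))
 *		DRM_ERROR("amdgpu: fail to register dmub hpd callback");
 */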
711
712 static void dm_handle_hpd_work(struct work_struct *work)
713 {
714         struct dmub_hpd_work *dmub_hpd_wrk;
715
716         dmub_hpd_wrk = container_of(work, struct dmub_hpd_work, handle_hpd_work);
717
718         if (!dmub_hpd_wrk->dmub_notify) {
719                 DRM_ERROR("dmub_hpd_wrk dmub_notify is NULL");
720                 return;
721         }
722
723         if (dmub_hpd_wrk->dmub_notify->type < ARRAY_SIZE(dmub_hpd_wrk->adev->dm.dmub_callback)) {
724                 dmub_hpd_wrk->adev->dm.dmub_callback[dmub_hpd_wrk->dmub_notify->type](dmub_hpd_wrk->adev,
725                 dmub_hpd_wrk->dmub_notify);
726         }
727         kfree(dmub_hpd_wrk->dmub_notify);
728         kfree(dmub_hpd_wrk);
729 }
730
731 #define DMUB_TRACE_MAX_READ 64
732 /**
733  * dm_dmub_outbox1_low_irq() - Handles Outbox interrupt
734  * @interrupt_params: used for determining the Outbox instance
735  *
736  * Handles the Outbox interrupt by draining pending DMUB notifications
737  * and trace-buffer entries.
738  */
739 static void dm_dmub_outbox1_low_irq(void *interrupt_params)
740 {
741         struct dmub_notification notify;
742         struct common_irq_params *irq_params = interrupt_params;
743         struct amdgpu_device *adev = irq_params->adev;
744         struct amdgpu_display_manager *dm = &adev->dm;
745         struct dmcub_trace_buf_entry entry = { 0 };
746         uint32_t count = 0;
747         struct dmub_hpd_work *dmub_hpd_wrk;
748
        if (dc_enable_dmub_notifications(adev->dm.dc)) {
                if (irq_params->irq_src == DC_IRQ_SOURCE_DMCUB_OUTBOX) {
                        do {
                                dc_stat_get_dmub_notification(adev->dm.dc, &notify);
                                if (notify.type >= ARRAY_SIZE(dm->dmub_thread_offload)) {
                                        DRM_ERROR("DM: notify type %u larger than the array size %zu!",
                                        notify.type, ARRAY_SIZE(dm->dmub_thread_offload));
                                        continue;
                                }
                                if (dm->dmub_thread_offload[notify.type] == true) {
                                        /* Deep-copy the notification: &notify lives on
                                         * this handler's stack and must not be handed
                                         * to the deferred work item. Allocate one work
                                         * item per offloaded notification so the loop
                                         * cannot re-queue a live work item.
                                         */
                                        dmub_hpd_wrk = kzalloc(sizeof(*dmub_hpd_wrk), GFP_ATOMIC);
                                        if (!dmub_hpd_wrk) {
                                                DRM_ERROR("Failed to allocate dmub_hpd_wrk");
                                                return;
                                        }
                                        dmub_hpd_wrk->dmub_notify = kmemdup(&notify,
                                                        sizeof(struct dmub_notification), GFP_ATOMIC);
                                        if (!dmub_hpd_wrk->dmub_notify) {
                                                DRM_ERROR("Failed to allocate dmub_hpd_wrk->dmub_notify");
                                                kfree(dmub_hpd_wrk);
                                                return;
                                        }
                                        INIT_WORK(&dmub_hpd_wrk->handle_hpd_work, dm_handle_hpd_work);
                                        dmub_hpd_wrk->adev = adev;
                                        queue_work(adev->dm.delayed_hpd_wq, &dmub_hpd_wrk->handle_hpd_work);
                                } else {
                                        dm->dmub_callback[notify.type](adev, &notify);
                                }
                        } while (notify.pending_notification);
                } else {
                        DRM_ERROR("DM: Failed to receive correct outbox IRQ!");
                }
        }

781         do {
782                 if (dc_dmub_srv_get_dmub_outbox0_msg(dm->dc, &entry)) {
783                         trace_amdgpu_dmub_trace_high_irq(entry.trace_code, entry.tick_count,
784                                                         entry.param0, entry.param1);
785
786                         DRM_DEBUG_DRIVER("trace_code:%u, tick_count:%u, param0:%u, param1:%u\n",
787                                  entry.trace_code, entry.tick_count, entry.param0, entry.param1);
788                 } else
789                         break;
790
791                 count++;
792
793         } while (count <= DMUB_TRACE_MAX_READ);
794
795         ASSERT(count <= DMUB_TRACE_MAX_READ);
796 }
797 #endif
798
799 static int dm_set_clockgating_state(void *handle,
800                   enum amd_clockgating_state state)
801 {
802         return 0;
803 }
804
805 static int dm_set_powergating_state(void *handle,
806                   enum amd_powergating_state state)
807 {
808         return 0;
809 }
810
811 /* Prototypes of private functions */
812 static int dm_early_init(void *handle);
813
814 /* Allocate memory for FBC compressed data  */
815 static void amdgpu_dm_fbc_init(struct drm_connector *connector)
816 {
817         struct drm_device *dev = connector->dev;
818         struct amdgpu_device *adev = drm_to_adev(dev);
819         struct dm_compressor_info *compressor = &adev->dm.compressor;
820         struct amdgpu_dm_connector *aconn = to_amdgpu_dm_connector(connector);
821         struct drm_display_mode *mode;
822         unsigned long max_size = 0;
823
824         if (adev->dm.dc->fbc_compressor == NULL)
825                 return;
826
827         if (aconn->dc_link->connector_signal != SIGNAL_TYPE_EDP)
828                 return;
829
830         if (compressor->bo_ptr)
831                 return;
832
834         list_for_each_entry(mode, &connector->modes, head) {
835                 if (max_size < mode->htotal * mode->vtotal)
836                         max_size = mode->htotal * mode->vtotal;
837         }
838
839         if (max_size) {
840                 int r = amdgpu_bo_create_kernel(adev, max_size * 4, PAGE_SIZE,
841                             AMDGPU_GEM_DOMAIN_GTT, &compressor->bo_ptr,
842                             &compressor->gpu_addr, &compressor->cpu_addr);
843
844                 if (r)
845                         DRM_ERROR("DM: Failed to initialize FBC\n");
846                 else {
847                         adev->dm.dc->ctx->fbc_gpu_addr = compressor->gpu_addr;
848                         DRM_INFO("DM: FBC alloc %lu\n", max_size*4);
849                 }
850
851         }
852
853 }
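/*
 * Sizing example (editor's note): max_size is the largest htotal * vtotal
 * across the connector's modes and the buffer is 4 bytes per pixel. For a
 * 1920x1080 mode with htotal = 2200 and vtotal = 1125 this allocates
 * 2200 * 1125 * 4 = 9,900,000 bytes (about 9.4 MiB) of GTT.
 */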
854
855 static int amdgpu_dm_audio_component_get_eld(struct device *kdev, int port,
856                                           int pipe, bool *enabled,
857                                           unsigned char *buf, int max_bytes)
858 {
859         struct drm_device *dev = dev_get_drvdata(kdev);
860         struct amdgpu_device *adev = drm_to_adev(dev);
861         struct drm_connector *connector;
862         struct drm_connector_list_iter conn_iter;
863         struct amdgpu_dm_connector *aconnector;
864         int ret = 0;
865
866         *enabled = false;
867
868         mutex_lock(&adev->dm.audio_lock);
869
870         drm_connector_list_iter_begin(dev, &conn_iter);
871         drm_for_each_connector_iter(connector, &conn_iter) {
872                 aconnector = to_amdgpu_dm_connector(connector);
873                 if (aconnector->audio_inst != port)
874                         continue;
875
876                 *enabled = true;
877                 ret = drm_eld_size(connector->eld);
878                 memcpy(buf, connector->eld, min(max_bytes, ret));
879
880                 break;
881         }
882         drm_connector_list_iter_end(&conn_iter);
883
884         mutex_unlock(&adev->dm.audio_lock);
885
886         DRM_DEBUG_KMS("Get ELD : idx=%d ret=%d en=%d\n", port, ret, *enabled);
887
888         return ret;
889 }
890
891 static const struct drm_audio_component_ops amdgpu_dm_audio_component_ops = {
892         .get_eld = amdgpu_dm_audio_component_get_eld,
893 };
894
895 static int amdgpu_dm_audio_component_bind(struct device *kdev,
896                                        struct device *hda_kdev, void *data)
897 {
898         struct drm_device *dev = dev_get_drvdata(kdev);
899         struct amdgpu_device *adev = drm_to_adev(dev);
900         struct drm_audio_component *acomp = data;
901
902         acomp->ops = &amdgpu_dm_audio_component_ops;
903         acomp->dev = kdev;
904         adev->dm.audio_component = acomp;
905
906         return 0;
907 }
908
909 static void amdgpu_dm_audio_component_unbind(struct device *kdev,
910                                           struct device *hda_kdev, void *data)
911 {
912         struct drm_device *dev = dev_get_drvdata(kdev);
913         struct amdgpu_device *adev = drm_to_adev(dev);
914         struct drm_audio_component *acomp = data;
915
916         acomp->ops = NULL;
917         acomp->dev = NULL;
918         adev->dm.audio_component = NULL;
919 }
920
921 static const struct component_ops amdgpu_dm_audio_component_bind_ops = {
922         .bind   = amdgpu_dm_audio_component_bind,
923         .unbind = amdgpu_dm_audio_component_unbind,
924 };
925
926 static int amdgpu_dm_audio_init(struct amdgpu_device *adev)
927 {
928         int i, ret;
929
930         if (!amdgpu_audio)
931                 return 0;
932
933         adev->mode_info.audio.enabled = true;
934
935         adev->mode_info.audio.num_pins = adev->dm.dc->res_pool->audio_count;
936
937         for (i = 0; i < adev->mode_info.audio.num_pins; i++) {
938                 adev->mode_info.audio.pin[i].channels = -1;
939                 adev->mode_info.audio.pin[i].rate = -1;
940                 adev->mode_info.audio.pin[i].bits_per_sample = -1;
941                 adev->mode_info.audio.pin[i].status_bits = 0;
942                 adev->mode_info.audio.pin[i].category_code = 0;
943                 adev->mode_info.audio.pin[i].connected = false;
944                 adev->mode_info.audio.pin[i].id =
945                         adev->dm.dc->res_pool->audios[i]->inst;
946                 adev->mode_info.audio.pin[i].offset = 0;
947         }
948
949         ret = component_add(adev->dev, &amdgpu_dm_audio_component_bind_ops);
950         if (ret < 0)
951                 return ret;
952
953         adev->dm.audio_registered = true;
954
955         return 0;
956 }
957
958 static void amdgpu_dm_audio_fini(struct amdgpu_device *adev)
959 {
960         if (!amdgpu_audio)
961                 return;
962
963         if (!adev->mode_info.audio.enabled)
964                 return;
965
966         if (adev->dm.audio_registered) {
967                 component_del(adev->dev, &amdgpu_dm_audio_component_bind_ops);
968                 adev->dm.audio_registered = false;
969         }
970
971         /* TODO: Disable audio? */
972
973         adev->mode_info.audio.enabled = false;
974 }
975
976 static void amdgpu_dm_audio_eld_notify(struct amdgpu_device *adev, int pin)
977 {
978         struct drm_audio_component *acomp = adev->dm.audio_component;
979
980         if (acomp && acomp->audio_ops && acomp->audio_ops->pin_eld_notify) {
981                 DRM_DEBUG_KMS("Notify ELD: %d\n", pin);
982
983                 acomp->audio_ops->pin_eld_notify(acomp->audio_ops->audio_ptr,
984                                                  pin, -1);
985         }
986 }
987
988 static int dm_dmub_hw_init(struct amdgpu_device *adev)
989 {
990         const struct dmcub_firmware_header_v1_0 *hdr;
991         struct dmub_srv *dmub_srv = adev->dm.dmub_srv;
992         struct dmub_srv_fb_info *fb_info = adev->dm.dmub_fb_info;
993         const struct firmware *dmub_fw = adev->dm.dmub_fw;
994         struct dmcu *dmcu = adev->dm.dc->res_pool->dmcu;
995         struct abm *abm = adev->dm.dc->res_pool->abm;
996         struct dmub_srv_hw_params hw_params;
997         enum dmub_status status;
998         const unsigned char *fw_inst_const, *fw_bss_data;
999         uint32_t i, fw_inst_const_size, fw_bss_data_size;
1000         bool has_hw_support;
1001
1002         if (!dmub_srv)
1003                 /* DMUB isn't supported on the ASIC. */
1004                 return 0;
1005
1006         if (!fb_info) {
1007                 DRM_ERROR("No framebuffer info for DMUB service.\n");
1008                 return -EINVAL;
1009         }
1010
1011         if (!dmub_fw) {
1012                 /* Firmware required for DMUB support. */
1013                 DRM_ERROR("No firmware provided for DMUB.\n");
1014                 return -EINVAL;
1015         }
1016
1017         status = dmub_srv_has_hw_support(dmub_srv, &has_hw_support);
1018         if (status != DMUB_STATUS_OK) {
1019                 DRM_ERROR("Error checking HW support for DMUB: %d\n", status);
1020                 return -EINVAL;
1021         }
1022
1023         if (!has_hw_support) {
1024                 DRM_INFO("DMUB unsupported on ASIC\n");
1025                 return 0;
1026         }
1027
1028         /* Reset DMCUB if it was previously running - before we overwrite its memory. */
1029         status = dmub_srv_hw_reset(dmub_srv);
1030         if (status != DMUB_STATUS_OK)
1031                 DRM_WARN("Error resetting DMUB HW: %d\n", status);
1032
1033         hdr = (const struct dmcub_firmware_header_v1_0 *)dmub_fw->data;
1034
1035         fw_inst_const = dmub_fw->data +
1036                         le32_to_cpu(hdr->header.ucode_array_offset_bytes) +
1037                         PSP_HEADER_BYTES;
1038
1039         fw_bss_data = dmub_fw->data +
1040                       le32_to_cpu(hdr->header.ucode_array_offset_bytes) +
1041                       le32_to_cpu(hdr->inst_const_bytes);
1042
1043         /* Copy firmware and bios info into FB memory. */
1044         fw_inst_const_size = le32_to_cpu(hdr->inst_const_bytes) -
1045                              PSP_HEADER_BYTES - PSP_FOOTER_BYTES;
1046
1047         fw_bss_data_size = le32_to_cpu(hdr->bss_data_bytes);
1048
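        /*
         * Layout of dmub_fw->data implied by the offsets above
         * (editor's sketch):
         *
         *   ucode_array_offset_bytes
         *     +-> PSP header    (PSP_HEADER_BYTES = 0x100)
         *     +-> fw_inst_const (inst_const_bytes minus header and footer)
         *     +-> PSP footer    (PSP_FOOTER_BYTES = 0x100)
         *     +-> fw_bss_data   (bss_data_bytes)
         */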
1049         /* if adev->firmware.load_type == AMDGPU_FW_LOAD_PSP,
1050          * amdgpu_ucode_init_single_fw will load dmub firmware
1051          * fw_inst_const part to cw0; otherwise, the firmware back door load
1052          * will be done by dm_dmub_hw_init
1053          */
1054         if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP) {
1055                 memcpy(fb_info->fb[DMUB_WINDOW_0_INST_CONST].cpu_addr, fw_inst_const,
1056                                 fw_inst_const_size);
1057         }
1058
1059         if (fw_bss_data_size)
1060                 memcpy(fb_info->fb[DMUB_WINDOW_2_BSS_DATA].cpu_addr,
1061                        fw_bss_data, fw_bss_data_size);
1062
1063         /* Copy firmware bios info into FB memory. */
1064         memcpy(fb_info->fb[DMUB_WINDOW_3_VBIOS].cpu_addr, adev->bios,
1065                adev->bios_size);
1066
1067         /* Reset regions that need to be reset. */
1068         memset(fb_info->fb[DMUB_WINDOW_4_MAILBOX].cpu_addr, 0,
1069                fb_info->fb[DMUB_WINDOW_4_MAILBOX].size);
1070
1071         memset(fb_info->fb[DMUB_WINDOW_5_TRACEBUFF].cpu_addr, 0,
1072                fb_info->fb[DMUB_WINDOW_5_TRACEBUFF].size);
1073
1074         memset(fb_info->fb[DMUB_WINDOW_6_FW_STATE].cpu_addr, 0,
1075                fb_info->fb[DMUB_WINDOW_6_FW_STATE].size);
1076
1077         /* Initialize hardware. */
1078         memset(&hw_params, 0, sizeof(hw_params));
1079         hw_params.fb_base = adev->gmc.fb_start;
1080         hw_params.fb_offset = adev->gmc.aper_base;
1081
1082         /* backdoor load firmware and trigger dmub running */
1083         if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP)
1084                 hw_params.load_inst_const = true;
1085
1086         if (dmcu)
1087                 hw_params.psp_version = dmcu->psp_version;
1088
1089         for (i = 0; i < fb_info->num_fb; ++i)
1090                 hw_params.fb[i] = &fb_info->fb[i];
1091
1092         status = dmub_srv_hw_init(dmub_srv, &hw_params);
1093         if (status != DMUB_STATUS_OK) {
1094                 DRM_ERROR("Error initializing DMUB HW: %d\n", status);
1095                 return -EINVAL;
1096         }
1097
1098         /* Wait for firmware load to finish. */
1099         status = dmub_srv_wait_for_auto_load(dmub_srv, 100000);
1100         if (status != DMUB_STATUS_OK)
1101                 DRM_WARN("Wait for DMUB auto-load failed: %d\n", status);
1102
1103         /* Init DMCU and ABM if available. */
1104         if (dmcu && abm) {
1105                 dmcu->funcs->dmcu_init(dmcu);
1106                 abm->dmcu_is_running = dmcu->funcs->is_dmcu_initialized(dmcu);
1107         }
1108
1109         if (!adev->dm.dc->ctx->dmub_srv)
1110                 adev->dm.dc->ctx->dmub_srv = dc_dmub_srv_create(adev->dm.dc, dmub_srv);
1111         if (!adev->dm.dc->ctx->dmub_srv) {
1112                 DRM_ERROR("Couldn't allocate DC DMUB server!\n");
1113                 return -ENOMEM;
1114         }
1115
1116         DRM_INFO("DMUB hardware initialized: version=0x%08X\n",
1117                  adev->dm.dmcub_fw_version);
1118
1119         return 0;
1120 }
1121
1122 static void dm_dmub_hw_resume(struct amdgpu_device *adev)
1123 {
1124         struct dmub_srv *dmub_srv = adev->dm.dmub_srv;
1125         enum dmub_status status;
1126         bool init;
1127
1128         if (!dmub_srv) {
1129                 /* DMUB isn't supported on the ASIC. */
1130                 return;
1131         }
1132
1133         status = dmub_srv_is_hw_init(dmub_srv, &init);
1134         if (status != DMUB_STATUS_OK)
1135                 DRM_WARN("DMUB hardware init check failed: %d\n", status);
1136
1137         if (status == DMUB_STATUS_OK && init) {
1138                 /* Wait for firmware load to finish. */
1139                 status = dmub_srv_wait_for_auto_load(dmub_srv, 100000);
1140                 if (status != DMUB_STATUS_OK)
1141                         DRM_WARN("Wait for DMUB auto-load failed: %d\n", status);
1142         } else {
1143                 /* Perform the full hardware initialization. */
1144                 dm_dmub_hw_init(adev);
1145         }
1146 }
1147
1148 #if defined(CONFIG_DRM_AMD_DC_DCN)
1149 static void mmhub_read_system_context(struct amdgpu_device *adev, struct dc_phy_addr_space_config *pa_config)
1150 {
1151         uint64_t pt_base;
1152         uint32_t logical_addr_low;
1153         uint32_t logical_addr_high;
1154         uint32_t agp_base, agp_bot, agp_top;
1155         PHYSICAL_ADDRESS_LOC page_table_start, page_table_end, page_table_base;
1156
1157         memset(pa_config, 0, sizeof(*pa_config));
1158
1159         logical_addr_low  = min(adev->gmc.fb_start, adev->gmc.agp_start) >> 18;
1160         pt_base = amdgpu_gmc_pd_addr(adev->gart.bo);
1161
1162         if (adev->apu_flags & AMD_APU_IS_RAVEN2)
1163                 /*
1164                  * Raven2 has a HW issue that it is unable to use the vram which
1165                  * is out of MC_VM_SYSTEM_APERTURE_HIGH_ADDR. So here is the
1166                  * workaround that increase system aperture high address (add 1)
1167                  * to get rid of the VM fault and hardware hang.
1168                  */
1169                 logical_addr_high = max((adev->gmc.fb_end >> 18) + 0x1, adev->gmc.agp_end >> 18);
1170         else
1171                 logical_addr_high = max(adev->gmc.fb_end, adev->gmc.agp_end) >> 18;
1172
1173         agp_base = 0;
1174         agp_bot = adev->gmc.agp_start >> 24;
1175         agp_top = adev->gmc.agp_end >> 24;
1176
1178         page_table_start.high_part = (u32)(adev->gmc.gart_start >> 44) & 0xF;
1179         page_table_start.low_part = (u32)(adev->gmc.gart_start >> 12);
1180         page_table_end.high_part = (u32)(adev->gmc.gart_end >> 44) & 0xF;
1181         page_table_end.low_part = (u32)(adev->gmc.gart_end >> 12);
1182         page_table_base.high_part = upper_32_bits(pt_base) & 0xF;
1183         page_table_base.low_part = lower_32_bits(pt_base);
1184
1185         pa_config->system_aperture.start_addr = (uint64_t)logical_addr_low << 18;
1186         pa_config->system_aperture.end_addr = (uint64_t)logical_addr_high << 18;
1187
1188         pa_config->system_aperture.agp_base = (uint64_t)agp_base << 24;
1189         pa_config->system_aperture.agp_bot = (uint64_t)agp_bot << 24;
1190         pa_config->system_aperture.agp_top = (uint64_t)agp_top << 24;
1191
1192         pa_config->system_aperture.fb_base = adev->gmc.fb_start;
1193         pa_config->system_aperture.fb_offset = adev->gmc.aper_base;
1194         pa_config->system_aperture.fb_top = adev->gmc.fb_end;
1195
1196         pa_config->gart_config.page_table_start_addr = page_table_start.quad_part << 12;
1197         pa_config->gart_config.page_table_end_addr = page_table_end.quad_part << 12;
1198         pa_config->gart_config.page_table_base_addr = page_table_base.quad_part;
1199
1200         pa_config->is_hvm_enabled = 0;
1201
1202 }
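/*
 * Units note (editor's illustration): the system aperture fields are held
 * in 256 KiB units (the << 18 / >> 18 shifts) and the AGP fields in 16 MiB
 * units (<< 24 / >> 24). GART page-table addresses are split into a 4 KiB
 * aligned low part (>> 12) and a 4-bit high part (bits 44..47). For
 * example, fb_start = 0x0000008000000000 >> 18 = 0x200000.
 */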
1203 #endif
1204 #if defined(CONFIG_DRM_AMD_DC_DCN)
1205 static void vblank_control_worker(struct work_struct *work)
1206 {
1207         struct vblank_control_work *vblank_work =
1208                 container_of(work, struct vblank_control_work, work);
1209         struct amdgpu_display_manager *dm = vblank_work->dm;
1210
1211         mutex_lock(&dm->dc_lock);
1212
1213         if (vblank_work->enable)
1214                 dm->active_vblank_irq_count++;
1215         else if (dm->active_vblank_irq_count)
1216                 dm->active_vblank_irq_count--;
1217
1218         dc_allow_idle_optimizations(dm->dc, dm->active_vblank_irq_count == 0);
1219
1220         DRM_DEBUG_KMS("Allow idle optimizations (MALL): %d\n", dm->active_vblank_irq_count == 0);
1221
1222         /* Control PSR based on vblank requirements from OS */
1223         if (vblank_work->stream && vblank_work->stream->link) {
1224                 if (vblank_work->enable) {
1225                         if (vblank_work->stream->link->psr_settings.psr_allow_active)
1226                                 amdgpu_dm_psr_disable(vblank_work->stream);
1227                 } else if (vblank_work->stream->link->psr_settings.psr_feature_enabled &&
1228                            !vblank_work->stream->link->psr_settings.psr_allow_active &&
1229                            vblank_work->acrtc->dm_irq_params.allow_psr_entry) {
1230                         amdgpu_dm_psr_enable(vblank_work->stream);
1231                 }
1232         }
1233
1234         mutex_unlock(&dm->dc_lock);
1235
1236         dc_stream_release(vblank_work->stream);
1237
1238         kfree(vblank_work);
1239 }
1240
1241 #endif
1242
1243 static void dm_handle_hpd_rx_offload_work(struct work_struct *work)
1244 {
1245         struct hpd_rx_irq_offload_work *offload_work;
1246         struct amdgpu_dm_connector *aconnector;
1247         struct dc_link *dc_link;
1248         struct amdgpu_device *adev;
1249         enum dc_connection_type new_connection_type = dc_connection_none;
1250         unsigned long flags;
1251
1252         offload_work = container_of(work, struct hpd_rx_irq_offload_work, work);
1253         aconnector = offload_work->offload_wq->aconnector;
1254
1255         if (!aconnector) {
1256                 DRM_ERROR("Can't retrieve aconnector in hpd_rx_irq_offload_work");
1257                 goto skip;
1258         }
1259
1260         adev = drm_to_adev(aconnector->base.dev);
1261         dc_link = aconnector->dc_link;
1262
1263         mutex_lock(&aconnector->hpd_lock);
1264         if (!dc_link_detect_sink(dc_link, &new_connection_type))
1265                 DRM_ERROR("KMS: Failed to detect connector\n");
1266         mutex_unlock(&aconnector->hpd_lock);
1267
1268         if (new_connection_type == dc_connection_none)
1269                 goto skip;
1270
1271         if (amdgpu_in_reset(adev))
1272                 goto skip;
1273
1274         mutex_lock(&adev->dm.dc_lock);
1275         if (offload_work->data.bytes.device_service_irq.bits.AUTOMATED_TEST)
1276                 dc_link_dp_handle_automated_test(dc_link);
1277         else if ((dc_link->connector_signal != SIGNAL_TYPE_EDP) &&
1278                         hpd_rx_irq_check_link_loss_status(dc_link, &offload_work->data) &&
1279                         dc_link_dp_allow_hpd_rx_irq(dc_link)) {
1280                 dc_link_dp_handle_link_loss(dc_link);
1281                 spin_lock_irqsave(&offload_work->offload_wq->offload_lock, flags);
1282                 offload_work->offload_wq->is_handling_link_loss = false;
1283                 spin_unlock_irqrestore(&offload_work->offload_wq->offload_lock, flags);
1284         }
1285         mutex_unlock(&adev->dm.dc_lock);
1286
1287 skip:
1288         kfree(offload_work);
1289
1290 }
1291
1292 static struct hpd_rx_irq_offload_work_queue *hpd_rx_irq_create_workqueue(struct dc *dc)
1293 {
1294         int max_caps = dc->caps.max_links;
1295         int i = 0;
1296         struct hpd_rx_irq_offload_work_queue *hpd_rx_offload_wq = NULL;
1297
1298         hpd_rx_offload_wq = kcalloc(max_caps, sizeof(*hpd_rx_offload_wq), GFP_KERNEL);
1299
1300         if (!hpd_rx_offload_wq)
1301                 return NULL;
1302
1304         for (i = 0; i < max_caps; i++) {
1305                 hpd_rx_offload_wq[i].wq =
1306                                     create_singlethread_workqueue("amdgpu_dm_hpd_rx_offload_wq");
1307
1308                 if (hpd_rx_offload_wq[i].wq == NULL) {
1309                         DRM_ERROR("create amdgpu_dm_hpd_rx_offload_wq fail!");
1310                         goto out_err;
1311                 }
1312
1313                 spin_lock_init(&hpd_rx_offload_wq[i].offload_lock);
1314         }
1315
        return hpd_rx_offload_wq;

out_err:
        /* Unwind: free any workqueues created before this one failed. */
        while (--i >= 0)
                destroy_workqueue(hpd_rx_offload_wq[i].wq);
        kfree(hpd_rx_offload_wq);
        return NULL;
}
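/*
 * Editor's sketch of the producer side: an hpd_rx interrupt path hands
 * work to one of these per-link queues, which is then consumed by
 * dm_handle_hpd_rx_offload_work() above. The helper shape below is an
 * assumption.
 *
 *	struct hpd_rx_irq_offload_work *work;
 *
 *	work = kzalloc(sizeof(*work), GFP_KERNEL);
 *	if (!work)
 *		return;
 *	INIT_WORK(&work->work, dm_handle_hpd_rx_offload_work);
 *	work->data = *irq_data;          // union hpd_irq_data from the link
 *	work->offload_wq = offload_wq;   // per-link queue created above
 *	queue_work(offload_wq->wq, &work->work);
 */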
1318
1319 struct amdgpu_stutter_quirk {
1320         u16 chip_vendor;
1321         u16 chip_device;
1322         u16 subsys_vendor;
1323         u16 subsys_device;
1324         u8 revision;
1325 };
1326
1327 static const struct amdgpu_stutter_quirk amdgpu_stutter_quirk_list[] = {
1328         /* https://bugzilla.kernel.org/show_bug.cgi?id=214417 */
1329         { 0x1002, 0x15dd, 0x1002, 0x15dd, 0xc8 },
1330         { 0, 0, 0, 0, 0 },
1331 };
1332
1333 static bool dm_should_disable_stutter(struct pci_dev *pdev)
1334 {
1335         const struct amdgpu_stutter_quirk *p = amdgpu_stutter_quirk_list;
1336
1337         while (p && p->chip_device != 0) {
1338                 if (pdev->vendor == p->chip_vendor &&
1339                     pdev->device == p->chip_device &&
1340                     pdev->subsystem_vendor == p->subsys_vendor &&
1341                     pdev->subsystem_device == p->subsys_device &&
1342                     pdev->revision == p->revision) {
1343                         return true;
1344                 }
1345                 ++p;
1346         }
1347         return false;
1348 }
1349
1350 static const struct dmi_system_id hpd_disconnect_quirk_table[] = {
1351         {
1352                 .matches = {
1353                         DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
1354                         DMI_MATCH(DMI_PRODUCT_NAME, "Precision 3660"),
1355                 },
1356         },
1357         {
1358                 .matches = {
1359                         DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
1360                         DMI_MATCH(DMI_PRODUCT_NAME, "Precision 3260"),
1361                 },
1362         },
1363         {
1364                 .matches = {
1365                         DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
1366                         DMI_MATCH(DMI_PRODUCT_NAME, "Precision 3460"),
1367                 },
1368         },
1369         {}
1370 };
1371
1372 static void retrieve_dmi_info(struct amdgpu_display_manager *dm)
1373 {
1374         const struct dmi_system_id *dmi_id;
1375
1376         dm->aux_hpd_discon_quirk = false;
1377
1378         dmi_id = dmi_first_match(hpd_disconnect_quirk_table);
1379         if (dmi_id) {
1380                 dm->aux_hpd_discon_quirk = true;
1381                 DRM_INFO("aux_hpd_discon_quirk attached\n");
1382         }
1383 }
1384
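     /*
      * Bring up the display manager: populate dc_init_data from the base
      * driver, create the DC instance and the DMUB interface, then register
      * the DRM device state, interrupt handling and the optional modules
      * (freesync, HDCP, secure display). Errors unwind via amdgpu_dm_fini().
      */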
1385 static int amdgpu_dm_init(struct amdgpu_device *adev)
1386 {
1387         struct dc_init_data init_data;
1388 #ifdef CONFIG_DRM_AMD_DC_HDCP
1389         struct dc_callback_init init_params;
1390 #endif
1391         int r;
1392
1393         adev->dm.ddev = adev_to_drm(adev);
1394         adev->dm.adev = adev;
1395
1396         /* Zero all the fields */
1397         memset(&init_data, 0, sizeof(init_data));
1398 #ifdef CONFIG_DRM_AMD_DC_HDCP
1399         memset(&init_params, 0, sizeof(init_params));
1400 #endif
1401
1402         mutex_init(&adev->dm.dc_lock);
1403         mutex_init(&adev->dm.audio_lock);
1404 #if defined(CONFIG_DRM_AMD_DC_DCN)
1405         spin_lock_init(&adev->dm.vblank_lock);
1406 #endif
1407
1408         if (amdgpu_dm_irq_init(adev)) {
1409                 DRM_ERROR("amdgpu: failed to initialize DM IRQ support.\n");
1410                 goto error;
1411         }
1412
1413         init_data.asic_id.chip_family = adev->family;
1414
1415         init_data.asic_id.pci_revision_id = adev->pdev->revision;
1416         init_data.asic_id.hw_internal_rev = adev->external_rev_id;
1417         init_data.asic_id.chip_id = adev->pdev->device;
1418
1419         init_data.asic_id.vram_width = adev->gmc.vram_width;
1420         /* TODO: initialize init_data.asic_id.vram_type here!!!! */
1421         init_data.asic_id.atombios_base_address =
1422                 adev->mode_info.atom_context->bios;
1423
1424         init_data.driver = adev;
1425
1426         adev->dm.cgs_device = amdgpu_cgs_create_device(adev);
1427
1428         if (!adev->dm.cgs_device) {
1429                 DRM_ERROR("amdgpu: failed to create cgs device.\n");
1430                 goto error;
1431         }
1432
1433         init_data.cgs_device = adev->dm.cgs_device;
1434
1435         init_data.dce_environment = DCE_ENV_PRODUCTION_DRV;
1436
1437         switch (adev->asic_type) {
1438         case CHIP_CARRIZO:
1439         case CHIP_STONEY:
1440         case CHIP_RAVEN:
1441         case CHIP_RENOIR:
1442                 init_data.flags.gpu_vm_support = true;
1443                 switch (adev->dm.dmcub_fw_version) {
1444                 case 0: /* development */
1445                 case 0x1: /* linux-firmware.git hash 6d9f399 */
1446                 case 0x01000000: /* linux-firmware.git hash 9a0b0f4 */
1447                         init_data.flags.disable_dmcu = false;
1448                         break;
1449                 default:
1450                         init_data.flags.disable_dmcu = true;
1451                 }
1452                 break;
1453         case CHIP_VANGOGH:
1454         case CHIP_YELLOW_CARP:
1455                 init_data.flags.gpu_vm_support = true;
1456                 break;
1457         default:
1458                 break;
1459         }
1460
1461         if (amdgpu_dc_feature_mask & DC_FBC_MASK)
1462                 init_data.flags.fbc_support = true;
1463
1464         if (amdgpu_dc_feature_mask & DC_MULTI_MON_PP_MCLK_SWITCH_MASK)
1465                 init_data.flags.multi_mon_pp_mclk_switch = true;
1466
1467         if (amdgpu_dc_feature_mask & DC_DISABLE_FRACTIONAL_PWM_MASK)
1468                 init_data.flags.disable_fractional_pwm = true;
1469
1470         if (amdgpu_dc_feature_mask & DC_EDP_NO_POWER_SEQUENCING)
1471                 init_data.flags.edp_no_power_sequencing = true;
1472
1473         init_data.flags.power_down_display_on_boot = true;
1474
1475         INIT_LIST_HEAD(&adev->dm.da_list);
1476
1477         retrieve_dmi_info(&adev->dm);
1478
1479         /* Display Core create. */
1480         adev->dm.dc = dc_create(&init_data);
1481
1482         if (adev->dm.dc) {
1483                 DRM_INFO("Display Core initialized with v%s!\n", DC_VER);
1484         } else {
1485                 DRM_ERROR("Display Core failed to initialize with v%s!\n", DC_VER);
1486                 goto error;
1487         }
1488
1489         if (amdgpu_dc_debug_mask & DC_DISABLE_PIPE_SPLIT) {
1490                 adev->dm.dc->debug.force_single_disp_pipe_split = false;
1491                 adev->dm.dc->debug.pipe_split_policy = MPC_SPLIT_AVOID;
1492         }
1493
1494         if (adev->asic_type != CHIP_CARRIZO && adev->asic_type != CHIP_STONEY)
1495                 adev->dm.dc->debug.disable_stutter = !(amdgpu_pp_feature_mask & PP_STUTTER_MODE);
1496         if (dm_should_disable_stutter(adev->pdev))
1497                 adev->dm.dc->debug.disable_stutter = true;
1498
1499         if (amdgpu_dc_debug_mask & DC_DISABLE_STUTTER)
1500                 adev->dm.dc->debug.disable_stutter = true;
1501
1502         if (amdgpu_dc_debug_mask & DC_DISABLE_DSC)
1503                 adev->dm.dc->debug.disable_dsc = true;
1504
1505         if (amdgpu_dc_debug_mask & DC_DISABLE_CLOCK_GATING)
1506                 adev->dm.dc->debug.disable_clock_gate = true;
1507
1508         r = dm_dmub_hw_init(adev);
1509         if (r) {
1510                 DRM_ERROR("DMUB interface failed to initialize: status=%d\n", r);
1511                 goto error;
1512         }
1513
1514         dc_hardware_init(adev->dm.dc);
1515
1516         adev->dm.hpd_rx_offload_wq = hpd_rx_irq_create_workqueue(adev->dm.dc);
1517         if (!adev->dm.hpd_rx_offload_wq) {
1518                 DRM_ERROR("amdgpu: failed to create hpd rx offload workqueue.\n");
1519                 goto error;
1520         }
1521
1522 #if defined(CONFIG_DRM_AMD_DC_DCN)
1523         if ((adev->flags & AMD_IS_APU) && (adev->asic_type >= CHIP_CARRIZO)) {
1524                 struct dc_phy_addr_space_config pa_config;
1525
1526                 mmhub_read_system_context(adev, &pa_config);
1527
1528                 /* Call the DC init_memory func */
1529                 dc_setup_system_context(adev->dm.dc, &pa_config);
1530         }
1531 #endif
1532
1533         adev->dm.freesync_module = mod_freesync_create(adev->dm.dc);
1534         if (!adev->dm.freesync_module) {
1535                 DRM_ERROR(
1536                 "amdgpu: failed to initialize freesync_module.\n");
1537         } else
1538                 DRM_DEBUG_DRIVER("amdgpu: freesync_module init done %p.\n",
1539                                 adev->dm.freesync_module);
1540
1541         amdgpu_dm_init_color_mod();
1542
1543 #if defined(CONFIG_DRM_AMD_DC_DCN)
1544         if (adev->dm.dc->caps.max_links > 0) {
1545                 adev->dm.vblank_control_workqueue =
1546                         create_singlethread_workqueue("dm_vblank_control_workqueue");
1547                 if (!adev->dm.vblank_control_workqueue)
1548                         DRM_ERROR("amdgpu: failed to initialize vblank_workqueue.\n");
1549         }
1550 #endif
1551
1552 #ifdef CONFIG_DRM_AMD_DC_HDCP
1553         if (adev->dm.dc->caps.max_links > 0 && adev->asic_type >= CHIP_RAVEN) {
1554                 adev->dm.hdcp_workqueue = hdcp_create_workqueue(adev, &init_params.cp_psp, adev->dm.dc);
1555
1556                 if (!adev->dm.hdcp_workqueue)
1557                         DRM_ERROR("amdgpu: failed to initialize hdcp_workqueue.\n");
1558                 else
1559                         DRM_DEBUG_DRIVER("amdgpu: hdcp_workqueue init done %p.\n", adev->dm.hdcp_workqueue);
1560
1561                 dc_init_callbacks(adev->dm.dc, &init_params);
1562         }
1563 #endif
1564 #if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
1565         adev->dm.crc_rd_wrk = amdgpu_dm_crtc_secure_display_create_work();
1566 #endif
1567         if (dc_enable_dmub_notifications(adev->dm.dc)) {
1568                 init_completion(&adev->dm.dmub_aux_transfer_done);
1569                 adev->dm.dmub_notify = kzalloc(sizeof(struct dmub_notification), GFP_KERNEL);
1570                 if (!adev->dm.dmub_notify) {
1571                         DRM_ERROR("amdgpu: failed to allocate adev->dm.dmub_notify.\n");
1572                         goto error;
1573                 }
1574
1575                 adev->dm.delayed_hpd_wq = create_singlethread_workqueue("amdgpu_dm_hpd_wq");
1576                 if (!adev->dm.delayed_hpd_wq) {
1577                         DRM_ERROR("amdgpu: failed to create hpd offload workqueue.\n");
1578                         goto error;
1579                 }
1580
1581                 amdgpu_dm_outbox_init(adev);
1582 #if defined(CONFIG_DRM_AMD_DC_DCN)
1583                 if (!register_dmub_notify_callback(adev, DMUB_NOTIFICATION_AUX_REPLY,
1584                         dmub_aux_setconfig_callback, false)) {
1585                         DRM_ERROR("amdgpu: failed to register DMUB AUX callback.\n");
1586                         goto error;
1587                 }
1588                 if (!register_dmub_notify_callback(adev, DMUB_NOTIFICATION_HPD, dmub_hpd_callback, true)) {
1589                         DRM_ERROR("amdgpu: failed to register DMUB HPD callback.\n");
1590                         goto error;
1591                 }
1592 #endif
1593         }
1594
1595         if (amdgpu_dm_initialize_drm_device(adev)) {
1596                 DRM_ERROR(
1597                 "amdgpu: failed to initialize sw for display support.\n");
1598                 goto error;
1599         }
1600
1601         /* create fake encoders for MST */
1602         dm_dp_create_fake_mst_encoders(adev);
1603
1604         /* TODO: Add_display_info? */
1605
1606         /* TODO use dynamic cursor width */
1607         adev_to_drm(adev)->mode_config.cursor_width = adev->dm.dc->caps.max_cursor_size;
1608         adev_to_drm(adev)->mode_config.cursor_height = adev->dm.dc->caps.max_cursor_size;
1609
1610         if (drm_vblank_init(adev_to_drm(adev), adev->dm.display_indexes_num)) {
1611                 DRM_ERROR(
1612                 "amdgpu: failed to initialize vblank for display support.\n");
1613                 goto error;
1614         }
1615
1617         DRM_DEBUG_DRIVER("KMS initialized.\n");
1618
1619         return 0;
1620 error:
1621         amdgpu_dm_fini(adev);
1622
1623         return -EINVAL;
1624 }
1625
1626 static int amdgpu_dm_early_fini(void *handle)
1627 {
1628         struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1629
1630         amdgpu_dm_audio_fini(adev);
1631
1632         return 0;
1633 }
1634
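     /*
      * Tear down everything amdgpu_dm_init() created, in roughly reverse
      * order. Individual pointers are checked first, so this is also safe
      * to call from amdgpu_dm_init()'s error path on a partial init.
      */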
1635 static void amdgpu_dm_fini(struct amdgpu_device *adev)
1636 {
1637         int i;
1638
1639 #if defined(CONFIG_DRM_AMD_DC_DCN)
1640         if (adev->dm.vblank_control_workqueue) {
1641                 destroy_workqueue(adev->dm.vblank_control_workqueue);
1642                 adev->dm.vblank_control_workqueue = NULL;
1643         }
1644 #endif
1645
1646         for (i = 0; i < adev->dm.display_indexes_num; i++) {
1647                 drm_encoder_cleanup(&adev->dm.mst_encoders[i].base);
1648         }
1649
1650         amdgpu_dm_destroy_drm_device(&adev->dm);
1651
1652 #if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
1653         if (adev->dm.crc_rd_wrk) {
1654                 flush_work(&adev->dm.crc_rd_wrk->notify_ta_work);
1655                 kfree(adev->dm.crc_rd_wrk);
1656                 adev->dm.crc_rd_wrk = NULL;
1657         }
1658 #endif
1659 #ifdef CONFIG_DRM_AMD_DC_HDCP
1660         if (adev->dm.hdcp_workqueue) {
1661                 hdcp_destroy(&adev->dev->kobj, adev->dm.hdcp_workqueue);
1662                 adev->dm.hdcp_workqueue = NULL;
1663         }
1664
1665         if (adev->dm.dc)
1666                 dc_deinit_callbacks(adev->dm.dc);
1667 #endif
1668
1669         /* adev->dm.dc may be NULL if amdgpu_dm_init() failed early */
             if (adev->dm.dc)
                     dc_dmub_srv_destroy(&adev->dm.dc->ctx->dmub_srv);
1670
1671         if (adev->dm.dc && dc_enable_dmub_notifications(adev->dm.dc)) {
1672                 kfree(adev->dm.dmub_notify);
1673                 adev->dm.dmub_notify = NULL;
1674                 destroy_workqueue(adev->dm.delayed_hpd_wq);
1675                 adev->dm.delayed_hpd_wq = NULL;
1676         }
1677
1678         if (adev->dm.dmub_bo)
1679                 amdgpu_bo_free_kernel(&adev->dm.dmub_bo,
1680                                       &adev->dm.dmub_bo_gpu_addr,
1681                                       &adev->dm.dmub_bo_cpu_addr);
1682
1683         /* DC Destroy TODO: Replace destroy DAL */
1684         if (adev->dm.dc)
1685                 dc_destroy(&adev->dm.dc);
1686         /*
1687          * TODO: pageflip, vlank interrupt
1688          *
1689          * amdgpu_dm_irq_fini(adev);
1690          */
1691
1692         if (adev->dm.cgs_device) {
1693                 amdgpu_cgs_destroy_device(adev->dm.cgs_device);
1694                 adev->dm.cgs_device = NULL;
1695         }
1696         if (adev->dm.freesync_module) {
1697                 mod_freesync_destroy(adev->dm.freesync_module);
1698                 adev->dm.freesync_module = NULL;
1699         }
1700
1701         if (adev->dm.hpd_rx_offload_wq) {
1702                 for (i = 0; i < adev->dm.dc->caps.max_links; i++) {
1703                         if (adev->dm.hpd_rx_offload_wq[i].wq) {
1704                                 destroy_workqueue(adev->dm.hpd_rx_offload_wq[i].wq);
1705                                 adev->dm.hpd_rx_offload_wq[i].wq = NULL;
1706                         }
1707                 }
1708
1709                 kfree(adev->dm.hpd_rx_offload_wq);
1710                 adev->dm.hpd_rx_offload_wq = NULL;
1711         }
1712
1713         mutex_destroy(&adev->dm.audio_lock);
1714         mutex_destroy(&adev->dm.dc_lock);
1715
1716         return;
1717 }
1718
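     /*
      * Request and validate the optional DMCU firmware for the ASICs that
      * use it, and register its ERAM and INTV sections for PSP loading.
      */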
1719 static int load_dmcu_fw(struct amdgpu_device *adev)
1720 {
1721         const char *fw_name_dmcu = NULL;
1722         int r;
1723         const struct dmcu_firmware_header_v1_0 *hdr;
1724
1725         switch (adev->asic_type) {
1726 #if defined(CONFIG_DRM_AMD_DC_SI)
1727         case CHIP_TAHITI:
1728         case CHIP_PITCAIRN:
1729         case CHIP_VERDE:
1730         case CHIP_OLAND:
1731 #endif
1732         case CHIP_BONAIRE:
1733         case CHIP_HAWAII:
1734         case CHIP_KAVERI:
1735         case CHIP_KABINI:
1736         case CHIP_MULLINS:
1737         case CHIP_TONGA:
1738         case CHIP_FIJI:
1739         case CHIP_CARRIZO:
1740         case CHIP_STONEY:
1741         case CHIP_POLARIS11:
1742         case CHIP_POLARIS10:
1743         case CHIP_POLARIS12:
1744         case CHIP_VEGAM:
1745         case CHIP_VEGA10:
1746         case CHIP_VEGA12:
1747         case CHIP_VEGA20:
1748         case CHIP_NAVI10:
1749         case CHIP_NAVI14:
1750         case CHIP_RENOIR:
1751         case CHIP_SIENNA_CICHLID:
1752         case CHIP_NAVY_FLOUNDER:
1753         case CHIP_DIMGREY_CAVEFISH:
1754         case CHIP_BEIGE_GOBY:
1755         case CHIP_VANGOGH:
1756         case CHIP_YELLOW_CARP:
1757                 return 0;
1758         case CHIP_NAVI12:
1759                 fw_name_dmcu = FIRMWARE_NAVI12_DMCU;
1760                 break;
1761         case CHIP_RAVEN:
1762                 if (ASICREV_IS_PICASSO(adev->external_rev_id))
1763                         fw_name_dmcu = FIRMWARE_RAVEN_DMCU;
1764                 else if (ASICREV_IS_RAVEN2(adev->external_rev_id))
1765                         fw_name_dmcu = FIRMWARE_RAVEN_DMCU;
1766                 else
1767                         return 0;
1768                 break;
1769         default:
1770                 DRM_ERROR("Unsupported ASIC type: 0x%X\n", adev->asic_type);
1771                 return -EINVAL;
1772         }
1773
1774         if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP) {
1775                 DRM_DEBUG_KMS("dm: DMCU firmware not supported on direct or SMU loading\n");
1776                 return 0;
1777         }
1778
1779         r = request_firmware_direct(&adev->dm.fw_dmcu, fw_name_dmcu, adev->dev);
1780         if (r == -ENOENT) {
1781                 /* DMCU firmware is not necessary, so don't raise a fuss if it's missing */
1782                 DRM_DEBUG_KMS("dm: DMCU firmware not found\n");
1783                 adev->dm.fw_dmcu = NULL;
1784                 return 0;
1785         }
1786         if (r) {
1787                 dev_err(adev->dev, "amdgpu_dm: Can't load firmware \"%s\"\n",
1788                         fw_name_dmcu);
1789                 return r;
1790         }
1791
1792         r = amdgpu_ucode_validate(adev->dm.fw_dmcu);
1793         if (r) {
1794                 dev_err(adev->dev, "amdgpu_dm: Can't validate firmware \"%s\"\n",
1795                         fw_name_dmcu);
1796                 release_firmware(adev->dm.fw_dmcu);
1797                 adev->dm.fw_dmcu = NULL;
1798                 return r;
1799         }
1800
1801         hdr = (const struct dmcu_firmware_header_v1_0 *)adev->dm.fw_dmcu->data;
1802         adev->firmware.ucode[AMDGPU_UCODE_ID_DMCU_ERAM].ucode_id = AMDGPU_UCODE_ID_DMCU_ERAM;
1803         adev->firmware.ucode[AMDGPU_UCODE_ID_DMCU_ERAM].fw = adev->dm.fw_dmcu;
1804         adev->firmware.fw_size +=
1805                 ALIGN(le32_to_cpu(hdr->header.ucode_size_bytes) - le32_to_cpu(hdr->intv_size_bytes), PAGE_SIZE);
1806
1807         adev->firmware.ucode[AMDGPU_UCODE_ID_DMCU_INTV].ucode_id = AMDGPU_UCODE_ID_DMCU_INTV;
1808         adev->firmware.ucode[AMDGPU_UCODE_ID_DMCU_INTV].fw = adev->dm.fw_dmcu;
1809         adev->firmware.fw_size +=
1810                 ALIGN(le32_to_cpu(hdr->intv_size_bytes), PAGE_SIZE);
1811
1812         adev->dm.dmcu_fw_version = le32_to_cpu(hdr->header.ucode_version);
1813
1814         DRM_DEBUG_KMS("PSP loading DMCU firmware\n");
1815
1816         return 0;
1817 }
1818
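     /*
      * Register accessors handed to the DMUB service; both simply forward
      * to the DC context's dm_read_reg()/dm_write_reg() helpers.
      */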
1819 static uint32_t amdgpu_dm_dmub_reg_read(void *ctx, uint32_t address)
1820 {
1821         struct amdgpu_device *adev = ctx;
1822
1823         return dm_read_reg(adev->dm.dc->ctx, address);
1824 }
1825
1826 static void amdgpu_dm_dmub_reg_write(void *ctx, uint32_t address,
1827                                      uint32_t value)
1828 {
1829         struct amdgpu_device *adev = ctx;
1830
1831         return dm_write_reg(adev->dm.dc->ctx, address, value);
1832 }
1833
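     /*
      * Software-side DMUB setup: select the firmware and ASIC enum for this
      * chip, validate the firmware image, create the DMUB service, size its
      * memory regions and back them with a VRAM allocation.
      */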
1834 static int dm_dmub_sw_init(struct amdgpu_device *adev)
1835 {
1836         struct dmub_srv_create_params create_params;
1837         struct dmub_srv_region_params region_params;
1838         struct dmub_srv_region_info region_info;
1839         struct dmub_srv_fb_params fb_params;
1840         struct dmub_srv_fb_info *fb_info;
1841         struct dmub_srv *dmub_srv;
1842         const struct dmcub_firmware_header_v1_0 *hdr;
1843         const char *fw_name_dmub;
1844         enum dmub_asic dmub_asic;
1845         enum dmub_status status;
1846         int r;
1847
1848         switch (adev->asic_type) {
1849         case CHIP_RENOIR:
1850                 dmub_asic = DMUB_ASIC_DCN21;
1851                 fw_name_dmub = FIRMWARE_RENOIR_DMUB;
1852                 if (ASICREV_IS_GREEN_SARDINE(adev->external_rev_id))
1853                         fw_name_dmub = FIRMWARE_GREEN_SARDINE_DMUB;
1854                 break;
1855         case CHIP_SIENNA_CICHLID:
1856                 dmub_asic = DMUB_ASIC_DCN30;
1857                 fw_name_dmub = FIRMWARE_SIENNA_CICHLID_DMUB;
1858                 break;
1859         case CHIP_NAVY_FLOUNDER:
1860                 dmub_asic = DMUB_ASIC_DCN30;
1861                 fw_name_dmub = FIRMWARE_NAVY_FLOUNDER_DMUB;
1862                 break;
1863         case CHIP_VANGOGH:
1864                 dmub_asic = DMUB_ASIC_DCN301;
1865                 fw_name_dmub = FIRMWARE_VANGOGH_DMUB;
1866                 break;
1867         case CHIP_DIMGREY_CAVEFISH:
1868                 dmub_asic = DMUB_ASIC_DCN302;
1869                 fw_name_dmub = FIRMWARE_DIMGREY_CAVEFISH_DMUB;
1870                 break;
1871         case CHIP_BEIGE_GOBY:
1872                 dmub_asic = DMUB_ASIC_DCN303;
1873                 fw_name_dmub = FIRMWARE_BEIGE_GOBY_DMUB;
1874                 break;
1875         case CHIP_YELLOW_CARP:
1876                 dmub_asic = DMUB_ASIC_DCN31;
1877                 fw_name_dmub = FIRMWARE_YELLOW_CARP_DMUB;
1878                 break;
1879
1880         default:
1881                 /* ASIC doesn't support DMUB. */
1882                 return 0;
1883         }
1884
1885         r = request_firmware_direct(&adev->dm.dmub_fw, fw_name_dmub, adev->dev);
1886         if (r) {
1887                 DRM_ERROR("DMUB firmware loading failed: %d\n", r);
1888                 return 0;
1889         }
1890
1891         r = amdgpu_ucode_validate(adev->dm.dmub_fw);
1892         if (r) {
1893                 DRM_ERROR("Couldn't validate DMUB firmware: %d\n", r);
1894                 return 0;
1895         }
1896
1897         hdr = (const struct dmcub_firmware_header_v1_0 *)adev->dm.dmub_fw->data;
1898         adev->dm.dmcub_fw_version = le32_to_cpu(hdr->header.ucode_version);
1899
1900         if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) {
1901                 adev->firmware.ucode[AMDGPU_UCODE_ID_DMCUB].ucode_id =
1902                         AMDGPU_UCODE_ID_DMCUB;
1903                 adev->firmware.ucode[AMDGPU_UCODE_ID_DMCUB].fw =
1904                         adev->dm.dmub_fw;
1905                 adev->firmware.fw_size +=
1906                         ALIGN(le32_to_cpu(hdr->inst_const_bytes), PAGE_SIZE);
1907
1908                 DRM_INFO("Loading DMUB firmware via PSP: version=0x%08X\n",
1909                          adev->dm.dmcub_fw_version);
1910         }
1911
1913         adev->dm.dmub_srv = kzalloc(sizeof(*adev->dm.dmub_srv), GFP_KERNEL);
1914         dmub_srv = adev->dm.dmub_srv;
1915
1916         if (!dmub_srv) {
1917                 DRM_ERROR("Failed to allocate DMUB service!\n");
1918                 return -ENOMEM;
1919         }
1920
1921         memset(&create_params, 0, sizeof(create_params));
1922         create_params.user_ctx = adev;
1923         create_params.funcs.reg_read = amdgpu_dm_dmub_reg_read;
1924         create_params.funcs.reg_write = amdgpu_dm_dmub_reg_write;
1925         create_params.asic = dmub_asic;
1926
1927         /* Create the DMUB service. */
1928         status = dmub_srv_create(dmub_srv, &create_params);
1929         if (status != DMUB_STATUS_OK) {
1930                 DRM_ERROR("Error creating DMUB service: %d\n", status);
1931                 return -EINVAL;
1932         }
1933
1934         /* Calculate the size of all the regions for the DMUB service. */
1935         memset(&region_params, 0, sizeof(region_params));
1936
1937         region_params.inst_const_size = le32_to_cpu(hdr->inst_const_bytes) -
1938                                         PSP_HEADER_BYTES - PSP_FOOTER_BYTES;
1939         region_params.bss_data_size = le32_to_cpu(hdr->bss_data_bytes);
1940         region_params.vbios_size = adev->bios_size;
1941         region_params.fw_bss_data = region_params.bss_data_size ?
1942                 adev->dm.dmub_fw->data +
1943                 le32_to_cpu(hdr->header.ucode_array_offset_bytes) +
1944                 le32_to_cpu(hdr->inst_const_bytes) : NULL;
1945         region_params.fw_inst_const =
1946                 adev->dm.dmub_fw->data +
1947                 le32_to_cpu(hdr->header.ucode_array_offset_bytes) +
1948                 PSP_HEADER_BYTES;
1949
1950         status = dmub_srv_calc_region_info(dmub_srv, &region_params,
1951                                            &region_info);
1952
1953         if (status != DMUB_STATUS_OK) {
1954                 DRM_ERROR("Error calculating DMUB region info: %d\n", status);
1955                 return -EINVAL;
1956         }
1957
1958         /*
1959          * Allocate a framebuffer based on the total size of all the regions.
1960          * TODO: Move this into GART.
1961          */
1962         r = amdgpu_bo_create_kernel(adev, region_info.fb_size, PAGE_SIZE,
1963                                     AMDGPU_GEM_DOMAIN_VRAM, &adev->dm.dmub_bo,
1964                                     &adev->dm.dmub_bo_gpu_addr,
1965                                     &adev->dm.dmub_bo_cpu_addr);
1966         if (r)
1967                 return r;
1968
1969         /* Rebase the regions on the framebuffer address. */
1970         memset(&fb_params, 0, sizeof(fb_params));
1971         fb_params.cpu_addr = adev->dm.dmub_bo_cpu_addr;
1972         fb_params.gpu_addr = adev->dm.dmub_bo_gpu_addr;
1973         fb_params.region_info = &region_info;
1974
1975         adev->dm.dmub_fb_info =
1976                 kzalloc(sizeof(*adev->dm.dmub_fb_info), GFP_KERNEL);
1977         fb_info = adev->dm.dmub_fb_info;
1978
1979         if (!fb_info) {
1980                 DRM_ERROR(
1981                         "Failed to allocate framebuffer info for DMUB service!\n");
1982                 return -ENOMEM;
1983         }
1984
1985         status = dmub_srv_calc_fb_info(dmub_srv, &fb_params, fb_info);
1986         if (status != DMUB_STATUS_OK) {
1987                 DRM_ERROR("Error calculating DMUB FB info: %d\n", status);
1988                 return -EINVAL;
1989         }
1990
1991         return 0;
1992 }
1993
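     /* IP-block software init: set up DMUB first, then fetch DMCU firmware. */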
1994 static int dm_sw_init(void *handle)
1995 {
1996         struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1997         int r;
1998
1999         r = dm_dmub_sw_init(adev);
2000         if (r)
2001                 return r;
2002
2003         return load_dmcu_fw(adev);
2004 }
2005
2006 static int dm_sw_fini(void *handle)
2007 {
2008         struct amdgpu_device *adev = (struct amdgpu_device *)handle;
2009
2010         kfree(adev->dm.dmub_fb_info);
2011         adev->dm.dmub_fb_info = NULL;
2012
2013         if (adev->dm.dmub_srv) {
2014                 dmub_srv_destroy(adev->dm.dmub_srv);
2015                 adev->dm.dmub_srv = NULL;
2016         }
2017
2018         release_firmware(adev->dm.dmub_fw);
2019         adev->dm.dmub_fw = NULL;
2020
2021         release_firmware(adev->dm.fw_dmcu);
2022         adev->dm.fw_dmcu = NULL;
2023
2024         return 0;
2025 }
2026
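     /*
      * Walk all connectors and start topology management on every MST
      * branch link that has an AUX channel; a link on which MST cannot be
      * started is demoted to dc_connection_single.
      */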
2027 static int detect_mst_link_for_all_connectors(struct drm_device *dev)
2028 {
2029         struct amdgpu_dm_connector *aconnector;
2030         struct drm_connector *connector;
2031         struct drm_connector_list_iter iter;
2032         int ret = 0;
2033
2034         drm_connector_list_iter_begin(dev, &iter);
2035         drm_for_each_connector_iter(connector, &iter) {
2036                 aconnector = to_amdgpu_dm_connector(connector);
2037                 if (aconnector->dc_link->type == dc_connection_mst_branch &&
2038                     aconnector->mst_mgr.aux) {
2039                         DRM_DEBUG_DRIVER("DM_MST: starting TM on aconnector: %p [id: %d]\n",
2040                                          aconnector,
2041                                          aconnector->base.base.id);
2042
2043                         ret = drm_dp_mst_topology_mgr_set_mst(&aconnector->mst_mgr, true);
2044                         if (ret < 0) {
2045                                 DRM_ERROR("DM_MST: Failed to start MST\n");
2046                                 aconnector->dc_link->type =
2047                                         dc_connection_single;
2048                                 break;
2049                         }
2050                 }
2051         }
2052         drm_connector_list_iter_end(&iter);
2053
2054         return ret;
2055 }
2056
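     /*
      * Late init programs the ABM (backlight reduction) parameters: a
      * 16-point linear backlight LUT spanning 0x0000..0xFFFF in equal
      * steps, plus ramping defaults, loaded either into DMCU IRAM or, when
      * ABM runs on DMCUB, into the per-eDP ABM configuration.
      */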
2057 static int dm_late_init(void *handle)
2058 {
2059         struct amdgpu_device *adev = (struct amdgpu_device *)handle;
2060
2061         struct dmcu_iram_parameters params;
2062         unsigned int linear_lut[16];
2063         int i;
2064         struct dmcu *dmcu = NULL;
2065
2066         dmcu = adev->dm.dc->res_pool->dmcu;
2067
2068         for (i = 0; i < 16; i++)
2069                 linear_lut[i] = 0xFFFF * i / 15;
2070
2071         params.set = 0;
2072         params.backlight_ramping_override = false;
2073         params.backlight_ramping_start = 0xCCCC;
2074         params.backlight_ramping_reduction = 0xCCCCCCCC;
2075         params.backlight_lut_array_size = 16;
2076         params.backlight_lut_array = linear_lut;
2077
2078         /* Min backlight level after ABM reduction; don't allow below 1%:
2079          * 0xFFFF x 0.01 = 0x28F
2080          */
2081         params.min_abm_backlight = 0x28F;
2082         /* In the case where ABM is implemented on dmcub,
2083          * the dmcu object will be null.
2084          * ABM 2.4 and up are implemented on dmcub.
2085          */
2086         if (dmcu) {
2087                 if (!dmcu_load_iram(dmcu, params))
2088                         return -EINVAL;
2089         } else if (adev->dm.dc->ctx->dmub_srv) {
2090                 struct dc_link *edp_links[MAX_NUM_EDP];
2091                 int edp_num;
2092
2093                 get_edp_links(adev->dm.dc, edp_links, &edp_num);
2094                 for (i = 0; i < edp_num; i++) {
2095                         if (!dmub_init_abm_config(adev->dm.dc->res_pool, params, i))
2096                                 return -EINVAL;
2097                 }
2098         }
2099
2100         return detect_mst_link_for_all_connectors(adev_to_drm(adev));
2101 }
2102
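     /*
      * Suspend or resume the MST topology managers around S3. If a manager
      * fails to resume, MST is torn down on that connector and a hotplug
      * event is generated so userspace can re-probe.
      */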
2103 static void s3_handle_mst(struct drm_device *dev, bool suspend)
2104 {
2105         struct amdgpu_dm_connector *aconnector;
2106         struct drm_connector *connector;
2107         struct drm_connector_list_iter iter;
2108         struct drm_dp_mst_topology_mgr *mgr;
2109         int ret;
2110         bool need_hotplug = false;
2111
2112         drm_connector_list_iter_begin(dev, &iter);
2113         drm_for_each_connector_iter(connector, &iter) {
2114                 aconnector = to_amdgpu_dm_connector(connector);
2115                 if (aconnector->dc_link->type != dc_connection_mst_branch ||
2116                     aconnector->mst_port)
2117                         continue;
2118
2119                 mgr = &aconnector->mst_mgr;
2120
2121                 if (suspend) {
2122                         drm_dp_mst_topology_mgr_suspend(mgr);
2123                 } else {
2124                         ret = drm_dp_mst_topology_mgr_resume(mgr, true);
2125                         if (ret < 0) {
2126                                 drm_dp_mst_topology_mgr_set_mst(mgr, false);
2127                                 need_hotplug = true;
2128                         }
2129                 }
2130         }
2131         drm_connector_list_iter_end(&iter);
2132
2133         if (need_hotplug)
2134                 drm_kms_helper_hotplug_event(dev);
2135 }
2136
2137 static int amdgpu_dm_smu_write_watermarks_table(struct amdgpu_device *adev)
2138 {
2139         struct smu_context *smu = &adev->smu;
2140         int ret = 0;
2141
2142         if (!is_support_sw_smu(adev))
2143                 return 0;
2144
2145         /* This interface is for dGPU Navi1x. The Linux dc-pplib interface
2146          * depends on the Windows driver dc implementation.
2147          * For Navi1x, clock settings of dcn watermarks are fixed; the settings
2148          * should be passed to smu during boot up and on resume from s3.
2149          * boot up: dc calculates the dcn watermark clock settings within
2150          * dc_create and dcn20_resource_construct,
2151          * then calls the pplib functions below to pass the settings to smu:
2152          * smu_set_watermarks_for_clock_ranges
2153          * smu_set_watermarks_table
2154          * navi10_set_watermarks_table
2155          * smu_write_watermarks_table
2156          *
2157          * For Renoir, clock settings of dcn watermarks are also fixed values.
2158          * dc has implemented a different flow for the Windows driver:
2159          * dc_hardware_init / dc_set_power_state
2160          * dcn10_init_hw
2161          * notify_wm_ranges
2162          * set_wm_ranges
2163          * -- Linux
2164          * smu_set_watermarks_for_clock_ranges
2165          * renoir_set_watermarks_table
2166          * smu_write_watermarks_table
2167          *
2168          * For Linux,
2169          * dc_hardware_init -> amdgpu_dm_init
2170          * dc_set_power_state --> dm_resume
2171          *
2172          * Therefore, this function applies to navi10/12/14 but not to Renoir.
2173          *
2174          */
2175         switch (adev->asic_type) {
2176         case CHIP_NAVI10:
2177         case CHIP_NAVI14:
2178         case CHIP_NAVI12:
2179                 break;
2180         default:
2181                 return 0;
2182         }
2183
2184         ret = smu_write_watermarks_table(smu);
2185         if (ret) {
2186                 DRM_ERROR("Failed to update WMTABLE!\n");
2187                 return ret;
2188         }
2189
2190         return 0;
2191 }
2192
2193 /**
2194  * dm_hw_init() - Initialize DC device
2195  * @handle: The base driver device containing the amdgpu_dm device.
2196  *
2197  * Initialize the &struct amdgpu_display_manager device. This involves calling
2198  * the initializers of each DM component, then populating the struct with them.
2199  *
2200  * Although the function implies hardware initialization, both hardware and
2201  * software are initialized here. Splitting them out to their relevant init
2202  * hooks is a future TODO item.
2203  *
2204  * Some notable things that are initialized here:
2205  *
2206  * - Display Core, both software and hardware
2207  * - DC modules that we need (freesync and color management)
2208  * - DRM software states
2209  * - Interrupt sources and handlers
2210  * - Vblank support
2211  * - Debug FS entries, if enabled
2212  */
2213 static int dm_hw_init(void *handle)
2214 {
2215         struct amdgpu_device *adev = (struct amdgpu_device *)handle;
2216         /* Create DAL display manager */
2217         amdgpu_dm_init(adev);
2218         amdgpu_dm_hpd_init(adev);
2219
2220         return 0;
2221 }
2222
2223 /**
2224  * dm_hw_fini() - Teardown DC device
2225  * @handle: The base driver device containing the amdgpu_dm device.
2226  *
2227  * Teardown components within &struct amdgpu_display_manager that require
2228  * cleanup. This involves cleaning up the DRM device, DC, and any modules that
2229  * were loaded. Also flush IRQ workqueues and disable them.
2230  */
2231 static int dm_hw_fini(void *handle)
2232 {
2233         struct amdgpu_device *adev = (struct amdgpu_device *)handle;
2234
2235         amdgpu_dm_hpd_fini(adev);
2236
2237         amdgpu_dm_irq_fini(adev);
2238         amdgpu_dm_fini(adev);
2239         return 0;
2240 }
2241
2243 static int dm_enable_vblank(struct drm_crtc *crtc);
2244 static void dm_disable_vblank(struct drm_crtc *crtc);
2245
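     /*
      * Enable or disable the pageflip and vblank interrupts for every
      * stream that has active planes; used to quiesce interrupts across a
      * GPU reset.
      */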
2246 static void dm_gpureset_toggle_interrupts(struct amdgpu_device *adev,
2247                                  struct dc_state *state, bool enable)
2248 {
2249         enum dc_irq_source irq_source;
2250         struct amdgpu_crtc *acrtc;
2251         int rc = -EBUSY;
2252         int i = 0;
2253
2254         for (i = 0; i < state->stream_count; i++) {
2255                 acrtc = get_crtc_by_otg_inst(
2256                                 adev, state->stream_status[i].primary_otg_inst);
2257
2258                 if (acrtc && state->stream_status[i].plane_count != 0) {
2259                         irq_source = IRQ_TYPE_PFLIP + acrtc->otg_inst;
2260                         rc = dc_interrupt_set(adev->dm.dc, irq_source, enable) ? 0 : -EBUSY;
2261                         DRM_DEBUG_VBL("crtc %d - pflip irq %sabling: r=%d\n",
2262                                       acrtc->crtc_id, enable ? "en" : "dis", rc);
2263                         if (rc)
2264                                 DRM_WARN("Failed to %s pflip interrupts\n",
2265                                          enable ? "enable" : "disable");
2266
2267                         if (enable) {
2268                                 rc = dm_enable_vblank(&acrtc->base);
2269                                 if (rc)
2270                                         DRM_WARN("Failed to enable vblank interrupts\n");
2271                         } else {
2272                                 dm_disable_vblank(&acrtc->base);
2273                         }
2274
2275                 }
2276         }
2277
2278 }
2279
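     /*
      * Commit an empty configuration: copy the current state, strip all
      * planes and streams from the copy, revalidate it and commit the
      * resulting zero-stream state. Used while suspending for a GPU reset.
      */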
2280 static enum dc_status amdgpu_dm_commit_zero_streams(struct dc *dc)
2281 {
2282         struct dc_state *context = NULL;
2283         enum dc_status res = DC_ERROR_UNEXPECTED;
2284         int i;
2285         struct dc_stream_state *del_streams[MAX_PIPES];
2286         int del_streams_count = 0;
2287
2288         memset(del_streams, 0, sizeof(del_streams));
2289
2290         context = dc_create_state(dc);
2291         if (context == NULL)
2292                 goto context_alloc_fail;
2293
2294         dc_resource_state_copy_construct_current(dc, context);
2295
2296         /* First remove from context all streams */
2297         for (i = 0; i < context->stream_count; i++) {
2298                 struct dc_stream_state *stream = context->streams[i];
2299
2300                 del_streams[del_streams_count++] = stream;
2301         }
2302
2303         /* Remove all planes for removed streams and then remove the streams */
2304         for (i = 0; i < del_streams_count; i++) {
2305                 if (!dc_rem_all_planes_for_stream(dc, del_streams[i], context)) {
2306                         res = DC_FAIL_DETACH_SURFACES;
2307                         goto fail;
2308                 }
2309
2310                 res = dc_remove_stream_from_ctx(dc, context, del_streams[i]);
2311                 if (res != DC_OK)
2312                         goto fail;
2313         }
2314
2315
2316
2318         if (res != DC_OK) {
2319                 DRM_ERROR("%s: resource validation failed, dc_status: %d\n", __func__, res);
2320                 goto fail;
2321         }
2322
2323         res = dc_commit_state(dc, context);
2324
2325 fail:
2326         dc_release_state(context);
2327
2328 context_alloc_fail:
2329         return res;
2330 }
2331
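     /* Drain any HPD RX offload work still pending before suspending. */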
2332 static void hpd_rx_irq_work_suspend(struct amdgpu_display_manager *dm)
2333 {
2334         int i;
2335
2336         if (dm->hpd_rx_offload_wq) {
2337                 for (i = 0; i < dm->dc->caps.max_links; i++)
2338                         flush_workqueue(dm->hpd_rx_offload_wq[i].wq);
2339         }
2340 }
2341
2342 static int dm_suspend(void *handle)
2343 {
2344         struct amdgpu_device *adev = handle;
2345         struct amdgpu_display_manager *dm = &adev->dm;
2346         int ret = 0;
2347
2348         if (amdgpu_in_reset(adev)) {
2349                 mutex_lock(&dm->dc_lock);
2350
2351 #if defined(CONFIG_DRM_AMD_DC_DCN)
2352                 dc_allow_idle_optimizations(adev->dm.dc, false);
2353 #endif
2354
2355                 dm->cached_dc_state = dc_copy_state(dm->dc->current_state);
2356
2357                 dm_gpureset_toggle_interrupts(adev, dm->cached_dc_state, false);
2358
2359                 amdgpu_dm_commit_zero_streams(dm->dc);
2360
2361                 amdgpu_dm_irq_suspend(adev);
2362
2363                 hpd_rx_irq_work_suspend(dm);
2364
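                     /*
                      * Note: dm->dc_lock is intentionally left held here; the
                      * GPU-reset path in dm_resume() releases it once the
                      * cached DC state has been restored.
                      */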
2365                 return ret;
2366         }
2367
2368         WARN_ON(adev->dm.cached_state);
2369         adev->dm.cached_state = drm_atomic_helper_suspend(adev_to_drm(adev));
2370
2371         s3_handle_mst(adev_to_drm(adev), true);
2372
2373         amdgpu_dm_irq_suspend(adev);
2374
2375         hpd_rx_irq_work_suspend(dm);
2376
2377         dc_set_power_state(dm->dc, DC_ACPI_CM_POWER_STATE_D3);
2378
2379         return 0;
2380 }
2381
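     /* Return the first connector in @state whose new state uses @crtc. */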
2382 static struct amdgpu_dm_connector *
2383 amdgpu_dm_find_first_crtc_matching_connector(struct drm_atomic_state *state,
2384                                              struct drm_crtc *crtc)
2385 {
2386         uint32_t i;
2387         struct drm_connector_state *new_con_state;
2388         struct drm_connector *connector;
2389         struct drm_crtc *crtc_from_state;
2390
2391         for_each_new_connector_in_state(state, connector, new_con_state, i) {
2392                 crtc_from_state = new_con_state->crtc;
2393
2394                 if (crtc_from_state == crtc)
2395                         return to_amdgpu_dm_connector(connector);
2396         }
2397
2398         return NULL;
2399 }
2400
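     /*
      * Fake a link detection for forced connectors: drop the previous sink,
      * derive sink capabilities from the connector signal type (DisplayPort
      * is reported as a virtual signal here), create a new sink and attempt
      * to read its EDID.
      */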
2401 static void emulated_link_detect(struct dc_link *link)
2402 {
2403         struct dc_sink_init_data sink_init_data = { 0 };
2404         struct display_sink_capability sink_caps = { 0 };
2405         enum dc_edid_status edid_status;
2406         struct dc_context *dc_ctx = link->ctx;
2407         struct dc_sink *sink = NULL;
2408         struct dc_sink *prev_sink = NULL;
2409
2410         link->type = dc_connection_none;
2411         prev_sink = link->local_sink;
2412
2413         if (prev_sink)
2414                 dc_sink_release(prev_sink);
2415
2416         switch (link->connector_signal) {
2417         case SIGNAL_TYPE_HDMI_TYPE_A: {
2418                 sink_caps.transaction_type = DDC_TRANSACTION_TYPE_I2C;
2419                 sink_caps.signal = SIGNAL_TYPE_HDMI_TYPE_A;
2420                 break;
2421         }
2422
2423         case SIGNAL_TYPE_DVI_SINGLE_LINK: {
2424                 sink_caps.transaction_type = DDC_TRANSACTION_TYPE_I2C;
2425                 sink_caps.signal = SIGNAL_TYPE_DVI_SINGLE_LINK;
2426                 break;
2427         }
2428
2429         case SIGNAL_TYPE_DVI_DUAL_LINK: {
2430                 sink_caps.transaction_type = DDC_TRANSACTION_TYPE_I2C;
2431                 sink_caps.signal = SIGNAL_TYPE_DVI_DUAL_LINK;
2432                 break;
2433         }
2434
2435         case SIGNAL_TYPE_LVDS: {
2436                 sink_caps.transaction_type = DDC_TRANSACTION_TYPE_I2C;
2437                 sink_caps.signal = SIGNAL_TYPE_LVDS;
2438                 break;
2439         }
2440
2441         case SIGNAL_TYPE_EDP: {
2442                 sink_caps.transaction_type =
2443                         DDC_TRANSACTION_TYPE_I2C_OVER_AUX;
2444                 sink_caps.signal = SIGNAL_TYPE_EDP;
2445                 break;
2446         }
2447
2448         case SIGNAL_TYPE_DISPLAY_PORT: {
2449                 sink_caps.transaction_type =
2450                         DDC_TRANSACTION_TYPE_I2C_OVER_AUX;
2451                 sink_caps.signal = SIGNAL_TYPE_VIRTUAL;
2452                 break;
2453         }
2454
2455         default:
2456                 DC_ERROR("Invalid connector type! signal:%d\n",
2457                         link->connector_signal);
2458                 return;
2459         }
2460
2461         sink_init_data.link = link;
2462         sink_init_data.sink_signal = sink_caps.signal;
2463
2464         sink = dc_sink_create(&sink_init_data);
2465         if (!sink) {
2466                 DC_ERROR("Failed to create sink!\n");
2467                 return;
2468         }
2469
2470         /* dc_sink_create returns a new reference */
2471         link->local_sink = sink;
2472
2473         edid_status = dm_helpers_read_local_edid(
2474                         link->ctx,
2475                         link,
2476                         sink);
2477
2478         if (edid_status != EDID_OK)
2479                 DC_ERROR("Failed to read EDID\n");
2480
2481 }
2482
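     /*
      * After a GPU reset, resubmit every cached stream with all of its
      * planes flagged for a full update so the hardware state is rebuilt.
      */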
2483 static void dm_gpureset_commit_state(struct dc_state *dc_state,
2484                                      struct amdgpu_display_manager *dm)
2485 {
2486         struct {
2487                 struct dc_surface_update surface_updates[MAX_SURFACES];
2488                 struct dc_plane_info plane_infos[MAX_SURFACES];
2489                 struct dc_scaling_info scaling_infos[MAX_SURFACES];
2490                 struct dc_flip_addrs flip_addrs[MAX_SURFACES];
2491                 struct dc_stream_update stream_update;
2492         } *bundle;
2493         int k, m;
2494
2495         bundle = kzalloc(sizeof(*bundle), GFP_KERNEL);
2496
2497         if (!bundle) {
2498                 dm_error("Failed to allocate update bundle\n");
2499                 goto cleanup;
2500         }
2501
2502         for (k = 0; k < dc_state->stream_count; k++) {
2503                 bundle->stream_update.stream = dc_state->streams[k];
2504
2505                 for (m = 0; m < dc_state->stream_status[k].plane_count; m++) {
2506                         bundle->surface_updates[m].surface =
2507                                 dc_state->stream_status[k].plane_states[m];
2508                         bundle->surface_updates[m].surface->force_full_update =
2509                                 true;
2510                 }
2511                 dc_commit_updates_for_stream(
2512                         dm->dc, bundle->surface_updates,
2513                         dc_state->stream_status[k].plane_count,
2514                         dc_state->streams[k], &bundle->stream_update, dc_state);
2515         }
2516
2517 cleanup:
2518         kfree(bundle);
2519
2520         return;
2521 }
2522
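     /* Commit a dpms_off update for the stream currently active on @link. */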
2523 static void dm_set_dpms_off(struct dc_link *link)
2524 {
2525         struct dc_stream_state *stream_state;
2526         struct amdgpu_dm_connector *aconnector = link->priv;
2527         struct amdgpu_device *adev = drm_to_adev(aconnector->base.dev);
2528         struct dc_stream_update stream_update;
2529         bool dpms_off = true;
2530
2531         memset(&stream_update, 0, sizeof(stream_update));
2532         stream_update.dpms_off = &dpms_off;
2533
2534         mutex_lock(&adev->dm.dc_lock);
2535         stream_state = dc_stream_find_from_link(link);
2536
2537         if (stream_state == NULL) {
2538                 DRM_DEBUG_DRIVER("Error finding stream state associated with link!\n");
2539                 mutex_unlock(&adev->dm.dc_lock);
2540                 return;
2541         }
2542
2543         stream_update.stream = stream_state;
2544         dc_commit_updates_for_stream(stream_state->ctx->dc, NULL, 0,
2545                                      stream_state, &stream_update,
2546                                      stream_state->ctx->dc->current_state);
2547         mutex_unlock(&adev->dm.dc_lock);
2548 }
2549
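     /*
      * Resume takes one of two paths: after a GPU reset, replay the DC
      * state cached by dm_suspend(); on a normal S3 resume, rebuild the
      * dc_state, re-initialize DMUB, re-detect every connector and then
      * replay the cached atomic state.
      */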
2550 static int dm_resume(void *handle)
2551 {
2552         struct amdgpu_device *adev = handle;
2553         struct drm_device *ddev = adev_to_drm(adev);
2554         struct amdgpu_display_manager *dm = &adev->dm;
2555         struct amdgpu_dm_connector *aconnector;
2556         struct drm_connector *connector;
2557         struct drm_connector_list_iter iter;
2558         struct drm_crtc *crtc;
2559         struct drm_crtc_state *new_crtc_state;
2560         struct dm_crtc_state *dm_new_crtc_state;
2561         struct drm_plane *plane;
2562         struct drm_plane_state *new_plane_state;
2563         struct dm_plane_state *dm_new_plane_state;
2564         struct dm_atomic_state *dm_state = to_dm_atomic_state(dm->atomic_obj.state);
2565         enum dc_connection_type new_connection_type = dc_connection_none;
2566         struct dc_state *dc_state;
2567         int i, r, j;
2568
2569         if (amdgpu_in_reset(adev)) {
2570                 dc_state = dm->cached_dc_state;
2571
2572                 if (dc_enable_dmub_notifications(adev->dm.dc))
2573                         amdgpu_dm_outbox_init(adev);
2574
2575                 r = dm_dmub_hw_init(adev);
2576                 if (r)
2577                         DRM_ERROR("DMUB interface failed to initialize: status=%d\n", r);
2578
2579                 dc_set_power_state(dm->dc, DC_ACPI_CM_POWER_STATE_D0);
2580                 dc_resume(dm->dc);
2581
2582                 amdgpu_dm_irq_resume_early(adev);
2583
2584                 for (i = 0; i < dc_state->stream_count; i++) {
2585                         dc_state->streams[i]->mode_changed = true;
2586                         for (j = 0; j < dc_state->stream_status[i].plane_count; j++) {
2587                                 dc_state->stream_status[i].plane_states[j]->update_flags.raw
2588                                         = 0xffffffff;
2589                         }
2590                 }
2591 #if defined(CONFIG_DRM_AMD_DC_DCN)
2592                 /*
2593                  * Resource allocation happens for link encoders for newer ASICs in
2594                  * dc_validate_global_state, so we need to revalidate it.
2595                  *
2596                  * This shouldn't fail (it passed once before), so warn if it does.
2597                  */
2598                 WARN_ON(dc_validate_global_state(dm->dc, dc_state, false) != DC_OK);
2599 #endif
2600
2601                 WARN_ON(!dc_commit_state(dm->dc, dc_state));
2602
2603                 dm_gpureset_commit_state(dm->cached_dc_state, dm);
2604
2605                 dm_gpureset_toggle_interrupts(adev, dm->cached_dc_state, true);
2606
2607                 dc_release_state(dm->cached_dc_state);
2608                 dm->cached_dc_state = NULL;
2609
2610                 amdgpu_dm_irq_resume_late(adev);
2611
2612                 mutex_unlock(&dm->dc_lock);
2613
2614                 return 0;
2615         }
2616         /* Recreate dc_state - DC invalidates it when setting power state to S3. */
2617         dc_release_state(dm_state->context);
2618         dm_state->context = dc_create_state(dm->dc);
2619         /* TODO: Remove dc_state->dccg, use dc->dccg directly. */
2620         dc_resource_state_construct(dm->dc, dm_state->context);
2621
2622         /* Re-enable outbox interrupts for DPIA. */
2623         if (dc_enable_dmub_notifications(adev->dm.dc))
2624                 amdgpu_dm_outbox_init(adev);
2625
2626         /* Before powering on DC we need to re-initialize DMUB. */
2627         dm_dmub_hw_resume(adev);
2628
2629         /* power on hardware */
2630         dc_set_power_state(dm->dc, DC_ACPI_CM_POWER_STATE_D0);
2631
2632         /* program HPD filter */
2633         dc_resume(dm->dc);
2634
2635         /*
2636          * early enable HPD Rx IRQ, should be done before set mode as short
2637          * pulse interrupts are used for MST
2638          */
2639         amdgpu_dm_irq_resume_early(adev);
2640
2641         /* On resume we need to rewrite the MSTM control bits to enable MST */
2642         s3_handle_mst(ddev, false);
2643
2644         /* Do detection */
2645         drm_connector_list_iter_begin(ddev, &iter);
2646         drm_for_each_connector_iter(connector, &iter) {
2647                 aconnector = to_amdgpu_dm_connector(connector);
2648
2649                 /*
2650                  * This is the case when traversing through already created
2651                  * MST connectors; they should be skipped.
2652                  */
2653                 if (aconnector->dc_link &&
2654                     aconnector->dc_link->type == dc_connection_mst_branch)
2655                         continue;
2656
2657                 mutex_lock(&aconnector->hpd_lock);
2658                 if (!dc_link_detect_sink(aconnector->dc_link, &new_connection_type))
2659                         DRM_ERROR("KMS: Failed to detect connector\n");
2660
2661                 if (aconnector->base.force && new_connection_type == dc_connection_none)
2662                         emulated_link_detect(aconnector->dc_link);
2663                 else
2664                         dc_link_detect(aconnector->dc_link, DETECT_REASON_HPD);
2665
2666                 if (aconnector->fake_enable && aconnector->dc_link->local_sink)
2667                         aconnector->fake_enable = false;
2668
2669                 if (aconnector->dc_sink)
2670                         dc_sink_release(aconnector->dc_sink);
2671                 aconnector->dc_sink = NULL;
2672                 amdgpu_dm_update_connector_after_detect(aconnector);
2673                 mutex_unlock(&aconnector->hpd_lock);
2674         }
2675         drm_connector_list_iter_end(&iter);
2676
2677         /* Force mode set in atomic commit */
2678         for_each_new_crtc_in_state(dm->cached_state, crtc, new_crtc_state, i)
2679                 new_crtc_state->active_changed = true;
2680
2681         /*
2682          * atomic_check is expected to create the dc states. We need to release
2683          * them here, since they were duplicated as part of the suspend
2684          * procedure.
2685          */
2686         for_each_new_crtc_in_state(dm->cached_state, crtc, new_crtc_state, i) {
2687                 dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
2688                 if (dm_new_crtc_state->stream) {
2689                         WARN_ON(kref_read(&dm_new_crtc_state->stream->refcount) > 1);
2690                         dc_stream_release(dm_new_crtc_state->stream);
2691                         dm_new_crtc_state->stream = NULL;
2692                 }
2693         }
2694
2695         for_each_new_plane_in_state(dm->cached_state, plane, new_plane_state, i) {
2696                 dm_new_plane_state = to_dm_plane_state(new_plane_state);
2697                 if (dm_new_plane_state->dc_state) {
2698                         WARN_ON(kref_read(&dm_new_plane_state->dc_state->refcount) > 1);
2699                         dc_plane_state_release(dm_new_plane_state->dc_state);
2700                         dm_new_plane_state->dc_state = NULL;
2701                 }
2702         }
2703
2704         drm_atomic_helper_resume(ddev, dm->cached_state);
2705
2706         dm->cached_state = NULL;
2707
2708         amdgpu_dm_irq_resume_late(adev);
2709
2710         amdgpu_dm_smu_write_watermarks_table(adev);
2711
2712         return 0;
2713 }
2714
2715 /**
2716  * DOC: DM Lifecycle
2717  *
2718  * DM (and consequently DC) is registered in the amdgpu base driver as a IP
2719  * block. When CONFIG_DRM_AMD_DC is enabled, the DM device IP block is added to
2720  * the base driver's device list to be initialized and torn down accordingly.
2721  *
2722  * The functions to do so are provided as hooks in &struct amd_ip_funcs.
2723  */
2724
2725 static const struct amd_ip_funcs amdgpu_dm_funcs = {
2726         .name = "dm",
2727         .early_init = dm_early_init,
2728         .late_init = dm_late_init,
2729         .sw_init = dm_sw_init,
2730         .sw_fini = dm_sw_fini,
2731         .early_fini = amdgpu_dm_early_fini,
2732         .hw_init = dm_hw_init,
2733         .hw_fini = dm_hw_fini,
2734         .suspend = dm_suspend,
2735         .resume = dm_resume,
2736         .is_idle = dm_is_idle,
2737         .wait_for_idle = dm_wait_for_idle,
2738         .check_soft_reset = dm_check_soft_reset,
2739         .soft_reset = dm_soft_reset,
2740         .set_clockgating_state = dm_set_clockgating_state,
2741         .set_powergating_state = dm_set_powergating_state,
2742 };
2743
2744 const struct amdgpu_ip_block_version dm_ip_block =
2745 {
2746         .type = AMD_IP_BLOCK_TYPE_DCE,
2747         .major = 1,
2748         .minor = 0,
2749         .rev = 0,
2750         .funcs = &amdgpu_dm_funcs,
2751 };
2752
2753
2754 /**
2755  * DOC: atomic
2756  *
2757  * *WIP*
2758  */
2759
2760 static const struct drm_mode_config_funcs amdgpu_dm_mode_funcs = {
2761         .fb_create = amdgpu_display_user_framebuffer_create,
2762         .get_format_info = amd_get_format_info,
2763         .output_poll_changed = drm_fb_helper_output_poll_changed,
2764         .atomic_check = amdgpu_dm_atomic_check,
2765         .atomic_commit = drm_atomic_helper_commit,
2766 };
2767
2768 static struct drm_mode_config_helper_funcs amdgpu_dm_mode_config_helperfuncs = {
2769         .atomic_commit_tail = amdgpu_dm_atomic_commit_tail
2770 };
2771
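     /*
      * Derive the eDP backlight capabilities (AUX vs PWM control, min/max
      * input signal) from the sink's extended DPCD caps and the CTA-861-G
      * HDR metadata, honoring the amdgpu_backlight module override.
      */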
2772 static void update_connector_ext_caps(struct amdgpu_dm_connector *aconnector)
2773 {
2774         u32 max_avg, min_cll, max, min, q, r;
2775         struct amdgpu_dm_backlight_caps *caps;
2776         struct amdgpu_display_manager *dm;
2777         struct drm_connector *conn_base;
2778         struct amdgpu_device *adev;
2779         struct dc_link *link = NULL;
2780         static const u8 pre_computed_values[] = {
2781                 50, 51, 52, 53, 55, 56, 57, 58, 59, 61, 62, 63, 65, 66, 68, 69,
2782                 71, 72, 74, 75, 77, 79, 81, 82, 84, 86, 88, 90, 92, 94, 96, 98};
2783         int i;
2784
2785         if (!aconnector || !aconnector->dc_link)
2786                 return;
2787
2788         link = aconnector->dc_link;
2789         if (link->connector_signal != SIGNAL_TYPE_EDP)
2790                 return;
2791
2792         conn_base = &aconnector->base;
2793         adev = drm_to_adev(conn_base->dev);
2794         dm = &adev->dm;
2795         for (i = 0; i < dm->num_of_edps; i++) {
2796                 if (link == dm->backlight_link[i])
2797                         break;
2798         }
2799         if (i >= dm->num_of_edps)
2800                 return;
2801         caps = &dm->backlight_caps[i];
2802         caps->ext_caps = &aconnector->dc_link->dpcd_sink_ext_caps;
2803         caps->aux_support = false;
2804         max_avg = conn_base->hdr_sink_metadata.hdmi_type1.max_fall;
2805         min_cll = conn_base->hdr_sink_metadata.hdmi_type1.min_cll;
2806
2807         if (caps->ext_caps->bits.oled == 1 /*||
2808             caps->ext_caps->bits.sdr_aux_backlight_control == 1 ||
2809             caps->ext_caps->bits.hdr_aux_backlight_control == 1*/)
2810                 caps->aux_support = true;
2811
2812         if (amdgpu_backlight == 0)
2813                 caps->aux_support = false;
2814         else if (amdgpu_backlight == 1)
2815                 caps->aux_support = true;
2816
2817         /* From the specification (CTA-861-G), for calculating the maximum
2818          * luminance we need to use:
2819          *      Luminance = 50*2**(CV/32)
2820          * where CV is a one-byte value.
2821          * Calculating this expression would need floating-point precision;
2822          * to avoid that complexity, we take advantage of the fact that CV is
2823          * divided by a constant. From Euclid's division algorithm, we know
2824          * that CV can be written as: CV = 32*q + r. Next, we replace CV in
2825          * the Luminance expression and get 50*(2**q)*(2**(r/32)), so we only
2826          * need to pre-compute the value of r/32. For pre-computing the values
2827          * we used the following Ruby line:
2828          *      (0...32).each {|cv| puts (50*2**(cv/32.0)).round}
2829          * The results of the above expression can be verified in
2830          * pre_computed_values.
2831          */
2832         q = max_avg >> 5;
2833         r = max_avg % 32;
2834         max = (1 << q) * pre_computed_values[r];
2835
2836         // min luminance: maxLum * (CV/255)^2 / 100, computed in integer math
2837         q = DIV_ROUND_CLOSEST(min_cll * min_cll, 100);
2838         min = DIV_ROUND_CLOSEST(max * q, 255 * 255);
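        /*
         * Worked example (illustrative only): a reported max_fall of 70 gives
         * q = 70 >> 5 = 2 and r = 70 % 32 = 6, so max = (1 << 2) *
         * pre_computed_values[6] = 4 * 57 = 228 nits, matching
         * 50*2**(70/32) ~= 228. With min_cll = 200, q becomes
         * DIV_ROUND_CLOSEST(200 * 200, 100) = 400 and min =
         * DIV_ROUND_CLOSEST(228 * 400, 255 * 255) = 1 nit.
         */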
2839
2840         caps->aux_max_input_signal = max;
2841         caps->aux_min_input_signal = min;
2842 }
2843
2844 void amdgpu_dm_update_connector_after_detect(
2845                 struct amdgpu_dm_connector *aconnector)
2846 {
2847         struct drm_connector *connector = &aconnector->base;
2848         struct drm_device *dev = connector->dev;
2849         struct dc_sink *sink;
2850
2851         /* MST handled by drm_mst framework */
2852         if (aconnector->mst_mgr.mst_state)
2853                 return;
2854
2855         sink = aconnector->dc_link->local_sink;
2856         if (sink)
2857                 dc_sink_retain(sink);
2858
2859         /*
2860          * EDID mgmt connectors get their first update only in the mode_valid
2861          * hook; the connector sink is then set to either a fake or a physical
2862          * sink depending on the link status. Skip if already done during boot.
2863          */
2864         if (aconnector->base.force != DRM_FORCE_UNSPECIFIED
2865                         && aconnector->dc_em_sink) {
2866
2867                 /*
2868                  * For S3 resume with a headless config, use the emulated sink
2869                  * (dc_em_sink) to fake a stream, since connector->sink is NULL on resume.
2870                  */
2871                 mutex_lock(&dev->mode_config.mutex);
2872
2873                 if (sink) {
2874                         if (aconnector->dc_sink) {
2875                                 amdgpu_dm_update_freesync_caps(connector, NULL);
2876                                 /*
2877                                  * The retain and release below bump the sink refcount:
2878                                  * the link no longer points to it after disconnect, so
2879                                  * without them the next crtc-to-connector reshuffle by
2880                                  * the UMD would trigger an unwanted dc_sink release.
2881                                  */
2882                                 dc_sink_release(aconnector->dc_sink);
2883                         }
2884                         aconnector->dc_sink = sink;
2885                         dc_sink_retain(aconnector->dc_sink);
2886                         amdgpu_dm_update_freesync_caps(connector,
2887                                         aconnector->edid);
2888                 } else {
2889                         amdgpu_dm_update_freesync_caps(connector, NULL);
2890                         if (!aconnector->dc_sink) {
2891                                 aconnector->dc_sink = aconnector->dc_em_sink;
2892                                 dc_sink_retain(aconnector->dc_sink);
2893                         }
2894                 }
2895
2896                 mutex_unlock(&dev->mode_config.mutex);
2897
2898                 if (sink)
2899                         dc_sink_release(sink);
2900                 return;
2901         }
2902
2903         /*
2904          * TODO: temporary guard while a proper fix is found;
2905          * if this sink is an MST sink, we should not do anything.
2906          */
2907         if (sink && sink->sink_signal == SIGNAL_TYPE_DISPLAY_PORT_MST) {
2908                 dc_sink_release(sink);
2909                 return;
2910         }
2911
2912         if (aconnector->dc_sink == sink) {
2913                 /*
2914                  * We got a DP short pulse (Link Loss, DP CTS, etc...).
2915                  * Do nothing!!
2916                  */
2917                 DRM_DEBUG_DRIVER("DCHPD: connector_id=%d: dc_sink didn't change.\n",
2918                                 aconnector->connector_id);
2919                 if (sink)
2920                         dc_sink_release(sink);
2921                 return;
2922         }
2923
2924         DRM_DEBUG_DRIVER("DCHPD: connector_id=%d: Old sink=%p New sink=%p\n",
2925                 aconnector->connector_id, aconnector->dc_sink, sink);
2926
2927         mutex_lock(&dev->mode_config.mutex);
2928
2929         /*
2930          * 1. Update status of the drm connector
2931          * 2. Send an event and let userspace tell us what to do
2932          */
2933         if (sink) {
2934                 /*
2935                  * TODO: check if we still need the S3 mode update workaround.
2936                  * If yes, put it here.
2937                  */
2938                 if (aconnector->dc_sink) {
2939                         amdgpu_dm_update_freesync_caps(connector, NULL);
2940                         dc_sink_release(aconnector->dc_sink);
2941                 }
2942
2943                 aconnector->dc_sink = sink;
2944                 dc_sink_retain(aconnector->dc_sink);
2945                 if (sink->dc_edid.length == 0) {
2946                         aconnector->edid = NULL;
2947                         if (aconnector->dc_link->aux_mode) {
2948                                 drm_dp_cec_unset_edid(
2949                                         &aconnector->dm_dp_aux.aux);
2950                         }
2951                 } else {
2952                         aconnector->edid =
2953                                 (struct edid *)sink->dc_edid.raw_edid;
2954
2955                         drm_connector_update_edid_property(connector,
2956                                                            aconnector->edid);
2957                         if (aconnector->dc_link->aux_mode)
2958                                 drm_dp_cec_set_edid(&aconnector->dm_dp_aux.aux,
2959                                                     aconnector->edid);
2960                 }
2961
2962                 amdgpu_dm_update_freesync_caps(connector, aconnector->edid);
2963                 update_connector_ext_caps(aconnector);
2964         } else {
2965                 drm_dp_cec_unset_edid(&aconnector->dm_dp_aux.aux);
2966                 amdgpu_dm_update_freesync_caps(connector, NULL);
2967                 drm_connector_update_edid_property(connector, NULL);
2968                 aconnector->num_modes = 0;
2969                 dc_sink_release(aconnector->dc_sink);
2970                 aconnector->dc_sink = NULL;
2971                 aconnector->edid = NULL;
2972 #ifdef CONFIG_DRM_AMD_DC_HDCP
2973                 /* Set CP to DESIRED if it was ENABLED, so we can re-enable it again on hotplug */
2974                 if (connector->state->content_protection == DRM_MODE_CONTENT_PROTECTION_ENABLED)
2975                         connector->state->content_protection = DRM_MODE_CONTENT_PROTECTION_DESIRED;
2976 #endif
2977         }
2978
2979         mutex_unlock(&dev->mode_config.mutex);
2980
2981         update_subconnector_property(aconnector);
2982
2983         if (sink)
2984                 dc_sink_release(sink);
2985 }
2986
2987 static void handle_hpd_irq_helper(struct amdgpu_dm_connector *aconnector)
2988 {
2989         struct drm_connector *connector = &aconnector->base;
2990         struct drm_device *dev = connector->dev;
2991         enum dc_connection_type new_connection_type = dc_connection_none;
2992         struct amdgpu_device *adev = drm_to_adev(dev);
2993 #ifdef CONFIG_DRM_AMD_DC_HDCP
2994         struct dm_connector_state *dm_con_state = to_dm_connector_state(connector->state);
2995 #endif
2996
2997         if (adev->dm.disable_hpd_irq)
2998                 return;
2999
3000         /*
3001          * On failure, or for MST, there is no need to update the connector
3002          * status or notify the OS, since MST handles this in its own context.
3003          */
3004         mutex_lock(&aconnector->hpd_lock);
3005
3006 #ifdef CONFIG_DRM_AMD_DC_HDCP
3007         if (adev->dm.hdcp_workqueue) {
3008                 hdcp_reset_display(adev->dm.hdcp_workqueue, aconnector->dc_link->link_index);
3009                 dm_con_state->update_hdcp = true;
3010         }
3011 #endif
3012         if (aconnector->fake_enable)
3013                 aconnector->fake_enable = false;
3014
3015         if (!dc_link_detect_sink(aconnector->dc_link, &new_connection_type))
3016                 DRM_ERROR("KMS: Failed to detect connector\n");
3017
3018         if (aconnector->base.force && new_connection_type == dc_connection_none) {
3019                 emulated_link_detect(aconnector->dc_link);
3020
3021
3022                 drm_modeset_lock_all(dev);
3023                 dm_restore_drm_connector_state(dev, connector);
3024                 drm_modeset_unlock_all(dev);
3025
3026                 if (aconnector->base.force == DRM_FORCE_UNSPECIFIED)
3027                         drm_kms_helper_hotplug_event(dev);
3028
3029         } else if (dc_link_detect(aconnector->dc_link, DETECT_REASON_HPD)) {
3030                 if (new_connection_type == dc_connection_none &&
3031                     aconnector->dc_link->type == dc_connection_none)
3032                         dm_set_dpms_off(aconnector->dc_link);
3033
3034                 amdgpu_dm_update_connector_after_detect(aconnector);
3035
3036                 drm_modeset_lock_all(dev);
3037                 dm_restore_drm_connector_state(dev, connector);
3038                 drm_modeset_unlock_all(dev);
3039
3040                 if (aconnector->base.force == DRM_FORCE_UNSPECIFIED)
3041                         drm_kms_helper_hotplug_event(dev);
3042         }
3043         mutex_unlock(&aconnector->hpd_lock);
3044
3045 }
3046
3047 static void handle_hpd_irq(void *param)
3048 {
3049         struct amdgpu_dm_connector *aconnector = (struct amdgpu_dm_connector *)param;
3050
3051         handle_hpd_irq_helper(aconnector);
3052
3053 }
3054
3055 static void dm_handle_mst_sideband_msg(struct amdgpu_dm_connector *aconnector)
3056 {
3057         uint8_t esi[DP_PSR_ERROR_STATUS - DP_SINK_COUNT_ESI] = { 0 };
3058         uint8_t dret;
3059         bool new_irq_handled = false;
3060         int dpcd_addr;
3061         int dpcd_bytes_to_read;
3062
3063         const int max_process_count = 30;
3064         int process_count = 0;
3065
3066         const struct dc_link_status *link_status = dc_link_get_status(aconnector->dc_link);
3067
3068         if (link_status->dpcd_caps->dpcd_rev.raw < 0x12) {
3069                 dpcd_bytes_to_read = DP_LANE0_1_STATUS - DP_SINK_COUNT;
3070                 /* DPCD 0x200 - 0x201 for downstream IRQ */
3071                 dpcd_addr = DP_SINK_COUNT;
3072         } else {
3073                 dpcd_bytes_to_read = DP_PSR_ERROR_STATUS - DP_SINK_COUNT_ESI;
3074                 /* DPCD 0x2002 - 0x2005 for downstream IRQ */
3075                 dpcd_addr = DP_SINK_COUNT_ESI;
3076         }
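        /*
         * Illustrative sizing, assuming the standard drm_dp_helper.h offsets:
         * DP_PSR_ERROR_STATUS (0x2006) - DP_SINK_COUNT_ESI (0x2002) = 4 bytes
         * covering 0x2002-0x2005, while DP_LANE0_1_STATUS (0x202) -
         * DP_SINK_COUNT (0x200) = 2 bytes covering 0x200-0x201.
         */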
3077
3078         dret = drm_dp_dpcd_read(
3079                 &aconnector->dm_dp_aux.aux,
3080                 dpcd_addr,
3081                 esi,
3082                 dpcd_bytes_to_read);
3083
3084         while (dret == dpcd_bytes_to_read &&
3085                 process_count < max_process_count) {
3086                 uint8_t retry;
3087                 dret = 0;
3088
3089                 process_count++;
3090
3091                 DRM_DEBUG_DRIVER("ESI %02x %02x %02x\n", esi[0], esi[1], esi[2]);
3092                 /* handle HPD short pulse irq */
3093                 if (aconnector->mst_mgr.mst_state)
3094                         drm_dp_mst_hpd_irq(
3095                                 &aconnector->mst_mgr,
3096                                 esi,
3097                                 &new_irq_handled);
3098
3099                 if (new_irq_handled) {
3100                         /* ACK at DPCD to notify downstream */
3101                         const int ack_dpcd_bytes_to_write =
3102                                 dpcd_bytes_to_read - 1;
3103
3104                         for (retry = 0; retry < 3; retry++) {
3105                                 uint8_t wret;
3106
3107                                 wret = drm_dp_dpcd_write(
3108                                         &aconnector->dm_dp_aux.aux,
3109                                         dpcd_addr + 1,
3110                                         &esi[1],
3111                                         ack_dpcd_bytes_to_write);
3112                                 if (wret == ack_dpcd_bytes_to_write)
3113                                         break;
3114                         }
3115
3116                         /* check if there is new irq to be handled */
3117                         dret = drm_dp_dpcd_read(
3118                                 &aconnector->dm_dp_aux.aux,
3119                                 dpcd_addr,
3120                                 esi,
3121                                 dpcd_bytes_to_read);
3122
3123                         new_irq_handled = false;
3124                 } else {
3125                         break;
3126                 }
3127         }
3128
3129         if (process_count == max_process_count)
3130                 DRM_DEBUG_DRIVER("Loop exceeded max iterations\n");
3131 }
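/*
 * In short, dm_handle_mst_sideband_msg() above drains ESI events in a loop:
 * read the vector, let the MST manager handle it, ACK by writing the vector
 * bytes back, then re-read, until the sink reports nothing new or the
 * 30-iteration guard trips.
 */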
3132
3133 static void schedule_hpd_rx_offload_work(struct hpd_rx_irq_offload_work_queue *offload_wq,
3134                                                         union hpd_irq_data hpd_irq_data)
3135 {
3136         struct hpd_rx_irq_offload_work *offload_work =
3137                                 kzalloc(sizeof(*offload_work), GFP_KERNEL);
3138
3139         if (!offload_work) {
3140                 DRM_ERROR("Failed to allocate hpd_rx_irq_offload_work.\n");
3141                 return;
3142         }
3143
3144         INIT_WORK(&offload_work->work, dm_handle_hpd_rx_offload_work);
3145         offload_work->data = hpd_irq_data;
3146         offload_work->offload_wq = offload_wq;
3147
3148         queue_work(offload_wq->wq, &offload_work->work);
3149         DRM_DEBUG_KMS("queue work to handle hpd_rx offload work\n");
3150 }
3151
3152 static void handle_hpd_rx_irq(void *param)
3153 {
3154         struct amdgpu_dm_connector *aconnector = (struct amdgpu_dm_connector *)param;
3155         struct drm_connector *connector = &aconnector->base;
3156         struct drm_device *dev = connector->dev;
3157         struct dc_link *dc_link = aconnector->dc_link;
3158         bool is_mst_root_connector = aconnector->mst_mgr.mst_state;
3159         bool result = false;
3160         enum dc_connection_type new_connection_type = dc_connection_none;
3161         struct amdgpu_device *adev = drm_to_adev(dev);
3162         union hpd_irq_data hpd_irq_data;
3163         bool link_loss = false;
3164         bool has_left_work = false;
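        /* One offload queue is kept per connector, indexed by the DRM connector index. */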
3165         int idx = aconnector->base.index;
3166         struct hpd_rx_irq_offload_work_queue *offload_wq = &adev->dm.hpd_rx_offload_wq[idx];
3167
3168         memset(&hpd_irq_data, 0, sizeof(hpd_irq_data));
3169
3170         if (adev->dm.disable_hpd_irq)
3171                 return;
3172
3173         /*
3174          * TODO: Temporary mutex to keep the HPD interrupt from hitting a
3175          * GPIO conflict; once the i2c helper is implemented, this mutex
3176          * should be retired.
3177          */
3178         mutex_lock(&aconnector->hpd_lock);
3179
3180         result = dc_link_handle_hpd_rx_irq(dc_link, &hpd_irq_data,
3181                                                 &link_loss, true, &has_left_work);
3182
3183         if (!has_left_work)
3184                 goto out;
3185
3186         if (hpd_irq_data.bytes.device_service_irq.bits.AUTOMATED_TEST) {
3187                 schedule_hpd_rx_offload_work(offload_wq, hpd_irq_data);
3188                 goto out;
3189         }
3190
3191         if (dc_link_dp_allow_hpd_rx_irq(dc_link)) {
3192                 if (hpd_irq_data.bytes.device_service_irq.bits.UP_REQ_MSG_RDY ||
3193                         hpd_irq_data.bytes.device_service_irq.bits.DOWN_REP_MSG_RDY) {
3194                         dm_handle_mst_sideband_msg(aconnector);
3195                         goto out;
3196                 }
3197
3198                 if (link_loss) {
3199                         bool skip = false;
3200
3201                         spin_lock(&offload_wq->offload_lock);
3202                         skip = offload_wq->is_handling_link_loss;
3203
3204                         if (!skip)
3205                                 offload_wq->is_handling_link_loss = true;
3206
3207                         spin_unlock(&offload_wq->offload_lock);
3208
3209                         if (!skip)
3210                                 schedule_hpd_rx_offload_work(offload_wq, hpd_irq_data);
3211
3212                         goto out;
3213                 }
3214         }
3215
3216 out:
3217         if (result && !is_mst_root_connector) {
3218                 /* Downstream Port status changed. */
3219                 if (!dc_link_detect_sink(dc_link, &new_connection_type))
3220                         DRM_ERROR("KMS: Failed to detect connector\n");
3221
3222                 if (aconnector->base.force && new_connection_type == dc_connection_none) {
3223                         emulated_link_detect(dc_link);
3224
3225                         if (aconnector->fake_enable)
3226                                 aconnector->fake_enable = false;
3227
3228                         amdgpu_dm_update_connector_after_detect(aconnector);
3229
3230
3231                         drm_modeset_lock_all(dev);
3232                         dm_restore_drm_connector_state(dev, connector);
3233                         drm_modeset_unlock_all(dev);
3234
3235                         drm_kms_helper_hotplug_event(dev);
3236                 } else if (dc_link_detect(dc_link, DETECT_REASON_HPDRX)) {
3237
3238                         if (aconnector->fake_enable)
3239                                 aconnector->fake_enable = false;
3240
3241                         amdgpu_dm_update_connector_after_detect(aconnector);
3242
3243
3244                         drm_modeset_lock_all(dev);
3245                         dm_restore_drm_connector_state(dev, connector);
3246                         drm_modeset_unlock_all(dev);
3247
3248                         drm_kms_helper_hotplug_event(dev);
3249                 }
3250         }
3251 #ifdef CONFIG_DRM_AMD_DC_HDCP
3252         if (hpd_irq_data.bytes.device_service_irq.bits.CP_IRQ) {
3253                 if (adev->dm.hdcp_workqueue)
3254                         hdcp_handle_cpirq(adev->dm.hdcp_workqueue, aconnector->base.index);
3255         }
3256 #endif
3257
3258         if (dc_link->type != dc_connection_mst_branch)
3259                 drm_dp_cec_irq(&aconnector->dm_dp_aux.aux);
3260
3261         mutex_unlock(&aconnector->hpd_lock);
3262 }
3263
3264 static void register_hpd_handlers(struct amdgpu_device *adev)
3265 {
3266         struct drm_device *dev = adev_to_drm(adev);
3267         struct drm_connector *connector;
3268         struct amdgpu_dm_connector *aconnector;
3269         const struct dc_link *dc_link;
3270         struct dc_interrupt_params int_params = {0};
3271
3272         int_params.requested_polarity = INTERRUPT_POLARITY_DEFAULT;
3273         int_params.current_polarity = INTERRUPT_POLARITY_DEFAULT;
3274
3275         list_for_each_entry(connector,
3276                         &dev->mode_config.connector_list, head) {
3277
3278                 aconnector = to_amdgpu_dm_connector(connector);
3279                 dc_link = aconnector->dc_link;
3280
3281                 if (DC_IRQ_SOURCE_INVALID != dc_link->irq_source_hpd) {
3282                         int_params.int_context = INTERRUPT_LOW_IRQ_CONTEXT;
3283                         int_params.irq_source = dc_link->irq_source_hpd;
3284
3285                         amdgpu_dm_irq_register_interrupt(adev, &int_params,
3286                                         handle_hpd_irq,
3287                                         (void *) aconnector);
3288                 }
3289
3290                 if (DC_IRQ_SOURCE_INVALID != dc_link->irq_source_hpd_rx) {
3291
3292                         /* Also register for DP short pulse (hpd_rx). */
3293                         int_params.int_context = INTERRUPT_LOW_IRQ_CONTEXT;
3294                         int_params.irq_source = dc_link->irq_source_hpd_rx;
3295
3296                         amdgpu_dm_irq_register_interrupt(adev, &int_params,
3297                                         handle_hpd_rx_irq,
3298                                         (void *) aconnector);
3299
3300                         if (adev->dm.hpd_rx_offload_wq)
3301                                 adev->dm.hpd_rx_offload_wq[connector->index].aconnector =
3302                                         aconnector;
3303                 }
3304         }
3305 }
3306
3307 #if defined(CONFIG_DRM_AMD_DC_SI)
3308 /* Register IRQ sources and initialize IRQ callbacks */
3309 static int dce60_register_irq_handlers(struct amdgpu_device *adev)
3310 {
3311         struct dc *dc = adev->dm.dc;
3312         struct common_irq_params *c_irq_params;
3313         struct dc_interrupt_params int_params = {0};
3314         int r;
3315         int i;
3316         unsigned client_id = AMDGPU_IRQ_CLIENTID_LEGACY;
3317
3318         int_params.requested_polarity = INTERRUPT_POLARITY_DEFAULT;
3319         int_params.current_polarity = INTERRUPT_POLARITY_DEFAULT;
3320
3321         /*
3322          * Actions of amdgpu_irq_add_id():
3323          * 1. Register a set() function with base driver.
3324          *    Base driver will call set() function to enable/disable an
3325          *    interrupt in DC hardware.
3326          * 2. Register amdgpu_dm_irq_handler().
3327          *    Base driver will call amdgpu_dm_irq_handler() for ALL interrupts
3328          *    coming from DC hardware.
3329          *    amdgpu_dm_irq_handler() will re-direct the interrupt to DC
3330          *    for acknowledging and handling. */
3331
3332         /* Use VBLANK interrupt */
3333         for (i = 0; i < adev->mode_info.num_crtc; i++) {
3334                 r = amdgpu_irq_add_id(adev, client_id, i + 1, &adev->crtc_irq);
3335                 if (r) {
3336                         DRM_ERROR("Failed to add crtc irq id!\n");
3337                         return r;
3338                 }
3339
3340                 int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
3341                 int_params.irq_source =
3342                         dc_interrupt_to_irq_source(dc, i + 1, 0);
3343
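                /* One common_irq_params slot per CRTC, indexed by DC irq source. */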
3344                 c_irq_params = &adev->dm.vblank_params[int_params.irq_source - DC_IRQ_SOURCE_VBLANK1];
3345
3346                 c_irq_params->adev = adev;
3347                 c_irq_params->irq_src = int_params.irq_source;
3348
3349                 amdgpu_dm_irq_register_interrupt(adev, &int_params,
3350                                 dm_crtc_high_irq, c_irq_params);
3351         }
3352
3353         /* Use GRPH_PFLIP interrupt */
3354         for (i = VISLANDS30_IV_SRCID_D1_GRPH_PFLIP;
3355                         i <= VISLANDS30_IV_SRCID_D6_GRPH_PFLIP; i += 2) {
3356                 r = amdgpu_irq_add_id(adev, client_id, i, &adev->pageflip_irq);
3357                 if (r) {
3358                         DRM_ERROR("Failed to add page flip irq id!\n");
3359                         return r;
3360                 }
3361
3362                 int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
3363                 int_params.irq_source =
3364                         dc_interrupt_to_irq_source(dc, i, 0);
3365
3366                 c_irq_params = &adev->dm.pflip_params[int_params.irq_source - DC_IRQ_SOURCE_PFLIP_FIRST];
3367
3368                 c_irq_params->adev = adev;
3369                 c_irq_params->irq_src = int_params.irq_source;
3370
3371                 amdgpu_dm_irq_register_interrupt(adev, &int_params,
3372                                 dm_pflip_high_irq, c_irq_params);
3373
3374         }
3375
3376         /* HPD */
3377         r = amdgpu_irq_add_id(adev, client_id,
3378                         VISLANDS30_IV_SRCID_HOTPLUG_DETECT_A, &adev->hpd_irq);
3379         if (r) {
3380                 DRM_ERROR("Failed to add hpd irq id!\n");
3381                 return r;
3382         }
3383
3384         register_hpd_handlers(adev);
3385
3386         return 0;
3387 }
3388 #endif
3389
3390 /* Register IRQ sources and initialize IRQ callbacks */
3391 static int dce110_register_irq_handlers(struct amdgpu_device *adev)
3392 {
3393         struct dc *dc = adev->dm.dc;
3394         struct common_irq_params *c_irq_params;
3395         struct dc_interrupt_params int_params = {0};
3396         int r;
3397         int i;
3398         unsigned client_id = AMDGPU_IRQ_CLIENTID_LEGACY;
3399
3400         if (adev->asic_type >= CHIP_VEGA10)
3401                 client_id = SOC15_IH_CLIENTID_DCE;
3402
3403         int_params.requested_polarity = INTERRUPT_POLARITY_DEFAULT;
3404         int_params.current_polarity = INTERRUPT_POLARITY_DEFAULT;
3405
3406         /*
3407          * Actions of amdgpu_irq_add_id():
3408          * 1. Register a set() function with base driver.
3409          *    Base driver will call set() function to enable/disable an
3410          *    interrupt in DC hardware.
3411          * 2. Register amdgpu_dm_irq_handler().
3412          *    Base driver will call amdgpu_dm_irq_handler() for ALL interrupts
3413          *    coming from DC hardware.
3414          *    amdgpu_dm_irq_handler() will re-direct the interrupt to DC
3415          *    for acknowledging and handling. */
3416
3417         /* Use VBLANK interrupt */
3418         for (i = VISLANDS30_IV_SRCID_D1_VERTICAL_INTERRUPT0; i <= VISLANDS30_IV_SRCID_D6_VERTICAL_INTERRUPT0; i++) {
3419                 r = amdgpu_irq_add_id(adev, client_id, i, &adev->crtc_irq);
3420                 if (r) {
3421                         DRM_ERROR("Failed to add crtc irq id!\n");
3422                         return r;
3423                 }
3424
3425                 int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
3426                 int_params.irq_source =
3427                         dc_interrupt_to_irq_source(dc, i, 0);
3428
3429                 c_irq_params = &adev->dm.vblank_params[int_params.irq_source - DC_IRQ_SOURCE_VBLANK1];
3430
3431                 c_irq_params->adev = adev;
3432                 c_irq_params->irq_src = int_params.irq_source;
3433
3434                 amdgpu_dm_irq_register_interrupt(adev, &int_params,
3435                                 dm_crtc_high_irq, c_irq_params);
3436         }
3437
3438         /* Use VUPDATE interrupt */
3439         for (i = VISLANDS30_IV_SRCID_D1_V_UPDATE_INT; i <= VISLANDS30_IV_SRCID_D6_V_UPDATE_INT; i += 2) {
3440                 r = amdgpu_irq_add_id(adev, client_id, i, &adev->vupdate_irq);
3441                 if (r) {
3442                         DRM_ERROR("Failed to add vupdate irq id!\n");
3443                         return r;
3444                 }
3445
3446                 int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
3447                 int_params.irq_source =
3448                         dc_interrupt_to_irq_source(dc, i, 0);
3449
3450                 c_irq_params = &adev->dm.vupdate_params[int_params.irq_source - DC_IRQ_SOURCE_VUPDATE1];
3451
3452                 c_irq_params->adev = adev;
3453                 c_irq_params->irq_src = int_params.irq_source;
3454
3455                 amdgpu_dm_irq_register_interrupt(adev, &int_params,
3456                                 dm_vupdate_high_irq, c_irq_params);
3457         }
3458
3459         /* Use GRPH_PFLIP interrupt */
3460         for (i = VISLANDS30_IV_SRCID_D1_GRPH_PFLIP;
3461                         i <= VISLANDS30_IV_SRCID_D6_GRPH_PFLIP; i += 2) {
3462                 r = amdgpu_irq_add_id(adev, client_id, i, &adev->pageflip_irq);
3463                 if (r) {
3464                         DRM_ERROR("Failed to add page flip irq id!\n");
3465                         return r;
3466                 }
3467
3468                 int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
3469                 int_params.irq_source =
3470                         dc_interrupt_to_irq_source(dc, i, 0);
3471
3472                 c_irq_params = &adev->dm.pflip_params[int_params.irq_source - DC_IRQ_SOURCE_PFLIP_FIRST];
3473
3474                 c_irq_params->adev = adev;
3475                 c_irq_params->irq_src = int_params.irq_source;
3476
3477                 amdgpu_dm_irq_register_interrupt(adev, &int_params,
3478                                 dm_pflip_high_irq, c_irq_params);
3479
3480         }
3481
3482         /* HPD */
3483         r = amdgpu_irq_add_id(adev, client_id,
3484                         VISLANDS30_IV_SRCID_HOTPLUG_DETECT_A, &adev->hpd_irq);
3485         if (r) {
3486                 DRM_ERROR("Failed to add hpd irq id!\n");
3487                 return r;
3488         }
3489
3490         register_hpd_handlers(adev);
3491
3492         return 0;
3493 }
3494
3495 #if defined(CONFIG_DRM_AMD_DC_DCN)
3496 /* Register IRQ sources and initialize IRQ callbacks */
3497 static int dcn10_register_irq_handlers(struct amdgpu_device *adev)
3498 {
3499         struct dc *dc = adev->dm.dc;
3500         struct common_irq_params *c_irq_params;
3501         struct dc_interrupt_params int_params = {0};
3502         int r;
3503         int i;
3504 #if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
3505         static const unsigned int vrtl_int_srcid[] = {
3506                 DCN_1_0__SRCID__OTG1_VERTICAL_INTERRUPT0_CONTROL,
3507                 DCN_1_0__SRCID__OTG2_VERTICAL_INTERRUPT0_CONTROL,
3508                 DCN_1_0__SRCID__OTG3_VERTICAL_INTERRUPT0_CONTROL,
3509                 DCN_1_0__SRCID__OTG4_VERTICAL_INTERRUPT0_CONTROL,
3510                 DCN_1_0__SRCID__OTG5_VERTICAL_INTERRUPT0_CONTROL,
3511                 DCN_1_0__SRCID__OTG6_VERTICAL_INTERRUPT0_CONTROL
3512         };
3513 #endif
3514
3515         int_params.requested_polarity = INTERRUPT_POLARITY_DEFAULT;
3516         int_params.current_polarity = INTERRUPT_POLARITY_DEFAULT;
3517
3518         /*
3519          * Actions of amdgpu_irq_add_id():
3520          * 1. Register a set() function with base driver.
3521          *    Base driver will call set() function to enable/disable an
3522          *    interrupt in DC hardware.
3523          * 2. Register amdgpu_dm_irq_handler().
3524          *    Base driver will call amdgpu_dm_irq_handler() for ALL interrupts
3525          *    coming from DC hardware.
3526          *    amdgpu_dm_irq_handler() will re-direct the interrupt to DC
3527          *    for acknowledging and handling.
3528          */
3529
3530         /* Use VSTARTUP interrupt */
3531         for (i = DCN_1_0__SRCID__DC_D1_OTG_VSTARTUP;
3532                         i <= DCN_1_0__SRCID__DC_D1_OTG_VSTARTUP + adev->mode_info.num_crtc - 1;
3533                         i++) {
3534                 r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_DCE, i, &adev->crtc_irq);
3535
3536                 if (r) {
3537                         DRM_ERROR("Failed to add crtc irq id!\n");
3538                         return r;
3539                 }
3540
3541                 int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
3542                 int_params.irq_source =
3543                         dc_interrupt_to_irq_source(dc, i, 0);
3544
3545                 c_irq_params = &adev->dm.vblank_params[int_params.irq_source - DC_IRQ_SOURCE_VBLANK1];
3546
3547                 c_irq_params->adev = adev;
3548                 c_irq_params->irq_src = int_params.irq_source;
3549
3550                 amdgpu_dm_irq_register_interrupt(
3551                         adev, &int_params, dm_crtc_high_irq, c_irq_params);
3552         }
3553
3554         /* Use otg vertical line interrupt */
3555 #if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
3556         for (i = 0; i <= adev->mode_info.num_crtc - 1; i++) {
3557                 r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_DCE,
3558                                 vrtl_int_srcid[i], &adev->vline0_irq);
3559
3560                 if (r) {
3561                         DRM_ERROR("Failed to add vline0 irq id!\n");
3562                         return r;
3563                 }
3564
3565                 int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
3566                 int_params.irq_source =
3567                         dc_interrupt_to_irq_source(dc, vrtl_int_srcid[i], 0);
3568
3569                 if (int_params.irq_source == DC_IRQ_SOURCE_INVALID) {
3570                         DRM_ERROR("Failed to register vline0 irq %d!\n", vrtl_int_srcid[i]);
3571                         break;
3572                 }
3573
3574                 c_irq_params = &adev->dm.vline0_params[int_params.irq_source
3575                                         - DC_IRQ_SOURCE_DC1_VLINE0];
3576
3577                 c_irq_params->adev = adev;
3578                 c_irq_params->irq_src = int_params.irq_source;
3579
3580                 amdgpu_dm_irq_register_interrupt(adev, &int_params,
3581                                 dm_dcn_vertical_interrupt0_high_irq, c_irq_params);
3582         }
3583 #endif
3584
3585         /* Use VUPDATE_NO_LOCK interrupt on DCN, which seems to correspond to
3586          * the regular VUPDATE interrupt on DCE. We want DC_IRQ_SOURCE_VUPDATEx
3587          * to trigger at end of each vblank, regardless of state of the lock,
3588          * matching DCE behaviour.
3589          */
3590         for (i = DCN_1_0__SRCID__OTG0_IHC_V_UPDATE_NO_LOCK_INTERRUPT;
3591              i <= DCN_1_0__SRCID__OTG0_IHC_V_UPDATE_NO_LOCK_INTERRUPT + adev->mode_info.num_crtc - 1;
3592              i++) {
3593                 r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_DCE, i, &adev->vupdate_irq);
3594
3595                 if (r) {
3596                         DRM_ERROR("Failed to add vupdate irq id!\n");
3597                         return r;
3598                 }
3599
3600                 int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
3601                 int_params.irq_source =
3602                         dc_interrupt_to_irq_source(dc, i, 0);
3603
3604                 c_irq_params = &adev->dm.vupdate_params[int_params.irq_source - DC_IRQ_SOURCE_VUPDATE1];
3605
3606                 c_irq_params->adev = adev;
3607                 c_irq_params->irq_src = int_params.irq_source;
3608
3609                 amdgpu_dm_irq_register_interrupt(adev, &int_params,
3610                                 dm_vupdate_high_irq, c_irq_params);
3611         }
3612
3613         /* Use GRPH_PFLIP interrupt */
3614         for (i = DCN_1_0__SRCID__HUBP0_FLIP_INTERRUPT;
3615                         i <= DCN_1_0__SRCID__HUBP0_FLIP_INTERRUPT + dc->caps.max_otg_num - 1;
3616                         i++) {
3617                 r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_DCE, i, &adev->pageflip_irq);
3618                 if (r) {
3619                         DRM_ERROR("Failed to add page flip irq id!\n");
3620                         return r;
3621                 }
3622
3623                 int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
3624                 int_params.irq_source =
3625                         dc_interrupt_to_irq_source(dc, i, 0);
3626
3627                 c_irq_params = &adev->dm.pflip_params[int_params.irq_source - DC_IRQ_SOURCE_PFLIP_FIRST];
3628
3629                 c_irq_params->adev = adev;
3630                 c_irq_params->irq_src = int_params.irq_source;
3631
3632                 amdgpu_dm_irq_register_interrupt(adev, &int_params,
3633                                 dm_pflip_high_irq, c_irq_params);
3634
3635         }
3636
3637         /* HPD */
3638         r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_DCE, DCN_1_0__SRCID__DC_HPD1_INT,
3639                         &adev->hpd_irq);
3640         if (r) {
3641                 DRM_ERROR("Failed to add hpd irq id!\n");
3642                 return r;
3643         }
3644
3645         register_hpd_handlers(adev);
3646
3647         return 0;
3648 }
3649 /* Register Outbox IRQ sources and initialize IRQ callbacks */
3650 static int register_outbox_irq_handlers(struct amdgpu_device *adev)
3651 {
3652         struct dc *dc = adev->dm.dc;
3653         struct common_irq_params *c_irq_params;
3654         struct dc_interrupt_params int_params = {0};
3655         int r, i;
3656
3657         int_params.requested_polarity = INTERRUPT_POLARITY_DEFAULT;
3658         int_params.current_polarity = INTERRUPT_POLARITY_DEFAULT;
3659
3660         r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_DCE, DCN_1_0__SRCID__DMCUB_OUTBOX_LOW_PRIORITY_READY_INT,
3661                         &adev->dmub_outbox_irq);
3662         if (r) {
3663                 DRM_ERROR("Failed to add outbox irq id!\n");
3664                 return r;
3665         }
3666
3667         if (dc->ctx->dmub_srv) {
3668                 i = DCN_1_0__SRCID__DMCUB_OUTBOX_LOW_PRIORITY_READY_INT;
3669                 int_params.int_context = INTERRUPT_LOW_IRQ_CONTEXT;
3670                 int_params.irq_source =
3671                 dc_interrupt_to_irq_source(dc, i, 0);
3672
3673                 c_irq_params = &adev->dm.dmub_outbox_params[0];
3674
3675                 c_irq_params->adev = adev;
3676                 c_irq_params->irq_src = int_params.irq_source;
3677
3678                 amdgpu_dm_irq_register_interrupt(adev, &int_params,
3679                                 dm_dmub_outbox1_low_irq, c_irq_params);
3680         }
3681
3682         return 0;
3683 }
3684 #endif
3685
3686 /*
3687  * Acquires the lock for the atomic state object and returns
3688  * the new atomic state.
3689  *
3690  * This should only be called during atomic check.
3691  */
3692 static int dm_atomic_get_state(struct drm_atomic_state *state,
3693                                struct dm_atomic_state **dm_state)
3694 {
3695         struct drm_device *dev = state->dev;
3696         struct amdgpu_device *adev = drm_to_adev(dev);
3697         struct amdgpu_display_manager *dm = &adev->dm;
3698         struct drm_private_state *priv_state;
3699
3700         if (*dm_state)
3701                 return 0;
3702
3703         priv_state = drm_atomic_get_private_obj_state(state, &dm->atomic_obj);
3704         if (IS_ERR(priv_state))
3705                 return PTR_ERR(priv_state);
3706
3707         *dm_state = to_dm_atomic_state(priv_state);
3708
3709         return 0;
3710 }
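/*
 * A minimal usage sketch for dm_atomic_get_state() (hypothetical caller, for
 * illustration only): during atomic check, start with a NULL pointer and let
 * the helper fill it in on first use:
 *
 *	struct dm_atomic_state *dm_state = NULL;
 *	int ret = dm_atomic_get_state(state, &dm_state);
 *
 *	if (ret)
 *		return ret;
 *	// dm_state->context can now be inspected or modified
 */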
3711
3712 static struct dm_atomic_state *
3713 dm_atomic_get_new_state(struct drm_atomic_state *state)
3714 {
3715         struct drm_device *dev = state->dev;
3716         struct amdgpu_device *adev = drm_to_adev(dev);
3717         struct amdgpu_display_manager *dm = &adev->dm;
3718         struct drm_private_obj *obj;
3719         struct drm_private_state *new_obj_state;
3720         int i;
3721
3722         for_each_new_private_obj_in_state(state, obj, new_obj_state, i) {
3723                 if (obj->funcs == dm->atomic_obj.funcs)
3724                         return to_dm_atomic_state(new_obj_state);
3725         }
3726
3727         return NULL;
3728 }
3729
3730 static struct drm_private_state *
3731 dm_atomic_duplicate_state(struct drm_private_obj *obj)
3732 {
3733         struct dm_atomic_state *old_state, *new_state;
3734
3735         new_state = kzalloc(sizeof(*new_state), GFP_KERNEL);
3736         if (!new_state)
3737                 return NULL;
3738
3739         __drm_atomic_helper_private_obj_duplicate_state(obj, &new_state->base);
3740
3741         old_state = to_dm_atomic_state(obj->state);
3742
3743         if (old_state && old_state->context)
3744                 new_state->context = dc_copy_state(old_state->context);
3745
3746         if (!new_state->context) {
3747                 kfree(new_state);
3748                 return NULL;
3749         }
3750
3751         return &new_state->base;
3752 }
3753
3754 static void dm_atomic_destroy_state(struct drm_private_obj *obj,
3755                                     struct drm_private_state *state)
3756 {
3757         struct dm_atomic_state *dm_state = to_dm_atomic_state(state);
3758
3759         if (dm_state && dm_state->context)
3760                 dc_release_state(dm_state->context);
3761
3762         kfree(dm_state);
3763 }
3764
3765 static struct drm_private_state_funcs dm_atomic_state_funcs = {
3766         .atomic_duplicate_state = dm_atomic_duplicate_state,
3767         .atomic_destroy_state = dm_atomic_destroy_state,
3768 };
3769
3770 static int amdgpu_dm_mode_config_init(struct amdgpu_device *adev)
3771 {
3772         struct dm_atomic_state *state;
3773         int r;
3774
3775         adev->mode_info.mode_config_initialized = true;
3776
3777         adev_to_drm(adev)->mode_config.funcs = (void *)&amdgpu_dm_mode_funcs;
3778         adev_to_drm(adev)->mode_config.helper_private = &amdgpu_dm_mode_config_helperfuncs;
3779
3780         adev_to_drm(adev)->mode_config.max_width = 16384;
3781         adev_to_drm(adev)->mode_config.max_height = 16384;
3782
3783         adev_to_drm(adev)->mode_config.preferred_depth = 24;
3784         adev_to_drm(adev)->mode_config.prefer_shadow = 1;
3785         /* indicates support for immediate flip */
3786         adev_to_drm(adev)->mode_config.async_page_flip = true;
3787
3788         adev_to_drm(adev)->mode_config.fb_base = adev->gmc.aper_base;
3789
3790         state = kzalloc(sizeof(*state), GFP_KERNEL);
3791         if (!state)
3792                 return -ENOMEM;
3793
3794         state->context = dc_create_state(adev->dm.dc);
3795         if (!state->context) {
3796                 kfree(state);
3797                 return -ENOMEM;
3798         }
3799
3800         dc_resource_state_copy_construct_current(adev->dm.dc, state->context);
3801
3802         drm_atomic_private_obj_init(adev_to_drm(adev),
3803                                     &adev->dm.atomic_obj,
3804                                     &state->base,
3805                                     &dm_atomic_state_funcs);
3806
3807         r = amdgpu_display_modeset_create_props(adev);
3808         if (r) {
3809                 dc_release_state(state->context);
3810                 kfree(state);
3811                 return r;
3812         }
3813
3814         r = amdgpu_dm_audio_init(adev);
3815         if (r) {
3816                 dc_release_state(state->context);
3817                 kfree(state);
3818                 return r;
3819         }
3820
3821         return 0;
3822 }
3823
3824 #define AMDGPU_DM_DEFAULT_MIN_BACKLIGHT 12
3825 #define AMDGPU_DM_DEFAULT_MAX_BACKLIGHT 255
3826 #define AUX_BL_DEFAULT_TRANSITION_TIME_MS 50
3827
3828 #if defined(CONFIG_BACKLIGHT_CLASS_DEVICE) ||\
3829         defined(CONFIG_BACKLIGHT_CLASS_DEVICE_MODULE)
3830
3831 static void amdgpu_dm_update_backlight_caps(struct amdgpu_display_manager *dm,
3832                                             int bl_idx)
3833 {
3834 #if defined(CONFIG_ACPI)
3835         struct amdgpu_dm_backlight_caps caps;
3836
3837         memset(&caps, 0, sizeof(caps));
3838
3839         if (dm->backlight_caps[bl_idx].caps_valid)
3840                 return;
3841
3842         amdgpu_acpi_get_backlight_caps(&caps);
3843         if (caps.caps_valid) {
3844                 dm->backlight_caps[bl_idx].caps_valid = true;
3845                 if (caps.aux_support)
3846                         return;
3847                 dm->backlight_caps[bl_idx].min_input_signal = caps.min_input_signal;
3848                 dm->backlight_caps[bl_idx].max_input_signal = caps.max_input_signal;
3849         } else {
3850                 dm->backlight_caps[bl_idx].min_input_signal =
3851                                 AMDGPU_DM_DEFAULT_MIN_BACKLIGHT;
3852                 dm->backlight_caps[bl_idx].max_input_signal =
3853                                 AMDGPU_DM_DEFAULT_MAX_BACKLIGHT;
3854         }
3855 #else
3856         if (dm->backlight_caps[bl_idx].aux_support)
3857                 return;
3858
3859         dm->backlight_caps[bl_idx].min_input_signal = AMDGPU_DM_DEFAULT_MIN_BACKLIGHT;
3860         dm->backlight_caps[bl_idx].max_input_signal = AMDGPU_DM_DEFAULT_MAX_BACKLIGHT;
3861 #endif
3862 }
3863
3864 static int get_brightness_range(const struct amdgpu_dm_backlight_caps *caps,
3865                                 unsigned *min, unsigned *max)
3866 {
3867         if (!caps)
3868                 return 0;
3869
3870         if (caps->aux_support) {
3871                 // Firmware limits are in nits, DC API wants millinits.
3872                 *max = 1000 * caps->aux_max_input_signal;
3873                 *min = 1000 * caps->aux_min_input_signal;
3874         } else {
3875                 // Firmware limits are 8-bit, PWM control is 16-bit.
3876                 *max = 0x101 * caps->max_input_signal;
3877                 *min = 0x101 * caps->min_input_signal;
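                /*
                 * e.g. max_input_signal = 0xFF expands to 0xFF * 0x101 = 0xFFFF,
                 * i.e. the full 16-bit PWM range.
                 */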
3878         }
3879         return 1;
3880 }
3881
3882 static u32 convert_brightness_from_user(const struct amdgpu_dm_backlight_caps *caps,
3883                                         uint32_t brightness)
3884 {
3885         unsigned min, max;
3886
3887         if (!get_brightness_range(caps, &min, &max))
3888                 return brightness;
3889
3890         // Rescale 0..255 to min..max
3891         return min + DIV_ROUND_CLOSEST((max - min) * brightness,
3892                                        AMDGPU_MAX_BL_LEVEL);
3893 }
3894
3895 static u32 convert_brightness_to_user(const struct amdgpu_dm_backlight_caps *caps,
3896                                       uint32_t brightness)
3897 {
3898         unsigned min, max;
3899
3900         if (!get_brightness_range(caps, &min, &max))
3901                 return brightness;
3902
3903         if (brightness < min)
3904                 return 0;
3905         // Rescale min..max to 0..255
3906         return DIV_ROUND_CLOSEST(AMDGPU_MAX_BL_LEVEL * (brightness - min),
3907                                  max - min);
3908 }
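/*
 * Worked round trip (illustrative, assuming AMDGPU_MAX_BL_LEVEL is 0xFF and
 * the default 12/255 firmware limits in PWM mode): min = 12 * 0x101 = 3084
 * and max = 255 * 0x101 = 65535, so a user level of 128 maps to
 * 3084 + DIV_ROUND_CLOSEST(62451 * 128, 255) = 3084 + 31348 = 34432, and
 * converting 34432 back yields DIV_ROUND_CLOSEST(255 * 31348, 62451) = 128.
 */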
3909
3910 static void amdgpu_dm_backlight_set_level(struct amdgpu_display_manager *dm,
3911                                          int bl_idx,
3912                                          u32 user_brightness)
3913 {
3914         struct amdgpu_dm_backlight_caps caps;
3915         struct dc_link *link;
3916         u32 brightness;
3917         bool rc;
3918
3919         amdgpu_dm_update_backlight_caps(dm, bl_idx);
3920         caps = dm->backlight_caps[bl_idx];
3921
3922         dm->brightness[bl_idx] = user_brightness;
3923         brightness = convert_brightness_from_user(&caps, dm->brightness[bl_idx]);
3924         link = (struct dc_link *)dm->backlight_link[bl_idx];
3925
3926         /* Change brightness based on AUX property */
3927         if (caps.aux_support) {
3928                 rc = dc_link_set_backlight_level_nits(link, true, brightness,
3929                                                       AUX_BL_DEFAULT_TRANSITION_TIME_MS);
3930                 if (!rc)
3931                         DRM_DEBUG("DM: Failed to update backlight via AUX on eDP[%d]\n", bl_idx);
3932         } else {
3933                 rc = dc_link_set_backlight_level(link, brightness, 0);
3934                 if (!rc)
3935                         DRM_DEBUG("DM: Failed to update backlight on eDP[%d]\n", bl_idx);
3936         }
3937
3938         if (rc)
3939                 dm->actual_brightness[bl_idx] = user_brightness;
3940 }
3941
3942 static int amdgpu_dm_backlight_update_status(struct backlight_device *bd)
3943 {
3944         struct amdgpu_display_manager *dm = bl_get_data(bd);
3945         int i;
3946
3947         for (i = 0; i < dm->num_of_edps; i++) {
3948                 if (bd == dm->backlight_dev[i])
3949                         break;
3950         }
3951         if (i >= dm->num_of_edps)
3952                 i = 0;
3953         amdgpu_dm_backlight_set_level(dm, i, bd->props.brightness);
3954
3955         return 0;
3956 }
3957
3958 static u32 amdgpu_dm_backlight_get_level(struct amdgpu_display_manager *dm,
3959                                          int bl_idx)
3960 {
3961         struct amdgpu_dm_backlight_caps caps;
3962         struct dc_link *link = (struct dc_link *)dm->backlight_link[bl_idx];
3963
3964         amdgpu_dm_update_backlight_caps(dm, bl_idx);
3965         caps = dm->backlight_caps[bl_idx];
3966
3967         if (caps.aux_support) {
3968                 u32 avg, peak;
3969                 bool rc;
3970
3971                 rc = dc_link_get_backlight_level_nits(link, &avg, &peak);
3972                 if (!rc)
3973                         return dm->brightness[bl_idx];
3974                 return convert_brightness_to_user(&caps, avg);
3975         } else {
3976                 int ret = dc_link_get_backlight_level(link);
3977
3978                 if (ret == DC_ERROR_UNEXPECTED)
3979                         return dm->brightness[bl_idx];
3980                 return convert_brightness_to_user(&caps, ret);
3981         }
3982 }
3983
3984 static int amdgpu_dm_backlight_get_brightness(struct backlight_device *bd)
3985 {
3986         struct amdgpu_display_manager *dm = bl_get_data(bd);
3987         int i;
3988
3989         for (i = 0; i < dm->num_of_edps; i++) {
3990                 if (bd == dm->backlight_dev[i])
3991                         break;
3992         }
3993         if (i >= dm->num_of_edps)
3994                 i = 0;
3995         return amdgpu_dm_backlight_get_level(dm, i);
3996 }
3997
3998 static const struct backlight_ops amdgpu_dm_backlight_ops = {
3999         .options = BL_CORE_SUSPENDRESUME,
4000         .get_brightness = amdgpu_dm_backlight_get_brightness,
4001         .update_status  = amdgpu_dm_backlight_update_status,
4002 };
4003
4004 static void
4005 amdgpu_dm_register_backlight_device(struct amdgpu_display_manager *dm)
4006 {
4007         char bl_name[16];
4008         struct backlight_properties props = { 0 };
4009
4010         amdgpu_dm_update_backlight_caps(dm, dm->num_of_edps);
4011         dm->brightness[dm->num_of_edps] = AMDGPU_MAX_BL_LEVEL;
4012
4013         props.max_brightness = AMDGPU_MAX_BL_LEVEL;
4014         props.brightness = AMDGPU_MAX_BL_LEVEL;
4015         props.type = BACKLIGHT_RAW;
4016
4017         snprintf(bl_name, sizeof(bl_name), "amdgpu_bl%d",
4018                  adev_to_drm(dm->adev)->primary->index + dm->num_of_edps);
4019
4020         dm->backlight_dev[dm->num_of_edps] = backlight_device_register(bl_name,
4021                                                                        adev_to_drm(dm->adev)->dev,
4022                                                                        dm,
4023                                                                        &amdgpu_dm_backlight_ops,
4024                                                                        &props);
4025
4026         if (IS_ERR(dm->backlight_dev[dm->num_of_edps]))
4027                 DRM_ERROR("DM: Backlight registration failed!\n");
4028         else
4029                 DRM_DEBUG_DRIVER("DM: Registered Backlight device: %s\n", bl_name);
4030 }
4031 #endif
4032
4033 static int initialize_plane(struct amdgpu_display_manager *dm,
4034                             struct amdgpu_mode_info *mode_info, int plane_id,
4035                             enum drm_plane_type plane_type,
4036                             const struct dc_plane_cap *plane_cap)
4037 {
4038         struct drm_plane *plane;
4039         unsigned long possible_crtcs;
4040         int ret = 0;
4041
4042         plane = kzalloc(sizeof(struct drm_plane), GFP_KERNEL);
4043         if (!plane) {
4044                 DRM_ERROR("KMS: Failed to allocate plane\n");
4045                 return -ENOMEM;
4046         }
4047         plane->type = plane_type;
4048
4049         /*
4050          * HACK: IGT tests expect that the primary plane for a CRTC
4051          * can only have one possible CRTC. Only expose support for
4052          * any CRTC on planes that are not going to be used as a
4053          * primary plane - i.e. overlay or underlay planes.
4054          */
4055         possible_crtcs = 1 << plane_id;
4056         if (plane_id >= dm->dc->caps.max_streams)
4057                 possible_crtcs = 0xff;
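        /*
         * e.g. primary plane 0 gets possible_crtcs = 0x1 (CRTC 0 only), while
         * overlay/underlay planes get 0xff (any of up to 8 CRTCs).
         */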
4058
4059         ret = amdgpu_dm_plane_init(dm, plane, possible_crtcs, plane_cap);
4060
4061         if (ret) {
4062                 DRM_ERROR("KMS: Failed to initialize plane\n");
4063                 kfree(plane);
4064                 return ret;
4065         }
4066
4067         if (mode_info)
4068                 mode_info->planes[plane_id] = plane;
4069
4070         return ret;
4071 }
4072
4073
4074 static void register_backlight_device(struct amdgpu_display_manager *dm,
4075                                       struct dc_link *link)
4076 {
4077 #if defined(CONFIG_BACKLIGHT_CLASS_DEVICE) ||\
4078         defined(CONFIG_BACKLIGHT_CLASS_DEVICE_MODULE)
4079
4080         if ((link->connector_signal & (SIGNAL_TYPE_EDP | SIGNAL_TYPE_LVDS)) &&
4081             link->type != dc_connection_none) {
4082                 /*
4083                  * Even if registration fails, we should continue with
4084                  * DM initialization, because not having backlight control
4085                  * is better than a black screen.
4086                  */
4087                 if (!dm->backlight_dev[dm->num_of_edps])
4088                         amdgpu_dm_register_backlight_device(dm);
4089
4090                 if (dm->backlight_dev[dm->num_of_edps]) {
4091                         dm->backlight_link[dm->num_of_edps] = link;
4092                         dm->num_of_edps++;
4093                 }
4094         }
4095 #endif
4096 }
4097
4098
4099 /*
4100  * In this architecture, the association
4101  * connector -> encoder -> crtc
4102  * is not really required. The crtc and connector will hold the
4103  * display_index as an abstraction to use with the DAL component.
4104  *
4105  * Returns 0 on success
4106  */
4107 static int amdgpu_dm_initialize_drm_device(struct amdgpu_device *adev)
4108 {
4109         struct amdgpu_display_manager *dm = &adev->dm;
4110         int32_t i;
4111         struct amdgpu_dm_connector *aconnector = NULL;
4112         struct amdgpu_encoder *aencoder = NULL;
4113         struct amdgpu_mode_info *mode_info = &adev->mode_info;
4114         uint32_t link_cnt;
4115         int32_t primary_planes;
4116         enum dc_connection_type new_connection_type = dc_connection_none;
4117         const struct dc_plane_cap *plane;
4118
4119         dm->display_indexes_num = dm->dc->caps.max_streams;
4120         /* Update the actual number of CRTCs in use */
4121         adev->mode_info.num_crtc = adev->dm.display_indexes_num;
4122
4123         link_cnt = dm->dc->caps.max_links;
4124         if (amdgpu_dm_mode_config_init(dm->adev)) {
4125                 DRM_ERROR("DM: Failed to initialize mode config\n");
4126                 return -EINVAL;
4127         }
4128
4129         /* There is one primary plane per CRTC */
4130         primary_planes = dm->dc->caps.max_streams;
4131         ASSERT(primary_planes <= AMDGPU_MAX_PLANES);
4132
4133         /*
4134          * Initialize primary planes, implicit planes for legacy IOCTLs.
4135          * Order is reversed to match iteration order in atomic check.
4136          */
4137         for (i = (primary_planes - 1); i >= 0; i--) {
4138                 plane = &dm->dc->caps.planes[i];
4139
4140                 if (initialize_plane(dm, mode_info, i,
4141                                      DRM_PLANE_TYPE_PRIMARY, plane)) {
4142                         DRM_ERROR("KMS: Failed to initialize primary plane\n");
4143                         goto fail;
4144                 }
4145         }
4146
4147         /*
4148          * Initialize overlay planes, index starting after primary planes.
4149          * These planes have a higher DRM index than the primary planes since
4150          * they should be considered as having a higher z-order.
4151          * Order is reversed to match iteration order in atomic check.
4152          *
4153          * Only support DCN for now, and only expose one so we don't encourage
4154          * userspace to use up all the pipes.
4155          */
4156         for (i = 0; i < dm->dc->caps.max_planes; ++i) {
4157                 struct dc_plane_cap *plane = &dm->dc->caps.planes[i];
4158
4159                 if (plane->type != DC_PLANE_TYPE_DCN_UNIVERSAL)
4160                         continue;
4161
4162                 if (!plane->blends_with_above || !plane->blends_with_below)
4163                         continue;
4164
4165                 if (!plane->pixel_format_support.argb8888)
4166                         continue;
4167
4168                 if (initialize_plane(dm, NULL, primary_planes + i,
4169                                      DRM_PLANE_TYPE_OVERLAY, plane)) {
4170                         DRM_ERROR("KMS: Failed to initialize overlay plane\n");
4171                         goto fail;
4172                 }
4173
4174                 /* Only create one overlay plane. */
4175                 break;
4176         }
4177
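        /*
         * One CRTC is created per DC stream, and each CRTC is paired with
         * the primary plane that shares its index (mode_info->planes[i]).
         */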
4178         for (i = 0; i < dm->dc->caps.max_streams; i++)
4179                 if (amdgpu_dm_crtc_init(dm, mode_info->planes[i], i)) {
4180                         DRM_ERROR("KMS: Failed to initialize crtc\n");
4181                         goto fail;
4182                 }
4183
4184 #if defined(CONFIG_DRM_AMD_DC_DCN)
4185         /* Use the outbox interrupt to receive notifications from DMUB firmware */
4186         switch (adev->asic_type) {
4187         case CHIP_SIENNA_CICHLID:
4188         case CHIP_NAVY_FLOUNDER:
4189         case CHIP_YELLOW_CARP:
4190         case CHIP_RENOIR:
4191                 if (register_outbox_irq_handlers(dm->adev)) {
4192                         DRM_ERROR("DM: Failed to initialize IRQ\n");
4193                         goto fail;
4194                 }
4195                 break;
4196         default:
4197                 DRM_DEBUG_KMS("Unsupported ASIC type for outbox: 0x%X\n", adev->asic_type);
4198         }
4199 #endif
4200
4201         /* Loop over all connectors on the board. */
4202         for (i = 0; i < link_cnt; i++) {
4203                 struct dc_link *link = NULL;
4204
4205                 if (i > AMDGPU_DM_MAX_DISPLAY_INDEX) {
4206                         DRM_ERROR(
4207                                 "KMS: Cannot support more than %d display indexes\n",
4208                                         AMDGPU_DM_MAX_DISPLAY_INDEX);
4209                         continue;
4210                 }
4211
4212                 aconnector = kzalloc(sizeof(*aconnector), GFP_KERNEL);
4213                 if (!aconnector)
4214                         goto fail;
4215
4216                 aencoder = kzalloc(sizeof(*aencoder), GFP_KERNEL);
4217                 if (!aencoder)
4218                         goto fail;
4219
4220                 if (amdgpu_dm_encoder_init(dm->ddev, aencoder, i)) {
4221                         DRM_ERROR("KMS: Failed to initialize encoder\n");
4222                         goto fail;
4223                 }
4224
4225                 if (amdgpu_dm_connector_init(dm, aconnector, i, aencoder)) {
4226                         DRM_ERROR("KMS: Failed to initialize connector\n");
4227                         goto fail;
4228                 }
4229
4230                 link = dc_get_link_at_index(dm->dc, i);
4231
4232                 if (!dc_link_detect_sink(link, &new_connection_type))
4233                         DRM_ERROR("KMS: Failed to detect connector\n");
4234
4235                 if (aconnector->base.force && new_connection_type == dc_connection_none) {
4236                         emulated_link_detect(link);
4237                         amdgpu_dm_update_connector_after_detect(aconnector);
4238
4239                 } else if (dc_link_detect(link, DETECT_REASON_BOOT)) {
4240                         amdgpu_dm_update_connector_after_detect(aconnector);
4241                         register_backlight_device(dm, link);
4242
4243                         if (dm->num_of_edps)
4244                                 update_connector_ext_caps(aconnector);
4245                         if (amdgpu_dc_feature_mask & DC_PSR_MASK)
4246                                 amdgpu_dm_set_psr_caps(link);
4247
4248                         /* TODO: Fix vblank control helpers to delay PSR entry to allow this when
4249                          * PSR is also supported.
4250                          */
4251                         if (link->psr_settings.psr_feature_enabled)
4252                                 adev_to_drm(adev)->vblank_disable_immediate = false;
4253                 }
4254
4255
4256         }
4257
4258         /* Software is initialized. Now we can register interrupt handlers. */
4259         switch (adev->asic_type) {
4260 #if defined(CONFIG_DRM_AMD_DC_SI)
4261         case CHIP_TAHITI:
4262         case CHIP_PITCAIRN:
4263         case CHIP_VERDE:
4264         case CHIP_OLAND:
4265                 if (dce60_register_irq_handlers(dm->adev)) {
4266                         DRM_ERROR("DM: Failed to initialize IRQ\n");
4267                         goto fail;
4268                 }
4269                 break;
4270 #endif
4271         case CHIP_BONAIRE:
4272         case CHIP_HAWAII:
4273         case CHIP_KAVERI:
4274         case CHIP_KABINI:
4275         case CHIP_MULLINS:
4276         case CHIP_TONGA:
4277         case CHIP_FIJI:
4278         case CHIP_CARRIZO:
4279         case CHIP_STONEY:
4280         case CHIP_POLARIS11:
4281         case CHIP_POLARIS10:
4282         case CHIP_POLARIS12:
4283         case CHIP_VEGAM:
4284         case CHIP_VEGA10:
4285         case CHIP_VEGA12:
4286         case CHIP_VEGA20:
4287                 if (dce110_register_irq_handlers(dm->adev)) {
4288                         DRM_ERROR("DM: Failed to initialize IRQ\n");
4289                         goto fail;
4290                 }
4291                 break;
4292 #if defined(CONFIG_DRM_AMD_DC_DCN)
4293         case CHIP_RAVEN:
4294         case CHIP_NAVI12:
4295         case CHIP_NAVI10:
4296         case CHIP_NAVI14:
4297         case CHIP_RENOIR:
4298         case CHIP_SIENNA_CICHLID:
4299         case CHIP_NAVY_FLOUNDER:
4300         case CHIP_DIMGREY_CAVEFISH:
4301         case CHIP_BEIGE_GOBY:
4302         case CHIP_VANGOGH:
4303         case CHIP_YELLOW_CARP:
4304                 if (dcn10_register_irq_handlers(dm->adev)) {
4305                         DRM_ERROR("DM: Failed to initialize IRQ\n");
4306                         goto fail;
4307                 }
4308                 break;
4309 #endif
4310         default:
4311                 DRM_ERROR("Unsupported ASIC type: 0x%X\n", adev->asic_type);
4312                 goto fail;
4313         }
4314
4315         return 0;
4316 fail:
4317         kfree(aencoder);
4318         kfree(aconnector);
4319
4320         return -EINVAL;
4321 }
4322
4323 static void amdgpu_dm_destroy_drm_device(struct amdgpu_display_manager *dm)
4324 {
4325         drm_atomic_private_obj_fini(&dm->atomic_obj);
4327 }
4328
4329 /******************************************************************************
4330  * amdgpu_display_funcs functions
4331  *****************************************************************************/
4332
4333 /*
4334  * dm_bandwidth_update - program display watermarks
4335  *
4336  * @adev: amdgpu_device pointer
4337  *
4338  * Calculate and program the display watermarks and line buffer allocation.
4339  */
4340 static void dm_bandwidth_update(struct amdgpu_device *adev)
4341 {
4342         /* TODO: implement later */
4343 }
4344
4345 static const struct amdgpu_display_funcs dm_display_funcs = {
4346         .bandwidth_update = dm_bandwidth_update, /* called unconditionally */
4347         .vblank_get_counter = dm_vblank_get_counter, /* called unconditionally */
4348         .backlight_set_level = NULL, /* never called for DC */
4349         .backlight_get_level = NULL, /* never called for DC */
4350         .hpd_sense = NULL, /* called unconditionally */
4351         .hpd_set_polarity = NULL, /* called unconditionally */
4352         .hpd_get_gpio_reg = NULL, /* VBIOS parsing. DAL does it. */
4353         .page_flip_get_scanoutpos =
4354                 dm_crtc_get_scanoutpos, /* called unconditionally */
4355         .add_encoder = NULL, /* VBIOS parsing. DAL does it. */
4356         .add_connector = NULL, /* VBIOS parsing. DAL does it. */
4357 };
4358
4359 #if defined(CONFIG_DEBUG_KERNEL_DC)
4360
4361 static ssize_t s3_debug_store(struct device *device,
4362                               struct device_attribute *attr,
4363                               const char *buf,
4364                               size_t count)
4365 {
4366         int ret;
4367         int s3_state;
4368         struct drm_device *drm_dev = dev_get_drvdata(device);
4369         struct amdgpu_device *adev = drm_to_adev(drm_dev);
4370
4371         ret = kstrtoint(buf, 0, &s3_state);
4372
4373         if (ret == 0) {
4374                 if (s3_state) {
4375                         dm_resume(adev);
4376                         drm_kms_helper_hotplug_event(adev_to_drm(adev));
4377                 } else
4378                         dm_suspend(adev);
4379         }
4380
4381         return ret == 0 ? count : 0;
4382 }
4383
4384 DEVICE_ATTR_WO(s3_debug);
4385
4386 #endif
4387
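/*
 * Per-ASIC display resource counts: CRTCs, HPD lines and digital encoders
 * (DIG). These seed amdgpu's mode_info before DC is brought up;
 * amdgpu_dm_initialize_drm_device() later overwrites num_crtc with the
 * stream count DC actually reports.
 */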
4388 static int dm_early_init(void *handle)
4389 {
4390         struct amdgpu_device *adev = (struct amdgpu_device *)handle;
4391
4392         switch (adev->asic_type) {
4393 #if defined(CONFIG_DRM_AMD_DC_SI)
4394         case CHIP_TAHITI:
4395         case CHIP_PITCAIRN:
4396         case CHIP_VERDE:
4397                 adev->mode_info.num_crtc = 6;
4398                 adev->mode_info.num_hpd = 6;
4399                 adev->mode_info.num_dig = 6;
4400                 break;
4401         case CHIP_OLAND:
4402                 adev->mode_info.num_crtc = 2;
4403                 adev->mode_info.num_hpd = 2;
4404                 adev->mode_info.num_dig = 2;
4405                 break;
4406 #endif
4407         case CHIP_BONAIRE:
4408         case CHIP_HAWAII:
4409                 adev->mode_info.num_crtc = 6;
4410                 adev->mode_info.num_hpd = 6;
4411                 adev->mode_info.num_dig = 6;
4412                 break;
4413         case CHIP_KAVERI:
4414                 adev->mode_info.num_crtc = 4;
4415                 adev->mode_info.num_hpd = 6;
4416                 adev->mode_info.num_dig = 7;
4417                 break;
4418         case CHIP_KABINI:
4419         case CHIP_MULLINS:
4420                 adev->mode_info.num_crtc = 2;
4421                 adev->mode_info.num_hpd = 6;
4422                 adev->mode_info.num_dig = 6;
4423                 break;
4424         case CHIP_FIJI:
4425         case CHIP_TONGA:
4426                 adev->mode_info.num_crtc = 6;
4427                 adev->mode_info.num_hpd = 6;
4428                 adev->mode_info.num_dig = 7;
4429                 break;
4430         case CHIP_CARRIZO:
4431                 adev->mode_info.num_crtc = 3;
4432                 adev->mode_info.num_hpd = 6;
4433                 adev->mode_info.num_dig = 9;
4434                 break;
4435         case CHIP_STONEY:
4436                 adev->mode_info.num_crtc = 2;
4437                 adev->mode_info.num_hpd = 6;
4438                 adev->mode_info.num_dig = 9;
4439                 break;
4440         case CHIP_POLARIS11:
4441         case CHIP_POLARIS12:
4442                 adev->mode_info.num_crtc = 5;
4443                 adev->mode_info.num_hpd = 5;
4444                 adev->mode_info.num_dig = 5;
4445                 break;
4446         case CHIP_POLARIS10:
4447         case CHIP_VEGAM:
4448                 adev->mode_info.num_crtc = 6;
4449                 adev->mode_info.num_hpd = 6;
4450                 adev->mode_info.num_dig = 6;
4451                 break;
4452         case CHIP_VEGA10:
4453         case CHIP_VEGA12:
4454         case CHIP_VEGA20:
4455                 adev->mode_info.num_crtc = 6;
4456                 adev->mode_info.num_hpd = 6;
4457                 adev->mode_info.num_dig = 6;
4458                 break;
4459 #if defined(CONFIG_DRM_AMD_DC_DCN)
4460         case CHIP_RAVEN:
4461         case CHIP_RENOIR:
4462         case CHIP_VANGOGH:
4463                 adev->mode_info.num_crtc = 4;
4464                 adev->mode_info.num_hpd = 4;
4465                 adev->mode_info.num_dig = 4;
4466                 break;
4467         case CHIP_NAVI10:
4468         case CHIP_NAVI12:
4469         case CHIP_SIENNA_CICHLID:
4470         case CHIP_NAVY_FLOUNDER:
4471                 adev->mode_info.num_crtc = 6;
4472                 adev->mode_info.num_hpd = 6;
4473                 adev->mode_info.num_dig = 6;
4474                 break;
4475         case CHIP_YELLOW_CARP:
4476                 adev->mode_info.num_crtc = 4;
4477                 adev->mode_info.num_hpd = 4;
4478                 adev->mode_info.num_dig = 4;
4479                 break;
4480         case CHIP_NAVI14:
4481         case CHIP_DIMGREY_CAVEFISH:
4482                 adev->mode_info.num_crtc = 5;
4483                 adev->mode_info.num_hpd = 5;
4484                 adev->mode_info.num_dig = 5;
4485                 break;
4486         case CHIP_BEIGE_GOBY:
4487                 adev->mode_info.num_crtc = 2;
4488                 adev->mode_info.num_hpd = 2;
4489                 adev->mode_info.num_dig = 2;
4490                 break;
4491 #endif
4492         default:
4493                 DRM_ERROR("Unsupported ASIC type: 0x%X\n", adev->asic_type);
4494                 return -EINVAL;
4495         }
4496
4497         amdgpu_dm_set_irq_funcs(adev);
4498
4499         if (adev->mode_info.funcs == NULL)
4500                 adev->mode_info.funcs = &dm_display_funcs;
4501
4502         /*
4503          * Note: Do NOT change adev->audio_endpt_rreg and
4504          * adev->audio_endpt_wreg because they are initialised in
4505          * amdgpu_device_init()
4506          */
4507 #if defined(CONFIG_DEBUG_KERNEL_DC)
4508         device_create_file(
4509                 adev_to_drm(adev)->dev,
4510                 &dev_attr_s3_debug);
4511 #endif
4512
4513         return 0;
4514 }
4515
4516 static bool modeset_required(struct drm_crtc_state *crtc_state,
4517                              struct dc_stream_state *new_stream,
4518                              struct dc_stream_state *old_stream)
4519 {
4520         return crtc_state->active && drm_atomic_crtc_needs_modeset(crtc_state);
4521 }
4522
4523 static bool modereset_required(struct drm_crtc_state *crtc_state)
4524 {
4525         return !crtc_state->active && drm_atomic_crtc_needs_modeset(crtc_state);
4526 }
4527
4528 static void amdgpu_dm_encoder_destroy(struct drm_encoder *encoder)
4529 {
4530         drm_encoder_cleanup(encoder);
4531         kfree(encoder);
4532 }
4533
4534 static const struct drm_encoder_funcs amdgpu_dm_encoder_funcs = {
4535         .destroy = amdgpu_dm_encoder_destroy,
4536 };
4537
4538
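/*
 * Scaling limits are expressed in 1/1000th units: a max_upscale of 16000
 * allows up to 16.0x magnification, while a min_downscale of 250 lets the
 * source shrink to no less than 0.25x of its size.
 */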
4539 static void get_min_max_dc_plane_scaling(struct drm_device *dev,
4540                                          struct drm_framebuffer *fb,
4541                                          int *min_downscale, int *max_upscale)
4542 {
4543         struct amdgpu_device *adev = drm_to_adev(dev);
4544         struct dc *dc = adev->dm.dc;
4545         /* Caps for all supported planes are the same on DCE and DCN 1 - 3 */
4546         struct dc_plane_cap *plane_cap = &dc->caps.planes[0];
4547
4548         switch (fb->format->format) {
4549         case DRM_FORMAT_P010:
4550         case DRM_FORMAT_NV12:
4551         case DRM_FORMAT_NV21:
4552                 *max_upscale = plane_cap->max_upscale_factor.nv12;
4553                 *min_downscale = plane_cap->max_downscale_factor.nv12;
4554                 break;
4555
4556         case DRM_FORMAT_XRGB16161616F:
4557         case DRM_FORMAT_ARGB16161616F:
4558         case DRM_FORMAT_XBGR16161616F:
4559         case DRM_FORMAT_ABGR16161616F:
4560                 *max_upscale = plane_cap->max_upscale_factor.fp16;
4561                 *min_downscale = plane_cap->max_downscale_factor.fp16;
4562                 break;
4563
4564         default:
4565                 *max_upscale = plane_cap->max_upscale_factor.argb8888;
4566                 *min_downscale = plane_cap->max_downscale_factor.argb8888;
4567                 break;
4568         }
4569
4570         /*
4571          * A factor of 1 in the plane_cap means scaling is not allowed, i.e. a
4572          * scaling factor of 1.0 == 1000 units is used.
4573          */
4574         if (*max_upscale == 1)
4575                 *max_upscale = 1000;
4576
4577         if (*min_downscale == 1)
4578                 *min_downscale = 1000;
4579 }
4580
4581
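/*
 * Worked example for the ratio checks below: a 1920-wide source mapped to a
 * 960-wide destination yields scale_w = 960 * 1000 / 1920 = 500 (0.5x),
 * which is accepted as long as the plane's min_downscale is 500 or less.
 */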
4582 static int fill_dc_scaling_info(const struct drm_plane_state *state,
4583                                 struct dc_scaling_info *scaling_info)
4584 {
4585         int scale_w, scale_h, min_downscale, max_upscale;
4586
4587         memset(scaling_info, 0, sizeof(*scaling_info));
4588
4589         /* Source coordinates are 16.16 fixed point; drop the fractional part for now. */
4590         scaling_info->src_rect.x = state->src_x >> 16;
4591         scaling_info->src_rect.y = state->src_y >> 16;
4592
4593         /*
4594          * For reasons we don't (yet) fully understand, a non-zero
4595          * src_y coordinate into an NV12 buffer can cause a
4596          * system hang. To avoid hangs (and maybe be overly cautious)
4597          * let's reject both non-zero src_x and src_y.
4598          *
4599          * We currently know of only one use-case to reproduce a
4600          * scenario with non-zero src_x and src_y for NV12, which
4601          * is to gesture the YouTube Android app into full screen
4602          * on ChromeOS.
4603          */
4604         if (state->fb &&
4605             state->fb->format->format == DRM_FORMAT_NV12 &&
4606             (scaling_info->src_rect.x != 0 ||
4607              scaling_info->src_rect.y != 0))
4608                 return -EINVAL;
4609
4610         scaling_info->src_rect.width = state->src_w >> 16;
4611         if (scaling_info->src_rect.width == 0)
4612                 return -EINVAL;
4613
4614         scaling_info->src_rect.height = state->src_h >> 16;
4615         if (scaling_info->src_rect.height == 0)
4616                 return -EINVAL;
4617
4618         scaling_info->dst_rect.x = state->crtc_x;
4619         scaling_info->dst_rect.y = state->crtc_y;
4620
4621         if (state->crtc_w == 0)
4622                 return -EINVAL;
4623
4624         scaling_info->dst_rect.width = state->crtc_w;
4625
4626         if (state->crtc_h == 0)
4627                 return -EINVAL;
4628
4629         scaling_info->dst_rect.height = state->crtc_h;
4630
4631         /* DRM doesn't specify clipping on destination output. */
4632         scaling_info->clip_rect = scaling_info->dst_rect;
4633
4634         /* Validate scaling per-format with DC plane caps */
4635         if (state->plane && state->plane->dev && state->fb) {
4636                 get_min_max_dc_plane_scaling(state->plane->dev, state->fb,
4637                                              &min_downscale, &max_upscale);
4638         } else {
4639                 min_downscale = 250;
4640                 max_upscale = 16000;
4641         }
4642
4643         scale_w = scaling_info->dst_rect.width * 1000 /
4644                   scaling_info->src_rect.width;
4645
4646         if (scale_w < min_downscale || scale_w > max_upscale)
4647                 return -EINVAL;
4648
4649         scale_h = scaling_info->dst_rect.height * 1000 /
4650                   scaling_info->src_rect.height;
4651
4652         if (scale_h < min_downscale || scale_h > max_upscale)
4653                 return -EINVAL;
4654
4655         /*
4656          * The "scaling_quality" can be ignored for now; with quality = 0,
4657          * DC assumes reasonable defaults based on the format.
4658          */
4659
4660         return 0;
4661 }
4662
4663 static void
4664 fill_gfx8_tiling_info_from_flags(union dc_tiling_info *tiling_info,
4665                                  uint64_t tiling_flags)
4666 {
4667         /* Fill GFX8 params */
4668         if (AMDGPU_TILING_GET(tiling_flags, ARRAY_MODE) == DC_ARRAY_2D_TILED_THIN1) {
4669                 unsigned int bankw, bankh, mtaspect, tile_split, num_banks;
4670
4671                 bankw = AMDGPU_TILING_GET(tiling_flags, BANK_WIDTH);
4672                 bankh = AMDGPU_TILING_GET(tiling_flags, BANK_HEIGHT);
4673                 mtaspect = AMDGPU_TILING_GET(tiling_flags, MACRO_TILE_ASPECT);
4674                 tile_split = AMDGPU_TILING_GET(tiling_flags, TILE_SPLIT);
4675                 num_banks = AMDGPU_TILING_GET(tiling_flags, NUM_BANKS);
4676
4677                 /* XXX fix me for VI */
4678                 tiling_info->gfx8.num_banks = num_banks;
4679                 tiling_info->gfx8.array_mode =
4680                                 DC_ARRAY_2D_TILED_THIN1;
4681                 tiling_info->gfx8.tile_split = tile_split;
4682                 tiling_info->gfx8.bank_width = bankw;
4683                 tiling_info->gfx8.bank_height = bankh;
4684                 tiling_info->gfx8.tile_aspect = mtaspect;
4685                 tiling_info->gfx8.tile_mode =
4686                                 DC_ADDR_SURF_MICRO_TILING_DISPLAY;
4687         } else if (AMDGPU_TILING_GET(tiling_flags, ARRAY_MODE)
4688                         == DC_ARRAY_1D_TILED_THIN1) {
4689                 tiling_info->gfx8.array_mode = DC_ARRAY_1D_TILED_THIN1;
4690         }
4691
4692         tiling_info->gfx8.pipe_config =
4693                         AMDGPU_TILING_GET(tiling_flags, PIPE_CONFIG);
4694 }
4695
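/*
 * GFX8 and older parts describe tiling with the AMDGPU_TILING_* flags
 * handled above; GFX9 and newer instead derive it from the GB_ADDR_CONFIG
 * fields below plus the framebuffer's format modifier (see
 * fill_plane_buffer_attributes()).
 */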
4696 static void
4697 fill_gfx9_tiling_info_from_device(const struct amdgpu_device *adev,
4698                                   union dc_tiling_info *tiling_info)
4699 {
4700         tiling_info->gfx9.num_pipes =
4701                 adev->gfx.config.gb_addr_config_fields.num_pipes;
4702         tiling_info->gfx9.num_banks =
4703                 adev->gfx.config.gb_addr_config_fields.num_banks;
4704         tiling_info->gfx9.pipe_interleave =
4705                 adev->gfx.config.gb_addr_config_fields.pipe_interleave_size;
4706         tiling_info->gfx9.num_shader_engines =
4707                 adev->gfx.config.gb_addr_config_fields.num_se;
4708         tiling_info->gfx9.max_compressed_frags =
4709                 adev->gfx.config.gb_addr_config_fields.max_compress_frags;
4710         tiling_info->gfx9.num_rb_per_se =
4711                 adev->gfx.config.gb_addr_config_fields.num_rb_per_se;
4712         tiling_info->gfx9.shaderEnable = 1;
4713         if (adev->asic_type == CHIP_SIENNA_CICHLID ||
4714             adev->asic_type == CHIP_NAVY_FLOUNDER ||
4715             adev->asic_type == CHIP_DIMGREY_CAVEFISH ||
4716             adev->asic_type == CHIP_BEIGE_GOBY ||
4717             adev->asic_type == CHIP_YELLOW_CARP ||
4718             adev->asic_type == CHIP_VANGOGH)
4719                 tiling_info->gfx9.num_pkrs = adev->gfx.config.gb_addr_config_fields.num_pkrs;
4720 }
4721
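/*
 * Check with DC whether the surface can really be DCC-compressed as
 * configured. Video (YUV) formats never take the DCC path, and a surface
 * whose reported caps require independent 64B blocks cannot use a modifier
 * that lacks them.
 */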
4722 static int
4723 validate_dcc(struct amdgpu_device *adev,
4724              const enum surface_pixel_format format,
4725              const enum dc_rotation_angle rotation,
4726              const union dc_tiling_info *tiling_info,
4727              const struct dc_plane_dcc_param *dcc,
4728              const struct dc_plane_address *address,
4729              const struct plane_size *plane_size)
4730 {
4731         struct dc *dc = adev->dm.dc;
4732         struct dc_dcc_surface_param input;
4733         struct dc_surface_dcc_cap output;
4734
4735         memset(&input, 0, sizeof(input));
4736         memset(&output, 0, sizeof(output));
4737
4738         if (!dcc->enable)
4739                 return 0;
4740
4741         if (format >= SURFACE_PIXEL_FORMAT_VIDEO_BEGIN ||
4742             !dc->cap_funcs.get_dcc_compression_cap)
4743                 return -EINVAL;
4744
4745         input.format = format;
4746         input.surface_size.width = plane_size->surface_size.width;
4747         input.surface_size.height = plane_size->surface_size.height;
4748         input.swizzle_mode = tiling_info->gfx9.swizzle;
4749
4750         if (rotation == ROTATION_ANGLE_0 || rotation == ROTATION_ANGLE_180)
4751                 input.scan = SCAN_DIRECTION_HORIZONTAL;
4752         else if (rotation == ROTATION_ANGLE_90 || rotation == ROTATION_ANGLE_270)
4753                 input.scan = SCAN_DIRECTION_VERTICAL;
4754
4755         if (!dc->cap_funcs.get_dcc_compression_cap(dc, &input, &output))
4756                 return -EINVAL;
4757
4758         if (!output.capable)
4759                 return -EINVAL;
4760
4761         if (dcc->independent_64b_blks == 0 &&
4762             output.grph.rgb.independent_64b_blks != 0)
4763                 return -EINVAL;
4764
4765         return 0;
4766 }
4767
4768 static bool
4769 modifier_has_dcc(uint64_t modifier)
4770 {
4771         return IS_AMD_FMT_MOD(modifier) && AMD_FMT_MOD_GET(DCC, modifier);
4772 }
4773
4774 static unsigned
4775 modifier_gfx9_swizzle_mode(uint64_t modifier)
4776 {
4777         if (modifier == DRM_FORMAT_MOD_LINEAR)
4778                 return 0;
4779
4780         return AMD_FMT_MOD_GET(TILE, modifier);
4781 }
4782
4783 static const struct drm_format_info *
4784 amd_get_format_info(const struct drm_mode_fb_cmd2 *cmd)
4785 {
4786         return amdgpu_lookup_format_info(cmd->pixel_format, cmd->modifier[0]);
4787 }
4788
4789 static void
4790 fill_gfx9_tiling_info_from_modifier(const struct amdgpu_device *adev,
4791                                     union dc_tiling_info *tiling_info,
4792                                     uint64_t modifier)
4793 {
4794         unsigned int mod_bank_xor_bits = AMD_FMT_MOD_GET(BANK_XOR_BITS, modifier);
4795         unsigned int mod_pipe_xor_bits = AMD_FMT_MOD_GET(PIPE_XOR_BITS, modifier);
4796         unsigned int pkrs_log2 = AMD_FMT_MOD_GET(PACKERS, modifier);
4797         unsigned int pipes_log2 = min(4u, mod_pipe_xor_bits);
4798
4799         fill_gfx9_tiling_info_from_device(adev, tiling_info);
4800
4801         if (!IS_AMD_FMT_MOD(modifier))
4802                 return;
4803
4804         tiling_info->gfx9.num_pipes = 1u << pipes_log2;
4805         tiling_info->gfx9.num_shader_engines = 1u << (mod_pipe_xor_bits - pipes_log2);
4806
4807         if (adev->family >= AMDGPU_FAMILY_NV) {
4808                 tiling_info->gfx9.num_pkrs = 1u << pkrs_log2;
4809         } else {
4810                 tiling_info->gfx9.num_banks = 1u << mod_bank_xor_bits;
4811
4812                 /* for DCC we know it isn't rb aligned, so rb_per_se doesn't matter. */
4813         }
4814 }
4815
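/*
 * The two low bits of a GFX9+ swizzle mode encode the micro-tile ordering;
 * dm_plane_format_mod_supported() masks them out below to filter display
 * (_D) micro-tiling for unsupported bit depths.
 */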
4816 enum dm_micro_swizzle {
4817         MICRO_SWIZZLE_Z = 0,
4818         MICRO_SWIZZLE_S = 1,
4819         MICRO_SWIZZLE_D = 2,
4820         MICRO_SWIZZLE_R = 3
4821 };
4822
4823 static bool dm_plane_format_mod_supported(struct drm_plane *plane,
4824                                           uint32_t format,
4825                                           uint64_t modifier)
4826 {
4827         struct amdgpu_device *adev = drm_to_adev(plane->dev);
4828         const struct drm_format_info *info = drm_format_info(format);
4829         int i;
4830
4831         enum dm_micro_swizzle microtile = modifier_gfx9_swizzle_mode(modifier) & 3;
4832
4833         if (!info)
4834                 return false;
4835
4836         /*
4837          * We always have to allow these modifiers:
4838          * 1. Core DRM checks for LINEAR support if userspace does not provide modifiers.
4839          * 2. Not passing any modifiers is the same as explicitly passing INVALID.
4840          */
4841         if (modifier == DRM_FORMAT_MOD_LINEAR ||
4842             modifier == DRM_FORMAT_MOD_INVALID) {
4843                 return true;
4844         }
4845
4846         /* Check that the modifier is on the list of the plane's supported modifiers. */
4847         for (i = 0; i < plane->modifier_count; i++) {
4848                 if (modifier == plane->modifiers[i])
4849                         break;
4850         }
4851         if (i == plane->modifier_count)
4852                 return false;
4853
4854         /*
4855          * For D swizzle the canonical modifier depends on the bpp, so check
4856          * it here.
4857          */
4858         if (AMD_FMT_MOD_GET(TILE_VERSION, modifier) == AMD_FMT_MOD_TILE_VER_GFX9 &&
4859             adev->family >= AMDGPU_FAMILY_NV) {
4860                 if (microtile == MICRO_SWIZZLE_D && info->cpp[0] == 4)
4861                         return false;
4862         }
4863
4864         if (adev->family >= AMDGPU_FAMILY_RV && microtile == MICRO_SWIZZLE_D &&
4865             info->cpp[0] < 8)
4866                 return false;
4867
4868         if (modifier_has_dcc(modifier)) {
4869                 /* Per radeonsi comments, 16/64 bpp are more complicated. */
4870                 if (info->cpp[0] != 4)
4871                         return false;
4872                 /* We support multi-planar formats, but not when combined with
4873                  * additional DCC metadata planes. */
4874                 if (info->num_planes > 1)
4875                         return false;
4876         }
4877
4878         return true;
4879 }
4880
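/*
 * Append one modifier to a heap-allocated array, doubling the capacity when
 * it fills up. On allocation failure the array is freed and *mods set to
 * NULL, so callers only need a single !*mods check once they are done
 * adding entries.
 */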
4881 static void
4882 add_modifier(uint64_t **mods, uint64_t *size, uint64_t *cap, uint64_t mod)
4883 {
4884         if (!*mods)
4885                 return;
4886
4887         if (*cap - *size < 1) {
4888                 uint64_t new_cap = *cap * 2;
4889                 uint64_t *new_mods = kmalloc(new_cap * sizeof(uint64_t), GFP_KERNEL);
4890
4891                 if (!new_mods) {
4892                         kfree(*mods);
4893                         *mods = NULL;
4894                         return;
4895                 }
4896
4897                 memcpy(new_mods, *mods, sizeof(uint64_t) * *size);
4898                 kfree(*mods);
4899                 *mods = new_mods;
4900                 *cap = new_cap;
4901         }
4902
4903         (*mods)[*size] = mod;
4904         *size += 1;
4905 }
4906
4907 static void
4908 add_gfx9_modifiers(const struct amdgpu_device *adev,
4909                    uint64_t **mods, uint64_t *size, uint64_t *capacity)
4910 {
4911         int pipes = ilog2(adev->gfx.config.gb_addr_config_fields.num_pipes);
4912         int pipe_xor_bits = min(8, pipes +
4913                                 ilog2(adev->gfx.config.gb_addr_config_fields.num_se));
4914         int bank_xor_bits = min(8 - pipe_xor_bits,
4915                                 ilog2(adev->gfx.config.gb_addr_config_fields.num_banks));
4916         int rb = ilog2(adev->gfx.config.gb_addr_config_fields.num_se) +
4917                  ilog2(adev->gfx.config.gb_addr_config_fields.num_rb_per_se);
4918
4919
4920         if (adev->family == AMDGPU_FAMILY_RV) {
4921                 /* Raven2 and later */
4922                 bool has_constant_encode = adev->asic_type > CHIP_RAVEN || adev->external_rev_id >= 0x81;
4923
4924                 /*
4925                  * No _D DCC swizzles yet because we only allow 32bpp, which
4926                  * doesn't support _D on DCN
4927                  */
4928
4929                 if (has_constant_encode) {
4930                         add_modifier(mods, size, capacity, AMD_FMT_MOD |
4931                                     AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S_X) |
4932                                     AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9) |
4933                                     AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
4934                                     AMD_FMT_MOD_SET(BANK_XOR_BITS, bank_xor_bits) |
4935                                     AMD_FMT_MOD_SET(DCC, 1) |
4936                                     AMD_FMT_MOD_SET(DCC_INDEPENDENT_64B, 1) |
4937                                     AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_64B) |
4938                                     AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 1));
4939                 }
4940
4941                 add_modifier(mods, size, capacity, AMD_FMT_MOD |
4942                             AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S_X) |
4943                             AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9) |
4944                             AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
4945                             AMD_FMT_MOD_SET(BANK_XOR_BITS, bank_xor_bits) |
4946                             AMD_FMT_MOD_SET(DCC, 1) |
4947                             AMD_FMT_MOD_SET(DCC_INDEPENDENT_64B, 1) |
4948                             AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_64B) |
4949                             AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 0));
4950
4951                 if (has_constant_encode) {
4952                         add_modifier(mods, size, capacity, AMD_FMT_MOD |
4953                                     AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S_X) |
4954                                     AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9) |
4955                                     AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
4956                                     AMD_FMT_MOD_SET(BANK_XOR_BITS, bank_xor_bits) |
4957                                     AMD_FMT_MOD_SET(DCC, 1) |
4958                                     AMD_FMT_MOD_SET(DCC_RETILE, 1) |
4959                                     AMD_FMT_MOD_SET(DCC_INDEPENDENT_64B, 1) |
4960                                     AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_64B) |
4962                                     AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 1) |
4963                                     AMD_FMT_MOD_SET(RB, rb) |
4964                                     AMD_FMT_MOD_SET(PIPE, pipes));
4965                 }
4966
4967                 add_modifier(mods, size, capacity, AMD_FMT_MOD |
4968                             AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S_X) |
4969                             AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9) |
4970                             AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
4971                             AMD_FMT_MOD_SET(BANK_XOR_BITS, bank_xor_bits) |
4972                             AMD_FMT_MOD_SET(DCC, 1) |
4973                             AMD_FMT_MOD_SET(DCC_RETILE, 1) |
4974                             AMD_FMT_MOD_SET(DCC_INDEPENDENT_64B, 1) |
4975                             AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_64B) |
4976                             AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 0) |
4977                             AMD_FMT_MOD_SET(RB, rb) |
4978                             AMD_FMT_MOD_SET(PIPE, pipes));
4979         }
4980
4981         /*
4982          * Only supported for 64bpp on Raven, will be filtered on format in
4983          * dm_plane_format_mod_supported.
4984          */
4985         add_modifier(mods, size, capacity, AMD_FMT_MOD |
4986                     AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_D_X) |
4987                     AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9) |
4988                     AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
4989                     AMD_FMT_MOD_SET(BANK_XOR_BITS, bank_xor_bits));
4990
4991         if (adev->family == AMDGPU_FAMILY_RV) {
4992                 add_modifier(mods, size, capacity, AMD_FMT_MOD |
4993                             AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S_X) |
4994                             AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9) |
4995                             AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
4996                             AMD_FMT_MOD_SET(BANK_XOR_BITS, bank_xor_bits));
4997         }
4998
4999         /*
5000          * Only supported for 64bpp on Raven, will be filtered on format in
5001          * dm_plane_format_mod_supported.
5002          */
5003         add_modifier(mods, size, capacity, AMD_FMT_MOD |
5004                     AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_D) |
5005                     AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9));
5006
5007         if (adev->family == AMDGPU_FAMILY_RV) {
5008                 add_modifier(mods, size, capacity, AMD_FMT_MOD |
5009                             AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S) |
5010                             AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9));
5011         }
5012 }
5013
5014 static void
5015 add_gfx10_1_modifiers(const struct amdgpu_device *adev,
5016                       uint64_t **mods, uint64_t *size, uint64_t *capacity)
5017 {
5018         int pipe_xor_bits = ilog2(adev->gfx.config.gb_addr_config_fields.num_pipes);
5019
5020         add_modifier(mods, size, capacity, AMD_FMT_MOD |
5021                     AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_R_X) |
5022                     AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10) |
5023                     AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
5024                     AMD_FMT_MOD_SET(DCC, 1) |
5025                     AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 1) |
5026                     AMD_FMT_MOD_SET(DCC_INDEPENDENT_64B, 1) |
5027                     AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_64B));
5028
5029         add_modifier(mods, size, capacity, AMD_FMT_MOD |
5030                     AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_R_X) |
5031                     AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10) |
5032                     AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
5033                     AMD_FMT_MOD_SET(DCC, 1) |
5034                     AMD_FMT_MOD_SET(DCC_RETILE, 1) |
5035                     AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 1) |
5036                     AMD_FMT_MOD_SET(DCC_INDEPENDENT_64B, 1) |
5037                     AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_64B));
5038
5039         add_modifier(mods, size, capacity, AMD_FMT_MOD |
5040                     AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_R_X) |
5041                     AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10) |
5042                     AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits));
5043
5044         add_modifier(mods, size, capacity, AMD_FMT_MOD |
5045                     AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S_X) |
5046                     AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10) |
5047                     AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits));
5048
5049
5050         /* Only supported for 64bpp, will be filtered in dm_plane_format_mod_supported */
5051         add_modifier(mods, size, capacity, AMD_FMT_MOD |
5052                     AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_D) |
5053                     AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9));
5054
5055         add_modifier(mods, size, capacity, AMD_FMT_MOD |
5056                     AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S) |
5057                     AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9));
5058 }
5059
5060 static void
5061 add_gfx10_3_modifiers(const struct amdgpu_device *adev,
5062                       uint64_t **mods, uint64_t *size, uint64_t *capacity)
5063 {
5064         int pipe_xor_bits = ilog2(adev->gfx.config.gb_addr_config_fields.num_pipes);
5065         int pkrs = ilog2(adev->gfx.config.gb_addr_config_fields.num_pkrs);
5066
5067         add_modifier(mods, size, capacity, AMD_FMT_MOD |
5068                     AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_R_X) |
5069                     AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10_RBPLUS) |
5070                     AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
5071                     AMD_FMT_MOD_SET(PACKERS, pkrs) |
5072                     AMD_FMT_MOD_SET(DCC, 1) |
5073                     AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 1) |
5074                     AMD_FMT_MOD_SET(DCC_INDEPENDENT_64B, 1) |
5075                     AMD_FMT_MOD_SET(DCC_INDEPENDENT_128B, 1) |
5076                     AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_64B));
5077
5078         add_modifier(mods, size, capacity, AMD_FMT_MOD |
5079                     AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_R_X) |
5080                     AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10_RBPLUS) |
5081                     AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
5082                     AMD_FMT_MOD_SET(PACKERS, pkrs) |
5083                     AMD_FMT_MOD_SET(DCC, 1) |
5084                     AMD_FMT_MOD_SET(DCC_RETILE, 1) |
5085                     AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 1) |
5086                     AMD_FMT_MOD_SET(DCC_INDEPENDENT_64B, 1) |
5087                     AMD_FMT_MOD_SET(DCC_INDEPENDENT_128B, 1) |
5088                     AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_64B));
5089
5090         add_modifier(mods, size, capacity, AMD_FMT_MOD |
5091                     AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_R_X) |
5092                     AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10_RBPLUS) |
5093                     AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
5094                     AMD_FMT_MOD_SET(PACKERS, pkrs));
5095
5096         add_modifier(mods, size, capacity, AMD_FMT_MOD |
5097                     AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S_X) |
5098                     AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10_RBPLUS) |
5099                     AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
5100                     AMD_FMT_MOD_SET(PACKERS, pkrs));
5101
5102         /* Only supported for 64bpp, will be filtered in dm_plane_format_mod_supported */
5103         add_modifier(mods, size, capacity, AMD_FMT_MOD |
5104                     AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_D) |
5105                     AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9));
5106
5107         add_modifier(mods, size, capacity, AMD_FMT_MOD |
5108                     AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S) |
5109                     AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9));
5110 }
5111
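/*
 * Build the modifier list a plane advertises to userspace. Cursor planes
 * only get LINEAR; other planes get the per-family sets above, always
 * followed by LINEAR and the DRM_FORMAT_MOD_INVALID terminator.
 */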
5112 static int
5113 get_plane_modifiers(const struct amdgpu_device *adev, unsigned int plane_type, uint64_t **mods)
5114 {
5115         uint64_t size = 0, capacity = 128;
5116         *mods = NULL;
5117
5118         /* We have not hooked up any pre-GFX9 modifiers. */
5119         if (adev->family < AMDGPU_FAMILY_AI)
5120                 return 0;
5121
5122         *mods = kmalloc(capacity * sizeof(uint64_t), GFP_KERNEL);
5123
5124         if (plane_type == DRM_PLANE_TYPE_CURSOR) {
5125                 add_modifier(mods, &size, &capacity, DRM_FORMAT_MOD_LINEAR);
5126                 add_modifier(mods, &size, &capacity, DRM_FORMAT_MOD_INVALID);
5127                 return *mods ? 0 : -ENOMEM;
5128         }
5129
5130         switch (adev->family) {
5131         case AMDGPU_FAMILY_AI:
5132         case AMDGPU_FAMILY_RV:
5133                 add_gfx9_modifiers(adev, mods, &size, &capacity);
5134                 break;
5135         case AMDGPU_FAMILY_NV:
5136         case AMDGPU_FAMILY_VGH:
5137         case AMDGPU_FAMILY_YC:
5138                 if (adev->asic_type >= CHIP_SIENNA_CICHLID)
5139                         add_gfx10_3_modifiers(adev, mods, &size, &capacity);
5140                 else
5141                         add_gfx10_1_modifiers(adev, mods, &size, &capacity);
5142                 break;
5143         }
5144
5145         add_modifier(mods, &size, &capacity, DRM_FORMAT_MOD_LINEAR);
5146
5147         /* INVALID marks the end of the list. */
5148         add_modifier(mods, &size, &capacity, DRM_FORMAT_MOD_INVALID);
5149
5150         if (!*mods)
5151                 return -ENOMEM;
5152
5153         return 0;
5154 }
5155
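/*
 * With a DCC-capable modifier, plane 1 of the framebuffer carries the DCC
 * metadata: its offset and pitch become the meta address and meta pitch
 * handed to DC, unless DCC is being force-disabled (as on Raven during
 * suspend).
 */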
5156 static int
5157 fill_gfx9_plane_attributes_from_modifiers(struct amdgpu_device *adev,
5158                                           const struct amdgpu_framebuffer *afb,
5159                                           const enum surface_pixel_format format,
5160                                           const enum dc_rotation_angle rotation,
5161                                           const struct plane_size *plane_size,
5162                                           union dc_tiling_info *tiling_info,
5163                                           struct dc_plane_dcc_param *dcc,
5164                                           struct dc_plane_address *address,
5165                                           const bool force_disable_dcc)
5166 {
5167         const uint64_t modifier = afb->base.modifier;
5168         int ret = 0;
5169
5170         fill_gfx9_tiling_info_from_modifier(adev, tiling_info, modifier);
5171         tiling_info->gfx9.swizzle = modifier_gfx9_swizzle_mode(modifier);
5172
5173         if (modifier_has_dcc(modifier) && !force_disable_dcc) {
5174                 uint64_t dcc_address = afb->address + afb->base.offsets[1];
5175
5176                 dcc->enable = 1;
5177                 dcc->meta_pitch = afb->base.pitches[1];
5178                 dcc->independent_64b_blks = AMD_FMT_MOD_GET(DCC_INDEPENDENT_64B, modifier);
5179
5180                 address->grph.meta_addr.low_part = lower_32_bits(dcc_address);
5181                 address->grph.meta_addr.high_part = upper_32_bits(dcc_address);
5182         }
5183
5184         ret = validate_dcc(adev, format, rotation, tiling_info, dcc, address, plane_size);
5185         if (ret)
5186                 drm_dbg_kms(adev_to_drm(adev), "validate_dcc: returned error: %d\n", ret);
5187
5188         return ret;
5189 }
5190
5191 static int
5192 fill_plane_buffer_attributes(struct amdgpu_device *adev,
5193                              const struct amdgpu_framebuffer *afb,
5194                              const enum surface_pixel_format format,
5195                              const enum dc_rotation_angle rotation,
5196                              const uint64_t tiling_flags,
5197                              union dc_tiling_info *tiling_info,
5198                              struct plane_size *plane_size,
5199                              struct dc_plane_dcc_param *dcc,
5200                              struct dc_plane_address *address,
5201                              bool tmz_surface,
5202                              bool force_disable_dcc)
5203 {
5204         const struct drm_framebuffer *fb = &afb->base;
5205         int ret;
5206
5207         memset(tiling_info, 0, sizeof(*tiling_info));
5208         memset(plane_size, 0, sizeof(*plane_size));
5209         memset(dcc, 0, sizeof(*dcc));
5210         memset(address, 0, sizeof(*address));
5211
5212         address->tmz_surface = tmz_surface;
5213
5214         if (format < SURFACE_PIXEL_FORMAT_VIDEO_BEGIN) {
5215                 uint64_t addr = afb->address + fb->offsets[0];
5216
5217                 plane_size->surface_size.x = 0;
5218                 plane_size->surface_size.y = 0;
5219                 plane_size->surface_size.width = fb->width;
5220                 plane_size->surface_size.height = fb->height;
5221                 plane_size->surface_pitch =
5222                         fb->pitches[0] / fb->format->cpp[0];
5223
5224                 address->type = PLN_ADDR_TYPE_GRAPHICS;
5225                 address->grph.addr.low_part = lower_32_bits(addr);
5226                 address->grph.addr.high_part = upper_32_bits(addr);
5227         } else if (format < SURFACE_PIXEL_FORMAT_INVALID) {
5228                 uint64_t luma_addr = afb->address + fb->offsets[0];
5229                 uint64_t chroma_addr = afb->address + fb->offsets[1];
5230
5231                 plane_size->surface_size.x = 0;
5232                 plane_size->surface_size.y = 0;
5233                 plane_size->surface_size.width = fb->width;
5234                 plane_size->surface_size.height = fb->height;
5235                 plane_size->surface_pitch =
5236                         fb->pitches[0] / fb->format->cpp[0];
5237
5238                 plane_size->chroma_size.x = 0;
5239                 plane_size->chroma_size.y = 0;
5240                 /* TODO: set these based on surface format */
5241                 plane_size->chroma_size.width = fb->width / 2;
5242                 plane_size->chroma_size.height = fb->height / 2;
5243
5244                 plane_size->chroma_pitch =
5245                         fb->pitches[1] / fb->format->cpp[1];
5246
5247                 address->type = PLN_ADDR_TYPE_VIDEO_PROGRESSIVE;
5248                 address->video_progressive.luma_addr.low_part =
5249                         lower_32_bits(luma_addr);
5250                 address->video_progressive.luma_addr.high_part =
5251                         upper_32_bits(luma_addr);
5252                 address->video_progressive.chroma_addr.low_part =
5253                         lower_32_bits(chroma_addr);
5254                 address->video_progressive.chroma_addr.high_part =
5255                         upper_32_bits(chroma_addr);
5256         }
5257
5258         if (adev->family >= AMDGPU_FAMILY_AI) {
5259                 ret = fill_gfx9_plane_attributes_from_modifiers(adev, afb, format,
5260                                                                 rotation, plane_size,
5261                                                                 tiling_info, dcc,
5262                                                                 address,
5263                                                                 force_disable_dcc);
5264                 if (ret)
5265                         return ret;
5266         } else {
5267                 fill_gfx8_tiling_info_from_flags(tiling_info, tiling_flags);
5268         }
5269
5270         return 0;
5271 }
5272
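/*
 * DRM stores plane alpha as 16 bits (0xffff == opaque) while DC takes an
 * 8-bit global alpha, hence the >> 8 below. Per-pixel alpha is only enabled
 * for overlay planes that use premultiplied blending with an alpha-capable
 * format.
 */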
5273 static void
5274 fill_blending_from_plane_state(const struct drm_plane_state *plane_state,
5275                                bool *per_pixel_alpha, bool *global_alpha,
5276                                int *global_alpha_value)
5277 {
5278         *per_pixel_alpha = false;
5279         *global_alpha = false;
5280         *global_alpha_value = 0xff;
5281
5282         if (plane_state->plane->type != DRM_PLANE_TYPE_OVERLAY)
5283                 return;
5284
5285         if (plane_state->pixel_blend_mode == DRM_MODE_BLEND_PREMULTI) {
5286                 static const uint32_t alpha_formats[] = {
5287                         DRM_FORMAT_ARGB8888,
5288                         DRM_FORMAT_RGBA8888,
5289                         DRM_FORMAT_ABGR8888,
5290                 };
5291                 uint32_t format = plane_state->fb->format->format;
5292                 unsigned int i;
5293
5294                 for (i = 0; i < ARRAY_SIZE(alpha_formats); ++i) {
5295                         if (format == alpha_formats[i]) {
5296                                 *per_pixel_alpha = true;
5297                                 break;
5298                         }
5299                 }
5300         }
5301
5302         if (plane_state->alpha < 0xffff) {
5303                 *global_alpha = true;
5304                 *global_alpha_value = plane_state->alpha >> 8;
5305         }
5306 }
5307
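/*
 * RGB surfaces are always treated as sRGB; for YCbCr surfaces the DC color
 * space is chosen from the DRM color encoding/range properties. BT.2020 is
 * only supported in full range here, so limited range is rejected with
 * -EINVAL.
 */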
5308 static int
5309 fill_plane_color_attributes(const struct drm_plane_state *plane_state,
5310                             const enum surface_pixel_format format,
5311                             enum dc_color_space *color_space)
5312 {
5313         bool full_range;
5314
5315         *color_space = COLOR_SPACE_SRGB;
5316
5317         /* DRM color properties only affect non-RGB formats. */
5318         if (format < SURFACE_PIXEL_FORMAT_VIDEO_BEGIN)
5319                 return 0;
5320
5321         full_range = (plane_state->color_range == DRM_COLOR_YCBCR_FULL_RANGE);
5322
5323         switch (plane_state->color_encoding) {
5324         case DRM_COLOR_YCBCR_BT601:
5325                 if (full_range)
5326                         *color_space = COLOR_SPACE_YCBCR601;
5327                 else
5328                         *color_space = COLOR_SPACE_YCBCR601_LIMITED;
5329                 break;
5330
5331         case DRM_COLOR_YCBCR_BT709:
5332                 if (full_range)
5333                         *color_space = COLOR_SPACE_YCBCR709;
5334                 else
5335                         *color_space = COLOR_SPACE_YCBCR709_LIMITED;
5336                 break;
5337
5338         case DRM_COLOR_YCBCR_BT2020:
5339                 if (full_range)
5340                         *color_space = COLOR_SPACE_2020_YCBCR;
5341                 else
5342                         return -EINVAL;
5343                 break;
5344
5345         default:
5346                 return -EINVAL;
5347         }
5348
5349         return 0;
5350 }
5351
5352 static int
5353 fill_dc_plane_info_and_addr(struct amdgpu_device *adev,
5354                             const struct drm_plane_state *plane_state,
5355                             const uint64_t tiling_flags,
5356                             struct dc_plane_info *plane_info,
5357                             struct dc_plane_address *address,
5358                             bool tmz_surface,
5359                             bool force_disable_dcc)
5360 {
5361         const struct drm_framebuffer *fb = plane_state->fb;
5362         const struct amdgpu_framebuffer *afb =
5363                 to_amdgpu_framebuffer(plane_state->fb);
5364         int ret;
5365
5366         memset(plane_info, 0, sizeof(*plane_info));
5367
5368         switch (fb->format->format) {
5369         case DRM_FORMAT_C8:
5370                 plane_info->format =
5371                         SURFACE_PIXEL_FORMAT_GRPH_PALETA_256_COLORS;
5372                 break;
5373         case DRM_FORMAT_RGB565:
5374                 plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_RGB565;
5375                 break;
5376         case DRM_FORMAT_XRGB8888:
5377         case DRM_FORMAT_ARGB8888:
5378                 plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ARGB8888;
5379                 break;
5380         case DRM_FORMAT_XRGB2101010:
5381         case DRM_FORMAT_ARGB2101010:
5382                 plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ARGB2101010;
5383                 break;
5384         case DRM_FORMAT_XBGR2101010:
5385         case DRM_FORMAT_ABGR2101010:
5386                 plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ABGR2101010;
5387                 break;
5388         case DRM_FORMAT_XBGR8888:
5389         case DRM_FORMAT_ABGR8888:
5390                 plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ABGR8888;
5391                 break;
5392         case DRM_FORMAT_NV21:
5393                 plane_info->format = SURFACE_PIXEL_FORMAT_VIDEO_420_YCbCr;
5394                 break;
5395         case DRM_FORMAT_NV12:
5396                 plane_info->format = SURFACE_PIXEL_FORMAT_VIDEO_420_YCrCb;
5397                 break;
5398         case DRM_FORMAT_P010:
5399                 plane_info->format = SURFACE_PIXEL_FORMAT_VIDEO_420_10bpc_YCrCb;
5400                 break;
5401         case DRM_FORMAT_XRGB16161616F:
5402         case DRM_FORMAT_ARGB16161616F:
5403                 plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ARGB16161616F;
5404                 break;
5405         case DRM_FORMAT_XBGR16161616F:
5406         case DRM_FORMAT_ABGR16161616F:
5407                 plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ABGR16161616F;
5408                 break;
5409         case DRM_FORMAT_XRGB16161616:
5410         case DRM_FORMAT_ARGB16161616:
5411                 plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ARGB16161616;
5412                 break;
5413         case DRM_FORMAT_XBGR16161616:
5414         case DRM_FORMAT_ABGR16161616:
5415                 plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ABGR16161616;
5416                 break;
5417         default:
5418                 DRM_ERROR(
5419                         "Unsupported screen format %p4cc\n",
5420                         &fb->format->format);
5421                 return -EINVAL;
5422         }
5423
5424         switch (plane_state->rotation & DRM_MODE_ROTATE_MASK) {
5425         case DRM_MODE_ROTATE_0:
5426                 plane_info->rotation = ROTATION_ANGLE_0;
5427                 break;
5428         case DRM_MODE_ROTATE_90:
5429                 plane_info->rotation = ROTATION_ANGLE_90;
5430                 break;
5431         case DRM_MODE_ROTATE_180:
5432                 plane_info->rotation = ROTATION_ANGLE_180;
5433                 break;
5434         case DRM_MODE_ROTATE_270:
5435                 plane_info->rotation = ROTATION_ANGLE_270;
5436                 break;
5437         default:
5438                 plane_info->rotation = ROTATION_ANGLE_0;
5439                 break;
5440         }
5441
5442         plane_info->visible = true;
5443         plane_info->stereo_format = PLANE_STEREO_FORMAT_NONE;
5444
5445         plane_info->layer_index = 0;
5446
5447         ret = fill_plane_color_attributes(plane_state, plane_info->format,
5448                                           &plane_info->color_space);
5449         if (ret)
5450                 return ret;
5451
5452         ret = fill_plane_buffer_attributes(adev, afb, plane_info->format,
5453                                            plane_info->rotation, tiling_flags,
5454                                            &plane_info->tiling_info,
5455                                            &plane_info->plane_size,
5456                                            &plane_info->dcc, address, tmz_surface,
5457                                            force_disable_dcc);
5458         if (ret)
5459                 return ret;
5460
5461         fill_blending_from_plane_state(
5462                 plane_state, &plane_info->per_pixel_alpha,
5463                 &plane_info->global_alpha, &plane_info->global_alpha_value);
5464
5465         return 0;
5466 }
5467
5468 static int fill_dc_plane_attributes(struct amdgpu_device *adev,
5469                                     struct dc_plane_state *dc_plane_state,
5470                                     struct drm_plane_state *plane_state,
5471                                     struct drm_crtc_state *crtc_state)
5472 {
5473         struct dm_crtc_state *dm_crtc_state = to_dm_crtc_state(crtc_state);
5474         struct amdgpu_framebuffer *afb = (struct amdgpu_framebuffer *)plane_state->fb;
5475         struct dc_scaling_info scaling_info;
5476         struct dc_plane_info plane_info;
5477         int ret;
5478         bool force_disable_dcc = false;
5479
5480         ret = fill_dc_scaling_info(plane_state, &scaling_info);
5481         if (ret)
5482                 return ret;
5483
5484         dc_plane_state->src_rect = scaling_info.src_rect;
5485         dc_plane_state->dst_rect = scaling_info.dst_rect;
5486         dc_plane_state->clip_rect = scaling_info.clip_rect;
5487         dc_plane_state->scaling_quality = scaling_info.scaling_quality;
5488
5489         force_disable_dcc = adev->asic_type == CHIP_RAVEN && adev->in_suspend;
5490         ret = fill_dc_plane_info_and_addr(adev, plane_state,
5491                                           afb->tiling_flags,
5492                                           &plane_info,
5493                                           &dc_plane_state->address,
5494                                           afb->tmz_surface,
5495                                           force_disable_dcc);
5496         if (ret)
5497                 return ret;
5498
5499         dc_plane_state->format = plane_info.format;
5500         dc_plane_state->color_space = plane_info.color_space;
5502         dc_plane_state->plane_size = plane_info.plane_size;
5503         dc_plane_state->rotation = plane_info.rotation;
5504         dc_plane_state->horizontal_mirror = plane_info.horizontal_mirror;
5505         dc_plane_state->stereo_format = plane_info.stereo_format;
5506         dc_plane_state->tiling_info = plane_info.tiling_info;
5507         dc_plane_state->visible = plane_info.visible;
5508         dc_plane_state->per_pixel_alpha = plane_info.per_pixel_alpha;
5509         dc_plane_state->global_alpha = plane_info.global_alpha;
5510         dc_plane_state->global_alpha_value = plane_info.global_alpha_value;
5511         dc_plane_state->dcc = plane_info.dcc;
5512         dc_plane_state->layer_index = plane_info.layer_index; /* always returns 0 */
5513         dc_plane_state->flip_int_enabled = true;
5514
5515         /*
5516          * Always set input transfer function, since plane state is refreshed
5517          * every time.
5518          */
5519         ret = amdgpu_dm_update_plane_color_mgmt(dm_crtc_state, dc_plane_state);
5520         if (ret)
5521                 return ret;
5522
5523         return 0;
5524 }
5525
5526 static void update_stream_scaling_settings(const struct drm_display_mode *mode,
5527                                            const struct dm_connector_state *dm_state,
5528                                            struct dc_stream_state *stream)
5529 {
5530         enum amdgpu_rmx_type rmx_type;
5531
5532         struct rect src = { 0 }; /* viewport in composition space */
5533         struct rect dst = { 0 }; /* stream addressable area */
5534
5535         /* no mode. nothing to be done */
5536         if (!mode)
5537                 return;
5538
5539         /* Full screen scaling by default */
5540         src.width = mode->hdisplay;
5541         src.height = mode->vdisplay;
5542         dst.width = stream->timing.h_addressable;
5543         dst.height = stream->timing.v_addressable;
5544
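        /*
         * Worked example (hypothetical numbers) for RMX_ASPECT: a 1920x1080
         * src on a 1920x1200 stream gives src.width * dst.height (2304000) >=
         * src.height * dst.width (2073600), so the else branch keeps the width
         * and sets dst.height = 1080 * 1920 / 1920 = 1080; centering below
         * then yields dst.y = (1200 - 1080) / 2 = 60.
         */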
5545         if (dm_state) {
5546                 rmx_type = dm_state->scaling;
5547                 if (rmx_type == RMX_ASPECT || rmx_type == RMX_OFF) {
5548                         if (src.width * dst.height <
5549                                         src.height * dst.width) {
5550                                 /* height needs less upscaling/more downscaling */
5551                                 dst.width = src.width *
5552                                                 dst.height / src.height;
5553                         } else {
5554                                 /* width needs less upscaling/more downscaling */
5555                                 dst.height = src.height *
5556                                                 dst.width / src.width;
5557                         }
5558                 } else if (rmx_type == RMX_CENTER) {
5559                         dst = src;
5560                 }
5561
5562                 dst.x = (stream->timing.h_addressable - dst.width) / 2;
5563                 dst.y = (stream->timing.v_addressable - dst.height) / 2;
5564
5565                 if (dm_state->underscan_enable) {
5566                         dst.x += dm_state->underscan_hborder / 2;
5567                         dst.y += dm_state->underscan_vborder / 2;
5568                         dst.width -= dm_state->underscan_hborder;
5569                         dst.height -= dm_state->underscan_vborder;
5570                 }
5571         }
5572
5573         stream->src = src;
5574         stream->dst = dst;
5575
5576         DRM_DEBUG_KMS("Destination Rectangle x:%d  y:%d  width:%d  height:%d\n",
5577                       dst.x, dst.y, dst.width, dst.height);
5578
5579 }
5580
5581 static enum dc_color_depth
5582 convert_color_depth_from_display_info(const struct drm_connector *connector,
5583                                       bool is_y420, int requested_bpc)
5584 {
5585         uint8_t bpc;
5586
5587         if (is_y420) {
5588                 bpc = 8;
5589
5590                 /* Cap display bpc based on HDMI 2.0 HF-VSDB */
5591                 if (connector->display_info.hdmi.y420_dc_modes & DRM_EDID_YCBCR420_DC_48)
5592                         bpc = 16;
5593                 else if (connector->display_info.hdmi.y420_dc_modes & DRM_EDID_YCBCR420_DC_36)
5594                         bpc = 12;
5595                 else if (connector->display_info.hdmi.y420_dc_modes & DRM_EDID_YCBCR420_DC_30)
5596                         bpc = 10;
5597         } else {
5598                 bpc = (uint8_t)connector->display_info.bpc;
5599                 /* Assume 8 bpc by default if no bpc is specified. */
5600                 bpc = bpc ? bpc : 8;
5601         }
5602
5603         if (requested_bpc > 0) {
5604                 /*
5605                  * Cap display bpc based on the user requested value.
5606                  *
5607                  * The value for state->max_bpc may not be correctly updated
5608                  * depending on when the connector gets added to the state
5609                  * or if this was called outside of atomic check, so it
5610                  * can't be used directly.
5611                  */
5612                 bpc = min_t(u8, bpc, requested_bpc);
5613
5614                 /* Round down to the nearest even number. */
5615                 bpc = bpc - (bpc & 1);
5616         }
5617
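        /*
         * Example (hypothetical values): a sink reporting 12 bpc with
         * requested_bpc == 11 is capped to 11 above and then rounded down to
         * the even value 10, which maps to COLOR_DEPTH_101010 below.
         */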
5618         switch (bpc) {
5619         case 0:
5620                 /*
5621                  * Temporary workaround: DRM doesn't parse color depth for
5622                  * EDID revisions before 1.4.
5623                  * TODO: fix EDID parsing.
5624                  */
5625                 return COLOR_DEPTH_888;
5626         case 6:
5627                 return COLOR_DEPTH_666;
5628         case 8:
5629                 return COLOR_DEPTH_888;
5630         case 10:
5631                 return COLOR_DEPTH_101010;
5632         case 12:
5633                 return COLOR_DEPTH_121212;
5634         case 14:
5635                 return COLOR_DEPTH_141414;
5636         case 16:
5637                 return COLOR_DEPTH_161616;
5638         default:
5639                 return COLOR_DEPTH_UNDEFINED;
5640         }
5641 }
5642
5643 static enum dc_aspect_ratio
5644 get_aspect_ratio(const struct drm_display_mode *mode_in)
5645 {
5646         /* 1-1 mapping, since both enums follow the HDMI spec. */
5647         return (enum dc_aspect_ratio) mode_in->picture_aspect_ratio;
5648 }
5649
5650 static enum dc_color_space
5651 get_output_color_space(const struct dc_crtc_timing *dc_crtc_timing)
5652 {
5653         enum dc_color_space color_space = COLOR_SPACE_SRGB;
5654
5655         switch (dc_crtc_timing->pixel_encoding) {
5656         case PIXEL_ENCODING_YCBCR422:
5657         case PIXEL_ENCODING_YCBCR444:
5658         case PIXEL_ENCODING_YCBCR420:
5659         {
5660                 /*
5661                  * 27030 kHz is the separation point between HDTV and SDTV
5662                  * according to the HDMI spec; use YCbCr709 above it and
5663                  * YCbCr601 below it.
5664                  */
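                /*
                 * e.g. 480p at 27.000 MHz (pix_clk_100hz = 270000) selects
                 * YCbCr601, while 720p at 74.25 MHz (742500) selects YCbCr709.
                 */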
5665                 if (dc_crtc_timing->pix_clk_100hz > 270300) {
5666                         if (dc_crtc_timing->flags.Y_ONLY)
5667                                 color_space =
5668                                         COLOR_SPACE_YCBCR709_LIMITED;
5669                         else
5670                                 color_space = COLOR_SPACE_YCBCR709;
5671                 } else {
5672                         if (dc_crtc_timing->flags.Y_ONLY)
5673                                 color_space =
5674                                         COLOR_SPACE_YCBCR601_LIMITED;
5675                         else
5676                                 color_space = COLOR_SPACE_YCBCR601;
5677                 }
5678
5679         }
5680         break;
5681         case PIXEL_ENCODING_RGB:
5682                 color_space = COLOR_SPACE_SRGB;
5683                 break;
5684
5685         default:
5686                 WARN_ON(1);
5687                 break;
5688         }
5689
5690         return color_space;
5691 }
5692
5693 static bool adjust_colour_depth_from_display_info(
5694         struct dc_crtc_timing *timing_out,
5695         const struct drm_display_info *info)
5696 {
5697         enum dc_color_depth depth = timing_out->display_color_depth;
5698         int normalized_clk;
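
        /*
         * Worked example (hypothetical sink): 4k60 at 594 MHz gives
         * normalized_clk = 594000 kHz, halved to 297000 kHz for YCbCr 4:2:0.
         * Against max_tmds_clock = 340000 kHz, 12 bpc (445500) and 10 bpc
         * (371250) both exceed the limit, so the loop settles on
         * COLOR_DEPTH_888 (297000 <= 340000).
         */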
5699         do {
5700                 normalized_clk = timing_out->pix_clk_100hz / 10;
5701                 /* YCbCr 4:2:0 requires additional adjustment of 1/2 */
5702                 if (timing_out->pixel_encoding == PIXEL_ENCODING_YCBCR420)
5703                         normalized_clk /= 2;
5704                 /* Adjust the pixel clock per the HDMI spec based on colour depth. */
5705                 switch (depth) {
5706                 case COLOR_DEPTH_888:
5707                         break;
5708                 case COLOR_DEPTH_101010:
5709                         normalized_clk = (normalized_clk * 30) / 24;
5710                         break;
5711                 case COLOR_DEPTH_121212:
5712                         normalized_clk = (normalized_clk * 36) / 24;
5713                         break;
5714                 case COLOR_DEPTH_161616:
5715                         normalized_clk = (normalized_clk * 48) / 24;
5716                         break;
5717                 default:
5718                         /* The above depths are the only ones valid for HDMI. */
5719                         return false;
5720                 }
5721                 if (normalized_clk <= info->max_tmds_clock) {
5722                         timing_out->display_color_depth = depth;
5723                         return true;
5724                 }
5725         } while (--depth > COLOR_DEPTH_666);
5726         return false;
5727 }
5728
5729 static void fill_stream_properties_from_drm_display_mode(
5730         struct dc_stream_state *stream,
5731         const struct drm_display_mode *mode_in,
5732         const struct drm_connector *connector,
5733         const struct drm_connector_state *connector_state,
5734         const struct dc_stream_state *old_stream,
5735         int requested_bpc)
5736 {
5737         struct dc_crtc_timing *timing_out = &stream->timing;
5738         const struct drm_display_info *info = &connector->display_info;
5739         struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
5740         struct hdmi_vendor_infoframe hv_frame;
5741         struct hdmi_avi_infoframe avi_frame;
5742
5743         memset(&hv_frame, 0, sizeof(hv_frame));
5744         memset(&avi_frame, 0, sizeof(avi_frame));
5745
5746         timing_out->h_border_left = 0;
5747         timing_out->h_border_right = 0;
5748         timing_out->v_border_top = 0;
5749         timing_out->v_border_bottom = 0;
5750         /* TODO: un-hardcode */
5751         if (drm_mode_is_420_only(info, mode_in)
5752                         && stream->signal == SIGNAL_TYPE_HDMI_TYPE_A)
5753                 timing_out->pixel_encoding = PIXEL_ENCODING_YCBCR420;
5754         else if (drm_mode_is_420_also(info, mode_in)
5755                         && aconnector->force_yuv420_output)
5756                 timing_out->pixel_encoding = PIXEL_ENCODING_YCBCR420;
5757         else if ((connector->display_info.color_formats & DRM_COLOR_FORMAT_YCRCB444)
5758                         && stream->signal == SIGNAL_TYPE_HDMI_TYPE_A)
5759                 timing_out->pixel_encoding = PIXEL_ENCODING_YCBCR444;
5760         else
5761                 timing_out->pixel_encoding = PIXEL_ENCODING_RGB;
5762
5763         timing_out->timing_3d_format = TIMING_3D_FORMAT_NONE;
5764         timing_out->display_color_depth = convert_color_depth_from_display_info(
5765                 connector,
5766                 (timing_out->pixel_encoding == PIXEL_ENCODING_YCBCR420),
5767                 requested_bpc);
5768         timing_out->scan_type = SCANNING_TYPE_NODATA;
5769         timing_out->hdmi_vic = 0;
5770
5771         if (old_stream) {
5772                 timing_out->vic = old_stream->timing.vic;
5773                 timing_out->flags.HSYNC_POSITIVE_POLARITY = old_stream->timing.flags.HSYNC_POSITIVE_POLARITY;
5774                 timing_out->flags.VSYNC_POSITIVE_POLARITY = old_stream->timing.flags.VSYNC_POSITIVE_POLARITY;
5775         } else {
5776                 timing_out->vic = drm_match_cea_mode(mode_in);
5777                 if (mode_in->flags & DRM_MODE_FLAG_PHSYNC)
5778                         timing_out->flags.HSYNC_POSITIVE_POLARITY = 1;
5779                 if (mode_in->flags & DRM_MODE_FLAG_PVSYNC)
5780                         timing_out->flags.VSYNC_POSITIVE_POLARITY = 1;
5781         }
5782
5783         if (stream->signal == SIGNAL_TYPE_HDMI_TYPE_A) {
5784                 drm_hdmi_avi_infoframe_from_display_mode(&avi_frame, (struct drm_connector *)connector, mode_in);
5785                 timing_out->vic = avi_frame.video_code;
5786                 drm_hdmi_vendor_infoframe_from_display_mode(&hv_frame, (struct drm_connector *)connector, mode_in);
5787                 timing_out->hdmi_vic = hv_frame.vic;
5788         }
5789
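
        /*
         * mode_in->clock and the crtc_* clocks are in kHz, while DC expects
         * pix_clk_100hz in units of 100 Hz, hence the factor of 10 below
         * (e.g. 148500 kHz -> 1485000).
         */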
5790         if (is_freesync_video_mode(mode_in, aconnector)) {
5791                 timing_out->h_addressable = mode_in->hdisplay;
5792                 timing_out->h_total = mode_in->htotal;
5793                 timing_out->h_sync_width = mode_in->hsync_end - mode_in->hsync_start;
5794                 timing_out->h_front_porch = mode_in->hsync_start - mode_in->hdisplay;
5795                 timing_out->v_total = mode_in->vtotal;
5796                 timing_out->v_addressable = mode_in->vdisplay;
5797                 timing_out->v_front_porch = mode_in->vsync_start - mode_in->vdisplay;
5798                 timing_out->v_sync_width = mode_in->vsync_end - mode_in->vsync_start;
5799                 timing_out->pix_clk_100hz = mode_in->clock * 10;
5800         } else {
5801                 timing_out->h_addressable = mode_in->crtc_hdisplay;
5802                 timing_out->h_total = mode_in->crtc_htotal;
5803                 timing_out->h_sync_width = mode_in->crtc_hsync_end - mode_in->crtc_hsync_start;
5804                 timing_out->h_front_porch = mode_in->crtc_hsync_start - mode_in->crtc_hdisplay;
5805                 timing_out->v_total = mode_in->crtc_vtotal;
5806                 timing_out->v_addressable = mode_in->crtc_vdisplay;
5807                 timing_out->v_front_porch = mode_in->crtc_vsync_start - mode_in->crtc_vdisplay;
5808                 timing_out->v_sync_width = mode_in->crtc_vsync_end - mode_in->crtc_vsync_start;
5809                 timing_out->pix_clk_100hz = mode_in->crtc_clock * 10;
5810         }
5811
5812         timing_out->aspect_ratio = get_aspect_ratio(mode_in);
5813
5814         stream->output_color_space = get_output_color_space(timing_out);
5815
5816         stream->out_transfer_func->type = TF_TYPE_PREDEFINED;
5817         stream->out_transfer_func->tf = TRANSFER_FUNCTION_SRGB;
5818         if (stream->signal == SIGNAL_TYPE_HDMI_TYPE_A) {
5819                 if (!adjust_colour_depth_from_display_info(timing_out, info) &&
5820                     drm_mode_is_420_also(info, mode_in) &&
5821                     timing_out->pixel_encoding != PIXEL_ENCODING_YCBCR420) {
5822                         timing_out->pixel_encoding = PIXEL_ENCODING_YCBCR420;
5823                         adjust_colour_depth_from_display_info(timing_out, info);
5824                 }
5825         }
5826 }
5827
5828 static void fill_audio_info(struct audio_info *audio_info,
5829                             const struct drm_connector *drm_connector,
5830                             const struct dc_sink *dc_sink)
5831 {
5832         int i = 0;
5833         int cea_revision = 0;
5834         const struct dc_edid_caps *edid_caps = &dc_sink->edid_caps;
5835
5836         audio_info->manufacture_id = edid_caps->manufacturer_id;
5837         audio_info->product_id = edid_caps->product_id;
5838
5839         cea_revision = drm_connector->display_info.cea_rev;
5840
5841         strscpy(audio_info->display_name,
5842                 edid_caps->display_name,
5843                 AUDIO_INFO_DISPLAY_NAME_SIZE_IN_CHARS);
5844
5845         if (cea_revision >= 3) {
5846                 audio_info->mode_count = edid_caps->audio_mode_count;
5847
5848                 for (i = 0; i < audio_info->mode_count; ++i) {
5849                         audio_info->modes[i].format_code =
5850                                         (enum audio_format_code)
5851                                         (edid_caps->audio_modes[i].format_code);
5852                         audio_info->modes[i].channel_count =
5853                                         edid_caps->audio_modes[i].channel_count;
5854                         audio_info->modes[i].sample_rates.all =
5855                                         edid_caps->audio_modes[i].sample_rate;
5856                         audio_info->modes[i].sample_size =
5857                                         edid_caps->audio_modes[i].sample_size;
5858                 }
5859         }
5860
5861         audio_info->flags.all = edid_caps->speaker_flags;
5862
5863         /* TODO: We only check for the progressive mode, check for interlace mode too */
5864         if (drm_connector->latency_present[0]) {
5865                 audio_info->video_latency = drm_connector->video_latency[0];
5866                 audio_info->audio_latency = drm_connector->audio_latency[0];
5867         }
5868
5869         /* TODO: For DP, video and audio latency should be calculated from DPCD caps */
5870
5871 }
5872
5873 static void
5874 copy_crtc_timing_for_drm_display_mode(const struct drm_display_mode *src_mode,
5875                                       struct drm_display_mode *dst_mode)
5876 {
5877         dst_mode->crtc_hdisplay = src_mode->crtc_hdisplay;
5878         dst_mode->crtc_vdisplay = src_mode->crtc_vdisplay;
5879         dst_mode->crtc_clock = src_mode->crtc_clock;
5880         dst_mode->crtc_hblank_start = src_mode->crtc_hblank_start;
5881         dst_mode->crtc_hblank_end = src_mode->crtc_hblank_end;
5882         dst_mode->crtc_hsync_start = src_mode->crtc_hsync_start;
5883         dst_mode->crtc_hsync_end = src_mode->crtc_hsync_end;
5884         dst_mode->crtc_htotal = src_mode->crtc_htotal;
5885         dst_mode->crtc_hskew = src_mode->crtc_hskew;
5886         dst_mode->crtc_vblank_start = src_mode->crtc_vblank_start;
5887         dst_mode->crtc_vblank_end = src_mode->crtc_vblank_end;
5888         dst_mode->crtc_vsync_start = src_mode->crtc_vsync_start;
5889         dst_mode->crtc_vsync_end = src_mode->crtc_vsync_end;
5890         dst_mode->crtc_vtotal = src_mode->crtc_vtotal;
5891 }
5892
5893 static void
5894 decide_crtc_timing_for_drm_display_mode(struct drm_display_mode *drm_mode,
5895                                         const struct drm_display_mode *native_mode,
5896                                         bool scale_enabled)
5897 {
5898         if (scale_enabled) {
5899                 copy_crtc_timing_for_drm_display_mode(native_mode, drm_mode);
5900         } else if (native_mode->clock == drm_mode->clock &&
5901                         native_mode->htotal == drm_mode->htotal &&
5902                         native_mode->vtotal == drm_mode->vtotal) {
5903                 copy_crtc_timing_for_drm_display_mode(native_mode, drm_mode);
5904         } else {
5905                 /* no scaling and no amdgpu-inserted mode: nothing to patch */
5906         }
5907 }
5908
5909 static struct dc_sink *
5910 create_fake_sink(struct amdgpu_dm_connector *aconnector)
5911 {
5912         struct dc_sink_init_data sink_init_data = { 0 };
5913         struct dc_sink *sink = NULL;

5914         sink_init_data.link = aconnector->dc_link;
5915         sink_init_data.sink_signal = aconnector->dc_link->connector_signal;
5916
5917         sink = dc_sink_create(&sink_init_data);
5918         if (!sink) {
5919                 DRM_ERROR("Failed to create sink!\n");
5920                 return NULL;
5921         }
5922         sink->sink_signal = SIGNAL_TYPE_VIRTUAL;
5923
5924         return sink;
5925 }
5926
5927 static void set_multisync_trigger_params(
5928                 struct dc_stream_state *stream)
5929 {
5930         struct dc_stream_state *master = NULL;
5931
5932         if (stream->triggered_crtc_reset.enabled) {
5933                 master = stream->triggered_crtc_reset.event_source;
5934                 stream->triggered_crtc_reset.event =
5935                         master->timing.flags.VSYNC_POSITIVE_POLARITY ?
5936                         CRTC_EVENT_VSYNC_RISING : CRTC_EVENT_VSYNC_FALLING;
5937                 stream->triggered_crtc_reset.delay = TRIGGER_DELAY_NEXT_PIXEL;
5938         }
5939 }
5940
5941 static void set_master_stream(struct dc_stream_state *stream_set[],
5942                               int stream_count)
5943 {
5944         int j, highest_rfr = 0, master_stream = 0;
5945
5946         for (j = 0; j < stream_count; j++) {
5947                 if (stream_set[j] && stream_set[j]->triggered_crtc_reset.enabled) {
5948                         int refresh_rate = 0;
5949
5950                         refresh_rate = (stream_set[j]->timing.pix_clk_100hz * 100) /
5951                                 (stream_set[j]->timing.h_total * stream_set[j]->timing.v_total);
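                        /*
                         * e.g. 1080p60 (hypothetical): pix_clk_100hz = 1485000,
                         * h_total = 2200, v_total = 1125 ->
                         * (1485000 * 100) / (2200 * 1125) = 60.
                         */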
5952                         if (refresh_rate > highest_rfr) {
5953                                 highest_rfr = refresh_rate;
5954                                 master_stream = j;
5955                         }
5956                 }
5957         }
5958         for (j = 0; j < stream_count; j++) {
5959                 if (stream_set[j])
5960                         stream_set[j]->triggered_crtc_reset.event_source = stream_set[master_stream];
5961         }
5962 }
5963
5964 static void dm_enable_per_frame_crtc_master_sync(struct dc_state *context)
5965 {
5966         int i = 0;
5967         struct dc_stream_state *stream;
5968
5969         if (context->stream_count < 2)
5970                 return;
5971         for (i = 0; i < context->stream_count; i++) {
5972                 if (!context->streams[i])
5973                         continue;
5974                 /*
5975                  * TODO: add a function to read AMD VSDB bits and set
5976                  * crtc_sync_master.multi_sync_enabled flag
5977                  * For now it's set to false
5978                  */
5979         }
5980
5981         set_master_stream(context->streams, context->stream_count);
5982
5983         for (i = 0; i < context->stream_count; i++) {
5984                 stream = context->streams[i];
5985
5986                 if (!stream)
5987                         continue;
5988
5989                 set_multisync_trigger_params(stream);
5990         }
5991 }
5992
5993 #if defined(CONFIG_DRM_AMD_DC_DCN)
5994 static void update_dsc_caps(struct amdgpu_dm_connector *aconnector,
5995                                                         struct dc_sink *sink, struct dc_stream_state *stream,
5996                                                         struct dsc_dec_dpcd_caps *dsc_caps)
5997 {
5998         stream->timing.flags.DSC = 0;
5999         dsc_caps->is_dsc_supported = false;
6000
6001         if (aconnector->dc_link && sink->sink_signal == SIGNAL_TYPE_DISPLAY_PORT) {
6002                 dc_dsc_parse_dsc_dpcd(aconnector->dc_link->ctx->dc,
6003                                       aconnector->dc_link->dpcd_caps.dsc_caps.dsc_basic_caps.raw,
6004                                       aconnector->dc_link->dpcd_caps.dsc_caps.dsc_branch_decoder_caps.raw,
6005                                       dsc_caps);
6006         }
6007 }
6008
6009 static void apply_dsc_policy_for_stream(struct amdgpu_dm_connector *aconnector,
6010                                                                                 struct dc_sink *sink, struct dc_stream_state *stream,
6011                                                                                 struct dsc_dec_dpcd_caps *dsc_caps)
6012 {
6013         struct drm_connector *drm_connector = &aconnector->base;
6014         uint32_t link_bandwidth_kbps;
6015
6016         link_bandwidth_kbps = dc_link_bandwidth_kbps(aconnector->dc_link,
6017                                                         dc_link_get_link_cap(aconnector->dc_link));
6018         /* Set DSC policy according to dsc_clock_en */
6019         dc_dsc_policy_set_enable_dsc_when_not_needed(
6020                 aconnector->dsc_settings.dsc_force_enable == DSC_CLK_FORCE_ENABLE);
6021
6022         if (aconnector->dc_link && sink->sink_signal == SIGNAL_TYPE_DISPLAY_PORT) {
6024                 if (dc_dsc_compute_config(aconnector->dc_link->ctx->dc->res_pool->dscs[0],
6025                                                 dsc_caps,
6026                                                 aconnector->dc_link->ctx->dc->debug.dsc_min_slice_height_override,
6027                                                 0,
6028                                                 link_bandwidth_kbps,
6029                                                 &stream->timing,
6030                                                 &stream->timing.dsc_cfg)) {
6031                         stream->timing.flags.DSC = 1;
6032                         DRM_DEBUG_DRIVER("%s: [%s] DSC is selected from SST RX\n", __func__, drm_connector->name);
6033                 }
6034         }
6035
6036         /* Overwrite the stream flag if DSC is enabled through debugfs */
6037         if (aconnector->dsc_settings.dsc_force_enable == DSC_CLK_FORCE_ENABLE)
6038                 stream->timing.flags.DSC = 1;
6039
6040         if (stream->timing.flags.DSC && aconnector->dsc_settings.dsc_num_slices_h)
6041                 stream->timing.dsc_cfg.num_slices_h = aconnector->dsc_settings.dsc_num_slices_h;
6042
6043         if (stream->timing.flags.DSC && aconnector->dsc_settings.dsc_num_slices_v)
6044                 stream->timing.dsc_cfg.num_slices_v = aconnector->dsc_settings.dsc_num_slices_v;
6045
6046         if (stream->timing.flags.DSC && aconnector->dsc_settings.dsc_bits_per_pixel)
6047                 stream->timing.dsc_cfg.bits_per_pixel = aconnector->dsc_settings.dsc_bits_per_pixel;
6048 }
6049 #endif
6050
6051 /**
6052  * DOC: FreeSync Video
6053  *
6054  * When a userspace application wants to play a video, the content follows a
6055  * standard format definition that usually specifies the FPS for that format.
6056  * The list below illustrates some video formats and their expected FPS,
6057  * respectively:
6058  *
6059  * - TV/NTSC (23.976 FPS)
6060  * - Cinema (24 FPS)
6061  * - TV/PAL (25 FPS)
6062  * - TV/NTSC (29.97 FPS)
6063  * - TV/NTSC (30 FPS)
6064  * - Cinema HFR (48 FPS)
6065  * - TV/PAL (50 FPS)
6066  * - Commonly used (60 FPS)
6067  * - Multiples of 24 (48,72,96 FPS)
6068  *
6069  * The list of standard video formats is not huge and can be added to the
6070  * connector's modeset list beforehand. With that, userspace can leverage
6071  * FreeSync to extend the front porch in order to attain the target refresh
6072  * rate. Such a switch will happen seamlessly, without screen blanking or
6073  * reprogramming of the output in any other way. If userspace requests a
6074  * modesetting change compatible with FreeSync modes that only differ in the
6075  * refresh rate, DC will skip the full update and avoid blinking during the
6076  * transition. For example, the video player can change the modesetting from
6077  * 60Hz to 30Hz for playing TV/NTSC content when it goes full screen without
6078  * causing any display blink. The same concept applies to any other mode
6079  * setting change that only differs in refresh rate.
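 *
 * As a worked example (hypothetical timings): a 1080p60 base mode with
 * htotal = 2200, vtotal = 1125 and a 148.5 MHz pixel clock refreshes at
 * 148500000 / (2200 * 1125) = 60 Hz. Keeping the clock and htotal fixed and
 * extending only the vertical front porch so that vtotal becomes 2250 yields
 * 30 Hz.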
6080  */
6081 static struct drm_display_mode *
6082 get_highest_refresh_rate_mode(struct amdgpu_dm_connector *aconnector,
6083                           bool use_probed_modes)
6084 {
6085         struct drm_display_mode *m, *m_pref = NULL;
6086         u16 current_refresh, highest_refresh;
6087         struct list_head *list_head = use_probed_modes ?
6088                                                     &aconnector->base.probed_modes :
6089                                                     &aconnector->base.modes;
6090
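        /* A non-zero clock means the base mode was cached by a previous call. */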
6091         if (aconnector->freesync_vid_base.clock != 0)
6092                 return &aconnector->freesync_vid_base;
6093
6094         /* Find the preferred mode */
6095         list_for_each_entry(m, list_head, head) {
6096                 if (m->type & DRM_MODE_TYPE_PREFERRED) {
6097                         m_pref = m;
6098                         break;
6099                 }
6100         }
6101
6102         if (!m_pref) {
6103                 /* Probably an EDID with no preferred mode. Fall back to first entry. */
6104                 m_pref = list_first_entry_or_null(
6105                         &aconnector->base.modes, struct drm_display_mode, head);
6106                 if (!m_pref) {
6107                         DRM_DEBUG_DRIVER("No preferred mode found in EDID\n");
6108                         return NULL;
6109                 }
6110         }
6111
6112         highest_refresh = drm_mode_vrefresh(m_pref);
6113
6114         /*
6115          * Find the mode with highest refresh rate with same resolution.
6116          * For some monitors, preferred mode is not the mode with highest
6117          * supported refresh rate.
6118          */
6119         list_for_each_entry(m, list_head, head) {
6120                 current_refresh = drm_mode_vrefresh(m);
6121
6122                 if (m->hdisplay == m_pref->hdisplay &&
6123                     m->vdisplay == m_pref->vdisplay &&
6124                     highest_refresh < current_refresh) {
6125                         highest_refresh = current_refresh;
6126                         m_pref = m;
6127                 }
6128         }
6129
6130         aconnector->freesync_vid_base = *m_pref;
6131         return m_pref;
6132 }
6133
6134 static bool is_freesync_video_mode(const struct drm_display_mode *mode,
6135                                    struct amdgpu_dm_connector *aconnector)
6136 {
6137         struct drm_display_mode *high_mode;
6138         int timing_diff;
6139
6140         high_mode = get_highest_refresh_rate_mode(aconnector, false);
6141         if (!high_mode || !mode)
6142                 return false;
6143
6144         timing_diff = high_mode->vtotal - mode->vtotal;
6145
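        /*
         * e.g. (hypothetical) a 1080p30 FreeSync variant of a 1080p60 base
         * differs only in the vertical front porch: vsync_start, vsync_end and
         * vtotal are all shifted by the same timing_diff, while every other
         * field matches.
         */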
6146         if (high_mode->clock == 0 || high_mode->clock != mode->clock ||
6147             high_mode->hdisplay != mode->hdisplay ||
6148             high_mode->vdisplay != mode->vdisplay ||
6149             high_mode->hsync_start != mode->hsync_start ||
6150             high_mode->hsync_end != mode->hsync_end ||
6151             high_mode->htotal != mode->htotal ||
6152             high_mode->hskew != mode->hskew ||
6153             high_mode->vscan != mode->vscan ||
6154             high_mode->vsync_start - mode->vsync_start != timing_diff ||
6155             high_mode->vsync_end - mode->vsync_end != timing_diff)
6156                 return false;
6157         else
6158                 return true;
6159 }
6160
6161 static struct dc_stream_state *
6162 create_stream_for_sink(struct amdgpu_dm_connector *aconnector,
6163                        const struct drm_display_mode *drm_mode,
6164                        const struct dm_connector_state *dm_state,
6165                        const struct dc_stream_state *old_stream,
6166                        int requested_bpc)
6167 {
6168         struct drm_display_mode *preferred_mode = NULL;
6169         struct drm_connector *drm_connector;
6170         const struct drm_connector_state *con_state =
6171                 dm_state ? &dm_state->base : NULL;
6172         struct dc_stream_state *stream = NULL;
6173         struct drm_display_mode mode = *drm_mode;
6174         struct drm_display_mode saved_mode;
6175         struct drm_display_mode *freesync_mode = NULL;
6176         bool native_mode_found = false;
6177         bool recalculate_timing = false;
6178         bool scale = dm_state ? (dm_state->scaling != RMX_OFF) : false;
6179         int mode_refresh;
6180         int preferred_refresh = 0;
6181 #if defined(CONFIG_DRM_AMD_DC_DCN)
6182         struct dsc_dec_dpcd_caps dsc_caps;
6183 #endif
6184         struct dc_sink *sink = NULL;
6185
6186         memset(&saved_mode, 0, sizeof(saved_mode));
6187
6188         if (aconnector == NULL) {
6189                 DRM_ERROR("aconnector is NULL!\n");
6190                 return stream;
6191         }
6192
6193         drm_connector = &aconnector->base;
6194
6195         if (!aconnector->dc_sink) {
6196                 sink = create_fake_sink(aconnector);
6197                 if (!sink)
6198                         return stream;
6199         } else {
6200                 sink = aconnector->dc_sink;
6201                 dc_sink_retain(sink);
6202         }
6203
6204         stream = dc_create_stream_for_sink(sink);
6205
6206         if (stream == NULL) {
6207                 DRM_ERROR("Failed to create stream for sink!\n");
6208                 goto finish;
6209         }
6210
6211         stream->dm_stream_context = aconnector;
6212
6213         stream->timing.flags.LTE_340MCSC_SCRAMBLE =
6214                 drm_connector->display_info.hdmi.scdc.scrambling.low_rates;
6215
6216         list_for_each_entry(preferred_mode, &aconnector->base.modes, head) {
6217                 /* Search for preferred mode */
6218                 if (preferred_mode->type & DRM_MODE_TYPE_PREFERRED) {
6219                         native_mode_found = true;
6220                         break;
6221                 }
6222         }
6223         if (!native_mode_found)
6224                 preferred_mode = list_first_entry_or_null(
6225                                 &aconnector->base.modes,
6226                                 struct drm_display_mode,
6227                                 head);
6228
6229         mode_refresh = drm_mode_vrefresh(&mode);
6230
6231         if (preferred_mode == NULL) {
6232                 /*
6233                  * This may not be an error: the use case is when we have no
6234                  * usermode calls to reset and set mode upon hotplug. In this
6235                  * case, we call set mode ourselves to restore the previous mode,
6236                  * and the mode list may not be filled in yet.
6237                  */
6238                 DRM_DEBUG_DRIVER("No preferred mode found\n");
6239         } else {
6240                 recalculate_timing = amdgpu_freesync_vid_mode &&
6241                                  is_freesync_video_mode(&mode, aconnector);
6242                 if (recalculate_timing) {
6243                         freesync_mode = get_highest_refresh_rate_mode(aconnector, false);
6244                         saved_mode = mode;
6245                         mode = *freesync_mode;
6246                 } else {
6247                         decide_crtc_timing_for_drm_display_mode(
6248                                 &mode, preferred_mode, scale);
6249
6250                         preferred_refresh = drm_mode_vrefresh(preferred_mode);
6251                 }
6252         }
6253
6254         if (recalculate_timing)
6255                 drm_mode_set_crtcinfo(&saved_mode, 0);
6256         else if (!dm_state)
6257                 drm_mode_set_crtcinfo(&mode, 0);
6258
6259         /*
6260          * If scaling is enabled and the refresh rate didn't change,
6261          * we copy the VIC and polarities of the old timings.
6262          */
6263         if (!scale || mode_refresh != preferred_refresh)
6264                 fill_stream_properties_from_drm_display_mode(
6265                         stream, &mode, &aconnector->base, con_state, NULL,
6266                         requested_bpc);
6267         else
6268                 fill_stream_properties_from_drm_display_mode(
6269                         stream, &mode, &aconnector->base, con_state, old_stream,
6270                         requested_bpc);
6271
6272 #if defined(CONFIG_DRM_AMD_DC_DCN)
6273         /* SST DSC determination policy */
6274         update_dsc_caps(aconnector, sink, stream, &dsc_caps);
6275         if (aconnector->dsc_settings.dsc_force_enable != DSC_CLK_FORCE_DISABLE && dsc_caps.is_dsc_supported)
6276                 apply_dsc_policy_for_stream(aconnector, sink, stream, &dsc_caps);
6277 #endif
6278
6279         update_stream_scaling_settings(&mode, dm_state, stream);
6280
6281         fill_audio_info(
6282                 &stream->audio_info,
6283                 drm_connector,
6284                 sink);
6285
6286         update_stream_signal(stream, sink);
6287
6288         if (stream->signal == SIGNAL_TYPE_HDMI_TYPE_A)
6289                 mod_build_hf_vsif_infopacket(stream, &stream->vsp_infopacket);
6290
6291         if (stream->link->psr_settings.psr_feature_enabled) {
6292                 /*
6293                  * Decide whether the stream supports VSC SDP colorimetry
6294                  * before building the VSC info packet.
6295                  */
6296                 stream->use_vsc_sdp_for_colorimetry = false;
6297                 if (aconnector->dc_sink->sink_signal == SIGNAL_TYPE_DISPLAY_PORT_MST) {
6298                         stream->use_vsc_sdp_for_colorimetry =
6299                                 aconnector->dc_sink->is_vsc_sdp_colorimetry_supported;
6300                 } else {
6301                         if (stream->link->dpcd_caps.dprx_feature.bits.VSC_SDP_COLORIMETRY_SUPPORTED)
6302                                 stream->use_vsc_sdp_for_colorimetry = true;
6303                 }
6304                 mod_build_vsc_infopacket(stream, &stream->vsc_infopacket);
6305                 aconnector->psr_skip_count = AMDGPU_DM_PSR_ENTRY_DELAY;
6306
6307         }
6308 finish:
6309         dc_sink_release(sink);
6310
6311         return stream;
6312 }
6313
6314 static void amdgpu_dm_crtc_destroy(struct drm_crtc *crtc)
6315 {
6316         drm_crtc_cleanup(crtc);
6317         kfree(crtc);
6318 }
6319
6320 static void dm_crtc_destroy_state(struct drm_crtc *crtc,
6321                                   struct drm_crtc_state *state)
6322 {
6323         struct dm_crtc_state *cur = to_dm_crtc_state(state);
6324
6325         /* TODO: destroy dc_stream objects once the stream object is flattened */
6326         if (cur->stream)
6327                 dc_stream_release(cur->stream);
6328
6330         __drm_atomic_helper_crtc_destroy_state(state);
6331
6333         kfree(state);
6334 }
6335
6336 static void dm_crtc_reset_state(struct drm_crtc *crtc)
6337 {
6338         struct dm_crtc_state *state;
6339
6340         if (crtc->state)
6341                 dm_crtc_destroy_state(crtc, crtc->state);
6342
6343         state = kzalloc(sizeof(*state), GFP_KERNEL);
6344         if (WARN_ON(!state))
6345                 return;
6346
6347         __drm_atomic_helper_crtc_reset(crtc, &state->base);
6348 }
6349
6350 static struct drm_crtc_state *
6351 dm_crtc_duplicate_state(struct drm_crtc *crtc)
6352 {
6353         struct dm_crtc_state *state, *cur;
6354
6355         if (WARN_ON(!crtc->state))
6356                 return NULL;
6357
6358         cur = to_dm_crtc_state(crtc->state);
6359
6360         state = kzalloc(sizeof(*state), GFP_KERNEL);
6361         if (!state)
6362                 return NULL;
6363
6364         __drm_atomic_helper_crtc_duplicate_state(crtc, &state->base);
6365
6366         if (cur->stream) {
6367                 state->stream = cur->stream;
6368                 dc_stream_retain(state->stream);
6369         }
6370
6371         state->active_planes = cur->active_planes;
6372         state->vrr_infopacket = cur->vrr_infopacket;
6373         state->abm_level = cur->abm_level;
6374         state->vrr_supported = cur->vrr_supported;
6375         state->freesync_config = cur->freesync_config;
6376         state->cm_has_degamma = cur->cm_has_degamma;
6377         state->cm_is_degamma_srgb = cur->cm_is_degamma_srgb;
6378         /* TODO: duplicate dc_stream once the stream object is flattened */
6379
6380         return &state->base;
6381 }
6382
6383 #ifdef CONFIG_DRM_AMD_SECURE_DISPLAY
6384 static int amdgpu_dm_crtc_late_register(struct drm_crtc *crtc)
6385 {
6386         crtc_debugfs_init(crtc);
6387
6388         return 0;
6389 }
6390 #endif
6391
6392 static inline int dm_set_vupdate_irq(struct drm_crtc *crtc, bool enable)
6393 {
6394         enum dc_irq_source irq_source;
6395         struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
6396         struct amdgpu_device *adev = drm_to_adev(crtc->dev);
6397         int rc;
6398
6399         irq_source = IRQ_TYPE_VUPDATE + acrtc->otg_inst;
6400
6401         rc = dc_interrupt_set(adev->dm.dc, irq_source, enable) ? 0 : -EBUSY;
6402
6403         DRM_DEBUG_VBL("crtc %d - vupdate irq %sabling: r=%d\n",
6404                       acrtc->crtc_id, enable ? "en" : "dis", rc);
6405         return rc;
6406 }
6407
6408 static inline int dm_set_vblank(struct drm_crtc *crtc, bool enable)
6409 {
6410         enum dc_irq_source irq_source;
6411         struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
6412         struct amdgpu_device *adev = drm_to_adev(crtc->dev);
6413         struct dm_crtc_state *acrtc_state = to_dm_crtc_state(crtc->state);
6414 #if defined(CONFIG_DRM_AMD_DC_DCN)
6415         struct amdgpu_display_manager *dm = &adev->dm;
6416         struct vblank_control_work *work;
6417 #endif
6418         int rc = 0;
6419
6420         if (enable) {
6421                 /* vblank irq on -> Only need vupdate irq in vrr mode */
6422                 if (amdgpu_dm_vrr_active(acrtc_state))
6423                         rc = dm_set_vupdate_irq(crtc, true);
6424         } else {
6425                 /* vblank irq off -> vupdate irq off */
6426                 rc = dm_set_vupdate_irq(crtc, false);
6427         }
6428
6429         if (rc)
6430                 return rc;
6431
6432         irq_source = IRQ_TYPE_VBLANK + acrtc->otg_inst;
6433
6434         if (!dc_interrupt_set(adev->dm.dc, irq_source, enable))
6435                 return -EBUSY;
6436
6437         if (amdgpu_in_reset(adev))
6438                 return 0;
6439
6440 #if defined(CONFIG_DRM_AMD_DC_DCN)
6441         if (dm->vblank_control_workqueue) {
6442                 work = kzalloc(sizeof(*work), GFP_ATOMIC);
6443                 if (!work)
6444                         return -ENOMEM;
6445
6446                 INIT_WORK(&work->work, vblank_control_worker);
6447                 work->dm = dm;
6448                 work->acrtc = acrtc;
6449                 work->enable = enable;
6450
6451                 if (acrtc_state->stream) {
6452                         dc_stream_retain(acrtc_state->stream);
6453                         work->stream = acrtc_state->stream;
6454                 }
6455
6456                 queue_work(dm->vblank_control_workqueue, &work->work);
6457         }
6458 #endif
6459
6460         return 0;
6461 }
6462
6463 static int dm_enable_vblank(struct drm_crtc *crtc)
6464 {
6465         return dm_set_vblank(crtc, true);
6466 }
6467
6468 static void dm_disable_vblank(struct drm_crtc *crtc)
6469 {
6470         dm_set_vblank(crtc, false);
6471 }
6472
6473 /* Only the options currently available to the driver are implemented */
6474 static const struct drm_crtc_funcs amdgpu_dm_crtc_funcs = {
6475         .reset = dm_crtc_reset_state,
6476         .destroy = amdgpu_dm_crtc_destroy,
6477         .set_config = drm_atomic_helper_set_config,
6478         .page_flip = drm_atomic_helper_page_flip,
6479         .atomic_duplicate_state = dm_crtc_duplicate_state,
6480         .atomic_destroy_state = dm_crtc_destroy_state,
6481         .set_crc_source = amdgpu_dm_crtc_set_crc_source,
6482         .verify_crc_source = amdgpu_dm_crtc_verify_crc_source,
6483         .get_crc_sources = amdgpu_dm_crtc_get_crc_sources,
6484         .get_vblank_counter = amdgpu_get_vblank_counter_kms,
6485         .enable_vblank = dm_enable_vblank,
6486         .disable_vblank = dm_disable_vblank,
6487         .get_vblank_timestamp = drm_crtc_vblank_helper_get_vblank_timestamp,
6488 #if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
6489         .late_register = amdgpu_dm_crtc_late_register,
6490 #endif
6491 };
6492
6493 static enum drm_connector_status
6494 amdgpu_dm_connector_detect(struct drm_connector *connector, bool force)
6495 {
6496         bool connected;
6497         struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
6498
6499         /*
6500          * Notes:
6501          * 1. This interface is NOT called in context of HPD irq.
6502          * 2. This interface *is called* in context of user-mode ioctl, which
6503          * makes it a bad place for *any* MST-related activity.
6504          */
6505
6506         if (aconnector->base.force == DRM_FORCE_UNSPECIFIED &&
6507             !aconnector->fake_enable)
6508                 connected = (aconnector->dc_sink != NULL);
6509         else
6510                 connected = (aconnector->base.force == DRM_FORCE_ON);
6511
6512         update_subconnector_property(aconnector);
6513
6514         return (connected ? connector_status_connected :
6515                         connector_status_disconnected);
6516 }
6517
6518 int amdgpu_dm_connector_atomic_set_property(struct drm_connector *connector,
6519                                             struct drm_connector_state *connector_state,
6520                                             struct drm_property *property,
6521                                             uint64_t val)
6522 {
6523         struct drm_device *dev = connector->dev;
6524         struct amdgpu_device *adev = drm_to_adev(dev);
6525         struct dm_connector_state *dm_old_state =
6526                 to_dm_connector_state(connector->state);
6527         struct dm_connector_state *dm_new_state =
6528                 to_dm_connector_state(connector_state);
6529
6530         int ret = -EINVAL;
6531
6532         if (property == dev->mode_config.scaling_mode_property) {
6533                 enum amdgpu_rmx_type rmx_type;
6534
6535                 switch (val) {
6536                 case DRM_MODE_SCALE_CENTER:
6537                         rmx_type = RMX_CENTER;
6538                         break;
6539                 case DRM_MODE_SCALE_ASPECT:
6540                         rmx_type = RMX_ASPECT;
6541                         break;
6542                 case DRM_MODE_SCALE_FULLSCREEN:
6543                         rmx_type = RMX_FULL;
6544                         break;
6545                 case DRM_MODE_SCALE_NONE:
6546                 default:
6547                         rmx_type = RMX_OFF;
6548                         break;
6549                 }
6550
6551                 if (dm_old_state->scaling == rmx_type)
6552                         return 0;
6553
6554                 dm_new_state->scaling = rmx_type;
6555                 ret = 0;
6556         } else if (property == adev->mode_info.underscan_hborder_property) {
6557                 dm_new_state->underscan_hborder = val;
6558                 ret = 0;
6559         } else if (property == adev->mode_info.underscan_vborder_property) {
6560                 dm_new_state->underscan_vborder = val;
6561                 ret = 0;
6562         } else if (property == adev->mode_info.underscan_property) {
6563                 dm_new_state->underscan_enable = val;
6564                 ret = 0;
6565         } else if (property == adev->mode_info.abm_level_property) {
6566                 dm_new_state->abm_level = val;
6567                 ret = 0;
6568         }
6569
6570         return ret;
6571 }
6572
6573 int amdgpu_dm_connector_atomic_get_property(struct drm_connector *connector,
6574                                             const struct drm_connector_state *state,
6575                                             struct drm_property *property,
6576                                             uint64_t *val)
6577 {
6578         struct drm_device *dev = connector->dev;
6579         struct amdgpu_device *adev = drm_to_adev(dev);
6580         struct dm_connector_state *dm_state =
6581                 to_dm_connector_state(state);
6582         int ret = -EINVAL;
6583
6584         if (property == dev->mode_config.scaling_mode_property) {
6585                 switch (dm_state->scaling) {
6586                 case RMX_CENTER:
6587                         *val = DRM_MODE_SCALE_CENTER;
6588                         break;
6589                 case RMX_ASPECT:
6590                         *val = DRM_MODE_SCALE_ASPECT;
6591                         break;
6592                 case RMX_FULL:
6593                         *val = DRM_MODE_SCALE_FULLSCREEN;
6594                         break;
6595                 case RMX_OFF:
6596                 default:
6597                         *val = DRM_MODE_SCALE_NONE;
6598                         break;
6599                 }
6600                 ret = 0;
6601         } else if (property == adev->mode_info.underscan_hborder_property) {
6602                 *val = dm_state->underscan_hborder;
6603                 ret = 0;
6604         } else if (property == adev->mode_info.underscan_vborder_property) {
6605                 *val = dm_state->underscan_vborder;
6606                 ret = 0;
6607         } else if (property == adev->mode_info.underscan_property) {
6608                 *val = dm_state->underscan_enable;
6609                 ret = 0;
6610         } else if (property == adev->mode_info.abm_level_property) {
6611                 *val = dm_state->abm_level;
6612                 ret = 0;
6613         }
6614
6615         return ret;
6616 }
6617
6618 static void amdgpu_dm_connector_unregister(struct drm_connector *connector)
6619 {
6620         struct amdgpu_dm_connector *amdgpu_dm_connector = to_amdgpu_dm_connector(connector);
6621
6622         drm_dp_aux_unregister(&amdgpu_dm_connector->dm_dp_aux.aux);
6623 }
6624
6625 static void amdgpu_dm_connector_destroy(struct drm_connector *connector)
6626 {
6627         struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
6628         const struct dc_link *link = aconnector->dc_link;
6629         struct amdgpu_device *adev = drm_to_adev(connector->dev);
6630         struct amdgpu_display_manager *dm = &adev->dm;
6631         int i;
6632
6633         /*
6634          * Call only if mst_mgr was initialized before, since it's not done
6635          * for all connector types.
6636          */
6637         if (aconnector->mst_mgr.dev)
6638                 drm_dp_mst_topology_mgr_destroy(&aconnector->mst_mgr);
6639
6640 #if defined(CONFIG_BACKLIGHT_CLASS_DEVICE) ||\
6641         defined(CONFIG_BACKLIGHT_CLASS_DEVICE_MODULE)
6642         for (i = 0; i < dm->num_of_edps; i++) {
6643                 if ((link == dm->backlight_link[i]) && dm->backlight_dev[i]) {
6644                         backlight_device_unregister(dm->backlight_dev[i]);
6645                         dm->backlight_dev[i] = NULL;
6646                 }
6647         }
6648 #endif
6649
6650         if (aconnector->dc_em_sink)
6651                 dc_sink_release(aconnector->dc_em_sink);
6652         aconnector->dc_em_sink = NULL;
6653         if (aconnector->dc_sink)
6654                 dc_sink_release(aconnector->dc_sink);
6655         aconnector->dc_sink = NULL;
6656
6657         drm_dp_cec_unregister_connector(&aconnector->dm_dp_aux.aux);
6658         drm_connector_unregister(connector);
6659         drm_connector_cleanup(connector);
6660         if (aconnector->i2c) {
6661                 i2c_del_adapter(&aconnector->i2c->base);
6662                 kfree(aconnector->i2c);
6663         }
6664         kfree(aconnector->dm_dp_aux.aux.name);
6665
6666         kfree(connector);
6667 }
6668
6669 void amdgpu_dm_connector_funcs_reset(struct drm_connector *connector)
6670 {
6671         struct dm_connector_state *state =
6672                 to_dm_connector_state(connector->state);
6673
6674         if (connector->state)
6675                 __drm_atomic_helper_connector_destroy_state(connector->state);
6676
6677         kfree(state);
6678
6679         state = kzalloc(sizeof(*state), GFP_KERNEL);
6680
6681         if (state) {
6682                 state->scaling = RMX_OFF;
6683                 state->underscan_enable = false;
6684                 state->underscan_hborder = 0;
6685                 state->underscan_vborder = 0;
6686                 state->base.max_requested_bpc = 8;
6687                 state->vcpi_slots = 0;
6688                 state->pbn = 0;
6689                 if (connector->connector_type == DRM_MODE_CONNECTOR_eDP)
6690                         state->abm_level = amdgpu_dm_abm_level;
6691
6692                 __drm_atomic_helper_connector_reset(connector, &state->base);
6693         }
6694 }
6695
6696 struct drm_connector_state *
6697 amdgpu_dm_connector_atomic_duplicate_state(struct drm_connector *connector)
6698 {
6699         struct dm_connector_state *state =
6700                 to_dm_connector_state(connector->state);
6701
6702         struct dm_connector_state *new_state =
6703                         kmemdup(state, sizeof(*state), GFP_KERNEL);
6704
6705         if (!new_state)
6706                 return NULL;
6707
6708         __drm_atomic_helper_connector_duplicate_state(connector, &new_state->base);
6709
6710         new_state->freesync_capable = state->freesync_capable;
6711         new_state->abm_level = state->abm_level;
6712         new_state->scaling = state->scaling;
6713         new_state->underscan_enable = state->underscan_enable;
6714         new_state->underscan_hborder = state->underscan_hborder;
6715         new_state->underscan_vborder = state->underscan_vborder;
6716         new_state->vcpi_slots = state->vcpi_slots;
6717         new_state->pbn = state->pbn;
6718         return &new_state->base;
6719 }
6720
6721 static int
6722 amdgpu_dm_connector_late_register(struct drm_connector *connector)
6723 {
6724         struct amdgpu_dm_connector *amdgpu_dm_connector =
6725                 to_amdgpu_dm_connector(connector);
6726         int r;
6727
6728         if ((connector->connector_type == DRM_MODE_CONNECTOR_DisplayPort) ||
6729             (connector->connector_type == DRM_MODE_CONNECTOR_eDP)) {
6730                 amdgpu_dm_connector->dm_dp_aux.aux.dev = connector->kdev;
6731                 r = drm_dp_aux_register(&amdgpu_dm_connector->dm_dp_aux.aux);
6732                 if (r)
6733                         return r;
6734         }
6735
6736 #if defined(CONFIG_DEBUG_FS)
6737         connector_debugfs_init(amdgpu_dm_connector);
6738 #endif
6739
6740         return 0;
6741 }
6742
6743 static const struct drm_connector_funcs amdgpu_dm_connector_funcs = {
6744         .reset = amdgpu_dm_connector_funcs_reset,
6745         .detect = amdgpu_dm_connector_detect,
6746         .fill_modes = drm_helper_probe_single_connector_modes,
6747         .destroy = amdgpu_dm_connector_destroy,
6748         .atomic_duplicate_state = amdgpu_dm_connector_atomic_duplicate_state,
6749         .atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
6750         .atomic_set_property = amdgpu_dm_connector_atomic_set_property,
6751         .atomic_get_property = amdgpu_dm_connector_atomic_get_property,
6752         .late_register = amdgpu_dm_connector_late_register,
6753         .early_unregister = amdgpu_dm_connector_unregister
6754 };
6755
6756 static int get_modes(struct drm_connector *connector)
6757 {
6758         return amdgpu_dm_connector_get_modes(connector);
6759 }
6760
6761 static void create_eml_sink(struct amdgpu_dm_connector *aconnector)
6762 {
6763         struct dc_sink_init_data init_params = {
6764                         .link = aconnector->dc_link,
6765                         .sink_signal = SIGNAL_TYPE_VIRTUAL
6766         };
6767         struct edid *edid;
6768
6769         if (!aconnector->base.edid_blob_ptr) {
6770                 DRM_ERROR("No EDID firmware found on connector: %s, forcing to OFF!\n",
6771                                 aconnector->base.name);
6772
6773                 aconnector->base.force = DRM_FORCE_OFF;
6774                 aconnector->base.override_edid = false;
6775                 return;
6776         }
6777
6778         edid = (struct edid *) aconnector->base.edid_blob_ptr->data;
6779
6780         aconnector->edid = edid;
6781
6782         aconnector->dc_em_sink = dc_link_add_remote_sink(
6783                 aconnector->dc_link,
6784                 (uint8_t *)edid,
6785                 (edid->extensions + 1) * EDID_LENGTH,
6786                 &init_params);
6787
6788         if (aconnector->base.force == DRM_FORCE_ON) {
6789                 aconnector->dc_sink = aconnector->dc_link->local_sink ?
6790                         aconnector->dc_link->local_sink :
6791                         aconnector->dc_em_sink;
6792                 dc_sink_retain(aconnector->dc_sink);
6793         }
6794 }
6795
6796 static void handle_edid_mgmt(struct amdgpu_dm_connector *aconnector)
6797 {
6798         struct dc_link *link = (struct dc_link *)aconnector->dc_link;
6799
6800         /*
6801          * In case of a headless boot with force on for a DP managed connector,
6802          * those settings have to be != 0 to get an initial modeset.
6803          */
6804         if (link->connector_signal == SIGNAL_TYPE_DISPLAY_PORT) {
6805                 link->verified_link_cap.lane_count = LANE_COUNT_FOUR;
6806                 link->verified_link_cap.link_rate = LINK_RATE_HIGH2;
6807         }
6808
6809
6810         aconnector->base.override_edid = true;
6811         create_eml_sink(aconnector);
6812 }
6813
6814 static struct dc_stream_state *
6815 create_validate_stream_for_sink(struct amdgpu_dm_connector *aconnector,
6816                                 const struct drm_display_mode *drm_mode,
6817                                 const struct dm_connector_state *dm_state,
6818                                 const struct dc_stream_state *old_stream)
6819 {
6820         struct drm_connector *connector = &aconnector->base;
6821         struct amdgpu_device *adev = drm_to_adev(connector->dev);
6822         struct dc_stream_state *stream;
6823         const struct drm_connector_state *drm_state = dm_state ? &dm_state->base : NULL;
6824         int requested_bpc = drm_state ? drm_state->max_requested_bpc : 8;
6825         enum dc_status dc_result = DC_OK;
6826
6827         do {
6828                 stream = create_stream_for_sink(aconnector, drm_mode,
6829                                                 dm_state, old_stream,
6830                                                 requested_bpc);
6831                 if (stream == NULL) {
6832                         DRM_ERROR("Failed to create stream for sink!\n");
6833                         break;
6834                 }
6835
6836                 dc_result = dc_validate_stream(adev->dm.dc, stream);
6837
6838                 if (dc_result != DC_OK) {
6839                         DRM_DEBUG_KMS("Mode %dx%d (clk %d) failed DC validation with error %d (%s)\n",
6840                                       drm_mode->hdisplay,
6841                                       drm_mode->vdisplay,
6842                                       drm_mode->clock,
6843                                       dc_result,
6844                                       dc_status_to_str(dc_result));
6845
6846                         dc_stream_release(stream);
6847                         stream = NULL;
6848                         requested_bpc -= 2; /* lower bpc to retry validation */
6849                 }
6850
6851         } while (stream == NULL && requested_bpc >= 6);
6852
6853         if (dc_result == DC_FAIL_ENC_VALIDATE && !aconnector->force_yuv420_output) {
6854                 DRM_DEBUG_KMS("Retry forcing YCbCr420 encoding\n");
6855
6856                 aconnector->force_yuv420_output = true;
6857                 stream = create_validate_stream_for_sink(aconnector, drm_mode,
6858                                                 dm_state, old_stream);
6859                 aconnector->force_yuv420_output = false;
6860         }
6861
6862         return stream;
6863 }
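
/*
 * Illustrative walk-through of the fallback above (a sketch; assumes a
 * connector whose max_requested_bpc property is 10):
 *
 *   requested_bpc = 10 -> dc_validate_stream() fails
 *   requested_bpc = 8  -> dc_validate_stream() fails
 *   requested_bpc = 6  -> dc_validate_stream() fails, loop exits
 *   dc_result == DC_FAIL_ENC_VALIDATE -> one recursive retry with
 *   force_yuv420_output set
 *
 * Only when every step fails does the caller see a NULL stream.
 */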
6864
6865 enum drm_mode_status amdgpu_dm_connector_mode_valid(struct drm_connector *connector,
6866                                    struct drm_display_mode *mode)
6867 {
6868         int result = MODE_ERROR;
6869         struct dc_sink *dc_sink;
6870         /* TODO: Unhardcode stream count */
6871         struct dc_stream_state *stream;
6872         struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
6873
6874         if ((mode->flags & DRM_MODE_FLAG_INTERLACE) ||
6875                         (mode->flags & DRM_MODE_FLAG_DBLSCAN))
6876                 return result;
6877
6878         /*
6879          * Only run this the first time mode_valid is called to initialize
6880          * EDID mgmt
6881          */
6882         if (aconnector->base.force != DRM_FORCE_UNSPECIFIED &&
6883                 !aconnector->dc_em_sink)
6884                 handle_edid_mgmt(aconnector);
6885
6886         dc_sink = to_amdgpu_dm_connector(connector)->dc_sink;
6887
6888         if (dc_sink == NULL && aconnector->base.force != DRM_FORCE_ON_DIGITAL &&
6889                                 aconnector->base.force != DRM_FORCE_ON) {
6890                 DRM_ERROR("dc_sink is NULL!\n");
6891                 goto fail;
6892         }
6893
6894         stream = create_validate_stream_for_sink(aconnector, mode, NULL, NULL);
6895         if (stream) {
6896                 dc_stream_release(stream);
6897                 result = MODE_OK;
6898         }
6899
6900 fail:
6901         /* TODO: error handling */
6902         return result;
6903 }
6904
6905 static int fill_hdr_info_packet(const struct drm_connector_state *state,
6906                                 struct dc_info_packet *out)
6907 {
6908         struct hdmi_drm_infoframe frame;
6909         unsigned char buf[30]; /* 26 + 4 */
6910         ssize_t len;
6911         int ret, i;
6912
6913         memset(out, 0, sizeof(*out));
6914
6915         if (!state->hdr_output_metadata)
6916                 return 0;
6917
6918         ret = drm_hdmi_infoframe_set_hdr_metadata(&frame, state);
6919         if (ret)
6920                 return ret;
6921
6922         len = hdmi_drm_infoframe_pack_only(&frame, buf, sizeof(buf));
6923         if (len < 0)
6924                 return (int)len;
6925
6926         /* Static metadata is a fixed 26 bytes + 4 byte header. */
6927         if (len != 30)
6928                 return -EINVAL;
6929
6930         /* Prepare the infopacket for DC. */
6931         switch (state->connector->connector_type) {
6932         case DRM_MODE_CONNECTOR_HDMIA:
6933                 out->hb0 = 0x87; /* type */
6934                 out->hb1 = 0x01; /* version */
6935                 out->hb2 = 0x1A; /* length */
6936                 out->sb[0] = buf[3]; /* checksum */
6937                 i = 1;
6938                 break;
6939
6940         case DRM_MODE_CONNECTOR_DisplayPort:
6941         case DRM_MODE_CONNECTOR_eDP:
6942                 out->hb0 = 0x00; /* sdp id, zero */
6943                 out->hb1 = 0x87; /* type */
6944                 out->hb2 = 0x1D; /* payload len - 1 */
6945                 out->hb3 = (0x13 << 2); /* sdp version */
6946                 out->sb[0] = 0x01; /* version */
6947                 out->sb[1] = 0x1A; /* length */
6948                 i = 2;
6949                 break;
6950
6951         default:
6952                 return -EINVAL;
6953         }
6954
6955         memcpy(&out->sb[i], &buf[4], 26);
6956         out->valid = true;
6957
6958         print_hex_dump(KERN_DEBUG, "HDR SB:", DUMP_PREFIX_NONE, 16, 1, out->sb,
6959                        sizeof(out->sb), false);
6960
6961         return 0;
6962 }
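
/*
 * Sketch of the 30-byte buffer packed by hdmi_drm_infoframe_pack_only()
 * above, to make the copy offsets easier to follow:
 *
 *   buf[0]     infoframe type (0x87)
 *   buf[1]     version (0x01)
 *   buf[2]     payload length (0x1A == 26)
 *   buf[3]     checksum
 *   buf[4..29] 26 bytes of static HDR metadata
 *
 * HDMI keeps buf[3] as sb[0]; DP/eDP rebuilds the header as an SDP and
 * copies only buf[4..29], starting at sb[2].
 */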
6963
6964 static int
6965 amdgpu_dm_connector_atomic_check(struct drm_connector *conn,
6966                                  struct drm_atomic_state *state)
6967 {
6968         struct drm_connector_state *new_con_state =
6969                 drm_atomic_get_new_connector_state(state, conn);
6970         struct drm_connector_state *old_con_state =
6971                 drm_atomic_get_old_connector_state(state, conn);
6972         struct drm_crtc *crtc = new_con_state->crtc;
6973         struct drm_crtc_state *new_crtc_state;
6974         int ret;
6975
6976         trace_amdgpu_dm_connector_atomic_check(new_con_state);
6977
6978         if (!crtc)
6979                 return 0;
6980
6981         if (!drm_connector_atomic_hdr_metadata_equal(old_con_state, new_con_state)) {
6982                 struct dc_info_packet hdr_infopacket;
6983
6984                 ret = fill_hdr_info_packet(new_con_state, &hdr_infopacket);
6985                 if (ret)
6986                         return ret;
6987
6988                 new_crtc_state = drm_atomic_get_crtc_state(state, crtc);
6989                 if (IS_ERR(new_crtc_state))
6990                         return PTR_ERR(new_crtc_state);
6991
6992                 /*
6993                  * DC considers the stream backends changed if the
6994                  * static metadata changes. Forcing the modeset also
6995                  * gives a simple way for userspace to switch from
6996                  * 8bpc to 10bpc when setting the metadata to enter
6997                  * or exit HDR.
6998                  *
6999                  * Changing the static metadata after it's been
7000                  * set is permissible, however. So only force a
7001                  * modeset if we're entering or exiting HDR.
7002                  */
7003                 new_crtc_state->mode_changed =
7004                         !old_con_state->hdr_output_metadata ||
7005                         !new_con_state->hdr_output_metadata;
7006         }
7007
7008         return 0;
7009 }
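
/*
 * Truth table for the mode_changed rule above (only reached when the HDR
 * metadata blobs differ):
 *
 *   old metadata | new metadata | forced modeset?
 *   -------------+--------------+------------------------------
 *   NULL         | set          | yes (entering HDR)
 *   set          | NULL         | yes (exiting HDR)
 *   set          | set          | no  (metadata-only fast update)
 */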
7010
7011 static const struct drm_connector_helper_funcs
7012 amdgpu_dm_connector_helper_funcs = {
7013         /*
7014          * If hotplugging a second, bigger display in FB console mode, bigger
7015          * resolution modes will be filtered by drm_mode_validate_size() and go
7016          * missing after the user starts lightdm. So renew the modes list in the
7017          * get_modes callback, not just return the modes count.
7018          */
7019         .get_modes = get_modes,
7020         .mode_valid = amdgpu_dm_connector_mode_valid,
7021         .atomic_check = amdgpu_dm_connector_atomic_check,
7022 };
7023
7024 static void dm_crtc_helper_disable(struct drm_crtc *crtc)
7025 {
7026 }
7027
7028 static int count_crtc_active_planes(struct drm_crtc_state *new_crtc_state)
7029 {
7030         struct drm_atomic_state *state = new_crtc_state->state;
7031         struct drm_plane *plane;
7032         int num_active = 0;
7033
7034         drm_for_each_plane_mask(plane, state->dev, new_crtc_state->plane_mask) {
7035                 struct drm_plane_state *new_plane_state;
7036
7037                 /* Cursor planes are "fake". */
7038                 if (plane->type == DRM_PLANE_TYPE_CURSOR)
7039                         continue;
7040
7041                 new_plane_state = drm_atomic_get_new_plane_state(state, plane);
7042
7043                 if (!new_plane_state) {
7044                         /*
7045                          * The plane is enabled on the CRTC and hasn't changed
7046                          * state. This means that it previously passed
7047                          * validation and is therefore enabled.
7048                          */
7049                         num_active += 1;
7050                         continue;
7051                 }
7052
7053                 /* We need a framebuffer to be considered enabled. */
7054                 num_active += (new_plane_state->fb != NULL);
7055         }
7056
7057         return num_active;
7058 }
7059
7060 static void dm_update_crtc_active_planes(struct drm_crtc *crtc,
7061                                          struct drm_crtc_state *new_crtc_state)
7062 {
7063         struct dm_crtc_state *dm_new_crtc_state =
7064                 to_dm_crtc_state(new_crtc_state);
7065
7066         dm_new_crtc_state->active_planes = 0;
7067
7068         if (!dm_new_crtc_state->stream)
7069                 return;
7070
7071         dm_new_crtc_state->active_planes =
7072                 count_crtc_active_planes(new_crtc_state);
7073 }
7074
7075 static int dm_crtc_helper_atomic_check(struct drm_crtc *crtc,
7076                                        struct drm_atomic_state *state)
7077 {
7078         struct drm_crtc_state *crtc_state = drm_atomic_get_new_crtc_state(state,
7079                                                                           crtc);
7080         struct amdgpu_device *adev = drm_to_adev(crtc->dev);
7081         struct dc *dc = adev->dm.dc;
7082         struct dm_crtc_state *dm_crtc_state = to_dm_crtc_state(crtc_state);
7083         int ret = -EINVAL;
7084
7085         trace_amdgpu_dm_crtc_atomic_check(crtc_state);
7086
7087         dm_update_crtc_active_planes(crtc, crtc_state);
7088
7089         if (WARN_ON(unlikely(!dm_crtc_state->stream &&
7090                      modeset_required(crtc_state, NULL, dm_crtc_state->stream)))) {
7091                 return ret;
7092         }
7093
7094         /*
7095          * We require the primary plane to be enabled whenever the CRTC is, otherwise
7096          * drm_mode_cursor_universal may end up trying to enable the cursor plane while all other
7097          * planes are disabled, which is not supported by the hardware. And there is legacy
7098          * userspace which stops using the HW cursor altogether in response to the resulting EINVAL.
7099          */
7100         if (crtc_state->enable &&
7101             !(crtc_state->plane_mask & drm_plane_mask(crtc->primary))) {
7102                 DRM_DEBUG_ATOMIC("Can't enable a CRTC without enabling the primary plane\n");
7103                 return -EINVAL;
7104         }
7105
7106         /* In some use cases, like reset, no stream is attached */
7107         if (!dm_crtc_state->stream)
7108                 return 0;
7109
7110         if (dc_validate_stream(dc, dm_crtc_state->stream) == DC_OK)
7111                 return 0;
7112
7113         DRM_DEBUG_ATOMIC("Failed DC stream validation\n");
7114         return ret;
7115 }
7116
7117 static bool dm_crtc_helper_mode_fixup(struct drm_crtc *crtc,
7118                                       const struct drm_display_mode *mode,
7119                                       struct drm_display_mode *adjusted_mode)
7120 {
7121         return true;
7122 }
7123
7124 static const struct drm_crtc_helper_funcs amdgpu_dm_crtc_helper_funcs = {
7125         .disable = dm_crtc_helper_disable,
7126         .atomic_check = dm_crtc_helper_atomic_check,
7127         .mode_fixup = dm_crtc_helper_mode_fixup,
7128         .get_scanout_position = amdgpu_crtc_get_scanout_position,
7129 };
7130
7131 static void dm_encoder_helper_disable(struct drm_encoder *encoder)
7132 {
7133
7134 }
7135
7136 static int convert_dc_color_depth_into_bpc(enum dc_color_depth display_color_depth)
7137 {
7138         switch (display_color_depth) {
7139         case COLOR_DEPTH_666:
7140                 return 6;
7141         case COLOR_DEPTH_888:
7142                 return 8;
7143         case COLOR_DEPTH_101010:
7144                 return 10;
7145         case COLOR_DEPTH_121212:
7146                 return 12;
7147         case COLOR_DEPTH_141414:
7148                 return 14;
7149         case COLOR_DEPTH_161616:
7150                 return 16;
7151         default:
7152                 break;
7153         }
7154         return 0;
7155 }
7156
7157 static int dm_encoder_helper_atomic_check(struct drm_encoder *encoder,
7158                                           struct drm_crtc_state *crtc_state,
7159                                           struct drm_connector_state *conn_state)
7160 {
7161         struct drm_atomic_state *state = crtc_state->state;
7162         struct drm_connector *connector = conn_state->connector;
7163         struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
7164         struct dm_connector_state *dm_new_connector_state = to_dm_connector_state(conn_state);
7165         const struct drm_display_mode *adjusted_mode = &crtc_state->adjusted_mode;
7166         struct drm_dp_mst_topology_mgr *mst_mgr;
7167         struct drm_dp_mst_port *mst_port;
7168         enum dc_color_depth color_depth;
7169         int clock, bpp = 0;
7170         bool is_y420 = false;
7171
7172         if (!aconnector->port || !aconnector->dc_sink)
7173                 return 0;
7174
7175         mst_port = aconnector->port;
7176         mst_mgr = &aconnector->mst_port->mst_mgr;
7177
7178         if (!crtc_state->connectors_changed && !crtc_state->mode_changed)
7179                 return 0;
7180
7181         if (!state->duplicated) {
7182                 int max_bpc = conn_state->max_requested_bpc;
7183                 is_y420 = drm_mode_is_420_also(&connector->display_info, adjusted_mode) &&
7184                                 aconnector->force_yuv420_output;
7185                 color_depth = convert_color_depth_from_display_info(connector,
7186                                                                     is_y420,
7187                                                                     max_bpc);
7188                 bpp = convert_dc_color_depth_into_bpc(color_depth) * 3;
7189                 clock = adjusted_mode->clock;
7190                 dm_new_connector_state->pbn = drm_dp_calc_pbn_mode(clock, bpp, false);
7191         }
7192         dm_new_connector_state->vcpi_slots = drm_dp_atomic_find_vcpi_slots(state,
7193                                                                            mst_mgr,
7194                                                                            mst_port,
7195                                                                            dm_new_connector_state->pbn,
7196                                                                            dm_mst_get_pbn_divider(aconnector->dc_link));
7197         if (dm_new_connector_state->vcpi_slots < 0) {
7198                 DRM_DEBUG_ATOMIC("failed finding vcpi slots: %d\n", (int)dm_new_connector_state->vcpi_slots);
7199                 return dm_new_connector_state->vcpi_slots;
7200         }
7201         return 0;
7202 }
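
/*
 * Rough worked example for the MST bandwidth check above (numbers are
 * illustrative): a 1920x1080@60 mode with a 148500 kHz pixel clock at
 * 8 bpc RGB gives bpp = 8 * 3 = 24, so
 *
 *   pbn = drm_dp_calc_pbn_mode(148500, 24, false) ~= 530
 *
 * On a 4-lane HBR2 link, dm_mst_get_pbn_divider() yields a PBN-per-slot
 * value of about 40, so drm_dp_atomic_find_vcpi_slots() would reserve
 * roughly 530 / 40 ~= 14 of the link's 64 MST time slots.
 */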
7203
7204 const struct drm_encoder_helper_funcs amdgpu_dm_encoder_helper_funcs = {
7205         .disable = dm_encoder_helper_disable,
7206         .atomic_check = dm_encoder_helper_atomic_check
7207 };
7208
7209 #if defined(CONFIG_DRM_AMD_DC_DCN)
7210 static int dm_update_mst_vcpi_slots_for_dsc(struct drm_atomic_state *state,
7211                                             struct dc_state *dc_state,
7212                                             struct dsc_mst_fairness_vars *vars)
7213 {
7214         struct dc_stream_state *stream = NULL;
7215         struct drm_connector *connector;
7216         struct drm_connector_state *new_con_state;
7217         struct amdgpu_dm_connector *aconnector;
7218         struct dm_connector_state *dm_conn_state;
7219         int i, j, clock;
7220         int vcpi, pbn_div, pbn = 0;
7221
7222         for_each_new_connector_in_state(state, connector, new_con_state, i) {
7223
7224                 aconnector = to_amdgpu_dm_connector(connector);
7225
7226                 if (!aconnector->port)
7227                         continue;
7228
7229                 if (!new_con_state || !new_con_state->crtc)
7230                         continue;
7231
7232                 dm_conn_state = to_dm_connector_state(new_con_state);
7233
7234                 for (j = 0; j < dc_state->stream_count; j++) {
7235                         stream = dc_state->streams[j];
7236                         if (!stream)
7237                                 continue;
7238
7239                         if ((struct amdgpu_dm_connector *)stream->dm_stream_context == aconnector)
7240                                 break;
7241
7242                         stream = NULL;
7243                 }
7244
7245                 if (!stream)
7246                         continue;
7247
7248                 if (stream->timing.flags.DSC != 1) {
7249                         drm_dp_mst_atomic_enable_dsc(state,
7250                                                      aconnector->port,
7251                                                      dm_conn_state->pbn,
7252                                                      0,
7253                                                      false);
7254                         continue;
7255                 }
7256
7257                 pbn_div = dm_mst_get_pbn_divider(stream->link);
7258                 clock = stream->timing.pix_clk_100hz / 10;
7259                 /* pbn is calculated by compute_mst_dsc_configs_for_state */
7260                 for (j = 0; j < dc_state->stream_count; j++) {
7261                         if (vars[j].aconnector == aconnector) {
7262                                 pbn = vars[j].pbn;
7263                                 break;
7264                         }
7265                 }
7266
7267                 vcpi = drm_dp_mst_atomic_enable_dsc(state,
7268                                                     aconnector->port,
7269                                                     pbn, pbn_div,
7270                                                     true);
7271                 if (vcpi < 0)
7272                         return vcpi;
7273
7274                 dm_conn_state->pbn = pbn;
7275                 dm_conn_state->vcpi_slots = vcpi;
7276         }
7277         return 0;
7278 }
7279 #endif
7280
7281 static void dm_drm_plane_reset(struct drm_plane *plane)
7282 {
7283         struct dm_plane_state *amdgpu_state = NULL;
7284
7285         if (plane->state)
7286                 plane->funcs->atomic_destroy_state(plane, plane->state);
7287
7288         amdgpu_state = kzalloc(sizeof(*amdgpu_state), GFP_KERNEL);
7289         WARN_ON(amdgpu_state == NULL);
7290
7291         if (amdgpu_state)
7292                 __drm_atomic_helper_plane_reset(plane, &amdgpu_state->base);
7293 }
7294
7295 static struct drm_plane_state *
7296 dm_drm_plane_duplicate_state(struct drm_plane *plane)
7297 {
7298         struct dm_plane_state *dm_plane_state, *old_dm_plane_state;
7299
7300         old_dm_plane_state = to_dm_plane_state(plane->state);
7301         dm_plane_state = kzalloc(sizeof(*dm_plane_state), GFP_KERNEL);
7302         if (!dm_plane_state)
7303                 return NULL;
7304
7305         __drm_atomic_helper_plane_duplicate_state(plane, &dm_plane_state->base);
7306
7307         if (old_dm_plane_state->dc_state) {
7308                 dm_plane_state->dc_state = old_dm_plane_state->dc_state;
7309                 dc_plane_state_retain(dm_plane_state->dc_state);
7310         }
7311
7312         return &dm_plane_state->base;
7313 }
7314
7315 static void dm_drm_plane_destroy_state(struct drm_plane *plane,
7316                                 struct drm_plane_state *state)
7317 {
7318         struct dm_plane_state *dm_plane_state = to_dm_plane_state(state);
7319
7320         if (dm_plane_state->dc_state)
7321                 dc_plane_state_release(dm_plane_state->dc_state);
7322
7323         drm_atomic_helper_plane_destroy_state(plane, state);
7324 }
7325
7326 static const struct drm_plane_funcs dm_plane_funcs = {
7327         .update_plane   = drm_atomic_helper_update_plane,
7328         .disable_plane  = drm_atomic_helper_disable_plane,
7329         .destroy        = drm_primary_helper_destroy,
7330         .reset = dm_drm_plane_reset,
7331         .atomic_duplicate_state = dm_drm_plane_duplicate_state,
7332         .atomic_destroy_state = dm_drm_plane_destroy_state,
7333         .format_mod_supported = dm_plane_format_mod_supported,
7334 };
7335
7336 static int dm_plane_helper_prepare_fb(struct drm_plane *plane,
7337                                       struct drm_plane_state *new_state)
7338 {
7339         struct amdgpu_framebuffer *afb;
7340         struct drm_gem_object *obj;
7341         struct amdgpu_device *adev;
7342         struct amdgpu_bo *rbo;
7343         struct dm_plane_state *dm_plane_state_new, *dm_plane_state_old;
7344         struct list_head list;
7345         struct ttm_validate_buffer tv;
7346         struct ww_acquire_ctx ticket;
7347         uint32_t domain;
7348         int r;
7349
7350         if (!new_state->fb) {
7351                 DRM_DEBUG_KMS("No FB bound\n");
7352                 return 0;
7353         }
7354
7355         afb = to_amdgpu_framebuffer(new_state->fb);
7356         obj = new_state->fb->obj[0];
7357         rbo = gem_to_amdgpu_bo(obj);
7358         adev = amdgpu_ttm_adev(rbo->tbo.bdev);
7359         INIT_LIST_HEAD(&list);
7360
7361         tv.bo = &rbo->tbo;
7362         tv.num_shared = 1;
7363         list_add(&tv.head, &list);
7364
7365         r = ttm_eu_reserve_buffers(&ticket, &list, false, NULL);
7366         if (r) {
7367                 dev_err(adev->dev, "failed to reserve bo (%d)\n", r);
7368                 return r;
7369         }
7370
7371         if (plane->type != DRM_PLANE_TYPE_CURSOR)
7372                 domain = amdgpu_display_supported_domains(adev, rbo->flags);
7373         else
7374                 domain = AMDGPU_GEM_DOMAIN_VRAM;
7375
7376         r = amdgpu_bo_pin(rbo, domain);
7377         if (unlikely(r != 0)) {
7378                 if (r != -ERESTARTSYS)
7379                         DRM_ERROR("Failed to pin framebuffer with error %d\n", r);
7380                 ttm_eu_backoff_reservation(&ticket, &list);
7381                 return r;
7382         }
7383
7384         r = amdgpu_ttm_alloc_gart(&rbo->tbo);
7385         if (unlikely(r != 0)) {
7386                 amdgpu_bo_unpin(rbo);
7387                 ttm_eu_backoff_reservation(&ticket, &list);
7388                 DRM_ERROR("%p bind failed\n", rbo);
7389                 return r;
7390         }
7391
7392         ttm_eu_backoff_reservation(&ticket, &list);
7393
7394         afb->address = amdgpu_bo_gpu_offset(rbo);
7395
7396         amdgpu_bo_ref(rbo);
7397
7398         /*
7399          * We don't do surface updates on planes that have been newly created,
7400          * but we also don't have the afb->address during atomic check.
7401          *
7402          * Fill in buffer attributes depending on the address here, but only on
7403          * newly created planes since they're not being used by DC yet and this
7404          * won't modify global state.
7405          */
7406         dm_plane_state_old = to_dm_plane_state(plane->state);
7407         dm_plane_state_new = to_dm_plane_state(new_state);
7408
7409         if (dm_plane_state_new->dc_state &&
7410             dm_plane_state_old->dc_state != dm_plane_state_new->dc_state) {
7411                 struct dc_plane_state *plane_state =
7412                         dm_plane_state_new->dc_state;
7413                 bool force_disable_dcc = !plane_state->dcc.enable;
7414
7415                 fill_plane_buffer_attributes(
7416                         adev, afb, plane_state->format, plane_state->rotation,
7417                         afb->tiling_flags,
7418                         &plane_state->tiling_info, &plane_state->plane_size,
7419                         &plane_state->dcc, &plane_state->address,
7420                         afb->tmz_surface, force_disable_dcc);
7421         }
7422
7423         return 0;
7424 }
7425
7426 static void dm_plane_helper_cleanup_fb(struct drm_plane *plane,
7427                                        struct drm_plane_state *old_state)
7428 {
7429         struct amdgpu_bo *rbo;
7430         int r;
7431
7432         if (!old_state->fb)
7433                 return;
7434
7435         rbo = gem_to_amdgpu_bo(old_state->fb->obj[0]);
7436         r = amdgpu_bo_reserve(rbo, false);
7437         if (unlikely(r)) {
7438                 DRM_ERROR("failed to reserve rbo before unpin\n");
7439                 return;
7440         }
7441
7442         amdgpu_bo_unpin(rbo);
7443         amdgpu_bo_unreserve(rbo);
7444         amdgpu_bo_unref(&rbo);
7445 }
7446
7447 static int dm_plane_helper_check_state(struct drm_plane_state *state,
7448                                        struct drm_crtc_state *new_crtc_state)
7449 {
7450         struct drm_framebuffer *fb = state->fb;
7451         int min_downscale, max_upscale;
7452         int min_scale = 0;
7453         int max_scale = INT_MAX;
7454
7455         /* Plane enabled? Validate viewport and get scaling factors from plane caps. */
7456         if (fb && state->crtc) {
7457                 /* Validate viewport to cover the case when only the position changes */
7458                 if (state->plane->type != DRM_PLANE_TYPE_CURSOR) {
7459                         int viewport_width = state->crtc_w;
7460                         int viewport_height = state->crtc_h;
7461
7462                         if (state->crtc_x < 0)
7463                                 viewport_width += state->crtc_x;
7464                         else if (state->crtc_x + state->crtc_w > new_crtc_state->mode.crtc_hdisplay)
7465                                 viewport_width = new_crtc_state->mode.crtc_hdisplay - state->crtc_x;
7466
7467                         if (state->crtc_y < 0)
7468                                 viewport_height += state->crtc_y;
7469                         else if (state->crtc_y + state->crtc_h > new_crtc_state->mode.crtc_vdisplay)
7470                                 viewport_height = new_crtc_state->mode.crtc_vdisplay - state->crtc_y;
7471
7472                         if (viewport_width < 0 || viewport_height < 0) {
7473                                 DRM_DEBUG_ATOMIC("Plane completely outside of screen\n");
7474                                 return -EINVAL;
7475                         } else if (viewport_width < MIN_VIEWPORT_SIZE*2) { /* x2 for width is because of pipe-split. */
7476                                 DRM_DEBUG_ATOMIC("Viewport width %d smaller than %d\n", viewport_width, MIN_VIEWPORT_SIZE*2);
7477                                 return -EINVAL;
7478                         } else if (viewport_height < MIN_VIEWPORT_SIZE) {
7479                                 DRM_DEBUG_ATOMIC("Viewport height %d smaller than %d\n", viewport_height, MIN_VIEWPORT_SIZE);
7480                                 return -EINVAL;
7481                         }
7482
7483                 }
7484
7485                 /* Get min/max allowed scaling factors from plane caps. */
7486                 get_min_max_dc_plane_scaling(state->crtc->dev, fb,
7487                                              &min_downscale, &max_upscale);
7488                 /*
7489                  * Convert to drm convention: 16.16 fixed point, instead of dc's
7490                  * 1.0 == 1000. Also drm scaling is src/dst instead of dc's
7491                  * dst/src, so min_scale = 1.0 / max_upscale, etc.
7492                  */
7493                 min_scale = (1000 << 16) / max_upscale;
7494                 max_scale = (1000 << 16) / min_downscale;
7495         }
7496
7497         return drm_atomic_helper_check_plane_state(
7498                 state, new_crtc_state, min_scale, max_scale, true, true);
7499 }
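
/*
 * Worked example of the fixed-point conversion above (plane caps are
 * illustrative): with max_upscale = 16000 (16.0x in DC's 1.0 == 1000
 * convention) and min_downscale = 250 (0.25x),
 *
 *   min_scale = (1000 << 16) / 16000 = 4096   == 1/16 in 16.16
 *   max_scale = (1000 << 16) / 250   = 262144 == 4.0  in 16.16
 *
 * i.e. drm src/dst ratios between 1/16 (maximum upscale) and 4 (maximum
 * downscale) pass drm_atomic_helper_check_plane_state().
 */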
7500
7501 static int dm_plane_atomic_check(struct drm_plane *plane,
7502                                  struct drm_atomic_state *state)
7503 {
7504         struct drm_plane_state *new_plane_state = drm_atomic_get_new_plane_state(state,
7505                                                                                  plane);
7506         struct amdgpu_device *adev = drm_to_adev(plane->dev);
7507         struct dc *dc = adev->dm.dc;
7508         struct dm_plane_state *dm_plane_state;
7509         struct dc_scaling_info scaling_info;
7510         struct drm_crtc_state *new_crtc_state;
7511         int ret;
7512
7513         trace_amdgpu_dm_plane_atomic_check(new_plane_state);
7514
7515         dm_plane_state = to_dm_plane_state(new_plane_state);
7516
7517         if (!dm_plane_state->dc_state)
7518                 return 0;
7519
7520         new_crtc_state =
7521                 drm_atomic_get_new_crtc_state(state,
7522                                               new_plane_state->crtc);
7523         if (!new_crtc_state)
7524                 return -EINVAL;
7525
7526         ret = dm_plane_helper_check_state(new_plane_state, new_crtc_state);
7527         if (ret)
7528                 return ret;
7529
7530         ret = fill_dc_scaling_info(new_plane_state, &scaling_info);
7531         if (ret)
7532                 return ret;
7533
7534         if (dc_validate_plane(dc, dm_plane_state->dc_state) == DC_OK)
7535                 return 0;
7536
7537         return -EINVAL;
7538 }
7539
7540 static int dm_plane_atomic_async_check(struct drm_plane *plane,
7541                                        struct drm_atomic_state *state)
7542 {
7543         /* Only support async updates on cursor planes. */
7544         if (plane->type != DRM_PLANE_TYPE_CURSOR)
7545                 return -EINVAL;
7546
7547         return 0;
7548 }
7549
7550 static void dm_plane_atomic_async_update(struct drm_plane *plane,
7551                                          struct drm_atomic_state *state)
7552 {
7553         struct drm_plane_state *new_state = drm_atomic_get_new_plane_state(state,
7554                                                                            plane);
7555         struct drm_plane_state *old_state =
7556                 drm_atomic_get_old_plane_state(state, plane);
7557
7558         trace_amdgpu_dm_atomic_update_cursor(new_state);
7559
7560         swap(plane->state->fb, new_state->fb);
7561
7562         plane->state->src_x = new_state->src_x;
7563         plane->state->src_y = new_state->src_y;
7564         plane->state->src_w = new_state->src_w;
7565         plane->state->src_h = new_state->src_h;
7566         plane->state->crtc_x = new_state->crtc_x;
7567         plane->state->crtc_y = new_state->crtc_y;
7568         plane->state->crtc_w = new_state->crtc_w;
7569         plane->state->crtc_h = new_state->crtc_h;
7570
7571         handle_cursor_update(plane, old_state);
7572 }
7573
7574 static const struct drm_plane_helper_funcs dm_plane_helper_funcs = {
7575         .prepare_fb = dm_plane_helper_prepare_fb,
7576         .cleanup_fb = dm_plane_helper_cleanup_fb,
7577         .atomic_check = dm_plane_atomic_check,
7578         .atomic_async_check = dm_plane_atomic_async_check,
7579         .atomic_async_update = dm_plane_atomic_async_update
7580 };
7581
7582 /*
7583  * TODO: these are currently initialized to rgb formats only.
7584  * For future use cases we should either initialize them dynamically based on
7585  * plane capabilities, or initialize this array to all formats, so the internal
7586  * drm check will succeed, and let DC implement the proper check.
7587  */
7588 static const uint32_t rgb_formats[] = {
7589         DRM_FORMAT_XRGB8888,
7590         DRM_FORMAT_ARGB8888,
7591         DRM_FORMAT_RGBA8888,
7592         DRM_FORMAT_XRGB2101010,
7593         DRM_FORMAT_XBGR2101010,
7594         DRM_FORMAT_ARGB2101010,
7595         DRM_FORMAT_ABGR2101010,
7596         DRM_FORMAT_XRGB16161616,
7597         DRM_FORMAT_XBGR16161616,
7598         DRM_FORMAT_ARGB16161616,
7599         DRM_FORMAT_ABGR16161616,
7600         DRM_FORMAT_XBGR8888,
7601         DRM_FORMAT_ABGR8888,
7602         DRM_FORMAT_RGB565,
7603 };
7604
7605 static const uint32_t overlay_formats[] = {
7606         DRM_FORMAT_XRGB8888,
7607         DRM_FORMAT_ARGB8888,
7608         DRM_FORMAT_RGBA8888,
7609         DRM_FORMAT_XBGR8888,
7610         DRM_FORMAT_ABGR8888,
7611         DRM_FORMAT_RGB565
7612 };
7613
7614 static const u32 cursor_formats[] = {
7615         DRM_FORMAT_ARGB8888
7616 };
7617
7618 static int get_plane_formats(const struct drm_plane *plane,
7619                              const struct dc_plane_cap *plane_cap,
7620                              uint32_t *formats, int max_formats)
7621 {
7622         int i, num_formats = 0;
7623
7624         /*
7625          * TODO: Query support for each group of formats directly from
7626          * DC plane caps. This will require adding more formats to the
7627          * caps list.
7628          */
7629
7630         switch (plane->type) {
7631         case DRM_PLANE_TYPE_PRIMARY:
7632                 for (i = 0; i < ARRAY_SIZE(rgb_formats); ++i) {
7633                         if (num_formats >= max_formats)
7634                                 break;
7635
7636                         formats[num_formats++] = rgb_formats[i];
7637                 }
7638
7639                 if (plane_cap && plane_cap->pixel_format_support.nv12)
7640                         formats[num_formats++] = DRM_FORMAT_NV12;
7641                 if (plane_cap && plane_cap->pixel_format_support.p010)
7642                         formats[num_formats++] = DRM_FORMAT_P010;
7643                 if (plane_cap && plane_cap->pixel_format_support.fp16) {
7644                         formats[num_formats++] = DRM_FORMAT_XRGB16161616F;
7645                         formats[num_formats++] = DRM_FORMAT_ARGB16161616F;
7646                         formats[num_formats++] = DRM_FORMAT_XBGR16161616F;
7647                         formats[num_formats++] = DRM_FORMAT_ABGR16161616F;
7648                 }
7649                 break;
7650
7651         case DRM_PLANE_TYPE_OVERLAY:
7652                 for (i = 0; i < ARRAY_SIZE(overlay_formats); ++i) {
7653                         if (num_formats >= max_formats)
7654                                 break;
7655
7656                         formats[num_formats++] = overlay_formats[i];
7657                 }
7658                 break;
7659
7660         case DRM_PLANE_TYPE_CURSOR:
7661                 for (i = 0; i < ARRAY_SIZE(cursor_formats); ++i) {
7662                         if (num_formats >= max_formats)
7663                                 break;
7664
7665                         formats[num_formats++] = cursor_formats[i];
7666                 }
7667                 break;
7668         }
7669
7670         return num_formats;
7671 }
7672
7673 static int amdgpu_dm_plane_init(struct amdgpu_display_manager *dm,
7674                                 struct drm_plane *plane,
7675                                 unsigned long possible_crtcs,
7676                                 const struct dc_plane_cap *plane_cap)
7677 {
7678         uint32_t formats[32];
7679         int num_formats;
7680         int res = -EPERM;
7681         unsigned int supported_rotations;
7682         uint64_t *modifiers = NULL;
7683
7684         num_formats = get_plane_formats(plane, plane_cap, formats,
7685                                         ARRAY_SIZE(formats));
7686
7687         res = get_plane_modifiers(dm->adev, plane->type, &modifiers);
7688         if (res)
7689                 return res;
7690
7691         res = drm_universal_plane_init(adev_to_drm(dm->adev), plane, possible_crtcs,
7692                                        &dm_plane_funcs, formats, num_formats,
7693                                        modifiers, plane->type, NULL);
7694         kfree(modifiers);
7695         if (res)
7696                 return res;
7697
7698         if (plane->type == DRM_PLANE_TYPE_OVERLAY &&
7699             plane_cap && plane_cap->per_pixel_alpha) {
7700                 unsigned int blend_caps = BIT(DRM_MODE_BLEND_PIXEL_NONE) |
7701                                           BIT(DRM_MODE_BLEND_PREMULTI);
7702
7703                 drm_plane_create_alpha_property(plane);
7704                 drm_plane_create_blend_mode_property(plane, blend_caps);
7705         }
7706
7707         if (plane->type == DRM_PLANE_TYPE_PRIMARY &&
7708             plane_cap &&
7709             (plane_cap->pixel_format_support.nv12 ||
7710              plane_cap->pixel_format_support.p010)) {
7711                 /* This only affects YUV formats. */
7712                 drm_plane_create_color_properties(
7713                         plane,
7714                         BIT(DRM_COLOR_YCBCR_BT601) |
7715                         BIT(DRM_COLOR_YCBCR_BT709) |
7716                         BIT(DRM_COLOR_YCBCR_BT2020),
7717                         BIT(DRM_COLOR_YCBCR_LIMITED_RANGE) |
7718                         BIT(DRM_COLOR_YCBCR_FULL_RANGE),
7719                         DRM_COLOR_YCBCR_BT709, DRM_COLOR_YCBCR_LIMITED_RANGE);
7720         }
7721
7722         supported_rotations =
7723                 DRM_MODE_ROTATE_0 | DRM_MODE_ROTATE_90 |
7724                 DRM_MODE_ROTATE_180 | DRM_MODE_ROTATE_270;
7725
7726         if (dm->adev->asic_type >= CHIP_BONAIRE &&
7727             plane->type != DRM_PLANE_TYPE_CURSOR)
7728                 drm_plane_create_rotation_property(plane, DRM_MODE_ROTATE_0,
7729                                                    supported_rotations);
7730
7731         drm_plane_helper_add(plane, &dm_plane_helper_funcs);
7732
7733         /* Create (reset) the plane state */
7734         if (plane->funcs->reset)
7735                 plane->funcs->reset(plane);
7736
7737         return 0;
7738 }
7739
7740 static int amdgpu_dm_crtc_init(struct amdgpu_display_manager *dm,
7741                                struct drm_plane *plane,
7742                                uint32_t crtc_index)
7743 {
7744         struct amdgpu_crtc *acrtc = NULL;
7745         struct drm_plane *cursor_plane;
7746
7747         int res = -ENOMEM;
7748
7749         cursor_plane = kzalloc(sizeof(*cursor_plane), GFP_KERNEL);
7750         if (!cursor_plane)
7751                 goto fail;
7752
7753         cursor_plane->type = DRM_PLANE_TYPE_CURSOR;
7754         res = amdgpu_dm_plane_init(dm, cursor_plane, 0, NULL);
7755
7756         acrtc = kzalloc(sizeof(struct amdgpu_crtc), GFP_KERNEL);
7757         if (!acrtc)
7758                 goto fail;
7759
7760         res = drm_crtc_init_with_planes(
7761                         dm->ddev,
7762                         &acrtc->base,
7763                         plane,
7764                         cursor_plane,
7765                         &amdgpu_dm_crtc_funcs, NULL);
7766
7767         if (res)
7768                 goto fail;
7769
7770         drm_crtc_helper_add(&acrtc->base, &amdgpu_dm_crtc_helper_funcs);
7771
7772         /* Create (reset) the CRTC state */
7773         if (acrtc->base.funcs->reset)
7774                 acrtc->base.funcs->reset(&acrtc->base);
7775
7776         acrtc->max_cursor_width = dm->adev->dm.dc->caps.max_cursor_size;
7777         acrtc->max_cursor_height = dm->adev->dm.dc->caps.max_cursor_size;
7778
7779         acrtc->crtc_id = crtc_index;
7780         acrtc->base.enabled = false;
7781         acrtc->otg_inst = -1;
7782
7783         dm->adev->mode_info.crtcs[crtc_index] = acrtc;
7784         drm_crtc_enable_color_mgmt(&acrtc->base, MAX_COLOR_LUT_ENTRIES,
7785                                    true, MAX_COLOR_LUT_ENTRIES);
7786         drm_mode_crtc_set_gamma_size(&acrtc->base, MAX_COLOR_LEGACY_LUT_ENTRIES);
7787
7788         return 0;
7789
7790 fail:
7791         kfree(acrtc);
7792         kfree(cursor_plane);
7793         return res;
7794 }
7795
7796
7797 static int to_drm_connector_type(enum signal_type st)
7798 {
7799         switch (st) {
7800         case SIGNAL_TYPE_HDMI_TYPE_A:
7801                 return DRM_MODE_CONNECTOR_HDMIA;
7802         case SIGNAL_TYPE_EDP:
7803                 return DRM_MODE_CONNECTOR_eDP;
7804         case SIGNAL_TYPE_LVDS:
7805                 return DRM_MODE_CONNECTOR_LVDS;
7806         case SIGNAL_TYPE_RGB:
7807                 return DRM_MODE_CONNECTOR_VGA;
7808         case SIGNAL_TYPE_DISPLAY_PORT:
7809         case SIGNAL_TYPE_DISPLAY_PORT_MST:
7810                 return DRM_MODE_CONNECTOR_DisplayPort;
7811         case SIGNAL_TYPE_DVI_DUAL_LINK:
7812         case SIGNAL_TYPE_DVI_SINGLE_LINK:
7813                 return DRM_MODE_CONNECTOR_DVID;
7814         case SIGNAL_TYPE_VIRTUAL:
7815                 return DRM_MODE_CONNECTOR_VIRTUAL;
7816
7817         default:
7818                 return DRM_MODE_CONNECTOR_Unknown;
7819         }
7820 }
7821
7822 static struct drm_encoder *amdgpu_dm_connector_to_encoder(struct drm_connector *connector)
7823 {
7824         struct drm_encoder *encoder;
7825
7826         /* There is only one encoder per connector */
7827         drm_connector_for_each_possible_encoder(connector, encoder)
7828                 return encoder;
7829
7830         return NULL;
7831 }
7832
7833 static void amdgpu_dm_get_native_mode(struct drm_connector *connector)
7834 {
7835         struct drm_encoder *encoder;
7836         struct amdgpu_encoder *amdgpu_encoder;
7837
7838         encoder = amdgpu_dm_connector_to_encoder(connector);
7839
7840         if (encoder == NULL)
7841                 return;
7842
7843         amdgpu_encoder = to_amdgpu_encoder(encoder);
7844
7845         amdgpu_encoder->native_mode.clock = 0;
7846
7847         if (!list_empty(&connector->probed_modes)) {
7848                 struct drm_display_mode *preferred_mode = NULL;
7849
7850                 list_for_each_entry(preferred_mode,
7851                                     &connector->probed_modes,
7852                                     head) {
7853                         if (preferred_mode->type & DRM_MODE_TYPE_PREFERRED)
7854                                 amdgpu_encoder->native_mode = *preferred_mode;
7855
7856                         break;
7857                 }
7858
7859         }
7860 }
7861
7862 static struct drm_display_mode *
7863 amdgpu_dm_create_common_mode(struct drm_encoder *encoder,
7864                              char *name,
7865                              int hdisplay, int vdisplay)
7866 {
7867         struct drm_device *dev = encoder->dev;
7868         struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
7869         struct drm_display_mode *mode = NULL;
7870         struct drm_display_mode *native_mode = &amdgpu_encoder->native_mode;
7871
7872         mode = drm_mode_duplicate(dev, native_mode);
7873
7874         if (mode == NULL)
7875                 return NULL;
7876
7877         mode->hdisplay = hdisplay;
7878         mode->vdisplay = vdisplay;
7879         mode->type &= ~DRM_MODE_TYPE_PREFERRED;
7880         strscpy(mode->name, name, DRM_DISPLAY_MODE_LEN);
7881
7882         return mode;
7883
7884 }
7885
7886 static void amdgpu_dm_connector_add_common_modes(struct drm_encoder *encoder,
7887                                                  struct drm_connector *connector)
7888 {
7889         struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
7890         struct drm_display_mode *mode = NULL;
7891         struct drm_display_mode *native_mode = &amdgpu_encoder->native_mode;
7892         struct amdgpu_dm_connector *amdgpu_dm_connector =
7893                                 to_amdgpu_dm_connector(connector);
7894         int i;
7895         int n;
7896         struct mode_size {
7897                 char name[DRM_DISPLAY_MODE_LEN];
7898                 int w;
7899                 int h;
7900         } common_modes[] = {
7901                 {  "640x480",  640,  480},
7902                 {  "800x600",  800,  600},
7903                 { "1024x768", 1024,  768},
7904                 { "1280x720", 1280,  720},
7905                 { "1280x800", 1280,  800},
7906                 {"1280x1024", 1280, 1024},
7907                 { "1440x900", 1440,  900},
7908                 {"1680x1050", 1680, 1050},
7909                 {"1600x1200", 1600, 1200},
7910                 {"1920x1080", 1920, 1080},
7911                 {"1920x1200", 1920, 1200}
7912         };
7913
7914         n = ARRAY_SIZE(common_modes);
7915
7916         for (i = 0; i < n; i++) {
7917                 struct drm_display_mode *curmode = NULL;
7918                 bool mode_existed = false;
7919
7920                 if (common_modes[i].w > native_mode->hdisplay ||
7921                     common_modes[i].h > native_mode->vdisplay ||
7922                    (common_modes[i].w == native_mode->hdisplay &&
7923                     common_modes[i].h == native_mode->vdisplay))
7924                         continue;
7925
7926                 list_for_each_entry(curmode, &connector->probed_modes, head) {
7927                         if (common_modes[i].w == curmode->hdisplay &&
7928                             common_modes[i].h == curmode->vdisplay) {
7929                                 mode_existed = true;
7930                                 break;
7931                         }
7932                 }
7933
7934                 if (mode_existed)
7935                         continue;
7936
7937                 mode = amdgpu_dm_create_common_mode(encoder,
7938                                 common_modes[i].name, common_modes[i].w,
7939                                 common_modes[i].h);
7940                 if (!mode)
7941                         continue;
7942
7943                 drm_mode_probed_add(connector, mode);
7944                 amdgpu_dm_connector->num_modes++;
7945         }
7946 }
7947
7948 static void amdgpu_set_panel_orientation(struct drm_connector *connector)
7949 {
7950         struct drm_encoder *encoder;
7951         struct amdgpu_encoder *amdgpu_encoder;
7952         const struct drm_display_mode *native_mode;
7953
7954         if (connector->connector_type != DRM_MODE_CONNECTOR_eDP &&
7955             connector->connector_type != DRM_MODE_CONNECTOR_LVDS)
7956                 return;
7957
7958         encoder = amdgpu_dm_connector_to_encoder(connector);
7959         if (!encoder)
7960                 return;
7961
7962         amdgpu_encoder = to_amdgpu_encoder(encoder);
7963
7964         native_mode = &amdgpu_encoder->native_mode;
7965         if (native_mode->hdisplay == 0 || native_mode->vdisplay == 0)
7966                 return;
7967
7968         drm_connector_set_panel_orientation_with_quirk(connector,
7969                                                        DRM_MODE_PANEL_ORIENTATION_UNKNOWN,
7970                                                        native_mode->hdisplay,
7971                                                        native_mode->vdisplay);
7972 }
7973
7974 static void amdgpu_dm_connector_ddc_get_modes(struct drm_connector *connector,
7975                                               struct edid *edid)
7976 {
7977         struct amdgpu_dm_connector *amdgpu_dm_connector =
7978                         to_amdgpu_dm_connector(connector);
7979
7980         if (edid) {
7981                 /* empty probed_modes */
7982                 INIT_LIST_HEAD(&connector->probed_modes);
7983                 amdgpu_dm_connector->num_modes =
7984                                 drm_add_edid_modes(connector, edid);
7985
7986                 /* Sort the probed modes before calling
7987                  * amdgpu_dm_get_native_mode(), since an EDID can have
7988                  * more than one preferred mode. Modes later in the
7989                  * probed mode list could be of higher, preferred
7990                  * resolution. For example, a 3840x2160 preferred timing
7991                  * in the base EDID and a 4096x2160 preferred resolution
7992                  * in a DID extension block later.
7993                  */
7994                 drm_mode_sort(&connector->probed_modes);
7995                 amdgpu_dm_get_native_mode(connector);
7996
7997                 /* Freesync capabilities are reset by calling
7998                  * drm_add_edid_modes() and need to be
7999                  * restored here.
8000                  */
8001                 amdgpu_dm_update_freesync_caps(connector, edid);
8002
8003                 amdgpu_set_panel_orientation(connector);
8004         } else {
8005                 amdgpu_dm_connector->num_modes = 0;
8006         }
8007 }
8008
8009 static bool is_duplicate_mode(struct amdgpu_dm_connector *aconnector,
8010                               struct drm_display_mode *mode)
8011 {
8012         struct drm_display_mode *m;
8013
8014         list_for_each_entry(m, &aconnector->base.probed_modes, head) {
8015                 if (drm_mode_equal(m, mode))
8016                         return true;
8017         }
8018
8019         return false;
8020 }
8021
8022 static uint add_fs_modes(struct amdgpu_dm_connector *aconnector)
8023 {
8024         const struct drm_display_mode *m;
8025         struct drm_display_mode *new_mode;
8026         uint i;
8027         uint32_t new_modes_count = 0;
8028
8029         /* Standard FPS values
8030          *
8031          * 23.976   - TV/NTSC
8032          * 24       - Cinema
8033          * 25       - TV/PAL
8034          * 29.97    - TV/NTSC
8035          * 30       - TV/NTSC
8036          * 48       - Cinema HFR
8037          * 50       - TV/PAL
8038          * 60       - Commonly used
8039          * 48,72,96 - Multiples of 24
8040          */
8041         static const uint32_t common_rates[] = {
8042                 23976, 24000, 25000, 29970, 30000,
8043                 48000, 50000, 60000, 72000, 96000
8044         };
8045
8046         /*
8047          * Find mode with highest refresh rate with the same resolution
8048          * as the preferred mode. Some monitors report a preferred mode
8049          * with a lower refresh rate than the highest rate supported.
8050          */
8051
8052         m = get_highest_refresh_rate_mode(aconnector, true);
8053         if (!m)
8054                 return 0;
8055
8056         for (i = 0; i < ARRAY_SIZE(common_rates); i++) {
8057                 uint64_t target_vtotal, target_vtotal_diff;
8058                 uint64_t num, den;
8059
8060                 if (drm_mode_vrefresh(m) * 1000 < common_rates[i])
8061                         continue;
8062
8063                 if (common_rates[i] < aconnector->min_vfreq * 1000 ||
8064                     common_rates[i] > aconnector->max_vfreq * 1000)
8065                         continue;
8066
8067                 num = (unsigned long long)m->clock * 1000 * 1000;
8068                 den = common_rates[i] * (unsigned long long)m->htotal;
8069                 target_vtotal = div_u64(num, den);
8070                 target_vtotal_diff = target_vtotal - m->vtotal;
8071
8072                 /* Check for illegal modes */
8073                 if (m->vsync_start + target_vtotal_diff < m->vdisplay ||
8074                     m->vsync_end + target_vtotal_diff < m->vsync_start ||
8075                     m->vtotal + target_vtotal_diff < m->vsync_end)
8076                         continue;
8077
8078                 new_mode = drm_mode_duplicate(aconnector->base.dev, m);
8079                 if (!new_mode)
8080                         goto out;
8081
8082                 new_mode->vtotal += (u16)target_vtotal_diff;
8083                 new_mode->vsync_start += (u16)target_vtotal_diff;
8084                 new_mode->vsync_end += (u16)target_vtotal_diff;
8085                 new_mode->type &= ~DRM_MODE_TYPE_PREFERRED;
8086                 new_mode->type |= DRM_MODE_TYPE_DRIVER;
8087
8088                 if (!is_duplicate_mode(aconnector, new_mode)) {
8089                         drm_mode_probed_add(&aconnector->base, new_mode);
8090                         new_modes_count += 1;
8091                 } else
8092                         drm_mode_destroy(aconnector->base.dev, new_mode);
8093         }
8094  out:
8095         return new_modes_count;
8096 }
8097
8098 static void amdgpu_dm_connector_add_freesync_modes(struct drm_connector *connector,
8099                                                    struct edid *edid)
8100 {
8101         struct amdgpu_dm_connector *amdgpu_dm_connector =
8102                 to_amdgpu_dm_connector(connector);
8103
8104         if (!(amdgpu_freesync_vid_mode && edid))
8105                 return;
8106
8107         if (amdgpu_dm_connector->max_vfreq - amdgpu_dm_connector->min_vfreq > 10)
8108                 amdgpu_dm_connector->num_modes +=
8109                         add_fs_modes(amdgpu_dm_connector);
8110 }
8111
8112 static int amdgpu_dm_connector_get_modes(struct drm_connector *connector)
8113 {
8114         struct amdgpu_dm_connector *amdgpu_dm_connector =
8115                         to_amdgpu_dm_connector(connector);
8116         struct drm_encoder *encoder;
8117         struct edid *edid = amdgpu_dm_connector->edid;
8118
8119         encoder = amdgpu_dm_connector_to_encoder(connector);
8120
8121         if (!drm_edid_is_valid(edid)) {
8122                 amdgpu_dm_connector->num_modes =
8123                                 drm_add_modes_noedid(connector, 640, 480);
8124         } else {
8125                 amdgpu_dm_connector_ddc_get_modes(connector, edid);
8126                 amdgpu_dm_connector_add_common_modes(encoder, connector);
8127                 amdgpu_dm_connector_add_freesync_modes(connector, edid);
8128         }
8129         amdgpu_dm_fbc_init(connector);
8130
8131         return amdgpu_dm_connector->num_modes;
8132 }
8133
8134 void amdgpu_dm_connector_init_helper(struct amdgpu_display_manager *dm,
8135                                      struct amdgpu_dm_connector *aconnector,
8136                                      int connector_type,
8137                                      struct dc_link *link,
8138                                      int link_index)
8139 {
8140         struct amdgpu_device *adev = drm_to_adev(dm->ddev);
8141
8142         /*
8143          * Some of the properties below require access to state, like bpc.
8144          * Allocate some default initial connector state with our reset helper.
8145          */
8146         if (aconnector->base.funcs->reset)
8147                 aconnector->base.funcs->reset(&aconnector->base);
8148
8149         aconnector->connector_id = link_index;
8150         aconnector->dc_link = link;
8151         aconnector->base.interlace_allowed = false;
8152         aconnector->base.doublescan_allowed = false;
8153         aconnector->base.stereo_allowed = false;
8154         aconnector->base.dpms = DRM_MODE_DPMS_OFF;
8155         aconnector->hpd.hpd = AMDGPU_HPD_NONE; /* not used */
8156         aconnector->audio_inst = -1;
8157         mutex_init(&aconnector->hpd_lock);
8158
8159         /*
8160          * Configure HPD hot-plug support: connector->polled defaults to 0,
8161          * which means HPD hot plug is not supported.
8162          */
8163         switch (connector_type) {
8164         case DRM_MODE_CONNECTOR_HDMIA:
8165                 aconnector->base.polled = DRM_CONNECTOR_POLL_HPD;
8166                 aconnector->base.ycbcr_420_allowed =
8167                         link->link_enc->features.hdmi_ycbcr420_supported ? true : false;
8168                 break;
8169         case DRM_MODE_CONNECTOR_DisplayPort:
8170                 aconnector->base.polled = DRM_CONNECTOR_POLL_HPD;
8171                 aconnector->base.ycbcr_420_allowed =
8172                         link->link_enc->features.dp_ycbcr420_supported ? true : false;
8173                 break;
8174         case DRM_MODE_CONNECTOR_DVID:
8175                 aconnector->base.polled = DRM_CONNECTOR_POLL_HPD;
8176                 break;
8177         default:
8178                 break;
8179         }
8180
8181         drm_object_attach_property(&aconnector->base.base,
8182                                 dm->ddev->mode_config.scaling_mode_property,
8183                                 DRM_MODE_SCALE_NONE);
8184
8185         drm_object_attach_property(&aconnector->base.base,
8186                                 adev->mode_info.underscan_property,
8187                                 UNDERSCAN_OFF);
8188         drm_object_attach_property(&aconnector->base.base,
8189                                 adev->mode_info.underscan_hborder_property,
8190                                 0);
8191         drm_object_attach_property(&aconnector->base.base,
8192                                 adev->mode_info.underscan_vborder_property,
8193                                 0);
8194
8195         if (!aconnector->mst_port)
8196                 drm_connector_attach_max_bpc_property(&aconnector->base, 8, 16);
8197
8198         /* This defaults to the max in the range, but we want 8bpc for non-edp. */
8199         aconnector->base.state->max_bpc = (connector_type == DRM_MODE_CONNECTOR_eDP) ? 16 : 8;
8200         aconnector->base.state->max_requested_bpc = aconnector->base.state->max_bpc;
8201
8202         if (connector_type == DRM_MODE_CONNECTOR_eDP &&
8203             (dc_is_dmcu_initialized(adev->dm.dc) || adev->dm.dc->ctx->dmub_srv)) {
8204                 drm_object_attach_property(&aconnector->base.base,
8205                                 adev->mode_info.abm_level_property, 0);
8206         }
8207
8208         if (connector_type == DRM_MODE_CONNECTOR_HDMIA ||
8209             connector_type == DRM_MODE_CONNECTOR_DisplayPort ||
8210             connector_type == DRM_MODE_CONNECTOR_eDP) {
8211                 drm_connector_attach_hdr_output_metadata_property(&aconnector->base);
8212
8213                 if (!aconnector->mst_port)
8214                         drm_connector_attach_vrr_capable_property(&aconnector->base);
8215
8216 #ifdef CONFIG_DRM_AMD_DC_HDCP
8217                 if (adev->dm.hdcp_workqueue)
8218                         drm_connector_attach_content_protection_property(&aconnector->base, true);
8219 #endif
8220         }
8221 }
8222
8223 static int amdgpu_dm_i2c_xfer(struct i2c_adapter *i2c_adap,
8224                               struct i2c_msg *msgs, int num)
8225 {
8226         struct amdgpu_i2c_adapter *i2c = i2c_get_adapdata(i2c_adap);
8227         struct ddc_service *ddc_service = i2c->ddc_service;
8228         struct i2c_command cmd;
8229         int i;
8230         int result = -EIO;
8231
8232         cmd.payloads = kcalloc(num, sizeof(struct i2c_payload), GFP_KERNEL);
8233
8234         if (!cmd.payloads)
8235                 return result;
8236
8237         cmd.number_of_payloads = num;
8238         cmd.engine = I2C_COMMAND_ENGINE_DEFAULT;
8239         cmd.speed = 100;
8240
8241         for (i = 0; i < num; i++) {
8242                 cmd.payloads[i].write = !(msgs[i].flags & I2C_M_RD);
8243                 cmd.payloads[i].address = msgs[i].addr;
8244                 cmd.payloads[i].length = msgs[i].len;
8245                 cmd.payloads[i].data = msgs[i].buf;
8246         }
8247
8248         if (dc_submit_i2c(
8249                         ddc_service->ctx->dc,
8250                         ddc_service->ddc_pin->hw_info.ddc_channel,
8251                         &cmd))
8252                 result = num;
8253
8254         kfree(cmd.payloads);
8255         return result;
8256 }
8257
8258 static u32 amdgpu_dm_i2c_func(struct i2c_adapter *adap)
8259 {
8260         return I2C_FUNC_I2C | I2C_FUNC_SMBUS_EMUL;
8261 }
8262
8263 static const struct i2c_algorithm amdgpu_dm_i2c_algo = {
8264         .master_xfer = amdgpu_dm_i2c_xfer,
8265         .functionality = amdgpu_dm_i2c_func,
8266 };
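/*
 * Orientation sketch (standard i2c-core flow, not code from this file): a
 * consumer such as drm_get_edid() builds i2c_msg transactions and passes
 * them to i2c_transfer(), which dispatches to .master_xfer above, i.e.
 * amdgpu_dm_i2c_xfer() converting each message into a DC i2c_payload:
 *
 *	struct i2c_msg msgs[] = {
 *		{ .addr = 0x50, .flags = 0,        .len = 1,   .buf = &offset },
 *		{ .addr = 0x50, .flags = I2C_M_RD, .len = 128, .buf = edid_buf },
 *	};
 *	i2c_transfer(&i2c->base, msgs, ARRAY_SIZE(msgs));
 */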
8267
8268 static struct amdgpu_i2c_adapter *
8269 create_i2c(struct ddc_service *ddc_service,
8270            int link_index,
8271            int *res)
8272 {
8273         struct amdgpu_device *adev = ddc_service->ctx->driver_context;
8274         struct amdgpu_i2c_adapter *i2c;
8275
8276         i2c = kzalloc(sizeof(struct amdgpu_i2c_adapter), GFP_KERNEL);
8277         if (!i2c)
8278                 return NULL;
8279         i2c->base.owner = THIS_MODULE;
8280         i2c->base.class = I2C_CLASS_DDC;
8281         i2c->base.dev.parent = &adev->pdev->dev;
8282         i2c->base.algo = &amdgpu_dm_i2c_algo;
8283         snprintf(i2c->base.name, sizeof(i2c->base.name), "AMDGPU DM i2c hw bus %d", link_index);
8284         i2c_set_adapdata(&i2c->base, i2c);
8285         i2c->ddc_service = ddc_service;
8286         i2c->ddc_service->ddc_pin->hw_info.ddc_channel = link_index;
8287
8288         return i2c;
8289 }
8290
8291
8292 /*
8293  * Note: this function assumes that dc_link_detect() was called for the
8294  * dc_link which will be represented by this aconnector.
8295  */
8296 static int amdgpu_dm_connector_init(struct amdgpu_display_manager *dm,
8297                                     struct amdgpu_dm_connector *aconnector,
8298                                     uint32_t link_index,
8299                                     struct amdgpu_encoder *aencoder)
8300 {
8301         int res = 0;
8302         int connector_type;
8303         struct dc *dc = dm->dc;
8304         struct dc_link *link = dc_get_link_at_index(dc, link_index);
8305         struct amdgpu_i2c_adapter *i2c;
8306
8307         link->priv = aconnector;
8308
8309         DRM_DEBUG_DRIVER("%s()\n", __func__);
8310
8311         i2c = create_i2c(link->ddc, link->link_index, &res);
8312         if (!i2c) {
8313                 DRM_ERROR("Failed to create i2c adapter data\n");
8314                 return -ENOMEM;
8315         }
8316
8317         aconnector->i2c = i2c;
8318         res = i2c_add_adapter(&i2c->base);
8319
8320         if (res) {
8321                 DRM_ERROR("Failed to register hw i2c %u\n", link->link_index);
8322                 goto out_free;
8323         }
8324
8325         connector_type = to_drm_connector_type(link->connector_signal);
8326
8327         res = drm_connector_init_with_ddc(
8328                         dm->ddev,
8329                         &aconnector->base,
8330                         &amdgpu_dm_connector_funcs,
8331                         connector_type,
8332                         &i2c->base);
8333
8334         if (res) {
8335                 DRM_ERROR("connector_init failed\n");
8336                 aconnector->connector_id = -1;
8337                 goto out_free;
8338         }
8339
8340         drm_connector_helper_add(
8341                         &aconnector->base,
8342                         &amdgpu_dm_connector_helper_funcs);
8343
8344         amdgpu_dm_connector_init_helper(
8345                 dm,
8346                 aconnector,
8347                 connector_type,
8348                 link,
8349                 link_index);
8350
8351         drm_connector_attach_encoder(
8352                 &aconnector->base, &aencoder->base);
8353
8354         if (connector_type == DRM_MODE_CONNECTOR_DisplayPort
8355                 || connector_type == DRM_MODE_CONNECTOR_eDP)
8356                 amdgpu_dm_initialize_dp_connector(dm, aconnector, link->link_index);
8357
8358 out_free:
8359         if (res) {
8360                 kfree(i2c);
8361                 aconnector->i2c = NULL;
8362         }
8363         return res;
8364 }
8365
8366 int amdgpu_dm_get_encoder_crtc_mask(struct amdgpu_device *adev)
8367 {
8368         switch (adev->mode_info.num_crtc) {
8369         case 1:
8370                 return 0x1;
8371         case 2:
8372                 return 0x3;
8373         case 3:
8374                 return 0x7;
8375         case 4:
8376                 return 0xf;
8377         case 5:
8378                 return 0x1f;
8379         case 6:
8380         default:
8381                 return 0x3f;
8382         }
8383 }
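/*
 * The switch above is equivalent to (1 << min(num_crtc, 6)) - 1: e.g.
 * num_crtc == 4 yields 0xf, meaning an encoder may be routed to any of
 * CRTCs 0-3.
 */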
8384
8385 static int amdgpu_dm_encoder_init(struct drm_device *dev,
8386                                   struct amdgpu_encoder *aencoder,
8387                                   uint32_t link_index)
8388 {
8389         struct amdgpu_device *adev = drm_to_adev(dev);
8390
8391         int res = drm_encoder_init(dev,
8392                                    &aencoder->base,
8393                                    &amdgpu_dm_encoder_funcs,
8394                                    DRM_MODE_ENCODER_TMDS,
8395                                    NULL);
8396
8397         aencoder->base.possible_crtcs = amdgpu_dm_get_encoder_crtc_mask(adev);
8398
8399         if (!res)
8400                 aencoder->encoder_id = link_index;
8401         else
8402                 aencoder->encoder_id = -1;
8403
8404         drm_encoder_helper_add(&aencoder->base, &amdgpu_dm_encoder_helper_funcs);
8405
8406         return res;
8407 }
8408
8409 static void manage_dm_interrupts(struct amdgpu_device *adev,
8410                                  struct amdgpu_crtc *acrtc,
8411                                  bool enable)
8412 {
8413         /*
8414          * We have no guarantee that the frontend index maps to the same
8415          * backend index - some even map to more than one.
8416          *
8417          * TODO: Use a different interrupt or check DC itself for the mapping.
8418          */
8419         int irq_type =
8420                 amdgpu_display_crtc_idx_to_irq_type(
8421                         adev,
8422                         acrtc->crtc_id);
8423
8424         if (enable) {
8425                 drm_crtc_vblank_on(&acrtc->base);
8426                 amdgpu_irq_get(
8427                         adev,
8428                         &adev->pageflip_irq,
8429                         irq_type);
8430 #if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
8431                 amdgpu_irq_get(
8432                         adev,
8433                         &adev->vline0_irq,
8434                         irq_type);
8435 #endif
8436         } else {
8437 #if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
8438                 amdgpu_irq_put(
8439                         adev,
8440                         &adev->vline0_irq,
8441                         irq_type);
8442 #endif
8443                 amdgpu_irq_put(
8444                         adev,
8445                         &adev->pageflip_irq,
8446                         irq_type);
8447                 drm_crtc_vblank_off(&acrtc->base);
8448         }
8449 }
8450
8451 static void dm_update_pflip_irq_state(struct amdgpu_device *adev,
8452                                       struct amdgpu_crtc *acrtc)
8453 {
8454         int irq_type =
8455                 amdgpu_display_crtc_idx_to_irq_type(adev, acrtc->crtc_id);
8456
8457         /*
8458          * This reads the current IRQ state and force-reapplies the
8459          * setting to hardware.
8460          */
8461         amdgpu_irq_update(adev, &adev->pageflip_irq, irq_type);
8462 }
8463
8464 static bool
8465 is_scaling_state_different(const struct dm_connector_state *dm_state,
8466                            const struct dm_connector_state *old_dm_state)
8467 {
8468         if (dm_state->scaling != old_dm_state->scaling)
8469                 return true;
8470         if (!dm_state->underscan_enable && old_dm_state->underscan_enable) {
8471                 if (old_dm_state->underscan_hborder != 0 && old_dm_state->underscan_vborder != 0)
8472                         return true;
8473         } else  if (dm_state->underscan_enable && !old_dm_state->underscan_enable) {
8474                 if (dm_state->underscan_hborder != 0 && dm_state->underscan_vborder != 0)
8475                         return true;
8476         } else if (dm_state->underscan_hborder != old_dm_state->underscan_hborder ||
8477                    dm_state->underscan_vborder != old_dm_state->underscan_vborder)
8478                 return true;
8479         return false;
8480 }
8481
8482 #ifdef CONFIG_DRM_AMD_DC_HDCP
8483 static bool is_content_protection_different(struct drm_connector_state *state,
8484                                             const struct drm_connector_state *old_state,
8485                                             const struct drm_connector *connector, struct hdcp_workqueue *hdcp_w)
8486 {
8487         struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
8488         struct dm_connector_state *dm_con_state = to_dm_connector_state(connector->state);
8489
8490         /* Handle: Type0/1 change */
8491         if (old_state->hdcp_content_type != state->hdcp_content_type &&
8492             state->content_protection != DRM_MODE_CONTENT_PROTECTION_UNDESIRED) {
8493                 state->content_protection = DRM_MODE_CONTENT_PROTECTION_DESIRED;
8494                 return true;
8495         }
8496
8497         /* CP is being re-enabled, ignore this
8498          *
8499          * Handles:     ENABLED -> DESIRED
8500          */
8501         if (old_state->content_protection == DRM_MODE_CONTENT_PROTECTION_ENABLED &&
8502             state->content_protection == DRM_MODE_CONTENT_PROTECTION_DESIRED) {
8503                 state->content_protection = DRM_MODE_CONTENT_PROTECTION_ENABLED;
8504                 return false;
8505         }
8506
8507         /* S3 resume case, since old state will always be 0 (UNDESIRED) and the restored state will be ENABLED
8508          *
8509          * Handles:     UNDESIRED -> ENABLED
8510          */
8511         if (old_state->content_protection == DRM_MODE_CONTENT_PROTECTION_UNDESIRED &&
8512             state->content_protection == DRM_MODE_CONTENT_PROTECTION_ENABLED)
8513                 state->content_protection = DRM_MODE_CONTENT_PROTECTION_DESIRED;
8514
8515         /* Stream removed and re-enabled
8516          *
8517          * Can sometimes overlap with the HPD case,
8518          * thus set update_hdcp to false to avoid
8519          * setting HDCP multiple times.
8520          *
8521          * Handles:     DESIRED -> DESIRED (Special case)
8522          */
8523         if (!(old_state->crtc && old_state->crtc->enabled) &&
8524                 state->crtc && state->crtc->enabled &&
8525                 connector->state->content_protection == DRM_MODE_CONTENT_PROTECTION_DESIRED) {
8526                 dm_con_state->update_hdcp = false;
8527                 return true;
8528         }
8529
8530         /* Hot-plug, headless s3, dpms
8531          *
8532          * Only start HDCP if the display is connected/enabled.
8533          * update_hdcp flag will be set to false until the next
8534          * HPD comes in.
8535          *
8536          * Handles:     DESIRED -> DESIRED (Special case)
8537          */
8538         if (dm_con_state->update_hdcp && state->content_protection == DRM_MODE_CONTENT_PROTECTION_DESIRED &&
8539             connector->dpms == DRM_MODE_DPMS_ON && aconnector->dc_sink != NULL) {
8540                 dm_con_state->update_hdcp = false;
8541                 return true;
8542         }
8543
8544         /*
8545          * Handles:     UNDESIRED -> UNDESIRED
8546          *              DESIRED -> DESIRED
8547          *              ENABLED -> ENABLED
8548          */
8549         if (old_state->content_protection == state->content_protection)
8550                 return false;
8551
8552         /*
8553          * Handles:     UNDESIRED -> DESIRED
8554          *              DESIRED -> UNDESIRED
8555          *              ENABLED -> UNDESIRED
8556          */
8557         if (state->content_protection != DRM_MODE_CONTENT_PROTECTION_ENABLED)
8558                 return true;
8559
8560         /*
8561          * Handles:     DESIRED -> ENABLED
8562          */
8563         return false;
8564 }
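/*
 * Condensed view of the transitions handled above (a summary of the
 * branches, not additional policy; "true" means the caller goes on to
 * reprogram HDCP via the hdcp workqueue):
 *
 *	UNDESIRED -> DESIRED		true
 *	DESIRED   -> UNDESIRED		true
 *	ENABLED   -> UNDESIRED		true
 *	ENABLED   -> DESIRED		false (treated as still ENABLED)
 *	DESIRED   -> ENABLED		false (hardware finishes the enable)
 *	no change			false, unless one of the special
 *					DESIRED -> DESIRED hotplug/stream
 *					re-enable cases above fires
 */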
8565
8566 #endif
8567 static void remove_stream(struct amdgpu_device *adev,
8568                           struct amdgpu_crtc *acrtc,
8569                           struct dc_stream_state *stream)
8570 {
8571         /* this is the update mode case */
8572
8573         acrtc->otg_inst = -1;
8574         acrtc->enabled = false;
8575 }
8576
8577 static int get_cursor_position(struct drm_plane *plane, struct drm_crtc *crtc,
8578                                struct dc_cursor_position *position)
8579 {
8580         struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
8581         int x, y;
8582         int xorigin = 0, yorigin = 0;
8583
8584         if (!crtc || !plane->state->fb)
8585                 return 0;
8586
8587         if ((plane->state->crtc_w > amdgpu_crtc->max_cursor_width) ||
8588             (plane->state->crtc_h > amdgpu_crtc->max_cursor_height)) {
8589                 DRM_ERROR("%s: bad cursor width or height %u x %u\n",
8590                           __func__,
8591                           plane->state->crtc_w,
8592                           plane->state->crtc_h);
8593                 return -EINVAL;
8594         }
8595
8596         x = plane->state->crtc_x;
8597         y = plane->state->crtc_y;
8598
8599         if (x <= -amdgpu_crtc->max_cursor_width ||
8600             y <= -amdgpu_crtc->max_cursor_height)
8601                 return 0;
8602
8603         if (x < 0) {
8604                 xorigin = min(-x, amdgpu_crtc->max_cursor_width - 1);
8605                 x = 0;
8606         }
8607         if (y < 0) {
8608                 yorigin = min(-y, amdgpu_crtc->max_cursor_height - 1);
8609                 y = 0;
8610         }
8611         position->enable = true;
8612         position->translate_by_source = true;
8613         position->x = x;
8614         position->y = y;
8615         position->x_hotspot = xorigin;
8616         position->y_hotspot = yorigin;
8617
8618         return 0;
8619 }
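/*
 * Worked example for the clamping above (hypothetical values): a 64x64
 * cursor at crtc_x = -10 yields x = 0 and xorigin = 10, i.e. the cursor
 * plane is pinned to the screen edge and the hotspot is shifted instead,
 * so the result looks like a cursor hanging 10 pixels off the left edge.
 */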
8620
8621 static void handle_cursor_update(struct drm_plane *plane,
8622                                  struct drm_plane_state *old_plane_state)
8623 {
8624         struct amdgpu_device *adev = drm_to_adev(plane->dev);
8625         struct amdgpu_framebuffer *afb = to_amdgpu_framebuffer(plane->state->fb);
8626         struct drm_crtc *crtc = afb ? plane->state->crtc : old_plane_state->crtc;
8627         struct dm_crtc_state *crtc_state = crtc ? to_dm_crtc_state(crtc->state) : NULL;
8628         struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
8629         uint64_t address = afb ? afb->address : 0;
8630         struct dc_cursor_position position = {0};
8631         struct dc_cursor_attributes attributes;
8632         int ret;
8633
8634         if (!plane->state->fb && !old_plane_state->fb)
8635                 return;
8636
8637         DC_LOG_CURSOR("%s: crtc_id=%d with size %ux%u\n",
8638                       __func__,
8639                       amdgpu_crtc->crtc_id,
8640                       plane->state->crtc_w,
8641                       plane->state->crtc_h);
8642
8643         ret = get_cursor_position(plane, crtc, &position);
8644         if (ret)
8645                 return;
8646
8647         if (!position.enable) {
8648                 /* turn off cursor */
8649                 if (crtc_state && crtc_state->stream) {
8650                         mutex_lock(&adev->dm.dc_lock);
8651                         dc_stream_set_cursor_position(crtc_state->stream,
8652                                                       &position);
8653                         mutex_unlock(&adev->dm.dc_lock);
8654                 }
8655                 return;
8656         }
8657
8658         amdgpu_crtc->cursor_width = plane->state->crtc_w;
8659         amdgpu_crtc->cursor_height = plane->state->crtc_h;
8660
8661         memset(&attributes, 0, sizeof(attributes));
8662         attributes.address.high_part = upper_32_bits(address);
8663         attributes.address.low_part  = lower_32_bits(address);
8664         attributes.width             = plane->state->crtc_w;
8665         attributes.height            = plane->state->crtc_h;
8666         attributes.color_format      = CURSOR_MODE_COLOR_PRE_MULTIPLIED_ALPHA;
8667         attributes.rotation_angle    = 0;
8668         attributes.attribute_flags.value = 0;
8669
8670         attributes.pitch = afb->base.pitches[0] / afb->base.format->cpp[0];
8671
8672         if (crtc_state->stream) {
8673                 mutex_lock(&adev->dm.dc_lock);
8674                 if (!dc_stream_set_cursor_attributes(crtc_state->stream,
8675                                                          &attributes))
8676                         DRM_ERROR("DC failed to set cursor attributes\n");
8677
8678                 if (!dc_stream_set_cursor_position(crtc_state->stream,
8679                                                    &position))
8680                         DRM_ERROR("DC failed to set cursor position\n");
8681                 mutex_unlock(&adev->dm.dc_lock);
8682         }
8683 }
8684
8685 static void prepare_flip_isr(struct amdgpu_crtc *acrtc)
8686 {
8687
8688         assert_spin_locked(&acrtc->base.dev->event_lock);
8689         WARN_ON(acrtc->event);
8690
8691         acrtc->event = acrtc->base.state->event;
8692
8693         /* Set the flip status */
8694         acrtc->pflip_status = AMDGPU_FLIP_SUBMITTED;
8695
8696         /* Mark this event as consumed */
8697         acrtc->base.state->event = NULL;
8698
8699         DC_LOG_PFLIP("crtc:%d, pflip_stat:AMDGPU_FLIP_SUBMITTED\n",
8700                      acrtc->crtc_id);
8701 }
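/*
 * The event stashed above is completed later from the page-flip interrupt
 * path (see dm_pflip_high_irq() earlier in this file), which is why it is
 * consumed from the CRTC state here while holding event_lock.
 */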
8702
8703 static void update_freesync_state_on_stream(
8704         struct amdgpu_display_manager *dm,
8705         struct dm_crtc_state *new_crtc_state,
8706         struct dc_stream_state *new_stream,
8707         struct dc_plane_state *surface,
8708         u32 flip_timestamp_in_us)
8709 {
8710         struct mod_vrr_params vrr_params;
8711         struct dc_info_packet vrr_infopacket = {0};
8712         struct amdgpu_device *adev = dm->adev;
8713         struct amdgpu_crtc *acrtc = to_amdgpu_crtc(new_crtc_state->base.crtc);
8714         unsigned long flags;
8715         bool pack_sdp_v1_3 = false;
8716
8717         if (!new_stream)
8718                 return;
8719
8720         /*
8721          * TODO: Determine why min/max totals and vrefresh can be 0 here.
8722          * For now it's sufficient to just guard against these conditions.
8723          */
8724
8725         if (!new_stream->timing.h_total || !new_stream->timing.v_total)
8726                 return;
8727
8728         spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);
8729         vrr_params = acrtc->dm_irq_params.vrr_params;
8730
8731         if (surface) {
8732                 mod_freesync_handle_preflip(
8733                         dm->freesync_module,
8734                         surface,
8735                         new_stream,
8736                         flip_timestamp_in_us,
8737                         &vrr_params);
8738
8739                 if (adev->family < AMDGPU_FAMILY_AI &&
8740                     amdgpu_dm_vrr_active(new_crtc_state)) {
8741                         mod_freesync_handle_v_update(dm->freesync_module,
8742                                                      new_stream, &vrr_params);
8743
8744                         /* Need to call this before the frame ends. */
8745                         dc_stream_adjust_vmin_vmax(dm->dc,
8746                                                    new_crtc_state->stream,
8747                                                    &vrr_params.adjust);
8748                 }
8749         }
8750
8751         mod_freesync_build_vrr_infopacket(
8752                 dm->freesync_module,
8753                 new_stream,
8754                 &vrr_params,
8755                 PACKET_TYPE_VRR,
8756                 TRANSFER_FUNC_UNKNOWN,
8757                 &vrr_infopacket,
8758                 pack_sdp_v1_3);
8759
8760         new_crtc_state->freesync_timing_changed |=
8761                 (memcmp(&acrtc->dm_irq_params.vrr_params.adjust,
8762                         &vrr_params.adjust,
8763                         sizeof(vrr_params.adjust)) != 0);
8764
8765         new_crtc_state->freesync_vrr_info_changed |=
8766                 (memcmp(&new_crtc_state->vrr_infopacket,
8767                         &vrr_infopacket,
8768                         sizeof(vrr_infopacket)) != 0);
8769
8770         acrtc->dm_irq_params.vrr_params = vrr_params;
8771         new_crtc_state->vrr_infopacket = vrr_infopacket;
8772
8773         new_stream->adjust = acrtc->dm_irq_params.vrr_params.adjust;
8774         new_stream->vrr_infopacket = vrr_infopacket;
8775
8776         if (new_crtc_state->freesync_vrr_info_changed)
8777                 DRM_DEBUG_KMS("VRR packet update: crtc=%u enabled=%d state=%d\n",
8778                               new_crtc_state->base.crtc->base.id,
8779                               (int)new_crtc_state->base.vrr_enabled,
8780                               (int)vrr_params.state);
8781
8782         spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
8783 }
8784
8785 static void update_stream_irq_parameters(
8786         struct amdgpu_display_manager *dm,
8787         struct dm_crtc_state *new_crtc_state)
8788 {
8789         struct dc_stream_state *new_stream = new_crtc_state->stream;
8790         struct mod_vrr_params vrr_params;
8791         struct mod_freesync_config config = new_crtc_state->freesync_config;
8792         struct amdgpu_device *adev = dm->adev;
8793         struct amdgpu_crtc *acrtc = to_amdgpu_crtc(new_crtc_state->base.crtc);
8794         unsigned long flags;
8795
8796         if (!new_stream)
8797                 return;
8798
8799         /*
8800          * TODO: Determine why min/max totals and vrefresh can be 0 here.
8801          * For now it's sufficient to just guard against these conditions.
8802          */
8803         if (!new_stream->timing.h_total || !new_stream->timing.v_total)
8804                 return;
8805
8806         spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);
8807         vrr_params = acrtc->dm_irq_params.vrr_params;
8808
8809         if (new_crtc_state->vrr_supported &&
8810             config.min_refresh_in_uhz &&
8811             config.max_refresh_in_uhz) {
8812                 /*
8813                  * if freesync compatible mode was set, config.state will be set
8814                  * in atomic check
8815                  */
8816                 if (config.state == VRR_STATE_ACTIVE_FIXED && config.fixed_refresh_in_uhz &&
8817                     (!drm_atomic_crtc_needs_modeset(&new_crtc_state->base) ||
8818                      new_crtc_state->freesync_config.state == VRR_STATE_ACTIVE_FIXED)) {
8819                         vrr_params.max_refresh_in_uhz = config.max_refresh_in_uhz;
8820                         vrr_params.min_refresh_in_uhz = config.min_refresh_in_uhz;
8821                         vrr_params.fixed_refresh_in_uhz = config.fixed_refresh_in_uhz;
8822                         vrr_params.state = VRR_STATE_ACTIVE_FIXED;
8823                 } else {
8824                         config.state = new_crtc_state->base.vrr_enabled ?
8825                                                      VRR_STATE_ACTIVE_VARIABLE :
8826                                                      VRR_STATE_INACTIVE;
8827                 }
8828         } else {
8829                 config.state = VRR_STATE_UNSUPPORTED;
8830         }
8831
8832         mod_freesync_build_vrr_params(dm->freesync_module,
8833                                       new_stream,
8834                                       &config, &vrr_params);
8835
8836         new_crtc_state->freesync_timing_changed |=
8837                 (memcmp(&acrtc->dm_irq_params.vrr_params.adjust,
8838                         &vrr_params.adjust, sizeof(vrr_params.adjust)) != 0);
8839
8840         new_crtc_state->freesync_config = config;
8841         /* Copy state for access from DM IRQ handler */
8842         acrtc->dm_irq_params.freesync_config = config;
8843         acrtc->dm_irq_params.active_planes = new_crtc_state->active_planes;
8844         acrtc->dm_irq_params.vrr_params = vrr_params;
8845         spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
8846 }
8847
8848 static void amdgpu_dm_handle_vrr_transition(struct dm_crtc_state *old_state,
8849                                             struct dm_crtc_state *new_state)
8850 {
8851         bool old_vrr_active = amdgpu_dm_vrr_active(old_state);
8852         bool new_vrr_active = amdgpu_dm_vrr_active(new_state);
8853
8854         if (!old_vrr_active && new_vrr_active) {
8855                 /* Transition VRR inactive -> active:
8856                  * While VRR is active, we must not disable the vblank irq, as
8857                  * a re-enable after disable would compute bogus vblank/pflip
8858                  * timestamps if it happened inside the display front porch.
8859                  *
8860                  * We also need vupdate irq for the actual core vblank handling
8861                  * at end of vblank.
8862                  */
8863                 dm_set_vupdate_irq(new_state->base.crtc, true);
8864                 drm_crtc_vblank_get(new_state->base.crtc);
8865                 DRM_DEBUG_DRIVER("%s: crtc=%u VRR off->on: Get vblank ref\n",
8866                                  __func__, new_state->base.crtc->base.id);
8867         } else if (old_vrr_active && !new_vrr_active) {
8868                 /* Transition VRR active -> inactive:
8869                  * Allow vblank irq disable again for fixed refresh rate.
8870                  */
8871                 dm_set_vupdate_irq(new_state->base.crtc, false);
8872                 drm_crtc_vblank_put(new_state->base.crtc);
8873                 DRM_DEBUG_DRIVER("%s: crtc=%u VRR on->off: Drop vblank ref\n",
8874                                  __func__, new_state->base.crtc->base.id);
8875         }
8876 }
8877
8878 static void amdgpu_dm_commit_cursors(struct drm_atomic_state *state)
8879 {
8880         struct drm_plane *plane;
8881         struct drm_plane_state *old_plane_state;
8882         int i;
8883
8884         /*
8885          * TODO: Make this per-stream so we don't issue redundant updates for
8886          * commits with multiple streams.
8887          */
8888         for_each_old_plane_in_state(state, plane, old_plane_state, i)
8889                 if (plane->type == DRM_PLANE_TYPE_CURSOR)
8890                         handle_cursor_update(plane, old_plane_state);
8891 }
8892
8893 static void amdgpu_dm_commit_planes(struct drm_atomic_state *state,
8894                                     struct dc_state *dc_state,
8895                                     struct drm_device *dev,
8896                                     struct amdgpu_display_manager *dm,
8897                                     struct drm_crtc *pcrtc,
8898                                     bool wait_for_vblank)
8899 {
8900         uint32_t i;
8901         uint64_t timestamp_ns;
8902         struct drm_plane *plane;
8903         struct drm_plane_state *old_plane_state, *new_plane_state;
8904         struct amdgpu_crtc *acrtc_attach = to_amdgpu_crtc(pcrtc);
8905         struct drm_crtc_state *new_pcrtc_state =
8906                         drm_atomic_get_new_crtc_state(state, pcrtc);
8907         struct dm_crtc_state *acrtc_state = to_dm_crtc_state(new_pcrtc_state);
8908         struct dm_crtc_state *dm_old_crtc_state =
8909                         to_dm_crtc_state(drm_atomic_get_old_crtc_state(state, pcrtc));
8910         int planes_count = 0, vpos, hpos;
8911         long r;
8912         unsigned long flags;
8913         struct amdgpu_bo *abo;
8914         uint32_t target_vblank, last_flip_vblank;
8915         bool vrr_active = amdgpu_dm_vrr_active(acrtc_state);
8916         bool pflip_present = false;
8917         struct {
8918                 struct dc_surface_update surface_updates[MAX_SURFACES];
8919                 struct dc_plane_info plane_infos[MAX_SURFACES];
8920                 struct dc_scaling_info scaling_infos[MAX_SURFACES];
8921                 struct dc_flip_addrs flip_addrs[MAX_SURFACES];
8922                 struct dc_stream_update stream_update;
8923         } *bundle;
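        /*
         * Note: the bundle aggregates several MAX_SURFACES-sized arrays of
         * update structures, which is far too large for a stack local (the
         * kernel warns on big frames), hence the kzalloc() below.
         */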
8924
8925         bundle = kzalloc(sizeof(*bundle), GFP_KERNEL);
8926
8927         if (!bundle) {
8928                 dm_error("Failed to allocate update bundle\n");
8929                 goto cleanup;
8930         }
8931
8932         /*
8933          * Disable the cursor first if we're disabling all the planes.
8934          * It'll remain on the screen after the planes are re-enabled
8935          * if we don't.
8936          */
8937         if (acrtc_state->active_planes == 0)
8938                 amdgpu_dm_commit_cursors(state);
8939
8940         /* update planes when needed */
8941         for_each_oldnew_plane_in_state(state, plane, old_plane_state, new_plane_state, i) {
8942                 struct drm_crtc *crtc = new_plane_state->crtc;
8943                 struct drm_crtc_state *new_crtc_state;
8944                 struct drm_framebuffer *fb = new_plane_state->fb;
8945                 struct amdgpu_framebuffer *afb = (struct amdgpu_framebuffer *)fb;
8946                 bool plane_needs_flip;
8947                 struct dc_plane_state *dc_plane;
8948                 struct dm_plane_state *dm_new_plane_state = to_dm_plane_state(new_plane_state);
8949
8950                 /* Cursor plane is handled after stream updates */
8951                 if (plane->type == DRM_PLANE_TYPE_CURSOR)
8952                         continue;
8953
8954                 if (!fb || !crtc || pcrtc != crtc)
8955                         continue;
8956
8957                 new_crtc_state = drm_atomic_get_new_crtc_state(state, crtc);
8958                 if (!new_crtc_state->active)
8959                         continue;
8960
8961                 dc_plane = dm_new_plane_state->dc_state;
8962
8963                 bundle->surface_updates[planes_count].surface = dc_plane;
8964                 if (new_pcrtc_state->color_mgmt_changed) {
8965                         bundle->surface_updates[planes_count].gamma = dc_plane->gamma_correction;
8966                         bundle->surface_updates[planes_count].in_transfer_func = dc_plane->in_transfer_func;
8967                         bundle->surface_updates[planes_count].gamut_remap_matrix = &dc_plane->gamut_remap_matrix;
8968                 }
8969
8970                 fill_dc_scaling_info(new_plane_state,
8971                                      &bundle->scaling_infos[planes_count]);
8972
8973                 bundle->surface_updates[planes_count].scaling_info =
8974                         &bundle->scaling_infos[planes_count];
8975
8976                 plane_needs_flip = old_plane_state->fb && new_plane_state->fb;
8977
8978                 pflip_present = pflip_present || plane_needs_flip;
8979
8980                 if (!plane_needs_flip) {
8981                         planes_count += 1;
8982                         continue;
8983                 }
8984
8985                 abo = gem_to_amdgpu_bo(fb->obj[0]);
8986
8987                 /*
8988                  * Wait for all fences on this FB. Do a limited wait to avoid
8989                  * deadlocking during GPU reset, when this fence will not
8990                  * signal but we still hold the reservation lock for the BO.
8991                  */
8992                 r = dma_resv_wait_timeout(abo->tbo.base.resv, true, false,
8993                                           msecs_to_jiffies(5000));
8994                 if (unlikely(r <= 0))
8995                         DRM_ERROR("Waiting for fences timed out!\n");
8996
8997                 fill_dc_plane_info_and_addr(
8998                         dm->adev, new_plane_state,
8999                         afb->tiling_flags,
9000                         &bundle->plane_infos[planes_count],
9001                         &bundle->flip_addrs[planes_count].address,
9002                         afb->tmz_surface, false);
9003
9004                 DRM_DEBUG_ATOMIC("plane: id=%d dcc_en=%d\n",
9005                                  new_plane_state->plane->index,
9006                                  bundle->plane_infos[planes_count].dcc.enable);
9007
9008                 bundle->surface_updates[planes_count].plane_info =
9009                         &bundle->plane_infos[planes_count];
9010
9011                 /*
9012                  * Only allow immediate flips for fast updates that don't
9013                  * change FB pitch, DCC state, rotation or mirroring.
9014                  */
9015                 bundle->flip_addrs[planes_count].flip_immediate =
9016                         crtc->state->async_flip &&
9017                         acrtc_state->update_type == UPDATE_TYPE_FAST;
9018
9019                 timestamp_ns = ktime_get_ns();
9020                 bundle->flip_addrs[planes_count].flip_timestamp_in_us = div_u64(timestamp_ns, 1000);
9021                 bundle->surface_updates[planes_count].flip_addr = &bundle->flip_addrs[planes_count];
9022                 bundle->surface_updates[planes_count].surface = dc_plane;
9023
9024                 if (!bundle->surface_updates[planes_count].surface) {
9025                         DRM_ERROR("No surface for CRTC: id=%d\n",
9026                                         acrtc_attach->crtc_id);
9027                         continue;
9028                 }
9029
9030                 if (plane == pcrtc->primary)
9031                         update_freesync_state_on_stream(
9032                                 dm,
9033                                 acrtc_state,
9034                                 acrtc_state->stream,
9035                                 dc_plane,
9036                                 bundle->flip_addrs[planes_count].flip_timestamp_in_us);
9037
9038                 DRM_DEBUG_ATOMIC("%s Flipping to hi: 0x%x, low: 0x%x\n",
9039                                  __func__,
9040                                  bundle->flip_addrs[planes_count].address.grph.addr.high_part,
9041                                  bundle->flip_addrs[planes_count].address.grph.addr.low_part);
9042
9043                 planes_count += 1;
9044
9045         }
9046
9047         if (pflip_present) {
9048                 if (!vrr_active) {
9049                         /* Use old throttling in non-vrr fixed refresh rate mode
9050                          * to keep flip scheduling based on target vblank counts
9051                          * working in a backwards compatible way, e.g., for
9052                          * clients using the GLX_OML_sync_control extension or
9053                          * DRI3/Present extension with defined target_msc.
9054                          */
9055                         last_flip_vblank = amdgpu_get_vblank_counter_kms(pcrtc);
9056                 }
9057                 else {
9058                         /* For variable refresh rate mode only:
9059                          * Get vblank of last completed flip to avoid > 1 vrr
9060                          * flips per video frame by use of throttling, but allow
9061                          * flip programming anywhere in the possibly large
9062                          * variable vrr vblank interval for fine-grained flip
9063                          * timing control and more opportunity to avoid stutter
9064                          * on late submission of flips.
9065                          */
9066                         spin_lock_irqsave(&pcrtc->dev->event_lock, flags);
9067                         last_flip_vblank = acrtc_attach->dm_irq_params.last_flip_vblank;
9068                         spin_unlock_irqrestore(&pcrtc->dev->event_lock, flags);
9069                 }
9070
9071                 target_vblank = last_flip_vblank + wait_for_vblank;
9072
9073                 /*
9074                  * Wait until we're out of the vertical blank period before the one
9075                  * targeted by the flip
9076                  */
9077                 while ((acrtc_attach->enabled &&
9078                         (amdgpu_display_get_crtc_scanoutpos(dm->ddev, acrtc_attach->crtc_id,
9079                                                             0, &vpos, &hpos, NULL,
9080                                                             NULL, &pcrtc->hwmode)
9081                          & (DRM_SCANOUTPOS_VALID | DRM_SCANOUTPOS_IN_VBLANK)) ==
9082                         (DRM_SCANOUTPOS_VALID | DRM_SCANOUTPOS_IN_VBLANK) &&
9083                         (int)(target_vblank -
9084                           amdgpu_get_vblank_counter_kms(pcrtc)) > 0)) {
9085                         usleep_range(1000, 1100);
9086                 }
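                /*
                 * Illustrative numbers for the throttle above: with
                 * wait_for_vblank == true and last_flip_vblank == 1000, the
                 * loop spins (in ~1 ms steps) until the hardware vblank
                 * counter reaches 1001 or scanout leaves the vblank region,
                 * so at most one flip lands per refresh cycle in fixed
                 * refresh mode.
                 */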
9087
9088                 /**
9089                  * Prepare the flip event for the pageflip interrupt to handle.
9090                  *
9091                  * This only works in the case where we've already turned on the
9092                  * appropriate hardware blocks (e.g. HUBP) so in the transition case
9093                  * from 0 -> n planes we have to skip a hardware generated event
9094                  * and rely on sending it from software.
9095                  */
9096                 if (acrtc_attach->base.state->event &&
9097                     acrtc_state->active_planes > 0) {
9098                         drm_crtc_vblank_get(pcrtc);
9099
9100                         spin_lock_irqsave(&pcrtc->dev->event_lock, flags);
9101
9102                         WARN_ON(acrtc_attach->pflip_status != AMDGPU_FLIP_NONE);
9103                         prepare_flip_isr(acrtc_attach);
9104
9105                         spin_unlock_irqrestore(&pcrtc->dev->event_lock, flags);
9106                 }
9107
9108                 if (acrtc_state->stream) {
9109                         if (acrtc_state->freesync_vrr_info_changed)
9110                                 bundle->stream_update.vrr_infopacket =
9111                                         &acrtc_state->stream->vrr_infopacket;
9112                 }
9113         }
9114
9115         /* Update the planes if changed or disable if we don't have any. */
9116         if ((planes_count || acrtc_state->active_planes == 0) &&
9117                 acrtc_state->stream) {
9118 #if defined(CONFIG_DRM_AMD_DC_DCN)
9119                 /*
9120                  * If PSR or idle optimizations are enabled then flush out
9121                  * any pending work before hardware programming.
9122                  */
9123                 if (dm->vblank_control_workqueue)
9124                         flush_workqueue(dm->vblank_control_workqueue);
9125 #endif
9126
9127                 bundle->stream_update.stream = acrtc_state->stream;
9128                 if (new_pcrtc_state->mode_changed) {
9129                         bundle->stream_update.src = acrtc_state->stream->src;
9130                         bundle->stream_update.dst = acrtc_state->stream->dst;
9131                 }
9132
9133                 if (new_pcrtc_state->color_mgmt_changed) {
9134                         /*
9135                          * TODO: This isn't fully correct since we've actually
9136                          * already modified the stream in place.
9137                          */
9138                         bundle->stream_update.gamut_remap =
9139                                 &acrtc_state->stream->gamut_remap_matrix;
9140                         bundle->stream_update.output_csc_transform =
9141                                 &acrtc_state->stream->csc_color_matrix;
9142                         bundle->stream_update.out_transfer_func =
9143                                 acrtc_state->stream->out_transfer_func;
9144                 }
9145
9146                 acrtc_state->stream->abm_level = acrtc_state->abm_level;
9147                 if (acrtc_state->abm_level != dm_old_crtc_state->abm_level)
9148                         bundle->stream_update.abm_level = &acrtc_state->abm_level;
9149
9150                 /*
9151                  * If FreeSync state on the stream has changed then we need to
9152                  * re-adjust the min/max bounds now that DC doesn't handle this
9153                  * as part of commit.
9154                  */
9155                 if (is_dc_timing_adjust_needed(dm_old_crtc_state, acrtc_state)) {
9156                         spin_lock_irqsave(&pcrtc->dev->event_lock, flags);
9157                         dc_stream_adjust_vmin_vmax(
9158                                 dm->dc, acrtc_state->stream,
9159                                 &acrtc_attach->dm_irq_params.vrr_params.adjust);
9160                         spin_unlock_irqrestore(&pcrtc->dev->event_lock, flags);
9161                 }
9162                 mutex_lock(&dm->dc_lock);
9163                 if ((acrtc_state->update_type > UPDATE_TYPE_FAST) &&
9164                                 acrtc_state->stream->link->psr_settings.psr_allow_active)
9165                         amdgpu_dm_psr_disable(acrtc_state->stream);
9166
9167                 dc_commit_updates_for_stream(dm->dc,
9168                                                      bundle->surface_updates,
9169                                                      planes_count,
9170                                                      acrtc_state->stream,
9171                                                      &bundle->stream_update,
9172                                                      dc_state);
9173
9174                 /**
9175                  * Enable or disable the interrupts on the backend.
9176                  *
9177                  * Most pipes are put into power gating when unused.
9178                  *
9179                  * When a pipe is power gated, its interrupt enablement
9180                  * state is lost by the time the pipe is ungated again.
9181                  *
9182                  * So we need to update the IRQ control state in hardware
9183                  * whenever the pipe turns on (since it could be previously
9184                  * power gated) or off (since some pipes can't be power gated
9185                  * on some ASICs).
9186                  */
9187                 if (dm_old_crtc_state->active_planes != acrtc_state->active_planes)
9188                         dm_update_pflip_irq_state(drm_to_adev(dev),
9189                                                   acrtc_attach);
9190
9191                 if ((acrtc_state->update_type > UPDATE_TYPE_FAST) &&
9192                                 acrtc_state->stream->link->psr_settings.psr_version != DC_PSR_VERSION_UNSUPPORTED &&
9193                                 !acrtc_state->stream->link->psr_settings.psr_feature_enabled)
9194                         amdgpu_dm_link_setup_psr(acrtc_state->stream);
9195
9196                 /* Decrement skip count when PSR is enabled and we're doing fast updates. */
9197                 if (acrtc_state->update_type == UPDATE_TYPE_FAST &&
9198                     acrtc_state->stream->link->psr_settings.psr_feature_enabled) {
9199                         struct amdgpu_dm_connector *aconn =
9200                                 (struct amdgpu_dm_connector *)acrtc_state->stream->dm_stream_context;
9201
9202                         if (aconn->psr_skip_count > 0)
9203                                 aconn->psr_skip_count--;
9204
9205                         /* Allow PSR when skip count is 0. */
9206                         acrtc_attach->dm_irq_params.allow_psr_entry = !aconn->psr_skip_count;
9207                 } else {
9208                         acrtc_attach->dm_irq_params.allow_psr_entry = false;
9209                 }
9210
9211                 mutex_unlock(&dm->dc_lock);
9212         }
9213
9214         /*
9215          * Update cursor state *after* programming all the planes.
9216          * This avoids redundant programming in the case where we're going
9217          * to be disabling a single plane - those pipes are being disabled.
9218          */
9219         if (acrtc_state->active_planes)
9220                 amdgpu_dm_commit_cursors(state);
9221
9222 cleanup:
9223         kfree(bundle);
9224 }
9225
9226 static void amdgpu_dm_commit_audio(struct drm_device *dev,
9227                                    struct drm_atomic_state *state)
9228 {
9229         struct amdgpu_device *adev = drm_to_adev(dev);
9230         struct amdgpu_dm_connector *aconnector;
9231         struct drm_connector *connector;
9232         struct drm_connector_state *old_con_state, *new_con_state;
9233         struct drm_crtc_state *new_crtc_state;
9234         struct dm_crtc_state *new_dm_crtc_state;
9235         const struct dc_stream_status *status;
9236         int i, inst;
9237
9238         /* Notify device removals. */
9239         for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) {
9240                 if (old_con_state->crtc != new_con_state->crtc) {
9241                         /* CRTC changes require notification. */
9242                         goto notify;
9243                 }
9244
9245                 if (!new_con_state->crtc)
9246                         continue;
9247
9248                 new_crtc_state = drm_atomic_get_new_crtc_state(
9249                         state, new_con_state->crtc);
9250
9251                 if (!new_crtc_state)
9252                         continue;
9253
9254                 if (!drm_atomic_crtc_needs_modeset(new_crtc_state))
9255                         continue;
9256
9257         notify:
9258                 aconnector = to_amdgpu_dm_connector(connector);
9259
9260                 mutex_lock(&adev->dm.audio_lock);
9261                 inst = aconnector->audio_inst;
9262                 aconnector->audio_inst = -1;
9263                 mutex_unlock(&adev->dm.audio_lock);
9264
9265                 amdgpu_dm_audio_eld_notify(adev, inst);
9266         }
9267
9268         /* Notify audio device additions. */
9269         for_each_new_connector_in_state(state, connector, new_con_state, i) {
9270                 if (!new_con_state->crtc)
9271                         continue;
9272
9273                 new_crtc_state = drm_atomic_get_new_crtc_state(
9274                         state, new_con_state->crtc);
9275
9276                 if (!new_crtc_state)
9277                         continue;
9278
9279                 if (!drm_atomic_crtc_needs_modeset(new_crtc_state))
9280                         continue;
9281
9282                 new_dm_crtc_state = to_dm_crtc_state(new_crtc_state);
9283                 if (!new_dm_crtc_state->stream)
9284                         continue;
9285
9286                 status = dc_stream_get_status(new_dm_crtc_state->stream);
9287                 if (!status)
9288                         continue;
9289
9290                 aconnector = to_amdgpu_dm_connector(connector);
9291
9292                 mutex_lock(&adev->dm.audio_lock);
9293                 inst = status->audio_inst;
9294                 aconnector->audio_inst = inst;
9295                 mutex_unlock(&adev->dm.audio_lock);
9296
9297                 amdgpu_dm_audio_eld_notify(adev, inst);
9298         }
9299 }
9300
9301 /**
9302  * amdgpu_dm_crtc_copy_transient_flags - copy mirrored flags from DRM to DC
9303  * @crtc_state: the DRM CRTC state
9304  * @stream_state: the DC stream state
9305  *
9306  * Copy the mirrored transient state flags from DRM to DC. It is used to bring
9307  * a dc_stream_state's flags in sync with a drm_crtc_state's flags.
9308  */
9309 static void amdgpu_dm_crtc_copy_transient_flags(struct drm_crtc_state *crtc_state,
9310                                                 struct dc_stream_state *stream_state)
9311 {
9312         stream_state->mode_changed = drm_atomic_crtc_needs_modeset(crtc_state);
9313 }
9314
9315 /**
9316  * amdgpu_dm_atomic_commit_tail() - amdgpu DM's commit tail implementation.
9317  * @state: The atomic state to commit
9318  *
9319  * This will tell DC to commit the constructed DC state from atomic_check,
9320  * programming the hardware. Any failure here implies a hardware problem, since
9321  * atomic check should have filtered anything non-kosher.
9322  */
9323 static void amdgpu_dm_atomic_commit_tail(struct drm_atomic_state *state)
9324 {
9325         struct drm_device *dev = state->dev;
9326         struct amdgpu_device *adev = drm_to_adev(dev);
9327         struct amdgpu_display_manager *dm = &adev->dm;
9328         struct dm_atomic_state *dm_state;
9329         struct dc_state *dc_state = NULL, *dc_state_temp = NULL;
9330         uint32_t i, j;
9331         struct drm_crtc *crtc;
9332         struct drm_crtc_state *old_crtc_state, *new_crtc_state;
9333         unsigned long flags;
9334         bool wait_for_vblank = true;
9335         struct drm_connector *connector;
9336         struct drm_connector_state *old_con_state, *new_con_state;
9337         struct dm_crtc_state *dm_old_crtc_state, *dm_new_crtc_state;
9338         int crtc_disable_count = 0;
9339         bool mode_set_reset_required = false;
9340
9341         trace_amdgpu_dm_atomic_commit_tail_begin(state);
9342
9343         drm_atomic_helper_update_legacy_modeset_state(dev, state);
9344
9345         dm_state = dm_atomic_get_new_state(state);
9346         if (dm_state && dm_state->context) {
9347                 dc_state = dm_state->context;
9348         } else {
9349                 /* No state changes, retain current state. */
9350                 dc_state_temp = dc_create_state(dm->dc);
9351                 ASSERT(dc_state_temp);
9352                 dc_state = dc_state_temp;
9353                 dc_resource_state_copy_construct_current(dm->dc, dc_state);
9354         }
9355
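             /*
              * Disable interrupts and release the old stream for any CRTC that
              * is being turned off or is undergoing a full modeset, before the
              * new state is programmed below.
              */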
9356         for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state,
9357                                        new_crtc_state, i) {
9358                 struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
9359
9360                 dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
9361
9362                 if (old_crtc_state->active &&
9363                     (!new_crtc_state->active ||
9364                      drm_atomic_crtc_needs_modeset(new_crtc_state))) {
9365                         manage_dm_interrupts(adev, acrtc, false);
9366                         dc_stream_release(dm_old_crtc_state->stream);
9367                 }
9368         }
9369
9370         drm_atomic_helper_calc_timestamping_constants(state);
9371
9372         /* update changed items */
9373         for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
9374                 struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
9375
9376                 dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
9377                 dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
9378
9379                 DRM_DEBUG_ATOMIC(
9380                         "amdgpu_crtc id:%d crtc_state_flags: enable:%d, active:%d, planes_changed:%d, mode_changed:%d, active_changed:%d, connectors_changed:%d\n",
9383                         acrtc->crtc_id,
9384                         new_crtc_state->enable,
9385                         new_crtc_state->active,
9386                         new_crtc_state->planes_changed,
9387                         new_crtc_state->mode_changed,
9388                         new_crtc_state->active_changed,
9389                         new_crtc_state->connectors_changed);
9390
9391                 /* Disable cursor if disabling crtc */
9392                 if (old_crtc_state->active && !new_crtc_state->active) {
9393                         struct dc_cursor_position position;
9394
9395                         memset(&position, 0, sizeof(position));
9396                         mutex_lock(&dm->dc_lock);
9397                         dc_stream_set_cursor_position(dm_old_crtc_state->stream, &position);
9398                         mutex_unlock(&dm->dc_lock);
9399                 }
9400
9401                 /* Copy all transient state flags into dc state */
9402                 if (dm_new_crtc_state->stream) {
9403                         amdgpu_dm_crtc_copy_transient_flags(&dm_new_crtc_state->base,
9404                                                             dm_new_crtc_state->stream);
9405                 }
9406
9407                 /* handles headless hotplug case, updating new_state and
9408                  * aconnector as needed
9409                  */
9410
9411                 if (modeset_required(new_crtc_state, dm_new_crtc_state->stream, dm_old_crtc_state->stream)) {
9412
9413                         DRM_DEBUG_ATOMIC("Atomic commit: SET crtc id %d: [%p]\n", acrtc->crtc_id, acrtc);
9414
9415                         if (!dm_new_crtc_state->stream) {
9416                                 /*
9417                                  * This can happen due to issues with the
9418                                  * delivery of userspace notifications, where
9419                                  * userspace tries to set a mode on a display
9420                                  * that is in fact disconnected. dc_sink is
9421                                  * NULL on the aconnector in this case; we
9422                                  * expect a mode reset to come soon.
9423                                  *
9424                                  * This can also happen when an unplug occurs
9425                                  * while the resume sequence is still running.
9426                                  *
9427                                  * In either case, pretend we still have a
9428                                  * sink to keep the pipe running so that the
9429                                  * hw state stays consistent with the sw state.
9430                                  */
9431                                 DRM_DEBUG_DRIVER("%s: Failed to create new stream for crtc %d\n",
9432                                                 __func__, acrtc->base.base.id);
9433                                 continue;
9434                         }
9435
9436                         if (dm_old_crtc_state->stream)
9437                                 remove_stream(adev, acrtc, dm_old_crtc_state->stream);
9438
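                             /*
                              * Hold a runtime PM reference while the CRTC is enabled; a
                              * matching pm_runtime_put_autosuspend() is issued for each
                              * disabled CRTC at the end of this function.
                              */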
9439                         pm_runtime_get_noresume(dev->dev);
9440
9441                         acrtc->enabled = true;
9442                         acrtc->hw_mode = new_crtc_state->mode;
9443                         crtc->hwmode = new_crtc_state->mode;
9444                         mode_set_reset_required = true;
9445                 } else if (modereset_required(new_crtc_state)) {
9446                         DRM_DEBUG_ATOMIC("Atomic commit: RESET. crtc id %d:[%p]\n", acrtc->crtc_id, acrtc);
9447                         /* i.e. reset mode */
9448                         if (dm_old_crtc_state->stream)
9449                                 remove_stream(adev, acrtc, dm_old_crtc_state->stream);
9450
9451                         mode_set_reset_required = true;
9452                 }
9453         } /* for_each_crtc_in_state() */
9454
9455         if (dc_state) {
9456                 /* If there is a mode set or reset, disable eDP PSR */
9457                 if (mode_set_reset_required) {
9458 #if defined(CONFIG_DRM_AMD_DC_DCN)
9459                         if (dm->vblank_control_workqueue)
9460                                 flush_workqueue(dm->vblank_control_workqueue);
9461 #endif
9462                         amdgpu_dm_psr_disable_all(dm);
9463                 }
9464
9465                 dm_enable_per_frame_crtc_master_sync(dc_state);
9466                 mutex_lock(&dm->dc_lock);
9467                 WARN_ON(!dc_commit_state(dm->dc, dc_state));
9468 #if defined(CONFIG_DRM_AMD_DC_DCN)
9469                 /* Allow idle optimization when vblank count is 0 for display off */
9470                 if (dm->active_vblank_irq_count == 0)
9471                         dc_allow_idle_optimizations(dm->dc, true);
9472 #endif
9473                 mutex_unlock(&dm->dc_lock);
9474         }
9475
9476         for_each_new_crtc_in_state(state, crtc, new_crtc_state, i) {
9477                 struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
9478
9479                 dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
9480
9481                 if (dm_new_crtc_state->stream != NULL) {
9482                         const struct dc_stream_status *status =
9483                                         dc_stream_get_status(dm_new_crtc_state->stream);
9484
9485                         if (!status)
9486                                 status = dc_stream_get_status_from_state(dc_state,
9487                                                                          dm_new_crtc_state->stream);
9488                         if (!status)
9489                                 DC_ERR("got no status for stream %p on acrtc %p\n", dm_new_crtc_state->stream, acrtc);
9490                         else
9491                                 acrtc->otg_inst = status->primary_otg_inst;
9492                 }
9493         }
9494 #ifdef CONFIG_DRM_AMD_DC_HDCP
9495         for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) {
9496                 struct dm_connector_state *dm_new_con_state = to_dm_connector_state(new_con_state);
9497                 struct amdgpu_crtc *acrtc = to_amdgpu_crtc(dm_new_con_state->base.crtc);
9498                 struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
9499
9500                 new_crtc_state = NULL;
9501
9502                 if (acrtc)
9503                         new_crtc_state = drm_atomic_get_new_crtc_state(state, &acrtc->base);
9504
9505                 dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
9506
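                     /*
                      * If the stream is being removed while content protection is
                      * still ENABLED, tear down HDCP on the link and fall back to
                      * DESIRED so it is re-enabled once a stream comes back.
                      */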
9507                 if (dm_new_crtc_state && dm_new_crtc_state->stream == NULL &&
9508                     connector->state->content_protection == DRM_MODE_CONTENT_PROTECTION_ENABLED) {
9509                         hdcp_reset_display(adev->dm.hdcp_workqueue, aconnector->dc_link->link_index);
9510                         new_con_state->content_protection = DRM_MODE_CONTENT_PROTECTION_DESIRED;
9511                         dm_new_con_state->update_hdcp = true;
9512                         continue;
9513                 }
9514
9515                 if (is_content_protection_different(new_con_state, old_con_state, connector, adev->dm.hdcp_workqueue))
9516                         hdcp_update_display(
9517                                 adev->dm.hdcp_workqueue, aconnector->dc_link->link_index, aconnector,
9518                                 new_con_state->hdcp_content_type,
9519                                 new_con_state->content_protection == DRM_MODE_CONTENT_PROTECTION_DESIRED);
9520         }
9521 #endif
9522
9523         /* Handle connector state changes */
9524         for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) {
9525                 struct dm_connector_state *dm_new_con_state = to_dm_connector_state(new_con_state);
9526                 struct dm_connector_state *dm_old_con_state = to_dm_connector_state(old_con_state);
9527                 struct amdgpu_crtc *acrtc = to_amdgpu_crtc(dm_new_con_state->base.crtc);
9528                 struct dc_surface_update dummy_updates[MAX_SURFACES];
9529                 struct dc_stream_update stream_update;
9530                 struct dc_info_packet hdr_packet;
9531                 struct dc_stream_status *status = NULL;
9532                 bool abm_changed, hdr_changed, scaling_changed;
9533
9534                 memset(&dummy_updates, 0, sizeof(dummy_updates));
9535                 memset(&stream_update, 0, sizeof(stream_update));
9536
9537                 if (acrtc) {
9538                         new_crtc_state = drm_atomic_get_new_crtc_state(state, &acrtc->base);
9539                         old_crtc_state = drm_atomic_get_old_crtc_state(state, &acrtc->base);
9540                 }
9541
9542                 /* Skip any modesets/resets */
9543                 if (!acrtc || drm_atomic_crtc_needs_modeset(new_crtc_state))
9544                         continue;
9545
9546                 dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
9547                 dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
9548
9549                 scaling_changed = is_scaling_state_different(dm_new_con_state,
9550                                                              dm_old_con_state);
9551
9552                 abm_changed = dm_new_crtc_state->abm_level !=
9553                               dm_old_crtc_state->abm_level;
9554
9555                 hdr_changed =
9556                         !drm_connector_atomic_hdr_metadata_equal(old_con_state, new_con_state);
9557
9558                 if (!scaling_changed && !abm_changed && !hdr_changed)
9559                         continue;
9560
9561                 stream_update.stream = dm_new_crtc_state->stream;
9562                 if (scaling_changed) {
9563                         update_stream_scaling_settings(&dm_new_con_state->base.crtc->mode,
9564                                         dm_new_con_state, dm_new_crtc_state->stream);
9565
9566                         stream_update.src = dm_new_crtc_state->stream->src;
9567                         stream_update.dst = dm_new_crtc_state->stream->dst;
9568                 }
9569
9570                 if (abm_changed) {
9571                         dm_new_crtc_state->stream->abm_level = dm_new_crtc_state->abm_level;
9572
9573                         stream_update.abm_level = &dm_new_crtc_state->abm_level;
9574                 }
9575
9576                 if (hdr_changed) {
9577                         fill_hdr_info_packet(new_con_state, &hdr_packet);
9578                         stream_update.hdr_static_metadata = &hdr_packet;
9579                 }
9580
9581                 status = dc_stream_get_status(dm_new_crtc_state->stream);
9582
9583                 if (WARN_ON(!status))
9584                         continue;
9585
9586                 WARN_ON(!status->plane_count);
9587
9588                 /*
9589                  * TODO: DC refuses to perform stream updates without a dc_surface_update.
9590                  * Here we create an empty update on each plane.
9591                  * To fix this, DC should permit updating only stream properties.
9592                  */
9593                 for (j = 0; j < status->plane_count; j++)
9594                         dummy_updates[j].surface = status->plane_states[0];
9595
9597                 mutex_lock(&dm->dc_lock);
9598                 dc_commit_updates_for_stream(dm->dc,
9599                                                      dummy_updates,
9600                                                      status->plane_count,
9601                                                      dm_new_crtc_state->stream,
9602                                                      &stream_update,
9603                                                      dc_state);
9604                 mutex_unlock(&dm->dc_lock);
9605         }
9606
9607         /* Count number of newly disabled CRTCs for dropping PM refs later. */
9608         for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state,
9609                                       new_crtc_state, i) {
9610                 if (old_crtc_state->active && !new_crtc_state->active)
9611                         crtc_disable_count++;
9612
9613                 dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
9614                 dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
9615
9616                 /* For freesync config update on crtc state and params for irq */
9617                 update_stream_irq_parameters(dm, dm_new_crtc_state);
9618
9619                 /* Handle vrr on->off / off->on transitions */
9620                 amdgpu_dm_handle_vrr_transition(dm_old_crtc_state,
9621                                                 dm_new_crtc_state);
9622         }
9623
9624         /*
9625          * Enable interrupts for CRTCs that are newly enabled or went through
9626          * a modeset. This is intentionally deferred until after the front end
9627          * state has been modified so that the OTG is on by the time the IRQ
9628          * handlers run and they never access stale or invalid state.
9629          */
9630         for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
9631                 struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
9632 #ifdef CONFIG_DEBUG_FS
9633                 bool configure_crc = false;
9634                 enum amdgpu_dm_pipe_crc_source cur_crc_src;
9635 #if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
9636                 struct crc_rd_work *crc_rd_wrk = dm->crc_rd_wrk;
9637 #endif
9638                 spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);
9639                 cur_crc_src = acrtc->dm_irq_params.crc_src;
9640                 spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
9641 #endif
9642                 dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
9643
9644                 if (new_crtc_state->active &&
9645                     (!old_crtc_state->active ||
9646                      drm_atomic_crtc_needs_modeset(new_crtc_state))) {
9647                         dc_stream_retain(dm_new_crtc_state->stream);
9648                         acrtc->dm_irq_params.stream = dm_new_crtc_state->stream;
9649                         manage_dm_interrupts(adev, acrtc, true);
9650
9651 #ifdef CONFIG_DEBUG_FS
9652                         /*
9653                          * Frontend may have changed, so reapply the CRC capture
9654                          * settings for the stream.
9655                          */
9656                         dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
9657
9658                         if (amdgpu_dm_is_valid_crc_source(cur_crc_src)) {
9659                                 configure_crc = true;
9660 #if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
9661                                 if (amdgpu_dm_crc_window_is_activated(crtc)) {
9662                                         spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);
9663                                         acrtc->dm_irq_params.crc_window.update_win = true;
9664                                         acrtc->dm_irq_params.crc_window.skip_frame_cnt = 2;
9665                                         spin_lock_irq(&crc_rd_wrk->crc_rd_work_lock);
9666                                         crc_rd_wrk->crtc = crtc;
9667                                         spin_unlock_irq(&crc_rd_wrk->crc_rd_work_lock);
9668                                         spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
9669                                 }
9670 #endif
9671                         }
9672
9673                         if (configure_crc)
9674                                 if (amdgpu_dm_crtc_configure_crc_source(
9675                                         crtc, dm_new_crtc_state, cur_crc_src))
9676                                         DRM_DEBUG_DRIVER("Failed to configure crc source\n");
9677 #endif
9678                 }
9679         }
9680
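             /*
              * An async flip on any CRTC means this commit must not stall
              * waiting for vblank, either in the plane commit or in the
              * final flip-done wait below.
              */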
9681         for_each_new_crtc_in_state(state, crtc, new_crtc_state, j)
9682                 if (new_crtc_state->async_flip)
9683                         wait_for_vblank = false;
9684
9685         /* update planes when needed per crtc */
9686         for_each_new_crtc_in_state(state, crtc, new_crtc_state, j) {
9687                 dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
9688
9689                 if (dm_new_crtc_state->stream)
9690                         amdgpu_dm_commit_planes(state, dc_state, dev,
9691                                                 dm, crtc, wait_for_vblank);
9692         }
9693
9694         /* Update audio instances for each connector. */
9695         amdgpu_dm_commit_audio(dev, state);
9696
9697 #if defined(CONFIG_BACKLIGHT_CLASS_DEVICE) ||           \
9698         defined(CONFIG_BACKLIGHT_CLASS_DEVICE_MODULE)
9699         /* restore the backlight level */
9700         for (i = 0; i < dm->num_of_edps; i++) {
9701                 if (dm->backlight_dev[i] &&
9702                     (dm->actual_brightness[i] != dm->brightness[i]))
9703                         amdgpu_dm_backlight_set_level(dm, i, dm->brightness[i]);
9704         }
9705 #endif
9706         /*
9707          * Send a vblank event for every event not handled in the flip path, and
9708          * mark the event as consumed for drm_atomic_helper_commit_hw_done()
9709          */
9710         spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);
9711         for_each_new_crtc_in_state(state, crtc, new_crtc_state, i) {
9712
9713                 if (new_crtc_state->event)
9714                         drm_send_event_locked(dev, &new_crtc_state->event->base);
9715
9716                 new_crtc_state->event = NULL;
9717         }
9718         spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
9719
9720         /* Signal HW programming completion */
9721         drm_atomic_helper_commit_hw_done(state);
9722
9723         if (wait_for_vblank)
9724                 drm_atomic_helper_wait_for_flip_done(dev, state);
9725
9726         drm_atomic_helper_cleanup_planes(dev, state);
9727
9728         /* Return the stolen VGA memory to VRAM */
9729         if (!adev->mman.keep_stolen_vga_memory)
9730                 amdgpu_bo_free_kernel(&adev->mman.stolen_vga_memory, NULL, NULL);
9731         amdgpu_bo_free_kernel(&adev->mman.stolen_extended_memory, NULL, NULL);
9732
9733         /*
9734          * Finally, drop a runtime PM reference for each newly disabled CRTC,
9735          * so we can put the GPU into runtime suspend if we're not driving any
9736          * displays anymore
9737          */
9738         for (i = 0; i < crtc_disable_count; i++)
9739                 pm_runtime_put_autosuspend(dev->dev);
9740         pm_runtime_mark_last_busy(dev->dev);
9741
9742         if (dc_state_temp)
9743                 dc_release_state(dc_state_temp);
9744 }
9745
9747 static int dm_force_atomic_commit(struct drm_connector *connector)
9748 {
9749         int ret = 0;
9750         struct drm_device *ddev = connector->dev;
9751         struct drm_atomic_state *state = drm_atomic_state_alloc(ddev);
9752         struct amdgpu_crtc *disconnected_acrtc = to_amdgpu_crtc(connector->encoder->crtc);
9753         struct drm_plane *plane = disconnected_acrtc->base.primary;
9754         struct drm_connector_state *conn_state;
9755         struct drm_crtc_state *crtc_state;
9756         struct drm_plane_state *plane_state;
9757
9758         if (!state)
9759                 return -ENOMEM;
9760
9761         state->acquire_ctx = ddev->mode_config.acquire_ctx;
9762
9763         /* Construct an atomic state to restore previous display settings */
9764
9765         /*
9766          * Attach connectors to drm_atomic_state
9767          */
9768         conn_state = drm_atomic_get_connector_state(state, connector);
9769
9770         ret = PTR_ERR_OR_ZERO(conn_state);
9771         if (ret)
9772                 goto out;
9773
9774         /* Attach crtc to drm_atomic_state */
9775         crtc_state = drm_atomic_get_crtc_state(state, &disconnected_acrtc->base);
9776
9777         ret = PTR_ERR_OR_ZERO(crtc_state);
9778         if (ret)
9779                 goto out;
9780
9781         /* force a restore */
9782         crtc_state->mode_changed = true;
9783
9784         /* Attach plane to drm_atomic_state */
9785         plane_state = drm_atomic_get_plane_state(state, plane);
9786
9787         ret = PTR_ERR_OR_ZERO(plane_state);
9788         if (ret)
9789                 goto out;
9790
9791         /* Call commit internally with the state we just constructed */
9792         ret = drm_atomic_commit(state);
9793
9794 out:
9795         drm_atomic_state_put(state);
9796         if (ret)
9797                 DRM_ERROR("Restoring old state failed with %i\n", ret);
9798
9799         return ret;
9800 }
9801
9802 /*
9803  * This function handles all cases when a set mode does not come upon hotplug.
9804  * This includes when a display is unplugged and then plugged back into the
9805  * same port, and when running without usermode desktop manager support.
9806  */
9807 void dm_restore_drm_connector_state(struct drm_device *dev,
9808                                     struct drm_connector *connector)
9809 {
9810         struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
9811         struct amdgpu_crtc *disconnected_acrtc;
9812         struct dm_crtc_state *acrtc_state;
9813
9814         if (!aconnector->dc_sink || !connector->state || !connector->encoder)
9815                 return;
9816
9817         disconnected_acrtc = to_amdgpu_crtc(connector->encoder->crtc);
9818         if (!disconnected_acrtc)
9819                 return;
9820
9821         acrtc_state = to_dm_crtc_state(disconnected_acrtc->base.state);
9822         if (!acrtc_state->stream)
9823                 return;
9824
9825         /*
9826          * If the previous sink is not released and is different from the current
9827          * one, we deduce that we are in a state where we cannot rely on a usermode
9828          * call to turn on the display, so we do it here
9829          */
9830         if (acrtc_state->stream->sink != aconnector->dc_sink)
9831                 dm_force_atomic_commit(&aconnector->base);
9832 }
9833
9834 /*
9835  * Grabs all modesetting locks to serialize against any blocking commits,
9836  * and waits for completion of all nonblocking commits.
9837  */
9838 static int do_aquire_global_lock(struct drm_device *dev,
9839                                  struct drm_atomic_state *state)
9840 {
9841         struct drm_crtc *crtc;
9842         struct drm_crtc_commit *commit;
9843         long ret;
9844
9845         /*
9846          * Adding all modeset locks to acquire_ctx will
9847          * ensure that when the framework releases it, the
9848          * extra locks we are taking here will also get released
9849          */
9850         ret = drm_modeset_lock_all_ctx(dev, state->acquire_ctx);
9851         if (ret)
9852                 return ret;
9853
9854         list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
9855                 spin_lock(&crtc->commit_lock);
9856                 commit = list_first_entry_or_null(&crtc->commit_list,
9857                                 struct drm_crtc_commit, commit_entry);
9858                 if (commit)
9859                         drm_crtc_commit_get(commit);
9860                 spin_unlock(&crtc->commit_lock);
9861
9862                 if (!commit)
9863                         continue;
9864
9865                 /*
9866                  * Make sure all pending HW programming has completed and
9867                  * all page flips are done
9868                  */
9869                 ret = wait_for_completion_interruptible_timeout(&commit->hw_done, 10*HZ);
9870
9871                 if (ret > 0)
9872                         ret = wait_for_completion_interruptible_timeout(
9873                                         &commit->flip_done, 10*HZ);
9874
9875                 if (ret == 0)
9876                         DRM_ERROR("[CRTC:%d:%s] hw_done or flip_done timed out\n",
9877                                   crtc->base.id, crtc->name);
9878
9879                 drm_crtc_commit_put(commit);
9880         }
9881
9882         return ret < 0 ? ret : 0;
9883 }
9884
9885 static void get_freesync_config_for_crtc(
9886         struct dm_crtc_state *new_crtc_state,
9887         struct dm_connector_state *new_con_state)
9888 {
9889         struct mod_freesync_config config = {0};
9890         struct amdgpu_dm_connector *aconnector =
9891                         to_amdgpu_dm_connector(new_con_state->base.connector);
9892         struct drm_display_mode *mode = &new_crtc_state->base.mode;
9893         int vrefresh = drm_mode_vrefresh(mode);
9894         bool fs_vid_mode = false;
9895
9896         new_crtc_state->vrr_supported = new_con_state->freesync_capable &&
9897                                         vrefresh >= aconnector->min_vfreq &&
9898                                         vrefresh <= aconnector->max_vfreq;
9899
9900         if (new_crtc_state->vrr_supported) {
9901                 new_crtc_state->stream->ignore_msa_timing_param = true;
9902                 fs_vid_mode = new_crtc_state->freesync_config.state == VRR_STATE_ACTIVE_FIXED;
9903
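                     /* min_vfreq/max_vfreq are in Hz; mod_freesync expects micro-Hz */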
9904                 config.min_refresh_in_uhz = aconnector->min_vfreq * 1000000;
9905                 config.max_refresh_in_uhz = aconnector->max_vfreq * 1000000;
9906                 config.vsif_supported = true;
9907                 config.btr = true;
9908
9909                 if (fs_vid_mode) {
9910                         config.state = VRR_STATE_ACTIVE_FIXED;
9911                         config.fixed_refresh_in_uhz = new_crtc_state->freesync_config.fixed_refresh_in_uhz;
9912                         goto out;
9913                 } else if (new_crtc_state->base.vrr_enabled) {
9914                         config.state = VRR_STATE_ACTIVE_VARIABLE;
9915                 } else {
9916                         config.state = VRR_STATE_INACTIVE;
9917                 }
9918         }
9919 out:
9920         new_crtc_state->freesync_config = config;
9921 }
9922
9923 static void reset_freesync_config_for_crtc(
9924         struct dm_crtc_state *new_crtc_state)
9925 {
9926         new_crtc_state->vrr_supported = false;
9927
9928         memset(&new_crtc_state->vrr_infopacket, 0,
9929                sizeof(new_crtc_state->vrr_infopacket));
9930 }
9931
9932 static bool
9933 is_timing_unchanged_for_freesync(struct drm_crtc_state *old_crtc_state,
9934                                  struct drm_crtc_state *new_crtc_state)
9935 {
9936         struct drm_display_mode old_mode, new_mode;
9937
9938         if (!old_crtc_state || !new_crtc_state)
9939                 return false;
9940
9941         old_mode = old_crtc_state->mode;
9942         new_mode = new_crtc_state->mode;
9943
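             /*
              * Return true only for a pure vertical blanking change: clock,
              * all horizontal timings and the vsync pulse width
              * (vsync_end - vsync_start) must match, while vtotal,
              * vsync_start and vsync_end must all differ (a front porch
              * adjustment, as used by fixed-refresh freesync video modes).
              */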
9944         if (old_mode.clock       == new_mode.clock &&
9945             old_mode.hdisplay    == new_mode.hdisplay &&
9946             old_mode.vdisplay    == new_mode.vdisplay &&
9947             old_mode.htotal      == new_mode.htotal &&
9948             old_mode.vtotal      != new_mode.vtotal &&
9949             old_mode.hsync_start == new_mode.hsync_start &&
9950             old_mode.vsync_start != new_mode.vsync_start &&
9951             old_mode.hsync_end   == new_mode.hsync_end &&
9952             old_mode.vsync_end   != new_mode.vsync_end &&
9953             old_mode.hskew       == new_mode.hskew &&
9954             old_mode.vscan       == new_mode.vscan &&
9955             (old_mode.vsync_end - old_mode.vsync_start) ==
9956             (new_mode.vsync_end - new_mode.vsync_start))
9957                 return true;
9958
9959         return false;
9960 }
9961
9962 static void set_freesync_fixed_config(struct dm_crtc_state *dm_new_crtc_state)
     {
9963         uint64_t num, den, res;
9964         struct drm_crtc_state *new_crtc_state = &dm_new_crtc_state->base;
9965
9966         dm_new_crtc_state->freesync_config.state = VRR_STATE_ACTIVE_FIXED;
9967
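             /*
              * mode.clock is in kHz: multiply by 1000 for Hz and by another
              * 1000000 for micro-Hz, then divide by the pixels per frame
              * (htotal * vtotal). E.g. a 148500 kHz clock with 2200x1125
              * totals yields 60000000 uHz (60 Hz).
              */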
9968         num = (unsigned long long)new_crtc_state->mode.clock * 1000 * 1000000;
9969         den = (unsigned long long)new_crtc_state->mode.htotal *
9970               (unsigned long long)new_crtc_state->mode.vtotal;
9971
9972         res = div_u64(num, den);
9973         dm_new_crtc_state->freesync_config.fixed_refresh_in_uhz = res;
9974 }
9975
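     /*
      * Runs twice from amdgpu_dm_atomic_check(): a first pass with
      * enable == false removes the stream of any changed/disabled CRTC from
      * the DC context, and a second pass with enable == true adds the new
      * stream back in.
      */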
9976 static int dm_update_crtc_state(struct amdgpu_display_manager *dm,
9977                                 struct drm_atomic_state *state,
9978                                 struct drm_crtc *crtc,
9979                                 struct drm_crtc_state *old_crtc_state,
9980                                 struct drm_crtc_state *new_crtc_state,
9981                                 bool enable,
9982                                 bool *lock_and_validation_needed)
9983 {
9984         struct dm_atomic_state *dm_state = NULL;
9985         struct dm_crtc_state *dm_old_crtc_state, *dm_new_crtc_state;
9986         struct dc_stream_state *new_stream;
9987         int ret = 0;
9988
9989         /*
9990          * TODO Move this code into dm_crtc_atomic_check once we get rid of dc_validation_set
9991          * update changed items
9992          */
9993         struct amdgpu_crtc *acrtc = NULL;
9994         struct amdgpu_dm_connector *aconnector = NULL;
9995         struct drm_connector_state *drm_new_conn_state = NULL, *drm_old_conn_state = NULL;
9996         struct dm_connector_state *dm_new_conn_state = NULL, *dm_old_conn_state = NULL;
9997
9998         new_stream = NULL;
9999
10000         dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
10001         dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
10002         acrtc = to_amdgpu_crtc(crtc);
10003         aconnector = amdgpu_dm_find_first_crtc_matching_connector(state, crtc);
10004
10005         /* TODO This hack should go away */
10006         if (aconnector && enable) {
10007                 /* Make sure fake sink is created in plug-in scenario */
10008                 drm_new_conn_state = drm_atomic_get_new_connector_state(state,
10009                                                             &aconnector->base);
10010                 drm_old_conn_state = drm_atomic_get_old_connector_state(state,
10011                                                             &aconnector->base);
10012
10013                 if (IS_ERR(drm_new_conn_state)) {
10014                         ret = PTR_ERR_OR_ZERO(drm_new_conn_state);
10015                         goto fail;
10016                 }
10017
10018                 dm_new_conn_state = to_dm_connector_state(drm_new_conn_state);
10019                 dm_old_conn_state = to_dm_connector_state(drm_old_conn_state);
10020
10021                 if (!drm_atomic_crtc_needs_modeset(new_crtc_state))
10022                         goto skip_modeset;
10023
10024                 new_stream = create_validate_stream_for_sink(aconnector,
10025                                                              &new_crtc_state->mode,
10026                                                              dm_new_conn_state,
10027                                                              dm_old_crtc_state->stream);
10028
10029                 /*
10030                  * We can have no stream on ACTION_SET if a display
10031                  * was disconnected during S3. In this case it is not an
10032                  * error: the OS will be updated after detection and
10033                  * will do the right thing on the next atomic commit
10034                  */
10035
10036                 if (!new_stream) {
10037                         DRM_DEBUG_DRIVER("%s: Failed to create new stream for crtc %d\n",
10038                                         __func__, acrtc->base.base.id);
10039                         ret = -ENOMEM;
10040                         goto fail;
10041                 }
10042
10043                 /*
10044                  * TODO: Check VSDB bits to decide whether this should
10045                  * be enabled or not.
10046                  */
10047                 new_stream->triggered_crtc_reset.enabled =
10048                         dm->force_timing_sync;
10049
10050                 dm_new_crtc_state->abm_level = dm_new_conn_state->abm_level;
10051
10052                 ret = fill_hdr_info_packet(drm_new_conn_state,
10053                                            &new_stream->hdr_static_metadata);
10054                 if (ret)
10055                         goto fail;
10056
10057                 /*
10058                  * If we already removed the old stream from the context
10059                  * (and set the new stream to NULL) then we can't reuse
10060                  * the old stream even if the stream and scaling are unchanged.
10061                  * We'll hit the BUG_ON and black screen.
10062                  *
10063                  * TODO: Refactor this function to allow this check to work
10064                  * in all conditions.
10065                  */
10066                 if (amdgpu_freesync_vid_mode &&
10067                     dm_new_crtc_state->stream &&
10068                     is_timing_unchanged_for_freesync(new_crtc_state, old_crtc_state))
10069                         goto skip_modeset;
10070
10071                 if (dm_new_crtc_state->stream &&
10072                     dc_is_stream_unchanged(new_stream, dm_old_crtc_state->stream) &&
10073                     dc_is_stream_scaling_unchanged(new_stream, dm_old_crtc_state->stream)) {
10074                         new_crtc_state->mode_changed = false;
10075                         DRM_DEBUG_DRIVER("Mode change not required, setting mode_changed to %d\n",
10076                                          new_crtc_state->mode_changed);
10077                 }
10078         }
10079
10080         /* mode_changed flag may get updated above, need to check again */
10081         if (!drm_atomic_crtc_needs_modeset(new_crtc_state))
10082                 goto skip_modeset;
10083
10084         DRM_DEBUG_ATOMIC(
10085                 "amdgpu_crtc id:%d crtc_state_flags: enable:%d, active:%d, planes_changed:%d, mode_changed:%d, active_changed:%d, connectors_changed:%d\n",
10088                 acrtc->crtc_id,
10089                 new_crtc_state->enable,
10090                 new_crtc_state->active,
10091                 new_crtc_state->planes_changed,
10092                 new_crtc_state->mode_changed,
10093                 new_crtc_state->active_changed,
10094                 new_crtc_state->connectors_changed);
10095
10096         /* Remove stream for any changed/disabled CRTC */
10097         if (!enable) {
10098
10099                 if (!dm_old_crtc_state->stream)
10100                         goto skip_modeset;
10101
10102                 if (amdgpu_freesync_vid_mode && dm_new_crtc_state->stream &&
10103                     is_timing_unchanged_for_freesync(new_crtc_state,
10104                                                      old_crtc_state)) {
10105                         new_crtc_state->mode_changed = false;
10106                         DRM_DEBUG_DRIVER("Mode change not required for front porch change, setting mode_changed to %d\n",
10107                                          new_crtc_state->mode_changed);
10110
10111                         set_freesync_fixed_config(dm_new_crtc_state);
10112
10113                         goto skip_modeset;
10114                 } else if (amdgpu_freesync_vid_mode && aconnector &&
10115                            is_freesync_video_mode(&new_crtc_state->mode,
10116                                                   aconnector)) {
10117                         struct drm_display_mode *high_mode;
10118
10119                         high_mode = get_highest_refresh_rate_mode(aconnector, false);
10120                         if (!drm_mode_equal(&new_crtc_state->mode, high_mode)) {
10121                                 set_freesync_fixed_config(dm_new_crtc_state);
10122                         }
10123                 }
10124
10125                 ret = dm_atomic_get_state(state, &dm_state);
10126                 if (ret)
10127                         goto fail;
10128
10129                 DRM_DEBUG_DRIVER("Disabling DRM crtc: %d\n",
10130                                 crtc->base.id);
10131
10132                 /* i.e. reset mode */
10133                 if (dc_remove_stream_from_ctx(
10134                                 dm->dc,
10135                                 dm_state->context,
10136                                 dm_old_crtc_state->stream) != DC_OK) {
10137                         ret = -EINVAL;
10138                         goto fail;
10139                 }
10140
10141                 dc_stream_release(dm_old_crtc_state->stream);
10142                 dm_new_crtc_state->stream = NULL;
10143
10144                 reset_freesync_config_for_crtc(dm_new_crtc_state);
10145
10146                 *lock_and_validation_needed = true;
10147
10148         } else { /* Add stream for any updated/enabled CRTC */
10149                 /*
10150                  * Quick fix to prevent a NULL pointer dereference on new_stream when
10151                  * added MST connectors are not found in the existing crtc_state in
10152                  * daisy-chained mode. TODO: need to dig out the root cause of that.
10153                  */
10154                 if (!aconnector || (!aconnector->dc_sink && aconnector->mst_port))
10155                         goto skip_modeset;
10156
10157                 if (modereset_required(new_crtc_state))
10158                         goto skip_modeset;
10159
10160                 if (modeset_required(new_crtc_state, new_stream,
10161                                      dm_old_crtc_state->stream)) {
10162
10163                         WARN_ON(dm_new_crtc_state->stream);
10164
10165                         ret = dm_atomic_get_state(state, &dm_state);
10166                         if (ret)
10167                                 goto fail;
10168
10169                         dm_new_crtc_state->stream = new_stream;
10170
10171                         dc_stream_retain(new_stream);
10172
10173                         DRM_DEBUG_ATOMIC("Enabling DRM crtc: %d\n",
10174                                          crtc->base.id);
10175
10176                         if (dc_add_stream_to_ctx(
10177                                         dm->dc,
10178                                         dm_state->context,
10179                                         dm_new_crtc_state->stream) != DC_OK) {
10180                                 ret = -EINVAL;
10181                                 goto fail;
10182                         }
10183
10184                         *lock_and_validation_needed = true;
10185                 }
10186         }
10187
10188 skip_modeset:
10189         /* Release extra reference */
10190         if (new_stream)
10191                 dc_stream_release(new_stream);
10192
10193         /*
10194          * We want to do dc stream updates that do not require a
10195          * full modeset below.
10196          */
10197         if (!(enable && aconnector && new_crtc_state->active))
10198                 return 0;
10199         /*
10200          * Given above conditions, the dc state cannot be NULL because:
10201          * 1. We're in the process of enabling CRTCs (just been added
10202          *    to the dc context, or already is on the context)
10203          * 2. Has a valid connector attached, and
10204          * 3. Is currently active and enabled.
10205          * => The dc stream state currently exists.
10206          */
10207         BUG_ON(dm_new_crtc_state->stream == NULL);
10208
10209         /* Scaling or underscan settings */
10210         if (is_scaling_state_different(dm_old_conn_state, dm_new_conn_state) ||
10211                                 drm_atomic_crtc_needs_modeset(new_crtc_state))
10212                 update_stream_scaling_settings(
10213                         &new_crtc_state->mode, dm_new_conn_state, dm_new_crtc_state->stream);
10214
10215         /* ABM settings */
10216         dm_new_crtc_state->abm_level = dm_new_conn_state->abm_level;
10217
10218         /*
10219          * Color management settings. We also update color properties
10220          * when a modeset is needed, to ensure it gets reprogrammed.
10221          */
10222         if (dm_new_crtc_state->base.color_mgmt_changed ||
10223             drm_atomic_crtc_needs_modeset(new_crtc_state)) {
10224                 ret = amdgpu_dm_update_crtc_color_mgmt(dm_new_crtc_state);
10225                 if (ret)
10226                         goto fail;
10227         }
10228
10229         /* Update Freesync settings. */
10230         get_freesync_config_for_crtc(dm_new_crtc_state,
10231                                      dm_new_conn_state);
10232
10233         return ret;
10234
10235 fail:
10236         if (new_stream)
10237                 dc_stream_release(new_stream);
10238         return ret;
10239 }
10240
10241 static bool should_reset_plane(struct drm_atomic_state *state,
10242                                struct drm_plane *plane,
10243                                struct drm_plane_state *old_plane_state,
10244                                struct drm_plane_state *new_plane_state)
10245 {
10246         struct drm_plane *other;
10247         struct drm_plane_state *old_other_state, *new_other_state;
10248         struct drm_crtc_state *new_crtc_state;
10249         int i;
10250
10251         /*
10252          * TODO: Remove this hack once the checks below are sufficient
10253          * to determine when we need to reset all the planes on
10254          * the stream.
10255          */
10256         if (state->allow_modeset)
10257                 return true;
10258
10259         /* Exit early if we know that we're adding or removing the plane. */
10260         if (old_plane_state->crtc != new_plane_state->crtc)
10261                 return true;
10262
10263         /* old crtc == new_crtc == NULL, plane not in context. */
10264         if (!new_plane_state->crtc)
10265                 return false;
10266
10267         new_crtc_state =
10268                 drm_atomic_get_new_crtc_state(state, new_plane_state->crtc);
10269
10270         if (!new_crtc_state)
10271                 return true;
10272
10273         /* CRTC Degamma changes currently require us to recreate planes. */
10274         if (new_crtc_state->color_mgmt_changed)
10275                 return true;
10276
10277         if (drm_atomic_crtc_needs_modeset(new_crtc_state))
10278                 return true;
10279
10280         /*
10281          * If there are any new primary or overlay planes being added or
10282          * removed then the z-order can potentially change. To ensure
10283          * correct z-order and pipe acquisition the current DC architecture
10284          * requires us to remove and recreate all existing planes.
10285          *
10286          * TODO: Come up with a more elegant solution for this.
10287          */
10288         for_each_oldnew_plane_in_state(state, other, old_other_state, new_other_state, i) {
10289                 struct amdgpu_framebuffer *old_afb, *new_afb;

10290                 if (other->type == DRM_PLANE_TYPE_CURSOR)
10291                         continue;
10292
10293                 if (old_other_state->crtc != new_plane_state->crtc &&
10294                     new_other_state->crtc != new_plane_state->crtc)
10295                         continue;
10296
10297                 if (old_other_state->crtc != new_other_state->crtc)
10298                         return true;
10299
10300                 /* Src/dst size and scaling updates. */
10301                 if (old_other_state->src_w != new_other_state->src_w ||
10302                     old_other_state->src_h != new_other_state->src_h ||
10303                     old_other_state->crtc_w != new_other_state->crtc_w ||
10304                     old_other_state->crtc_h != new_other_state->crtc_h)
10305                         return true;
10306
10307                 /* Rotation / mirroring updates. */
10308                 if (old_other_state->rotation != new_other_state->rotation)
10309                         return true;
10310
10311                 /* Blending updates. */
10312                 if (old_other_state->pixel_blend_mode !=
10313                     new_other_state->pixel_blend_mode)
10314                         return true;
10315
10316                 /* Alpha updates. */
10317                 if (old_other_state->alpha != new_other_state->alpha)
10318                         return true;
10319
10320                 /* Colorspace changes. */
10321                 if (old_other_state->color_range != new_other_state->color_range ||
10322                     old_other_state->color_encoding != new_other_state->color_encoding)
10323                         return true;
10324
10325                 /* Framebuffer checks fall at the end. */
10326                 if (!old_other_state->fb || !new_other_state->fb)
10327                         continue;
10328
10329                 /* Pixel format changes can require bandwidth updates. */
10330                 if (old_other_state->fb->format != new_other_state->fb->format)
10331                         return true;
10332
10333                 old_afb = (struct amdgpu_framebuffer *)old_other_state->fb;
10334                 new_afb = (struct amdgpu_framebuffer *)new_other_state->fb;
10335
10336                 /* Tiling and DCC changes also require bandwidth updates. */
10337                 if (old_afb->tiling_flags != new_afb->tiling_flags ||
10338                     old_afb->base.modifier != new_afb->base.modifier)
10339                         return true;
10340         }
10341
10342         return false;
10343 }
10344
10345 static int dm_check_cursor_fb(struct amdgpu_crtc *new_acrtc,
10346                               struct drm_plane_state *new_plane_state,
10347                               struct drm_framebuffer *fb)
10348 {
10349         struct amdgpu_device *adev = drm_to_adev(new_acrtc->base.dev);
10350         struct amdgpu_framebuffer *afb = to_amdgpu_framebuffer(fb);
10351         unsigned int pitch;
10352         bool linear;
10353
10354         if (fb->width > new_acrtc->max_cursor_width ||
10355             fb->height > new_acrtc->max_cursor_height) {
10356                 DRM_DEBUG_ATOMIC("Bad cursor FB size %dx%d\n",
10357                                  new_plane_state->fb->width,
10358                                  new_plane_state->fb->height);
10359                 return -EINVAL;
10360         }
10361         if (new_plane_state->src_w != fb->width << 16 ||
10362             new_plane_state->src_h != fb->height << 16) {
10363                 DRM_DEBUG_ATOMIC("Cropping not supported for cursor plane\n");
10364                 return -EINVAL;
10365         }
10366
10367         /* Pitch in pixels: pitches[0] is in bytes, cpp[0] is bytes per pixel */
10368         pitch = fb->pitches[0] / fb->format->cpp[0];
10369
10370         if (fb->width != pitch) {
10371                 DRM_DEBUG_ATOMIC("Cursor FB width %d doesn't match pitch %d\n",
10372                                  fb->width, pitch);
10373                 return -EINVAL;
10374         }
10375
10376         switch (pitch) {
10377         case 64:
10378         case 128:
10379         case 256:
10380                 /* FB pitch is supported by cursor plane */
10381                 break;
10382         default:
10383                 DRM_DEBUG_ATOMIC("Bad cursor FB pitch %d px\n", pitch);
10384                 return -EINVAL;
10385         }
10386
10387         /* Core DRM takes care of checking FB modifiers, so we only need to
10388          * check tiling flags when the FB doesn't have a modifier. */
10389         if (!(fb->flags & DRM_MODE_FB_MODIFIERS)) {
10390                 if (adev->family < AMDGPU_FAMILY_AI) {
10391                         linear = AMDGPU_TILING_GET(afb->tiling_flags, ARRAY_MODE) != DC_ARRAY_2D_TILED_THIN1 &&
10392                                  AMDGPU_TILING_GET(afb->tiling_flags, ARRAY_MODE) != DC_ARRAY_1D_TILED_THIN1 &&
10393                                  AMDGPU_TILING_GET(afb->tiling_flags, MICRO_TILE_MODE) == 0;
10394                 } else {
10395                         linear = AMDGPU_TILING_GET(afb->tiling_flags, SWIZZLE_MODE) == 0;
10396                 }
10397                 if (!linear) {
10398                         DRM_DEBUG_ATOMIC("Cursor FB not linear\n");
10399                         return -EINVAL;
10400                 }
10401         }
10402
10403         return 0;
10404 }
10405
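      /*
       * Counterpart of dm_update_crtc_state() for planes: called twice from
       * amdgpu_dm_atomic_check(), first with enable == false to remove
       * changed/disabled planes from the DC context, then with enable == true
       * to add the updated planes back.
       */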
10406 static int dm_update_plane_state(struct dc *dc,
10407                                  struct drm_atomic_state *state,
10408                                  struct drm_plane *plane,
10409                                  struct drm_plane_state *old_plane_state,
10410                                  struct drm_plane_state *new_plane_state,
10411                                  bool enable,
10412                                  bool *lock_and_validation_needed)
10413 {
10415         struct dm_atomic_state *dm_state = NULL;
10416         struct drm_crtc *new_plane_crtc, *old_plane_crtc;
10417         struct drm_crtc_state *old_crtc_state, *new_crtc_state;
10418         struct dm_crtc_state *dm_new_crtc_state, *dm_old_crtc_state;
10419         struct dm_plane_state *dm_new_plane_state, *dm_old_plane_state;
10420         struct amdgpu_crtc *new_acrtc;
10421         bool needs_reset;
10422         int ret = 0;
10423
10425         new_plane_crtc = new_plane_state->crtc;
10426         old_plane_crtc = old_plane_state->crtc;
10427         dm_new_plane_state = to_dm_plane_state(new_plane_state);
10428         dm_old_plane_state = to_dm_plane_state(old_plane_state);
10429
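              /*
               * The cursor is not handled as a DC plane; it is programmed through
               * the stream's cursor attributes (see dc_stream_set_cursor_position()
               * in commit_tail above), so only sanity-check the FB here.
               */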
10430         if (plane->type == DRM_PLANE_TYPE_CURSOR) {
10431                 if (!enable || !new_plane_crtc ||
10432                         drm_atomic_plane_disabling(plane->state, new_plane_state))
10433                         return 0;
10434
10435                 new_acrtc = to_amdgpu_crtc(new_plane_crtc);
10436
10437                 if (new_plane_state->src_x != 0 || new_plane_state->src_y != 0) {
10438                         DRM_DEBUG_ATOMIC("Cropping not supported for cursor plane\n");
10439                         return -EINVAL;
10440                 }
10441
10442                 if (new_plane_state->fb) {
10443                         ret = dm_check_cursor_fb(new_acrtc, new_plane_state,
10444                                                  new_plane_state->fb);
10445                         if (ret)
10446                                 return ret;
10447                 }
10448
10449                 return 0;
10450         }
10451
10452         needs_reset = should_reset_plane(state, plane, old_plane_state,
10453                                          new_plane_state);
10454
10455         /* Remove any changed/removed planes */
10456         if (!enable) {
10457                 if (!needs_reset)
10458                         return 0;
10459
10460                 if (!old_plane_crtc)
10461                         return 0;
10462
10463                 old_crtc_state = drm_atomic_get_old_crtc_state(
10464                                 state, old_plane_crtc);
10465                 dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
10466
10467                 if (!dm_old_crtc_state->stream)
10468                         return 0;
10469
10470                 DRM_DEBUG_ATOMIC("Disabling DRM plane: %d on DRM crtc %d\n",
10471                                 plane->base.id, old_plane_crtc->base.id);
10472
10473                 ret = dm_atomic_get_state(state, &dm_state);
10474                 if (ret)
10475                         return ret;
10476
10477                 if (!dc_remove_plane_from_context(
10478                                 dc,
10479                                 dm_old_crtc_state->stream,
10480                                 dm_old_plane_state->dc_state,
10481                                 dm_state->context)) {
10482
10483                         return -EINVAL;
10484                 }
10485
10487                 dc_plane_state_release(dm_old_plane_state->dc_state);
10488                 dm_new_plane_state->dc_state = NULL;
10489
10490                 *lock_and_validation_needed = true;
10491
10492         } else { /* Add new planes */
10493                 struct dc_plane_state *dc_new_plane_state;
10494
10495                 if (drm_atomic_plane_disabling(plane->state, new_plane_state))
10496                         return 0;
10497
10498                 if (!new_plane_crtc)
10499                         return 0;
10500
10501                 new_crtc_state = drm_atomic_get_new_crtc_state(state, new_plane_crtc);
10502                 dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
10503
10504                 if (!dm_new_crtc_state->stream)
10505                         return 0;
10506
10507                 if (!needs_reset)
10508                         return 0;
10509
10510                 ret = dm_plane_helper_check_state(new_plane_state, new_crtc_state);
10511                 if (ret)
10512                         return ret;
10513
10514                 WARN_ON(dm_new_plane_state->dc_state);
10515
10516                 dc_new_plane_state = dc_create_plane_state(dc);
10517                 if (!dc_new_plane_state)
10518                         return -ENOMEM;
10519
10520                 DRM_DEBUG_ATOMIC("Enabling DRM plane: %d on DRM crtc %d\n",
10521                                  plane->base.id, new_plane_crtc->base.id);
10522
10523                 ret = fill_dc_plane_attributes(
10524                         drm_to_adev(new_plane_crtc->dev),
10525                         dc_new_plane_state,
10526                         new_plane_state,
10527                         new_crtc_state);
10528                 if (ret) {
10529                         dc_plane_state_release(dc_new_plane_state);
10530                         return ret;
10531                 }
10532
10533                 ret = dm_atomic_get_state(state, &dm_state);
10534                 if (ret) {
10535                         dc_plane_state_release(dc_new_plane_state);
10536                         return ret;
10537                 }
10538
10539                 /*
10540                  * Any atomic check errors that occur after this will
10541                  * not need a release. The plane state will be attached
10542                  * to the stream, and therefore part of the atomic
10543                  * state. It'll be released when the atomic state is
10544                  * cleaned.
10545                  */
10546                 if (!dc_add_plane_to_context(
10547                                 dc,
10548                                 dm_new_crtc_state->stream,
10549                                 dc_new_plane_state,
10550                                 dm_state->context)) {
10552                         dc_plane_state_release(dc_new_plane_state);
10553                         return -EINVAL;
10554                 }
10555
10556                 dm_new_plane_state->dc_state = dc_new_plane_state;
10557
10558                 /* Tell DC to do a full surface update every time there
10559                  * is a plane change. Inefficient, but works for now.
10560                  */
10561                 dm_new_plane_state->dc_state->update_flags.bits.full_update = 1;
10562
10563                 *lock_and_validation_needed = true;
10564         }
10565
10567         return ret;
10568 }
10569
10570 static int dm_check_crtc_cursor(struct drm_atomic_state *state,
10571                                 struct drm_crtc *crtc,
10572                                 struct drm_crtc_state *new_crtc_state)
10573 {
10574         struct drm_plane_state *new_cursor_state, *new_primary_state;
10575         int cursor_scale_w, cursor_scale_h, primary_scale_w, primary_scale_h;
10576
10577         /* On DCE and DCN there is no dedicated hardware cursor plane. We get a
10578          * cursor per pipe, but it inherits the scaling and positioning from
10579          * the underlying pipe. Check that the cursor plane's blending
10580          * properties match the primary plane's. */
10581
10582         new_cursor_state = drm_atomic_get_new_plane_state(state, crtc->cursor);
10583         new_primary_state = drm_atomic_get_new_plane_state(state, crtc->primary);
10584         if (!new_cursor_state || !new_primary_state ||
10585             !new_cursor_state->fb || !new_primary_state->fb) {
10586                 return 0;
10587         }
10588
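        /*
         * DRM plane source coordinates are 16.16 fixed point, so shifting
         * right by 16 gives the integer source size; the ratios are scaled
         * by 1000 so the comparison can be done in integers.
         */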
10589         cursor_scale_w = new_cursor_state->crtc_w * 1000 /
10590                          (new_cursor_state->src_w >> 16);
10591         cursor_scale_h = new_cursor_state->crtc_h * 1000 /
10592                          (new_cursor_state->src_h >> 16);
10593
10594         primary_scale_w = new_primary_state->crtc_w * 1000 /
10595                          (new_primary_state->src_w >> 16);
10596         primary_scale_h = new_primary_state->crtc_h * 1000 /
10597                          (new_primary_state->src_h >> 16);
10598
10599         if (cursor_scale_w != primary_scale_w ||
10600             cursor_scale_h != primary_scale_h) {
10601                 drm_dbg_atomic(crtc->dev, "Cursor plane scaling doesn't match primary plane\n");
10602                 return -EINVAL;
10603         }
10604
10605         return 0;
10606 }
10607
10608 #if defined(CONFIG_DRM_AMD_DC_DCN)
10609 static int add_affected_mst_dsc_crtcs(struct drm_atomic_state *state, struct drm_crtc *crtc)
10610 {
10611         struct drm_connector *connector;
10612         struct drm_connector_state *conn_state, *old_conn_state;
10613         struct amdgpu_dm_connector *aconnector = NULL;
10614         int i;
10615         for_each_oldnew_connector_in_state(state, connector, old_conn_state, conn_state, i) {
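                /*
                 * A connector being disabled has no CRTC in its new state,
                 * so fall back to the old state to find the CRTC it is
                 * leaving.
                 */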
10616                 if (!conn_state->crtc)
10617                         conn_state = old_conn_state;
10618
10619                 if (conn_state->crtc != crtc)
10620                         continue;
10621
10622                 aconnector = to_amdgpu_dm_connector(connector);
10623                 if (!aconnector->port || !aconnector->mst_port)
10624                         aconnector = NULL;
10625                 else
10626                         break;
10627         }
10628
10629         if (!aconnector)
10630                 return 0;
10631
10632         return drm_dp_mst_add_affected_dsc_crtcs(state, &aconnector->mst_port->mst_mgr);
10633 }
10634 #endif
10635
10636 static int validate_overlay(struct drm_atomic_state *state)
10637 {
10638         int i;
10639         struct drm_plane *plane;
10640         struct drm_plane_state *new_plane_state;
10641         struct drm_plane_state *primary_state, *overlay_state = NULL;
10642
10643         /* Check if primary plane is contained inside overlay */
10644         for_each_new_plane_in_state_reverse(state, plane, new_plane_state, i) {
10645                 if (plane->type == DRM_PLANE_TYPE_OVERLAY) {
10646                         if (drm_atomic_plane_disabling(plane->state, new_plane_state))
10647                                 return 0;
10648
10649                         overlay_state = new_plane_state;
10650                         continue;
10651                 }
10652         }
10653
10654         /* check if we're making changes to the overlay plane */
10655         if (!overlay_state)
10656                 return 0;
10657
10658         /* check if overlay plane is enabled */
10659         if (!overlay_state->crtc)
10660                 return 0;
10661
10662         /* find the primary plane for the CRTC that the overlay is enabled on */
10663         primary_state = drm_atomic_get_plane_state(state, overlay_state->crtc->primary);
10664         if (IS_ERR(primary_state))
10665                 return PTR_ERR(primary_state);
10666
10667         /* check if primary plane is enabled */
10668         if (!primary_state->crtc)
10669                 return 0;
10670
10671         /* Perform the bounds check to ensure the overlay plane covers the primary */
10672         if (primary_state->crtc_x < overlay_state->crtc_x ||
10673             primary_state->crtc_y < overlay_state->crtc_y ||
10674             primary_state->crtc_x + primary_state->crtc_w > overlay_state->crtc_x + overlay_state->crtc_w ||
10675             primary_state->crtc_y + primary_state->crtc_h > overlay_state->crtc_y + overlay_state->crtc_h) {
10676                 DRM_DEBUG_ATOMIC("Overlay plane is enabled with hardware cursor but does not fully cover primary plane\n");
10677                 return -EINVAL;
10678         }
10679
10680         return 0;
10681 }
10682
10683 /**
10684  * amdgpu_dm_atomic_check() - Atomic check implementation for AMDgpu DM.
10685  * @dev: The DRM device
10686  * @state: The atomic state to commit
10687  *
10688  * Validate that the given atomic state is programmable by DC into hardware.
10689  * This involves constructing a &struct dc_state reflecting the new hardware
10690  * state we wish to commit, then querying DC to see if it is programmable. It's
10691  * important not to modify the existing DC state. Otherwise, atomic_check
10692  * may unexpectedly commit hardware changes.
10693  *
10694  * When validating the DC state, it's important that the right locks are
10695  * acquired. For full updates case which removes/adds/updates streams on one
10696  * CRTC while flipping on another CRTC, acquiring global lock will guarantee
10697  * that any such full update commit will wait for completion of any outstanding
10698  * flip using DRMs synchronization events.
10699  *
10700  * Note that DM adds the affected connectors for all CRTCs in state, when that
10701  * might not seem necessary. This is because DC stream creation requires the
10702  * DC sink, which is tied to the DRM connector state. Cleaning this up should
10703  * be possible but non-trivial - a possible TODO item.
10704  *
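 * The check runs in phases: connector changes, CRTC modesets, plane
 * removals, CRTC disables followed by enables, plane additions, and
 * cursor/overlay validation; DC global validation runs last, and only
 * when a bandwidth-affecting change requires it.
 *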
10705  * Return: 0 on success, negative error code on failure.
10706  */
10707 static int amdgpu_dm_atomic_check(struct drm_device *dev,
10708                                   struct drm_atomic_state *state)
10709 {
10710         struct amdgpu_device *adev = drm_to_adev(dev);
10711         struct dm_atomic_state *dm_state = NULL;
10712         struct dc *dc = adev->dm.dc;
10713         struct drm_connector *connector;
10714         struct drm_connector_state *old_con_state, *new_con_state;
10715         struct drm_crtc *crtc;
10716         struct drm_crtc_state *old_crtc_state, *new_crtc_state;
10717         struct drm_plane *plane;
10718         struct drm_plane_state *old_plane_state, *new_plane_state;
10719         enum dc_status status;
10720         int ret, i;
10721         bool lock_and_validation_needed = false;
10722         struct dm_crtc_state *dm_old_crtc_state;
10723 #if defined(CONFIG_DRM_AMD_DC_DCN)
10724         struct dsc_mst_fairness_vars vars[MAX_PIPES];
10725 #endif
10726
10727         trace_amdgpu_dm_atomic_check_begin(state);
10728
10729         ret = drm_atomic_helper_check_modeset(dev, state);
10730         if (ret)
10731                 goto fail;
10732
10733         /* Check connector changes */
10734         for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) {
10735                 struct dm_connector_state *dm_old_con_state = to_dm_connector_state(old_con_state);
10736                 struct dm_connector_state *dm_new_con_state = to_dm_connector_state(new_con_state);
10737
10738                 /* Skip connectors that are disabled or part of modeset already. */
10739                 if (!old_con_state->crtc && !new_con_state->crtc)
10740                         continue;
10741
10742                 if (!new_con_state->crtc)
10743                         continue;
10744
10745                 new_crtc_state = drm_atomic_get_crtc_state(state, new_con_state->crtc);
10746                 if (IS_ERR(new_crtc_state)) {
10747                         ret = PTR_ERR(new_crtc_state);
10748                         goto fail;
10749                 }
10750
10751                 if (dm_old_con_state->abm_level !=
10752                     dm_new_con_state->abm_level)
10753                         new_crtc_state->connectors_changed = true;
10754         }
10755
10756 #if defined(CONFIG_DRM_AMD_DC_DCN)
10757         if (dc_resource_is_dsc_encoding_supported(dc)) {
10758                 for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
10759                         if (drm_atomic_crtc_needs_modeset(new_crtc_state)) {
10760                                 ret = add_affected_mst_dsc_crtcs(state, crtc);
10761                                 if (ret)
10762                                         goto fail;
10763                         }
10764                 }
10765         }
10766 #endif
10767         for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
10768                 dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
10769
10770                 if (!drm_atomic_crtc_needs_modeset(new_crtc_state) &&
10771                     !new_crtc_state->color_mgmt_changed &&
10772                     old_crtc_state->vrr_enabled == new_crtc_state->vrr_enabled &&
10773                     !dm_old_crtc_state->dsc_force_changed)
10774                         continue;
10775
10776                 ret = amdgpu_dm_verify_lut_sizes(new_crtc_state);
10777                 if (ret)
10778                         goto fail;
10779
10780                 if (!new_crtc_state->enable)
10781                         continue;
10782
10783                 ret = drm_atomic_add_affected_connectors(state, crtc);
10784                 if (ret)
10785                         goto fail;
10786
10787                 ret = drm_atomic_add_affected_planes(state, crtc);
10788                 if (ret)
10789                         goto fail;
10790
10791                 if (dm_old_crtc_state->dsc_force_changed)
10792                         new_crtc_state->mode_changed = true;
10793         }
10794
10795         /*
10796          * Add all primary and overlay planes on the CRTC to the state
10797          * whenever a plane is enabled to maintain correct z-ordering
10798          * and to enable fast surface updates.
10799          */
10800         drm_for_each_crtc(crtc, dev) {
10801                 bool modified = false;
10802
10803                 for_each_oldnew_plane_in_state(state, plane, old_plane_state, new_plane_state, i) {
10804                         if (plane->type == DRM_PLANE_TYPE_CURSOR)
10805                                 continue;
10806
10807                         if (new_plane_state->crtc == crtc ||
10808                             old_plane_state->crtc == crtc) {
10809                                 modified = true;
10810                                 break;
10811                         }
10812                 }
10813
10814                 if (!modified)
10815                         continue;
10816
10817                 drm_for_each_plane_mask(plane, state->dev, crtc->state->plane_mask) {
10818                         if (plane->type == DRM_PLANE_TYPE_CURSOR)
10819                                 continue;
10820
10821                         new_plane_state =
10822                                 drm_atomic_get_plane_state(state, plane);
10823
10824                         if (IS_ERR(new_plane_state)) {
10825                                 ret = PTR_ERR(new_plane_state);
10826                                 goto fail;
10827                         }
10828                 }
10829         }
10830
10831         /* Remove existing planes if they are modified */
10832         for_each_oldnew_plane_in_state_reverse(state, plane, old_plane_state, new_plane_state, i) {
10833                 ret = dm_update_plane_state(dc, state, plane,
10834                                             old_plane_state,
10835                                             new_plane_state,
10836                                             false,
10837                                             &lock_and_validation_needed);
10838                 if (ret)
10839                         goto fail;
10840         }
10841
10842         /* Disable all CRTCs that require disabling */
10843         for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
10844                 ret = dm_update_crtc_state(&adev->dm, state, crtc,
10845                                            old_crtc_state,
10846                                            new_crtc_state,
10847                                            false,
10848                                            &lock_and_validation_needed);
10849                 if (ret)
10850                         goto fail;
10851         }
10852
10853         /* Enable all CRTCs that require enabling */
10854         for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
10855                 ret = dm_update_crtc_state(&adev->dm, state, crtc,
10856                                            old_crtc_state,
10857                                            new_crtc_state,
10858                                            true,
10859                                            &lock_and_validation_needed);
10860                 if (ret)
10861                         goto fail;
10862         }
10863
10864         ret = validate_overlay(state);
10865         if (ret)
10866                 goto fail;
10867
10868         /* Add new/modified planes */
10869         for_each_oldnew_plane_in_state_reverse(state, plane, old_plane_state, new_plane_state, i) {
10870                 ret = dm_update_plane_state(dc, state, plane,
10871                                             old_plane_state,
10872                                             new_plane_state,
10873                                             true,
10874                                             &lock_and_validation_needed);
10875                 if (ret)
10876                         goto fail;
10877         }
10878
10879         /* Run this here since we want to validate the streams we created */
10880         ret = drm_atomic_helper_check_planes(dev, state);
10881         if (ret)
10882                 goto fail;
10883
10884         /* Check cursor planes scaling */
10885         for_each_new_crtc_in_state(state, crtc, new_crtc_state, i) {
10886                 ret = dm_check_crtc_cursor(state, crtc, new_crtc_state);
10887                 if (ret)
10888                         goto fail;
10889         }
10890
10891         if (state->legacy_cursor_update) {
10892                 /*
10893                  * This is a fast cursor update coming from the plane update
10894                  * helper, check if it can be done asynchronously for better
10895                  * performance.
10896                  */
10897                 state->async_update =
10898                         !drm_atomic_helper_async_check(dev, state);
10899
10900                 /*
10901                  * Skip the remaining global validation if this is an async
10902                  * update. Cursor updates can be done without affecting
10903                  * state or bandwidth calcs and this avoids the performance
10904                  * penalty of locking the private state object and
10905                  * allocating a new dc_state.
10906                  */
10907                 if (state->async_update)
10908                         return 0;
10909         }
10910
10911         /* Check scaling and underscan changes */
10912         /* TODO: Removed scaling changes validation due to inability to commit
10913          * new stream into context w/o causing full reset. Need to
10914          * decide how to handle.
10915          */
10916         for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) {
10917                 struct dm_connector_state *dm_old_con_state = to_dm_connector_state(old_con_state);
10918                 struct dm_connector_state *dm_new_con_state = to_dm_connector_state(new_con_state);
10919                 struct amdgpu_crtc *acrtc = to_amdgpu_crtc(dm_new_con_state->base.crtc);
10920
10921                 /* Skip any modesets/resets */
10922                 if (!acrtc || drm_atomic_crtc_needs_modeset(
10923                                 drm_atomic_get_new_crtc_state(state, &acrtc->base)))
10924                         continue;
10925
10926                 /* Skip anything that is not a scaling or underscan change */
10927                 if (!is_scaling_state_different(dm_new_con_state, dm_old_con_state))
10928                         continue;
10929
10930                 lock_and_validation_needed = true;
10931         }
10932
10933         /*
10934          * Streams and planes are reset when there are changes that affect
10935          * bandwidth. Anything that affects bandwidth needs to go through
10936          * DC global validation to ensure that the configuration can be applied
10937          * to hardware.
10938          *
10939          * We currently have to stall out here in atomic_check for outstanding
10940          * commits to finish because our IRQ handlers reference DRM state
10941          * directly - we can end up disabling interrupts too early if we
10942          * don't.
10943          *
10944          * TODO: Remove this stall and drop DM state private objects.
10945          */
10946         if (lock_and_validation_needed) {
10947                 ret = dm_atomic_get_state(state, &dm_state);
10948                 if (ret)
10949                         goto fail;
10950
10951                 ret = do_aquire_global_lock(dev, state);
10952                 if (ret)
10953                         goto fail;
10954
10955 #if defined(CONFIG_DRM_AMD_DC_DCN)
10956                 if (!compute_mst_dsc_configs_for_state(state, dm_state->context, vars)) {
                              ret = -EINVAL;
                              goto fail;
                      }
10958
10959                 ret = dm_update_mst_vcpi_slots_for_dsc(state, dm_state->context, vars);
10960                 if (ret)
10961                         goto fail;
10962 #endif
10963
10964                 /*
10965                  * Perform validation of MST topology in the state:
10966                  * We need to perform MST atomic check before calling
10967                  * dc_validate_global_state(), or there is a chance
10968                  * to get stuck in an infinite loop and hang eventually.
10969                  */
10970                 ret = drm_dp_mst_atomic_check(state);
10971                 if (ret)
10972                         goto fail;
10973                 status = dc_validate_global_state(dc, dm_state->context, false);
10974                 if (status != DC_OK) {
10975                         drm_dbg_atomic(dev,
10976                                        "DC global validation failure: %s (%d)\n",
10977                                        dc_status_to_str(status), status);
10978                         ret = -EINVAL;
10979                         goto fail;
10980                 }
10981         } else {
10982                 /*
10983                  * The commit is a fast update. Fast updates shouldn't change
10984                  * the DC context, affect global validation, and can have their
10985                  * commit work done in parallel with other commits not touching
10986                  * the same resource. If we have a new DC context as part of
10987                  * the DM atomic state from validation we need to free it and
10988                  * retain the existing one instead.
10989                  *
10990                  * Furthermore, since the DM atomic state only contains the DC
10991                  * context and can safely be annulled, we can free the state
10992                  * and clear the associated private object now to free
10993                  * some memory and avoid a possible use-after-free later.
10994                  */
10995
10996                 for (i = 0; i < state->num_private_objs; i++) {
10997                         struct drm_private_obj *obj = state->private_objs[i].ptr;
10998
10999                         if (obj->funcs == adev->dm.atomic_obj.funcs) {
11000                                 int j = state->num_private_objs - 1;
11001
11002                                 dm_atomic_destroy_state(obj,
11003                                                 state->private_objs[i].state);
11004
11005                                 /* If i is not at the end of the array then the
11006                                  * last element needs to be moved to where i was
11007                                  * before the array can safely be truncated.
11008                                  */
11009                                 if (i != j)
11010                                         state->private_objs[i] =
11011                                                 state->private_objs[j];
11012
11013                                 state->private_objs[j].ptr = NULL;
11014                                 state->private_objs[j].state = NULL;
11015                                 state->private_objs[j].old_state = NULL;
11016                                 state->private_objs[j].new_state = NULL;
11017
11018                                 state->num_private_objs = j;
11019                                 break;
11020                         }
11021                 }
11022         }
11023
11024         /* Store the overall update type for use later in atomic check. */
11025         for_each_new_crtc_in_state(state, crtc, new_crtc_state, i) {
11026                 struct dm_crtc_state *dm_new_crtc_state =
11027                         to_dm_crtc_state(new_crtc_state);
11028
11029                 dm_new_crtc_state->update_type = lock_and_validation_needed ?
11030                                                          UPDATE_TYPE_FULL :
11031                                                          UPDATE_TYPE_FAST;
11032         }
11033
11034         /* Must be success */
11035         WARN_ON(ret);
11036
11037         trace_amdgpu_dm_atomic_check_finish(state, ret);
11038
11039         return ret;
11040
11041 fail:
11042         if (ret == -EDEADLK)
11043                 DRM_DEBUG_DRIVER("Atomic check stopped to avoid deadlock.\n");
11044         else if (ret == -EINTR || ret == -EAGAIN || ret == -ERESTARTSYS)
11045                 DRM_DEBUG_DRIVER("Atomic check stopped due to signal.\n");
11046         else
11047                 DRM_DEBUG_DRIVER("Atomic check failed with err: %d\n", ret);
11048
11049         trace_amdgpu_dm_atomic_check_finish(state, ret);
11050
11051         return ret;
11052 }
11053
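/*
 * Query the sink's DPCD to check whether it can ignore the MSA timing
 * parameters (DP_MSA_TIMING_PAR_IGNORED in DP_DOWN_STREAM_PORT_COUNT),
 * which the FreeSync capability check below requires for DP/eDP sinks.
 */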
11054 static bool is_dp_capable_without_timing_msa(struct dc *dc,
11055                                              struct amdgpu_dm_connector *amdgpu_dm_connector)
11056 {
11057         uint8_t dpcd_data;
11058         bool capable = false;
11059
11060         if (amdgpu_dm_connector->dc_link &&
11061                 dm_helpers_dp_read_dpcd(
11062                                 NULL,
11063                                 amdgpu_dm_connector->dc_link,
11064                                 DP_DOWN_STREAM_PORT_COUNT,
11065                                 &dpcd_data,
11066                                 sizeof(dpcd_data))) {
11067                 capable = (dpcd_data & DP_MSA_TIMING_PAR_IGNORED) ? true : false;
11068         }
11069
11070         return capable;
11071 }
11072
11073 static bool dm_edid_parser_send_cea(struct amdgpu_display_manager *dm,
11074                 unsigned int offset,
11075                 unsigned int total_length,
11076                 uint8_t *data,
11077                 unsigned int length,
11078                 struct amdgpu_hdmi_vsdb_info *vsdb)
11079 {
11080         bool res;
11081         union dmub_rb_cmd cmd;
11082         struct dmub_cmd_send_edid_cea *input;
11083         struct dmub_cmd_edid_cea_output *output;
11084
11085         if (length > DMUB_EDID_CEA_DATA_CHUNK_BYTES)
11086                 return false;
11087
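        /*
         * Build a DMUB EDID CEA command: the payload carries one chunk of
         * the CEA extension block, while offset and total_length let the
         * firmware reassemble and parse the block across several commands.
         */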
11088         memset(&cmd, 0, sizeof(cmd));
11089
11090         input = &cmd.edid_cea.data.input;
11091
11092         cmd.edid_cea.header.type = DMUB_CMD__EDID_CEA;
11093         cmd.edid_cea.header.sub_type = 0;
11094         cmd.edid_cea.header.payload_bytes =
11095                 sizeof(cmd.edid_cea) - sizeof(cmd.edid_cea.header);
11096         input->offset = offset;
11097         input->length = length;
11098         input->total_length = total_length;
11099         memcpy(input->payload, data, length);
11100
11101         res = dc_dmub_srv_cmd_with_reply_data(dm->dc->ctx->dmub_srv, &cmd);
11102         if (!res) {
11103                 DRM_ERROR("EDID CEA parser failed\n");
11104                 return false;
11105         }
11106
11107         output = &cmd.edid_cea.data.output;
11108
11109         if (output->type == DMUB_CMD__EDID_CEA_ACK) {
11110                 if (!output->ack.success) {
11111                         DRM_ERROR("EDID CEA ack failed at offset %u\n",
11112                                         output->ack.offset);
11113                 }
11114         } else if (output->type == DMUB_CMD__EDID_CEA_AMD_VSDB) {
11115                 if (!output->amd_vsdb.vsdb_found)
11116                         return false;
11117
11118                 vsdb->freesync_supported = output->amd_vsdb.freesync_supported;
11119                 vsdb->amd_vsdb_version = output->amd_vsdb.amd_vsdb_version;
11120                 vsdb->min_refresh_rate_hz = output->amd_vsdb.min_frame_rate;
11121                 vsdb->max_refresh_rate_hz = output->amd_vsdb.max_frame_rate;
11122         } else {
11123                 DRM_WARN("Unknown EDID CEA parser results\n");
11124                 return false;
11125         }
11126
11127         return true;
11128 }
11129
11130 static bool parse_edid_cea_dmcu(struct amdgpu_display_manager *dm,
11131                 uint8_t *edid_ext, int len,
11132                 struct amdgpu_hdmi_vsdb_info *vsdb_info)
11133 {
11134         int i;
11135
11136         /* send extension block to DMCU for parsing */
11137         for (i = 0; i < len; i += 8) {
11138                 bool res;
11139                 int offset;
11140
11141                 /* send 8 bytes at a time */
11142                 if (!dc_edid_parser_send_cea(dm->dc, i, len, &edid_ext[i], 8))
11143                         return false;
11144
11145                 if (i + 8 == len) {
11146                         /* entire EDID block sent, expect the parse result */
11147                         int version, min_rate, max_rate;
11148
11149                         res = dc_edid_parser_recv_amd_vsdb(dm->dc, &version, &min_rate, &max_rate);
11150                         if (res) {
11151                                 /* AMD VSDB found */
11152                                 vsdb_info->freesync_supported = 1;
11153                                 vsdb_info->amd_vsdb_version = version;
11154                                 vsdb_info->min_refresh_rate_hz = min_rate;
11155                                 vsdb_info->max_refresh_rate_hz = max_rate;
11156                                 return true;
11157                         }
11158                         /* not an AMD VSDB */
11159                         return false;
11160                 }
11161
11162                 /* check for ack */
11163                 res = dc_edid_parser_recv_cea_ack(dm->dc, &offset);
11164                 if (!res)
11165                         return false;
11166         }
11167
11168         return false;
11169 }
11170
11171 static bool parse_edid_cea_dmub(struct amdgpu_display_manager *dm,
11172                 uint8_t *edid_ext, int len,
11173                 struct amdgpu_hdmi_vsdb_info *vsdb_info)
11174 {
11175         int i;
11176
11177         /* send extension block to DMUB for parsing */
11178         for (i = 0; i < len; i += 8) {
11179                 /* send 8 bytes at a time */
11180                 if (!dm_edid_parser_send_cea(dm, i, len, &edid_ext[i], 8, vsdb_info))
11181                         return false;
11182         }
11183
11184         return vsdb_info->freesync_supported;
11185 }
11186
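/* Route to the DMUB-based parser when DMUB firmware is in use, else DMCU. */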
11187 static bool parse_edid_cea(struct amdgpu_dm_connector *aconnector,
11188                 uint8_t *edid_ext, int len,
11189                 struct amdgpu_hdmi_vsdb_info *vsdb_info)
11190 {
11191         struct amdgpu_device *adev = drm_to_adev(aconnector->base.dev);
11192
11193         if (adev->dm.dmub_srv)
11194                 return parse_edid_cea_dmub(&adev->dm, edid_ext, len, vsdb_info);
11195         else
11196                 return parse_edid_cea_dmcu(&adev->dm, edid_ext, len, vsdb_info);
11197 }
11198
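/*
 * Locate the CEA extension block in @edid and feed it to the CEA parser.
 * Returns the index of the CEA extension when an AMD VSDB with usable
 * FreeSync information was found, -ENODEV otherwise.
 */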
11199 static int parse_hdmi_amd_vsdb(struct amdgpu_dm_connector *aconnector,
11200                 struct edid *edid, struct amdgpu_hdmi_vsdb_info *vsdb_info)
11201 {
11202         uint8_t *edid_ext = NULL;
11203         int i;
11204         bool valid_vsdb_found = false;
11205
11206         /*----- drm_find_cea_extension() -----*/
11207         /* No EDID or EDID extensions */
11208         if (edid == NULL || edid->extensions == 0)
11209                 return -ENODEV;
11210
11211         /* Find CEA extension */
11212         for (i = 0; i < edid->extensions; i++) {
11213                 edid_ext = (uint8_t *)edid + EDID_LENGTH * (i + 1);
11214                 if (edid_ext[0] == CEA_EXT)
11215                         break;
11216         }
11217
11218         if (i == edid->extensions)
11219                 return -ENODEV;
11220
11221         /*----- cea_db_offsets() -----*/
11222         if (edid_ext[0] != CEA_EXT)
11223                 return -ENODEV;
11224
11225         valid_vsdb_found = parse_edid_cea(aconnector, edid_ext, EDID_LENGTH, vsdb_info);
11226
11227         return valid_vsdb_found ? i : -ENODEV;
11228 }
11229
11230 void amdgpu_dm_update_freesync_caps(struct drm_connector *connector,
11231                                         struct edid *edid)
11232 {
11233         int i = 0;
11234         struct detailed_timing *timing;
11235         struct detailed_non_pixel *data;
11236         struct detailed_data_monitor_range *range;
11237         struct amdgpu_dm_connector *amdgpu_dm_connector =
11238                         to_amdgpu_dm_connector(connector);
11239         struct dm_connector_state *dm_con_state = NULL;
11240
11241         struct drm_device *dev = connector->dev;
11242         struct amdgpu_device *adev = drm_to_adev(dev);
11243         bool freesync_capable = false;
11244         struct amdgpu_hdmi_vsdb_info vsdb_info = {0};
11245
11246         if (!connector->state) {
11247                 DRM_ERROR("%s - Connector has no state\n", __func__);
11248                 goto update;
11249         }
11250
11251         if (!edid) {
11252                 dm_con_state = to_dm_connector_state(connector->state);
11253
11254                 amdgpu_dm_connector->min_vfreq = 0;
11255                 amdgpu_dm_connector->max_vfreq = 0;
11256                 amdgpu_dm_connector->pixel_clock_mhz = 0;
11257
11258                 goto update;
11259         }
11260
11261         dm_con_state = to_dm_connector_state(connector->state);
11262
11263         if (!amdgpu_dm_connector->dc_sink) {
11264                 DRM_ERROR("dc_sink is NULL, could not update FreeSync capabilities.\n");
11265                 goto update;
11266         }
11267         if (!adev->dm.freesync_module)
11268                 goto update;
11270
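        /*
         * For DP/eDP sinks that can ignore the MSA timing parameters, scan
         * the EDID detailed timing descriptors for a monitor range block to
         * obtain the supported min/max refresh rates.
         */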
11271         if (amdgpu_dm_connector->dc_sink->sink_signal == SIGNAL_TYPE_DISPLAY_PORT
11272                 || amdgpu_dm_connector->dc_sink->sink_signal == SIGNAL_TYPE_EDP) {
11273                 bool edid_check_required = false;
11274
11275                 if (edid) {
11276                         edid_check_required = is_dp_capable_without_timing_msa(
11277                                                 adev->dm.dc,
11278                                                 amdgpu_dm_connector);
11279                 }
11280
11281                 if (edid_check_required && (edid->version > 1 ||
11282                     (edid->version == 1 && edid->revision > 1))) {
11283                         for (i = 0; i < 4; i++) {
11284
11285                                 timing  = &edid->detailed_timings[i];
11286                                 data    = &timing->data.other_data;
11287                                 range   = &data->data.range;
11288                                 /*
11289                                  * Check if monitor has continuous frequency mode
11290                                  */
11291                                 if (data->type != EDID_DETAIL_MONITOR_RANGE)
11292                                         continue;
11293                                 /*
11294                                  * Check for flag range limits only. If flag == 1 then
11295                                  * no additional timing information provided.
11296                                  * Default GTF, GTF Secondary curve and CVT are not
11297                                  * supported
11298                                  */
11299                                 if (range->flags != 1)
11300                                         continue;
11301
11302                                 amdgpu_dm_connector->min_vfreq = range->min_vfreq;
11303                                 amdgpu_dm_connector->max_vfreq = range->max_vfreq;
11304                                 amdgpu_dm_connector->pixel_clock_mhz =
11305                                         range->pixel_clock_mhz * 10;
11306
11307                                 connector->display_info.monitor_range.min_vfreq = range->min_vfreq;
11308                                 connector->display_info.monitor_range.max_vfreq = range->max_vfreq;
11309
11310                                 break;
11311                         }
11312
11313                         if (amdgpu_dm_connector->max_vfreq -
11314                             amdgpu_dm_connector->min_vfreq > 10) {
11315
11316                                 freesync_capable = true;
11317                         }
11318                 }
11319         } else if (edid && amdgpu_dm_connector->dc_sink->sink_signal == SIGNAL_TYPE_HDMI_TYPE_A) {
11320                 i = parse_hdmi_amd_vsdb(amdgpu_dm_connector, edid, &vsdb_info);
11321                 if (i >= 0 && vsdb_info.freesync_supported) {
11322                         timing  = &edid->detailed_timings[i];
11323                         data    = &timing->data.other_data;
11324
11325                         amdgpu_dm_connector->min_vfreq = vsdb_info.min_refresh_rate_hz;
11326                         amdgpu_dm_connector->max_vfreq = vsdb_info.max_refresh_rate_hz;
11327                         if (amdgpu_dm_connector->max_vfreq - amdgpu_dm_connector->min_vfreq > 10)
11328                                 freesync_capable = true;
11329
11330                         connector->display_info.monitor_range.min_vfreq = vsdb_info.min_refresh_rate_hz;
11331                         connector->display_info.monitor_range.max_vfreq = vsdb_info.max_refresh_rate_hz;
11332                 }
11333         }
11334
11335 update:
11336         if (dm_con_state)
11337                 dm_con_state->freesync_capable = freesync_capable;
11338
11339         if (connector->vrr_capable_property)
11340                 drm_connector_set_vrr_capable_property(connector,
11341                                                        freesync_capable);
11342 }
11343
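/*
 * Apply the force_timing_sync setting to every stream in the current DC
 * state and re-trigger CRTC synchronization, all under the DC lock.
 */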
11344 void amdgpu_dm_trigger_timing_sync(struct drm_device *dev)
11345 {
11346         struct amdgpu_device *adev = drm_to_adev(dev);
11347         struct dc *dc = adev->dm.dc;
11348         int i;
11349
11350         mutex_lock(&adev->dm.dc_lock);
11351         if (dc->current_state) {
11352                 for (i = 0; i < dc->current_state->stream_count; ++i)
11353                         dc->current_state->streams[i]
11354                                 ->triggered_crtc_reset.enabled =
11355                                 adev->dm.force_timing_sync;
11356
11357                 dm_enable_per_frame_crtc_master_sync(dc->current_state);
11358                 dc_trigger_sync(dc, dc->current_state);
11359         }
11360         mutex_unlock(&adev->dm.dc_lock);
11361 }
11362
11363 void dm_write_reg_func(const struct dc_context *ctx, uint32_t address,
11364                        uint32_t value, const char *func_name)
11365 {
11366 #ifdef DM_CHECK_ADDR_0
11367         if (address == 0) {
11368                 DC_ERR("invalid register write. address = 0\n");
11369                 return;
11370         }
11371 #endif
11372         cgs_write_register(ctx->cgs_device, address, value);
11373         trace_amdgpu_dc_wreg(&ctx->perf_trace->write_count, address, value);
11374 }
11375
11376 uint32_t dm_read_reg_func(const struct dc_context *ctx, uint32_t address,
11377                           const char *func_name)
11378 {
11379         uint32_t value;
11380 #ifdef DM_CHECK_ADDR_0
11381         if (address == 0) {
11382                 DC_ERR("invalid register read; address = 0\n");
11383                 return 0;
11384         }
11385 #endif
11386
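        /*
         * Reads cannot be serviced while a DMUB register-write gather is in
         * progress (unless burst writes are allowed), so assert and return 0
         * instead of issuing the read.
         */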
11387         if (ctx->dmub_srv &&
11388             ctx->dmub_srv->reg_helper_offload.gather_in_progress &&
11389             !ctx->dmub_srv->reg_helper_offload.should_burst_write) {
11390                 ASSERT(false);
11391                 return 0;
11392         }
11393
11394         value = cgs_read_register(ctx->cgs_device, address);
11395
11396         trace_amdgpu_dc_rreg(&ctx->perf_trace->read_count, address, value);
11397
11398         return value;
11399 }
11400
11401 int amdgpu_dm_process_dmub_aux_transfer_sync(struct dc_context *ctx, unsigned int linkIndex,
11402                                 struct aux_payload *payload, enum aux_return_code_type *operation_result)
11403 {
11404         struct amdgpu_device *adev = ctx->driver_context;
11405         int ret = 0;
11406
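        /*
         * Kick off the AUX transfer on the DMUB asynchronously, then block
         * until the DMUB notify handler signals completion or the 10 second
         * timeout expires.
         */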
11407         dc_process_dmub_aux_transfer_async(ctx->dc, linkIndex, payload);
11408         ret = wait_for_completion_interruptible_timeout(&adev->dm.dmub_aux_transfer_done, 10 * HZ);
11409         if (ret == 0) {
11410                 *operation_result = AUX_RET_ERROR_TIMEOUT;
11411                 return -1;
11412         }
11413         *operation_result = (enum aux_return_code_type)adev->dm.dmub_notify->result;
11414
11415         if (adev->dm.dmub_notify->result == AUX_RET_SUCCESS) {
11416                 (*payload->reply) = adev->dm.dmub_notify->aux_reply.command;
11417
11418                 /* For the read case, copy the reply data into the payload */
11419                 if (!payload->write && adev->dm.dmub_notify->aux_reply.length &&
11420                     (*payload->reply == AUX_TRANSACTION_REPLY_AUX_ACK))
11421                         memcpy(payload->data, adev->dm.dmub_notify->aux_reply.data,
11422                                adev->dm.dmub_notify->aux_reply.length);
11423         }
11424
11425         return adev->dm.dmub_notify->aux_reply.length;
11426 }