drm/amdgpu/display: add helper functions to get/set backlight (v2)
drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c (platform/kernel/linux-starfive.git)
1 /*
2  * Copyright 2015 Advanced Micro Devices, Inc.
3  *
4  * Permission is hereby granted, free of charge, to any person obtaining a
5  * copy of this software and associated documentation files (the "Software"),
6  * to deal in the Software without restriction, including without limitation
7  * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8  * and/or sell copies of the Software, and to permit persons to whom the
9  * Software is furnished to do so, subject to the following conditions:
10  *
11  * The above copyright notice and this permission notice shall be included in
12  * all copies or substantial portions of the Software.
13  *
14  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
17  * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18  * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19  * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20  * OTHER DEALINGS IN THE SOFTWARE.
21  *
22  * Authors: AMD
23  *
24  */
25
26 /* The caprices of the preprocessor require that this be declared right here */
27 #define CREATE_TRACE_POINTS
28
29 #include "dm_services_types.h"
30 #include "dc.h"
31 #include "dc/inc/core_types.h"
32 #include "dal_asic_id.h"
33 #include "dmub/dmub_srv.h"
34 #include "dc/inc/hw/dmcu.h"
35 #include "dc/inc/hw/abm.h"
36 #include "dc/dc_dmub_srv.h"
37 #include "dc/dc_edid_parser.h"
38 #include "dc/dc_stat.h"
39 #include "amdgpu_dm_trace.h"
40
41 #include "vid.h"
42 #include "amdgpu.h"
43 #include "amdgpu_display.h"
44 #include "amdgpu_ucode.h"
45 #include "atom.h"
46 #include "amdgpu_dm.h"
47 #ifdef CONFIG_DRM_AMD_DC_HDCP
48 #include "amdgpu_dm_hdcp.h"
49 #include <drm/drm_hdcp.h>
50 #endif
51 #include "amdgpu_pm.h"
52
53 #include "amd_shared.h"
54 #include "amdgpu_dm_irq.h"
55 #include "dm_helpers.h"
56 #include "amdgpu_dm_mst_types.h"
57 #if defined(CONFIG_DEBUG_FS)
58 #include "amdgpu_dm_debugfs.h"
59 #endif
60
61 #include "ivsrcid/ivsrcid_vislands30.h"
62
63 #include "i2caux_interface.h"
64 #include <linux/module.h>
65 #include <linux/moduleparam.h>
66 #include <linux/types.h>
67 #include <linux/pm_runtime.h>
68 #include <linux/pci.h>
69 #include <linux/firmware.h>
70 #include <linux/component.h>
71
72 #include <drm/drm_atomic.h>
73 #include <drm/drm_atomic_uapi.h>
74 #include <drm/drm_atomic_helper.h>
75 #include <drm/drm_dp_mst_helper.h>
76 #include <drm/drm_fb_helper.h>
77 #include <drm/drm_fourcc.h>
78 #include <drm/drm_edid.h>
79 #include <drm/drm_vblank.h>
80 #include <drm/drm_audio_component.h>
81
82 #if defined(CONFIG_DRM_AMD_DC_DCN)
83 #include "ivsrcid/dcn/irqsrcs_dcn_1_0.h"
84
85 #include "dcn/dcn_1_0_offset.h"
86 #include "dcn/dcn_1_0_sh_mask.h"
87 #include "soc15_hw_ip.h"
88 #include "vega10_ip_offset.h"
89
90 #include "soc15_common.h"
91 #endif
92
93 #include "modules/inc/mod_freesync.h"
94 #include "modules/power/power_helpers.h"
95 #include "modules/inc/mod_info_packet.h"
96
97 #define FIRMWARE_RENOIR_DMUB "amdgpu/renoir_dmcub.bin"
98 MODULE_FIRMWARE(FIRMWARE_RENOIR_DMUB);
99 #define FIRMWARE_SIENNA_CICHLID_DMUB "amdgpu/sienna_cichlid_dmcub.bin"
100 MODULE_FIRMWARE(FIRMWARE_SIENNA_CICHLID_DMUB);
101 #define FIRMWARE_NAVY_FLOUNDER_DMUB "amdgpu/navy_flounder_dmcub.bin"
102 MODULE_FIRMWARE(FIRMWARE_NAVY_FLOUNDER_DMUB);
103 #define FIRMWARE_GREEN_SARDINE_DMUB "amdgpu/green_sardine_dmcub.bin"
104 MODULE_FIRMWARE(FIRMWARE_GREEN_SARDINE_DMUB);
105 #define FIRMWARE_VANGOGH_DMUB "amdgpu/vangogh_dmcub.bin"
106 MODULE_FIRMWARE(FIRMWARE_VANGOGH_DMUB);
107 #define FIRMWARE_DIMGREY_CAVEFISH_DMUB "amdgpu/dimgrey_cavefish_dmcub.bin"
108 MODULE_FIRMWARE(FIRMWARE_DIMGREY_CAVEFISH_DMUB);
109 #define FIRMWARE_BEIGE_GOBY_DMUB "amdgpu/beige_goby_dmcub.bin"
110 MODULE_FIRMWARE(FIRMWARE_BEIGE_GOBY_DMUB);
111
112 #define FIRMWARE_RAVEN_DMCU             "amdgpu/raven_dmcu.bin"
113 MODULE_FIRMWARE(FIRMWARE_RAVEN_DMCU);
114
115 #define FIRMWARE_NAVI12_DMCU            "amdgpu/navi12_dmcu.bin"
116 MODULE_FIRMWARE(FIRMWARE_NAVI12_DMCU);
117
118 /* Number of bytes in PSP header for firmware. */
119 #define PSP_HEADER_BYTES 0x100
120
121 /* Number of bytes in PSP footer for firmware. */
122 #define PSP_FOOTER_BYTES 0x100
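
/*
 * Editorial sketch (not driver code): based on the offsets used in
 * dm_dmub_hw_init() below, the DMCUB firmware blob is laid out roughly as:
 *
 *   ucode_array_offset_bytes
 *   |
 *   v
 *   +------------+---------------------+------------+-----------------+
 *   | PSP header |     inst_const      | PSP footer |    bss/data     |
 *   | 0x100 bytes| (program, cw0)      | 0x100 bytes| bss_data_bytes  |
 *   +------------+---------------------+------------+-----------------+
 *   |<-------------- inst_const_bytes ------------->|
 */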
123
124 /**
125  * DOC: overview
126  *
127  * The AMDgpu display manager, **amdgpu_dm** (or even simpler,
128  * **dm**) sits between DRM and DC. It acts as a liaison, converting DRM
129  * requests into DC requests, and DC responses into DRM responses.
130  *
131  * The root control structure is &struct amdgpu_display_manager.
132  */
133
134 /* basic init/fini API */
135 static int amdgpu_dm_init(struct amdgpu_device *adev);
136 static void amdgpu_dm_fini(struct amdgpu_device *adev);
137 static bool is_freesync_video_mode(const struct drm_display_mode *mode, struct amdgpu_dm_connector *aconnector);
138
139 static enum drm_mode_subconnector get_subconnector_type(struct dc_link *link)
140 {
141         switch (link->dpcd_caps.dongle_type) {
142         case DISPLAY_DONGLE_NONE:
143                 return DRM_MODE_SUBCONNECTOR_Native;
144         case DISPLAY_DONGLE_DP_VGA_CONVERTER:
145                 return DRM_MODE_SUBCONNECTOR_VGA;
146         case DISPLAY_DONGLE_DP_DVI_CONVERTER:
147         case DISPLAY_DONGLE_DP_DVI_DONGLE:
148                 return DRM_MODE_SUBCONNECTOR_DVID;
149         case DISPLAY_DONGLE_DP_HDMI_CONVERTER:
150         case DISPLAY_DONGLE_DP_HDMI_DONGLE:
151                 return DRM_MODE_SUBCONNECTOR_HDMIA;
152         case DISPLAY_DONGLE_DP_HDMI_MISMATCHED_DONGLE:
153         default:
154                 return DRM_MODE_SUBCONNECTOR_Unknown;
155         }
156 }
157
158 static void update_subconnector_property(struct amdgpu_dm_connector *aconnector)
159 {
160         struct dc_link *link = aconnector->dc_link;
161         struct drm_connector *connector = &aconnector->base;
162         enum drm_mode_subconnector subconnector = DRM_MODE_SUBCONNECTOR_Unknown;
163
164         if (connector->connector_type != DRM_MODE_CONNECTOR_DisplayPort)
165                 return;
166
167         if (aconnector->dc_sink)
168                 subconnector = get_subconnector_type(link);
169
170         drm_object_property_set_value(&connector->base,
171                         connector->dev->mode_config.dp_subconnector_property,
172                         subconnector);
173 }
174
175 /*
176  * initializes drm_device display related structures, based on the information
177  * provided by DAL. The drm structures are: drm_crtc, drm_connector,
178  * drm_encoder, drm_mode_config
179  *
180  * Returns 0 on success
181  */
182 static int amdgpu_dm_initialize_drm_device(struct amdgpu_device *adev);
183 /* removes and deallocates the drm structures, created by the above function */
184 static void amdgpu_dm_destroy_drm_device(struct amdgpu_display_manager *dm);
185
186 static int amdgpu_dm_plane_init(struct amdgpu_display_manager *dm,
187                                 struct drm_plane *plane,
188                                 unsigned long possible_crtcs,
189                                 const struct dc_plane_cap *plane_cap);
190 static int amdgpu_dm_crtc_init(struct amdgpu_display_manager *dm,
191                                struct drm_plane *plane,
192                                uint32_t link_index);
193 static int amdgpu_dm_connector_init(struct amdgpu_display_manager *dm,
194                                     struct amdgpu_dm_connector *amdgpu_dm_connector,
195                                     uint32_t link_index,
196                                     struct amdgpu_encoder *amdgpu_encoder);
197 static int amdgpu_dm_encoder_init(struct drm_device *dev,
198                                   struct amdgpu_encoder *aencoder,
199                                   uint32_t link_index);
200
201 static int amdgpu_dm_connector_get_modes(struct drm_connector *connector);
202
203 static void amdgpu_dm_atomic_commit_tail(struct drm_atomic_state *state);
204
205 static int amdgpu_dm_atomic_check(struct drm_device *dev,
206                                   struct drm_atomic_state *state);
207
208 static void handle_cursor_update(struct drm_plane *plane,
209                                  struct drm_plane_state *old_plane_state);
210
211 static void amdgpu_dm_set_psr_caps(struct dc_link *link);
212 static bool amdgpu_dm_psr_enable(struct dc_stream_state *stream);
213 static bool amdgpu_dm_link_setup_psr(struct dc_stream_state *stream);
214 static bool amdgpu_dm_psr_disable(struct dc_stream_state *stream);
215 static bool amdgpu_dm_psr_disable_all(struct amdgpu_display_manager *dm);
216
217 static const struct drm_format_info *
218 amd_get_format_info(const struct drm_mode_fb_cmd2 *cmd);
219
220 static bool
221 is_timing_unchanged_for_freesync(struct drm_crtc_state *old_crtc_state,
222                                  struct drm_crtc_state *new_crtc_state);
223 /**
224  * dm_vblank_get_counter() - Get counter for number of vertical blanks
225  * @adev: [in] desired amdgpu device
226  * @crtc: [in] which CRTC to get the counter from
227  *
228  * Return: counter for vertical blanks
229  */
236 static u32 dm_vblank_get_counter(struct amdgpu_device *adev, int crtc)
237 {
238         if (crtc >= adev->mode_info.num_crtc)
239                 return 0;
240         else {
241                 struct amdgpu_crtc *acrtc = adev->mode_info.crtcs[crtc];
242
243                 if (acrtc->dm_irq_params.stream == NULL) {
244                         DRM_ERROR("dc_stream_state is NULL for crtc '%d'!\n",
245                                   crtc);
246                         return 0;
247                 }
248
249                 return dc_stream_get_vblank_counter(acrtc->dm_irq_params.stream);
250         }
251 }
252
253 static int dm_crtc_get_scanoutpos(struct amdgpu_device *adev, int crtc,
254                                   u32 *vbl, u32 *position)
255 {
256         uint32_t v_blank_start, v_blank_end, h_position, v_position;
257
258         if ((crtc < 0) || (crtc >= adev->mode_info.num_crtc))
259                 return -EINVAL;
260         else {
261                 struct amdgpu_crtc *acrtc = adev->mode_info.crtcs[crtc];
262
263                 if (acrtc->dm_irq_params.stream == NULL) {
264                         DRM_ERROR("dc_stream_state is NULL for crtc '%d'!\n",
265                                   crtc);
266                         return 0;
267                 }
268
269                 /*
270                  * TODO: rework base driver to use values directly.
271                  * For now, parse it back into reg-format.
272                  */
273                 dc_stream_get_scanoutpos(acrtc->dm_irq_params.stream,
274                                          &v_blank_start,
275                                          &v_blank_end,
276                                          &h_position,
277                                          &v_position);
278
279                 *position = v_position | (h_position << 16);
280                 *vbl = v_blank_start | (v_blank_end << 16);
281         }
282
283         return 0;
284 }
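
/*
 * Illustrative note (not driver code): dm_crtc_get_scanoutpos() packs two
 * 16-bit fields per output parameter, mirroring the legacy register
 * format, so a caller could unpack them as:
 *
 *   u32 hpos          = position >> 16;
 *   u32 vpos          = position & 0xffff;
 *   u32 v_blank_start = vbl & 0xffff;
 *   u32 v_blank_end   = vbl >> 16;
 */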
285
286 static bool dm_is_idle(void *handle)
287 {
288         /* XXX todo */
289         return true;
290 }
291
292 static int dm_wait_for_idle(void *handle)
293 {
294         /* XXX todo */
295         return 0;
296 }
297
298 static bool dm_check_soft_reset(void *handle)
299 {
300         return false;
301 }
302
303 static int dm_soft_reset(void *handle)
304 {
305         /* XXX todo */
306         return 0;
307 }
308
309 static struct amdgpu_crtc *
310 get_crtc_by_otg_inst(struct amdgpu_device *adev,
311                      int otg_inst)
312 {
313         struct drm_device *dev = adev_to_drm(adev);
314         struct drm_crtc *crtc;
315         struct amdgpu_crtc *amdgpu_crtc;
316
317         if (otg_inst == -1) {
318                 WARN_ON(1);
319                 return adev->mode_info.crtcs[0];
320         }
321
322         list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
323                 amdgpu_crtc = to_amdgpu_crtc(crtc);
324
325                 if (amdgpu_crtc->otg_inst == otg_inst)
326                         return amdgpu_crtc;
327         }
328
329         return NULL;
330 }
331
332 static inline bool amdgpu_dm_vrr_active_irq(struct amdgpu_crtc *acrtc)
333 {
334         return acrtc->dm_irq_params.freesync_config.state ==
335                        VRR_STATE_ACTIVE_VARIABLE ||
336                acrtc->dm_irq_params.freesync_config.state ==
337                        VRR_STATE_ACTIVE_FIXED;
338 }
339
340 static inline bool amdgpu_dm_vrr_active(struct dm_crtc_state *dm_state)
341 {
342         return dm_state->freesync_config.state == VRR_STATE_ACTIVE_VARIABLE ||
343                dm_state->freesync_config.state == VRR_STATE_ACTIVE_FIXED;
344 }
345
346 static inline bool is_dc_timing_adjust_needed(struct dm_crtc_state *old_state,
347                                               struct dm_crtc_state *new_state)
348 {
349         if (new_state->freesync_config.state ==  VRR_STATE_ACTIVE_FIXED)
350                 return true;
351         else if (amdgpu_dm_vrr_active(old_state) != amdgpu_dm_vrr_active(new_state))
352                 return true;
353         else
354                 return false;
355 }
356
357 /**
358  * dm_pflip_high_irq() - Handle pageflip interrupt
359  * @interrupt_params: interrupt parameters, used to derive the CRTC instance
360  *
361  * Handles the pageflip interrupt by notifying all interested parties
362  * that the pageflip has been completed.
363  */
364 static void dm_pflip_high_irq(void *interrupt_params)
365 {
366         struct amdgpu_crtc *amdgpu_crtc;
367         struct common_irq_params *irq_params = interrupt_params;
368         struct amdgpu_device *adev = irq_params->adev;
369         unsigned long flags;
370         struct drm_pending_vblank_event *e;
371         uint32_t vpos, hpos, v_blank_start, v_blank_end;
372         bool vrr_active;
373
374         amdgpu_crtc = get_crtc_by_otg_inst(adev, irq_params->irq_src - IRQ_TYPE_PFLIP);
375
376         /* IRQ could occur when in initial stage */
377         /* TODO work and BO cleanup */
378         if (amdgpu_crtc == NULL) {
379                 DC_LOG_PFLIP("CRTC is null, returning.\n");
380                 return;
381         }
382
383         spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);
384
385         if (amdgpu_crtc->pflip_status != AMDGPU_FLIP_SUBMITTED) {
386                 DC_LOG_PFLIP("amdgpu_crtc->pflip_status = %d != AMDGPU_FLIP_SUBMITTED(%d) on crtc:%d[%p]\n",
387                                                  amdgpu_crtc->pflip_status,
388                                                  AMDGPU_FLIP_SUBMITTED,
389                                                  amdgpu_crtc->crtc_id,
390                                                  amdgpu_crtc);
391                 spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
392                 return;
393         }
394
395         /* page flip completed. */
396         e = amdgpu_crtc->event;
397         amdgpu_crtc->event = NULL;
398
399         WARN_ON(!e);
401
402         vrr_active = amdgpu_dm_vrr_active_irq(amdgpu_crtc);
403
404         /* Fixed refresh rate, or VRR scanout position outside front-porch? */
405         if (!vrr_active ||
406             !dc_stream_get_scanoutpos(amdgpu_crtc->dm_irq_params.stream, &v_blank_start,
407                                       &v_blank_end, &hpos, &vpos) ||
408             (vpos < v_blank_start)) {
409                 /* Update to correct count and vblank timestamp if racing with
410                  * vblank irq. This also updates to the correct vblank timestamp
411                  * even in VRR mode, as scanout is past the front-porch atm.
412                  */
413                 drm_crtc_accurate_vblank_count(&amdgpu_crtc->base);
414
415                 /* Wake up userspace by sending the pageflip event with proper
416                  * count and timestamp of vblank of flip completion.
417                  */
418                 if (e) {
419                         drm_crtc_send_vblank_event(&amdgpu_crtc->base, e);
420
421                         /* Event sent, so done with vblank for this flip */
422                         drm_crtc_vblank_put(&amdgpu_crtc->base);
423                 }
424         } else if (e) {
425                 /* VRR active and inside front-porch: vblank count and
426                  * timestamp for pageflip event will only be up to date after
427                  * drm_crtc_handle_vblank() has been executed from late vblank
428                  * irq handler after start of back-porch (vline 0). We queue the
429                  * pageflip event for send-out by drm_crtc_handle_vblank() with
430                  * updated timestamp and count, once it runs after us.
431                  *
432                  * We need to open-code this instead of using the helper
433                  * drm_crtc_arm_vblank_event(), as that helper would
434                  * call drm_crtc_accurate_vblank_count(), which we must
435                  * not call in VRR mode while we are in front-porch!
436                  */
437
438                 /* sequence will be replaced by real count during send-out. */
439                 e->sequence = drm_crtc_vblank_count(&amdgpu_crtc->base);
440                 e->pipe = amdgpu_crtc->crtc_id;
441
442                 list_add_tail(&e->base.link, &adev_to_drm(adev)->vblank_event_list);
443                 e = NULL;
444         }
445
446         /* Keep track of vblank of this flip for flip throttling. We use the
447          * cooked hw counter, as that one is incremented at the start of this
448          * vblank of pageflip completion, so last_flip_vblank is the forbidden
449          * count for queueing new pageflips if vsync + VRR is enabled.
450          */
451         amdgpu_crtc->dm_irq_params.last_flip_vblank =
452                 amdgpu_get_vblank_counter_kms(&amdgpu_crtc->base);
453
454         amdgpu_crtc->pflip_status = AMDGPU_FLIP_NONE;
455         spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
456
457         DC_LOG_PFLIP("crtc:%d[%p], pflip_stat:AMDGPU_FLIP_NONE, vrr[%d]-fp %d\n",
458                      amdgpu_crtc->crtc_id, amdgpu_crtc,
459                      vrr_active, (int) !e);
460 }
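
/*
 * For contrast (editorial sketch, not driver code): outside of VRR the
 * queued-event path above could have used the DRM helper
 *
 *   drm_crtc_arm_vblank_event(&amdgpu_crtc->base, e);
 *
 * but, as the comment in dm_pflip_high_irq() notes, that helper calls
 * drm_crtc_accurate_vblank_count(), which must not run while scanout is
 * inside a VRR front-porch; hence the open-coded sequence/pipe setup and
 * list_add_tail() onto the vblank_event_list.
 */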
461
462 static void dm_vupdate_high_irq(void *interrupt_params)
463 {
464         struct common_irq_params *irq_params = interrupt_params;
465         struct amdgpu_device *adev = irq_params->adev;
466         struct amdgpu_crtc *acrtc;
467         struct drm_device *drm_dev;
468         struct drm_vblank_crtc *vblank;
469         ktime_t frame_duration_ns, previous_timestamp;
470         unsigned long flags;
471         int vrr_active;
472
473         acrtc = get_crtc_by_otg_inst(adev, irq_params->irq_src - IRQ_TYPE_VUPDATE);
474
475         if (acrtc) {
476                 vrr_active = amdgpu_dm_vrr_active_irq(acrtc);
477                 drm_dev = acrtc->base.dev;
478                 vblank = &drm_dev->vblank[acrtc->base.index];
479                 previous_timestamp = atomic64_read(&irq_params->previous_timestamp);
480                 frame_duration_ns = vblank->time - previous_timestamp;
481
482                 if (frame_duration_ns > 0) {
483                         trace_amdgpu_refresh_rate_track(acrtc->base.index,
484                                                 frame_duration_ns,
485                                                 ktime_divns(NSEC_PER_SEC, frame_duration_ns));
486                         atomic64_set(&irq_params->previous_timestamp, vblank->time);
487                 }
488
489                 DC_LOG_VBLANK("crtc:%d, vupdate-vrr:%d\n",
490                               acrtc->crtc_id,
491                               vrr_active);
492
493                 /* Core vblank handling is done here after end of front-porch in
494                  * vrr mode, as vblank timestamping only gives valid results
495                  * once scanout is past the front-porch. This will also deliver
496                  * page-flip completion events that have been queued to us
497                  * if a pageflip happened inside front-porch.
498                  */
499                 if (vrr_active) {
500                         drm_crtc_handle_vblank(&acrtc->base);
501
502                         /* BTR processing for pre-DCE12 ASICs */
503                         if (acrtc->dm_irq_params.stream &&
504                             adev->family < AMDGPU_FAMILY_AI) {
505                                 spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);
506                                 mod_freesync_handle_v_update(
507                                     adev->dm.freesync_module,
508                                     acrtc->dm_irq_params.stream,
509                                     &acrtc->dm_irq_params.vrr_params);
510
511                                 dc_stream_adjust_vmin_vmax(
512                                     adev->dm.dc,
513                                     acrtc->dm_irq_params.stream,
514                                     &acrtc->dm_irq_params.vrr_params.adjust);
515                                 spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
516                         }
517                 }
518         }
519 }
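
/*
 * Worked example (editorial, illustrative numbers): the refresh-rate
 * trace in dm_vupdate_high_irq() divides one second by the measured
 * frame time, so
 *
 *   frame_duration_ns = 20000000  ->  ktime_divns(NSEC_PER_SEC, ...) = 50
 *   frame_duration_ns = 40000000  ->  ktime_divns(NSEC_PER_SEC, ...) = 25
 *
 * i.e. a VRR panel ramping from 50 Hz down to 25 Hz between two VUPDATEs.
 */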
520
521 /**
522  * dm_crtc_high_irq() - Handles CRTC interrupt
523  * @interrupt_params: used for determining the CRTC instance
524  *
525  * Handles the CRTC/VSYNC interrupt by notifying DRM's VBLANK
526  * event handler.
527  */
528 static void dm_crtc_high_irq(void *interrupt_params)
529 {
530         struct common_irq_params *irq_params = interrupt_params;
531         struct amdgpu_device *adev = irq_params->adev;
532         struct amdgpu_crtc *acrtc;
533         unsigned long flags;
534         int vrr_active;
535
536         acrtc = get_crtc_by_otg_inst(adev, irq_params->irq_src - IRQ_TYPE_VBLANK);
537         if (!acrtc)
538                 return;
539
540         vrr_active = amdgpu_dm_vrr_active_irq(acrtc);
541
542         DC_LOG_VBLANK("crtc:%d, vupdate-vrr:%d, planes:%d\n", acrtc->crtc_id,
543                       vrr_active, acrtc->dm_irq_params.active_planes);
544
545         /*
546          * Core vblank handling at start of front-porch is only possible
547          * in non-vrr mode, as only then does vblank timestamping give
548          * valid results while inside the front-porch. Otherwise defer it
549          * to dm_vupdate_high_irq after end of front-porch.
550          */
551         if (!vrr_active)
552                 drm_crtc_handle_vblank(&acrtc->base);
553
554         /*
555          * The following must happen at start of vblank, for crc
556          * computation and below-the-range btr support in vrr mode.
557          */
558         amdgpu_dm_crtc_handle_crc_irq(&acrtc->base);
559
560         /* BTR updates need to happen before VUPDATE on Vega and above. */
561         if (adev->family < AMDGPU_FAMILY_AI)
562                 return;
563
564         spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);
565
566         if (acrtc->dm_irq_params.stream &&
567             acrtc->dm_irq_params.vrr_params.supported &&
568             acrtc->dm_irq_params.freesync_config.state ==
569                     VRR_STATE_ACTIVE_VARIABLE) {
570                 mod_freesync_handle_v_update(adev->dm.freesync_module,
571                                              acrtc->dm_irq_params.stream,
572                                              &acrtc->dm_irq_params.vrr_params);
573
574                 dc_stream_adjust_vmin_vmax(adev->dm.dc, acrtc->dm_irq_params.stream,
575                                            &acrtc->dm_irq_params.vrr_params.adjust);
576         }
577
578         /*
579          * If there aren't any active_planes then DCH HUBP may be clock-gated.
580          * In that case, pageflip completion interrupts won't fire and pageflip
581          * completion events won't get delivered. Prevent this by sending
582          * pending pageflip events from here if a flip is still pending.
583          *
584          * If any planes are enabled, use dm_pflip_high_irq() instead, to
585          * avoid race conditions between flip programming and completion,
586          * which could cause too early flip completion events.
587          */
588         if (adev->family >= AMDGPU_FAMILY_RV &&
589             acrtc->pflip_status == AMDGPU_FLIP_SUBMITTED &&
590             acrtc->dm_irq_params.active_planes == 0) {
591                 if (acrtc->event) {
592                         drm_crtc_send_vblank_event(&acrtc->base, acrtc->event);
593                         acrtc->event = NULL;
594                         drm_crtc_vblank_put(&acrtc->base);
595                 }
596                 acrtc->pflip_status = AMDGPU_FLIP_NONE;
597         }
598
599         spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
600 }
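
/*
 * Editorial summary (not driver code) of the split between the two IRQ
 * handlers above:
 *
 *   non-VRR: dm_crtc_high_irq()    calls drm_crtc_handle_vblank() at the
 *            start of vblank/front-porch, where timestamps are valid.
 *   VRR:     dm_vupdate_high_irq() calls drm_crtc_handle_vblank() after
 *            the end of the (variable-length) front-porch, the earliest
 *            point at which timestamps are valid again.
 */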
601
602 #if defined(CONFIG_DRM_AMD_DC_DCN)
603 /**
604  * dm_dcn_vertical_interrupt0_high_irq() - Handles OTG Vertical interrupt0 for
605  * DCN generation ASICs
606  * @interrupt_params: interrupt parameters
607  *
608  * Used to set crc window/read out crc value at vertical line 0 position
609  */
610 #if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
611 static void dm_dcn_vertical_interrupt0_high_irq(void *interrupt_params)
612 {
613         struct common_irq_params *irq_params = interrupt_params;
614         struct amdgpu_device *adev = irq_params->adev;
615         struct amdgpu_crtc *acrtc;
616
617         acrtc = get_crtc_by_otg_inst(adev, irq_params->irq_src - IRQ_TYPE_VLINE0);
618
619         if (!acrtc)
620                 return;
621
622         amdgpu_dm_crtc_handle_crc_window_irq(&acrtc->base);
623 }
624 #endif
625
626 /**
627  * dm_dmub_outbox1_low_irq() - Handles Outbox interrupt
628  * @interrupt_params: used for determining the Outbox instance
629  *
630  * Handles the DMUB Outbox interrupt event.
632  */
633 #define DMUB_TRACE_MAX_READ 64
634 static void dm_dmub_outbox1_low_irq(void *interrupt_params)
635 {
636         struct dmub_notification notify;
637         struct common_irq_params *irq_params = interrupt_params;
638         struct amdgpu_device *adev = irq_params->adev;
639         struct amdgpu_display_manager *dm = &adev->dm;
640         struct dmcub_trace_buf_entry entry = { 0 };
641         uint32_t count = 0;
642
643         if (dc_enable_dmub_notifications(adev->dm.dc)) {
644                 if (irq_params->irq_src == DC_IRQ_SOURCE_DMCUB_OUTBOX) {
645                         do {
646                                 dc_stat_get_dmub_notification(adev->dm.dc, &notify);
647                         } while (notify.pending_notification);
648
649                         if (adev->dm.dmub_notify)
650                                 memcpy(adev->dm.dmub_notify, &notify, sizeof(struct dmub_notification));
651                         if (notify.type == DMUB_NOTIFICATION_AUX_REPLY)
652                                 complete(&adev->dm.dmub_aux_transfer_done);
653                         // TODO: HPD implementation
654
655                 } else {
656                         DRM_ERROR("DM: Failed to receive correct outbox IRQ!\n");
657                 }
658         }
659
661         do {
662                 if (dc_dmub_srv_get_dmub_outbox0_msg(dm->dc, &entry)) {
663                         trace_amdgpu_dmub_trace_high_irq(entry.trace_code, entry.tick_count,
664                                                         entry.param0, entry.param1);
665
666                         DRM_DEBUG_DRIVER("trace_code:%u, tick_count:%u, param0:%u, param1:%u\n",
667                                  entry.trace_code, entry.tick_count, entry.param0, entry.param1);
668                 } else
669                         break;
670
671                 count++;
672
673         } while (count <= DMUB_TRACE_MAX_READ);
674
675         ASSERT(count <= DMUB_TRACE_MAX_READ);
676 }
677 #endif
678
679 static int dm_set_clockgating_state(void *handle,
680                   enum amd_clockgating_state state)
681 {
682         return 0;
683 }
684
685 static int dm_set_powergating_state(void *handle,
686                   enum amd_powergating_state state)
687 {
688         return 0;
689 }
690
691 /* Prototypes of private functions */
692 static int dm_early_init(void *handle);
693
694 /* Allocate memory for FBC compressed data */
695 static void amdgpu_dm_fbc_init(struct drm_connector *connector)
696 {
697         struct drm_device *dev = connector->dev;
698         struct amdgpu_device *adev = drm_to_adev(dev);
699         struct dm_compressor_info *compressor = &adev->dm.compressor;
700         struct amdgpu_dm_connector *aconn = to_amdgpu_dm_connector(connector);
701         struct drm_display_mode *mode;
702         unsigned long max_size = 0;
703
704         if (adev->dm.dc->fbc_compressor == NULL)
705                 return;
706
707         if (aconn->dc_link->connector_signal != SIGNAL_TYPE_EDP)
708                 return;
709
710         if (compressor->bo_ptr)
711                 return;
712
714         list_for_each_entry(mode, &connector->modes, head) {
715                 if (max_size < mode->htotal * mode->vtotal)
716                         max_size = mode->htotal * mode->vtotal;
717         }
718
719         if (max_size) {
720                 int r = amdgpu_bo_create_kernel(adev, max_size * 4, PAGE_SIZE,
721                             AMDGPU_GEM_DOMAIN_GTT, &compressor->bo_ptr,
722                             &compressor->gpu_addr, &compressor->cpu_addr);
723
724                 if (r) {
725                         DRM_ERROR("DM: Failed to initialize FBC\n");
726                 } else {
727                         adev->dm.dc->ctx->fbc_gpu_addr = compressor->gpu_addr;
728                         DRM_INFO("DM: FBC alloc %lu\n", max_size * 4);
729                 }
730
731         }
732
733 }
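
/*
 * Worked example (editorial, illustrative mode): for a 3840x2160 mode
 * with htotal = 4400 and vtotal = 2250, amdgpu_dm_fbc_init() would size
 * the buffer as
 *
 *   max_size     = 4400 * 2250 =  9900000 pixels
 *   max_size * 4 = 39600000 bytes (~37.8 MiB of GTT)
 *
 * i.e. four bytes per pixel of the largest mode listed on the connector.
 */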
734
735 static int amdgpu_dm_audio_component_get_eld(struct device *kdev, int port,
736                                           int pipe, bool *enabled,
737                                           unsigned char *buf, int max_bytes)
738 {
739         struct drm_device *dev = dev_get_drvdata(kdev);
740         struct amdgpu_device *adev = drm_to_adev(dev);
741         struct drm_connector *connector;
742         struct drm_connector_list_iter conn_iter;
743         struct amdgpu_dm_connector *aconnector;
744         int ret = 0;
745
746         *enabled = false;
747
748         mutex_lock(&adev->dm.audio_lock);
749
750         drm_connector_list_iter_begin(dev, &conn_iter);
751         drm_for_each_connector_iter(connector, &conn_iter) {
752                 aconnector = to_amdgpu_dm_connector(connector);
753                 if (aconnector->audio_inst != port)
754                         continue;
755
756                 *enabled = true;
757                 ret = drm_eld_size(connector->eld);
758                 memcpy(buf, connector->eld, min(max_bytes, ret));
759
760                 break;
761         }
762         drm_connector_list_iter_end(&conn_iter);
763
764         mutex_unlock(&adev->dm.audio_lock);
765
766         DRM_DEBUG_KMS("Get ELD : idx=%d ret=%d en=%d\n", port, ret, *enabled);
767
768         return ret;
769 }
770
771 static const struct drm_audio_component_ops amdgpu_dm_audio_component_ops = {
772         .get_eld = amdgpu_dm_audio_component_get_eld,
773 };
774
775 static int amdgpu_dm_audio_component_bind(struct device *kdev,
776                                        struct device *hda_kdev, void *data)
777 {
778         struct drm_device *dev = dev_get_drvdata(kdev);
779         struct amdgpu_device *adev = drm_to_adev(dev);
780         struct drm_audio_component *acomp = data;
781
782         acomp->ops = &amdgpu_dm_audio_component_ops;
783         acomp->dev = kdev;
784         adev->dm.audio_component = acomp;
785
786         return 0;
787 }
788
789 static void amdgpu_dm_audio_component_unbind(struct device *kdev,
790                                           struct device *hda_kdev, void *data)
791 {
792         struct drm_device *dev = dev_get_drvdata(kdev);
793         struct amdgpu_device *adev = drm_to_adev(dev);
794         struct drm_audio_component *acomp = data;
795
796         acomp->ops = NULL;
797         acomp->dev = NULL;
798         adev->dm.audio_component = NULL;
799 }
800
801 static const struct component_ops amdgpu_dm_audio_component_bind_ops = {
802         .bind   = amdgpu_dm_audio_component_bind,
803         .unbind = amdgpu_dm_audio_component_unbind,
804 };
805
806 static int amdgpu_dm_audio_init(struct amdgpu_device *adev)
807 {
808         int i, ret;
809
810         if (!amdgpu_audio)
811                 return 0;
812
813         adev->mode_info.audio.enabled = true;
814
815         adev->mode_info.audio.num_pins = adev->dm.dc->res_pool->audio_count;
816
817         for (i = 0; i < adev->mode_info.audio.num_pins; i++) {
818                 adev->mode_info.audio.pin[i].channels = -1;
819                 adev->mode_info.audio.pin[i].rate = -1;
820                 adev->mode_info.audio.pin[i].bits_per_sample = -1;
821                 adev->mode_info.audio.pin[i].status_bits = 0;
822                 adev->mode_info.audio.pin[i].category_code = 0;
823                 adev->mode_info.audio.pin[i].connected = false;
824                 adev->mode_info.audio.pin[i].id =
825                         adev->dm.dc->res_pool->audios[i]->inst;
826                 adev->mode_info.audio.pin[i].offset = 0;
827         }
828
829         ret = component_add(adev->dev, &amdgpu_dm_audio_component_bind_ops);
830         if (ret < 0)
831                 return ret;
832
833         adev->dm.audio_registered = true;
834
835         return 0;
836 }
837
838 static void amdgpu_dm_audio_fini(struct amdgpu_device *adev)
839 {
840         if (!amdgpu_audio)
841                 return;
842
843         if (!adev->mode_info.audio.enabled)
844                 return;
845
846         if (adev->dm.audio_registered) {
847                 component_del(adev->dev, &amdgpu_dm_audio_component_bind_ops);
848                 adev->dm.audio_registered = false;
849         }
850
851         /* TODO: Disable audio? */
852
853         adev->mode_info.audio.enabled = false;
854 }
855
856 static void amdgpu_dm_audio_eld_notify(struct amdgpu_device *adev, int pin)
857 {
858         struct drm_audio_component *acomp = adev->dm.audio_component;
859
860         if (acomp && acomp->audio_ops && acomp->audio_ops->pin_eld_notify) {
861                 DRM_DEBUG_KMS("Notify ELD: %d\n", pin);
862
863                 acomp->audio_ops->pin_eld_notify(acomp->audio_ops->audio_ptr,
864                                                  pin, -1);
865         }
866 }
867
868 static int dm_dmub_hw_init(struct amdgpu_device *adev)
869 {
870         const struct dmcub_firmware_header_v1_0 *hdr;
871         struct dmub_srv *dmub_srv = adev->dm.dmub_srv;
872         struct dmub_srv_fb_info *fb_info = adev->dm.dmub_fb_info;
873         const struct firmware *dmub_fw = adev->dm.dmub_fw;
874         struct dmcu *dmcu = adev->dm.dc->res_pool->dmcu;
875         struct abm *abm = adev->dm.dc->res_pool->abm;
876         struct dmub_srv_hw_params hw_params;
877         enum dmub_status status;
878         const unsigned char *fw_inst_const, *fw_bss_data;
879         uint32_t i, fw_inst_const_size, fw_bss_data_size;
880         bool has_hw_support;
881
882         if (!dmub_srv)
883                 /* DMUB isn't supported on the ASIC. */
884                 return 0;
885
886         if (!fb_info) {
887                 DRM_ERROR("No framebuffer info for DMUB service.\n");
888                 return -EINVAL;
889         }
890
891         if (!dmub_fw) {
892                 /* Firmware required for DMUB support. */
893                 DRM_ERROR("No firmware provided for DMUB.\n");
894                 return -EINVAL;
895         }
896
897         status = dmub_srv_has_hw_support(dmub_srv, &has_hw_support);
898         if (status != DMUB_STATUS_OK) {
899                 DRM_ERROR("Error checking HW support for DMUB: %d\n", status);
900                 return -EINVAL;
901         }
902
903         if (!has_hw_support) {
904                 DRM_INFO("DMUB unsupported on ASIC\n");
905                 return 0;
906         }
907
908         hdr = (const struct dmcub_firmware_header_v1_0 *)dmub_fw->data;
909
910         fw_inst_const = dmub_fw->data +
911                         le32_to_cpu(hdr->header.ucode_array_offset_bytes) +
912                         PSP_HEADER_BYTES;
913
914         fw_bss_data = dmub_fw->data +
915                       le32_to_cpu(hdr->header.ucode_array_offset_bytes) +
916                       le32_to_cpu(hdr->inst_const_bytes);
917
918         /* Copy firmware and bios info into FB memory. */
919         fw_inst_const_size = le32_to_cpu(hdr->inst_const_bytes) -
920                              PSP_HEADER_BYTES - PSP_FOOTER_BYTES;
921
922         fw_bss_data_size = le32_to_cpu(hdr->bss_data_bytes);
923
924         /* If adev->firmware.load_type == AMDGPU_FW_LOAD_PSP,
925          * amdgpu_ucode_init_single_fw will load the dmub firmware
926          * fw_inst_const part to cw0; otherwise, the firmware backdoor load
927          * will be done by dm_dmub_hw_init.
928          */
929         if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP) {
930                 memcpy(fb_info->fb[DMUB_WINDOW_0_INST_CONST].cpu_addr, fw_inst_const,
931                                 fw_inst_const_size);
932         }
933
934         if (fw_bss_data_size)
935                 memcpy(fb_info->fb[DMUB_WINDOW_2_BSS_DATA].cpu_addr,
936                        fw_bss_data, fw_bss_data_size);
937
938         /* Copy firmware bios info into FB memory. */
939         memcpy(fb_info->fb[DMUB_WINDOW_3_VBIOS].cpu_addr, adev->bios,
940                adev->bios_size);
941
942         /* Reset regions that need to be reset. */
943         memset(fb_info->fb[DMUB_WINDOW_4_MAILBOX].cpu_addr, 0,
944                fb_info->fb[DMUB_WINDOW_4_MAILBOX].size);
945
946         memset(fb_info->fb[DMUB_WINDOW_5_TRACEBUFF].cpu_addr, 0,
947                fb_info->fb[DMUB_WINDOW_5_TRACEBUFF].size);
948
949         memset(fb_info->fb[DMUB_WINDOW_6_FW_STATE].cpu_addr, 0,
950                fb_info->fb[DMUB_WINDOW_6_FW_STATE].size);
951
952         /* Initialize hardware. */
953         memset(&hw_params, 0, sizeof(hw_params));
954         hw_params.fb_base = adev->gmc.fb_start;
955         hw_params.fb_offset = adev->gmc.aper_base;
956
957         /* backdoor load firmware and trigger dmub running */
958         if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP)
959                 hw_params.load_inst_const = true;
960
961         if (dmcu)
962                 hw_params.psp_version = dmcu->psp_version;
963
964         for (i = 0; i < fb_info->num_fb; ++i)
965                 hw_params.fb[i] = &fb_info->fb[i];
966
967         status = dmub_srv_hw_init(dmub_srv, &hw_params);
968         if (status != DMUB_STATUS_OK) {
969                 DRM_ERROR("Error initializing DMUB HW: %d\n", status);
970                 return -EINVAL;
971         }
972
973         /* Wait for firmware load to finish. */
974         status = dmub_srv_wait_for_auto_load(dmub_srv, 100000);
975         if (status != DMUB_STATUS_OK)
976                 DRM_WARN("Wait for DMUB auto-load failed: %d\n", status);
977
978         /* Init DMCU and ABM if available. */
979         if (dmcu && abm) {
980                 dmcu->funcs->dmcu_init(dmcu);
981                 abm->dmcu_is_running = dmcu->funcs->is_dmcu_initialized(dmcu);
982         }
983
984         adev->dm.dc->ctx->dmub_srv = dc_dmub_srv_create(adev->dm.dc, dmub_srv);
985         if (!adev->dm.dc->ctx->dmub_srv) {
986                 DRM_ERROR("Couldn't allocate DC DMUB server!\n");
987                 return -ENOMEM;
988         }
989
990         DRM_INFO("DMUB hardware initialized: version=0x%08X\n",
991                  adev->dm.dmcub_fw_version);
992
993         return 0;
994 }
995
996 #if defined(CONFIG_DRM_AMD_DC_DCN)
997 static void mmhub_read_system_context(struct amdgpu_device *adev, struct dc_phy_addr_space_config *pa_config)
998 {
999         uint64_t pt_base;
1000         uint32_t logical_addr_low;
1001         uint32_t logical_addr_high;
1002         uint32_t agp_base, agp_bot, agp_top;
1003         PHYSICAL_ADDRESS_LOC page_table_start, page_table_end, page_table_base;
1004
1005         logical_addr_low  = min(adev->gmc.fb_start, adev->gmc.agp_start) >> 18;
1006         pt_base = amdgpu_gmc_pd_addr(adev->gart.bo);
1007
1008         if (adev->apu_flags & AMD_APU_IS_RAVEN2)
1009                 /*
1010                  * Raven2 has a HW issue that it is unable to use the vram which
1011                  * is out of MC_VM_SYSTEM_APERTURE_HIGH_ADDR. So here is the
1012                  * workaround that increases the system aperture high address
1013                  * (by 1) to get rid of the VM fault and hardware hang.
1014                  */
1015                 logical_addr_high = max((adev->gmc.fb_end >> 18) + 0x1, adev->gmc.agp_end >> 18);
1016         else
1017                 logical_addr_high = max(adev->gmc.fb_end, adev->gmc.agp_end) >> 18;
1018
1019         agp_base = 0;
1020         agp_bot = adev->gmc.agp_start >> 24;
1021         agp_top = adev->gmc.agp_end >> 24;
1022
1023
1024         page_table_start.high_part = (u32)(adev->gmc.gart_start >> 44) & 0xF;
1025         page_table_start.low_part = (u32)(adev->gmc.gart_start >> 12);
1026         page_table_end.high_part = (u32)(adev->gmc.gart_end >> 44) & 0xF;
1027         page_table_end.low_part = (u32)(adev->gmc.gart_end >> 12);
1028         page_table_base.high_part = upper_32_bits(pt_base) & 0xF;
1029         page_table_base.low_part = lower_32_bits(pt_base);
1030
1031         pa_config->system_aperture.start_addr = (uint64_t)logical_addr_low << 18;
1032         pa_config->system_aperture.end_addr = (uint64_t)logical_addr_high << 18;
1033
1034         pa_config->system_aperture.agp_base = (uint64_t)agp_base << 24;
1035         pa_config->system_aperture.agp_bot = (uint64_t)agp_bot << 24;
1036         pa_config->system_aperture.agp_top = (uint64_t)agp_top << 24;
1037
1038         pa_config->system_aperture.fb_base = adev->gmc.fb_start;
1039         pa_config->system_aperture.fb_offset = adev->gmc.aper_base;
1040         pa_config->system_aperture.fb_top = adev->gmc.fb_end;
1041
1042         pa_config->gart_config.page_table_start_addr = page_table_start.quad_part << 12;
1043         pa_config->gart_config.page_table_end_addr = page_table_end.quad_part << 12;
1044         pa_config->gart_config.page_table_base_addr = page_table_base.quad_part;
1045
1046         pa_config->is_hvm_enabled = 0;
1047
1048 }
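
/*
 * Editorial note on the shift amounts above (not driver code): the
 * system-aperture fields are stored in 256 KiB units (1 << 18), the AGP
 * fields in 16 MiB units (1 << 24), and the GART page tables as 4 KiB
 * page numbers (1 << 12), with bits 47:44 carried in high_part. E.g.,
 * simplifying to the fb-only case:
 *
 *   start_addr = (fb_start >> 18) << 18;  // fb_start rounded to 256 KiB
 *   agp_bot    = (agp_start >> 24) << 24; // rounded down to 16 MiB
 */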
1049 #endif
1050 #if defined(CONFIG_DRM_AMD_DC_DCN)
1051 static void event_mall_stutter(struct work_struct *work)
1052 {
1054         struct vblank_workqueue *vblank_work = container_of(work, struct vblank_workqueue, mall_work);
1055         struct amdgpu_display_manager *dm = vblank_work->dm;
1056
1057         mutex_lock(&dm->dc_lock);
1058
1059         if (vblank_work->enable)
1060                 dm->active_vblank_irq_count++;
1061         else if (dm->active_vblank_irq_count)
1062                 dm->active_vblank_irq_count--;
1063
1064         dc_allow_idle_optimizations(dm->dc, dm->active_vblank_irq_count == 0);
1065
1066         DRM_DEBUG_KMS("Allow idle optimizations (MALL): %d\n", dm->active_vblank_irq_count == 0);
1067
1068         mutex_unlock(&dm->dc_lock);
1069 }
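
/*
 * Editorial note (not driver code): the counter above acts as a plain
 * refcount across all CRTCs, e.g.
 *
 *   vblank irq enabled on crtc0  -> active_vblank_irq_count = 1
 *                                   -> idle/MALL optimizations disallowed
 *   vblank irq disabled on crtc0 -> active_vblank_irq_count = 0
 *                                   -> idle/MALL optimizations allowed
 */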
1070
1071 static struct vblank_workqueue *vblank_create_workqueue(struct amdgpu_device *adev, struct dc *dc)
1072 {
1074         int max_caps = dc->caps.max_links;
1075         struct vblank_workqueue *vblank_work;
1076         int i = 0;
1077
1078         vblank_work = kcalloc(max_caps, sizeof(*vblank_work), GFP_KERNEL);
1079         if (ZERO_OR_NULL_PTR(vblank_work)) {
1080                 kfree(vblank_work);
1081                 return NULL;
1082         }
1083
1084         for (i = 0; i < max_caps; i++)
1085                 INIT_WORK(&vblank_work[i].mall_work, event_mall_stutter);
1086
1087         return vblank_work;
1088 }
1089 #endif
1090 static int amdgpu_dm_init(struct amdgpu_device *adev)
1091 {
1092         struct dc_init_data init_data;
1093 #ifdef CONFIG_DRM_AMD_DC_HDCP
1094         struct dc_callback_init init_params;
1095 #endif
1096         int r;
1097
1098         adev->dm.ddev = adev_to_drm(adev);
1099         adev->dm.adev = adev;
1100
1101         /* Zero all the fields */
1102         memset(&init_data, 0, sizeof(init_data));
1103 #ifdef CONFIG_DRM_AMD_DC_HDCP
1104         memset(&init_params, 0, sizeof(init_params));
1105 #endif
1106
1107         mutex_init(&adev->dm.dc_lock);
1108         mutex_init(&adev->dm.audio_lock);
1109 #if defined(CONFIG_DRM_AMD_DC_DCN)
1110         spin_lock_init(&adev->dm.vblank_lock);
1111 #endif
1112
1113         if (amdgpu_dm_irq_init(adev)) {
1114                 DRM_ERROR("amdgpu: failed to initialize DM IRQ support.\n");
1115                 goto error;
1116         }
1117
1118         init_data.asic_id.chip_family = adev->family;
1119
1120         init_data.asic_id.pci_revision_id = adev->pdev->revision;
1121         init_data.asic_id.hw_internal_rev = adev->external_rev_id;
1122
1123         init_data.asic_id.vram_width = adev->gmc.vram_width;
1124         /* TODO: initialize init_data.asic_id.vram_type here!!!! */
1125         init_data.asic_id.atombios_base_address =
1126                 adev->mode_info.atom_context->bios;
1127
1128         init_data.driver = adev;
1129
1130         adev->dm.cgs_device = amdgpu_cgs_create_device(adev);
1131
1132         if (!adev->dm.cgs_device) {
1133                 DRM_ERROR("amdgpu: failed to create cgs device.\n");
1134                 goto error;
1135         }
1136
1137         init_data.cgs_device = adev->dm.cgs_device;
1138
1139         init_data.dce_environment = DCE_ENV_PRODUCTION_DRV;
1140
1141         switch (adev->asic_type) {
1142         case CHIP_CARRIZO:
1143         case CHIP_STONEY:
1144         case CHIP_RAVEN:
1145         case CHIP_RENOIR:
1146                 init_data.flags.gpu_vm_support = true;
1147                 if (ASICREV_IS_GREEN_SARDINE(adev->external_rev_id))
1148                         init_data.flags.disable_dmcu = true;
1149                 break;
1150 #if defined(CONFIG_DRM_AMD_DC_DCN)
1151         case CHIP_VANGOGH:
1152                 init_data.flags.gpu_vm_support = true;
1153                 break;
1154 #endif
1155         default:
1156                 break;
1157         }
1158
1159         if (amdgpu_dc_feature_mask & DC_FBC_MASK)
1160                 init_data.flags.fbc_support = true;
1161
1162         if (amdgpu_dc_feature_mask & DC_MULTI_MON_PP_MCLK_SWITCH_MASK)
1163                 init_data.flags.multi_mon_pp_mclk_switch = true;
1164
1165         if (amdgpu_dc_feature_mask & DC_DISABLE_FRACTIONAL_PWM_MASK)
1166                 init_data.flags.disable_fractional_pwm = true;
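
        /*
         * Editorial note, assuming the mask values from amd_shared.h
         * (e.g. DC_FBC_MASK = 0x1): amdgpu_dc_feature_mask is set from the
         * amdgpu.dcfeaturemask module parameter, so booting with e.g.
         *
         *   amdgpu.dcfeaturemask=0x1
         *
         * would take the fbc_support branch above.
         */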
1167
1168         init_data.flags.power_down_display_on_boot = true;
1169
1170         INIT_LIST_HEAD(&adev->dm.da_list);
1171         /* Display Core create. */
1172         adev->dm.dc = dc_create(&init_data);
1173
1174         if (adev->dm.dc) {
1175                 DRM_INFO("Display Core initialized with v%s!\n", DC_VER);
1176         } else {
1177                 DRM_INFO("Display Core failed to initialize with v%s!\n", DC_VER);
1178                 goto error;
1179         }
1180
1181         if (amdgpu_dc_debug_mask & DC_DISABLE_PIPE_SPLIT) {
1182                 adev->dm.dc->debug.force_single_disp_pipe_split = false;
1183                 adev->dm.dc->debug.pipe_split_policy = MPC_SPLIT_AVOID;
1184         }
1185
1186         if (adev->asic_type != CHIP_CARRIZO && adev->asic_type != CHIP_STONEY)
1187                 adev->dm.dc->debug.disable_stutter = !(amdgpu_pp_feature_mask & PP_STUTTER_MODE);
1188
1189         if (amdgpu_dc_debug_mask & DC_DISABLE_STUTTER)
1190                 adev->dm.dc->debug.disable_stutter = true;
1191
1192         if (amdgpu_dc_debug_mask & DC_DISABLE_DSC)
1193                 adev->dm.dc->debug.disable_dsc = true;
1194
1195         if (amdgpu_dc_debug_mask & DC_DISABLE_CLOCK_GATING)
1196                 adev->dm.dc->debug.disable_clock_gate = true;
1197
1198         r = dm_dmub_hw_init(adev);
1199         if (r) {
1200                 DRM_ERROR("DMUB interface failed to initialize: status=%d\n", r);
1201                 goto error;
1202         }
1203
1204         dc_hardware_init(adev->dm.dc);
1205
1206 #if defined(CONFIG_DRM_AMD_DC_DCN)
1207         if (adev->apu_flags) {
1208                 struct dc_phy_addr_space_config pa_config;
1209
1210                 mmhub_read_system_context(adev, &pa_config);
1211
1212                 // Call the DC init_memory func
1213                 dc_setup_system_context(adev->dm.dc, &pa_config);
1214         }
1215 #endif
1216
1217         adev->dm.freesync_module = mod_freesync_create(adev->dm.dc);
1218         if (!adev->dm.freesync_module) {
1219                 DRM_ERROR(
1220                 "amdgpu: failed to initialize freesync_module.\n");
1221         } else
1222                 DRM_DEBUG_DRIVER("amdgpu: freesync_module init done %p.\n",
1223                                 adev->dm.freesync_module);
1224
1225         amdgpu_dm_init_color_mod();
1226
1227 #if defined(CONFIG_DRM_AMD_DC_DCN)
1228         if (adev->dm.dc->caps.max_links > 0) {
1229                 adev->dm.vblank_workqueue = vblank_create_workqueue(adev, adev->dm.dc);
1230
1231                 if (!adev->dm.vblank_workqueue)
1232                         DRM_ERROR("amdgpu: failed to initialize vblank_workqueue.\n");
1233                 else
1234                         DRM_DEBUG_DRIVER("amdgpu: vblank_workqueue init done %p.\n", adev->dm.vblank_workqueue);
1235         }
1236 #endif
1237
1238 #ifdef CONFIG_DRM_AMD_DC_HDCP
1239         if (adev->dm.dc->caps.max_links > 0 && adev->asic_type >= CHIP_RAVEN) {
1240                 adev->dm.hdcp_workqueue = hdcp_create_workqueue(adev, &init_params.cp_psp, adev->dm.dc);
1241
1242                 if (!adev->dm.hdcp_workqueue)
1243                         DRM_ERROR("amdgpu: failed to initialize hdcp_workqueue.\n");
1244                 else
1245                         DRM_DEBUG_DRIVER("amdgpu: hdcp_workqueue init done %p.\n", adev->dm.hdcp_workqueue);
1246
1247                 dc_init_callbacks(adev->dm.dc, &init_params);
1248         }
1249 #endif
1250 #if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
1251         adev->dm.crc_rd_wrk = amdgpu_dm_crtc_secure_display_create_work();
1252 #endif
1253         if (dc_enable_dmub_notifications(adev->dm.dc)) {
1254                 init_completion(&adev->dm.dmub_aux_transfer_done);
1255                 adev->dm.dmub_notify = kzalloc(sizeof(struct dmub_notification), GFP_KERNEL);
1256                 if (!adev->dm.dmub_notify) {
1257                         DRM_INFO("amdgpu: failed to allocate adev->dm.dmub_notify\n");
1258                         goto error;
1259                 }
1260                 amdgpu_dm_outbox_init(adev);
1261         }
1262
1263         if (amdgpu_dm_initialize_drm_device(adev)) {
1264                 DRM_ERROR(
1265                 "amdgpu: failed to initialize sw for display support.\n");
1266                 goto error;
1267         }
1268
1269         /* create fake encoders for MST */
1270         dm_dp_create_fake_mst_encoders(adev);
1271
1272         /* TODO: Add_display_info? */
1273
1274         /* TODO use dynamic cursor width */
1275         adev_to_drm(adev)->mode_config.cursor_width = adev->dm.dc->caps.max_cursor_size;
1276         adev_to_drm(adev)->mode_config.cursor_height = adev->dm.dc->caps.max_cursor_size;
1277
1278         if (drm_vblank_init(adev_to_drm(adev), adev->dm.display_indexes_num)) {
1279                 DRM_ERROR(
1280                 "amdgpu: failed to initialize vblank for display support.\n");
1281                 goto error;
1282         }
1283
1284
1285         DRM_DEBUG_DRIVER("KMS initialized.\n");
1286
1287         return 0;
1288 error:
1289         amdgpu_dm_fini(adev);
1290
1291         return -EINVAL;
1292 }
1293
1294 static void amdgpu_dm_fini(struct amdgpu_device *adev)
1295 {
1296         int i;
1297
1298         for (i = 0; i < adev->dm.display_indexes_num; i++) {
1299                 drm_encoder_cleanup(&adev->dm.mst_encoders[i].base);
1300         }
1301
1302         amdgpu_dm_audio_fini(adev);
1303
1304         amdgpu_dm_destroy_drm_device(&adev->dm);
1305
1306 #if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
1307         if (adev->dm.crc_rd_wrk) {
1308                 flush_work(&adev->dm.crc_rd_wrk->notify_ta_work);
1309                 kfree(adev->dm.crc_rd_wrk);
1310                 adev->dm.crc_rd_wrk = NULL;
1311         }
1312 #endif
1313 #ifdef CONFIG_DRM_AMD_DC_HDCP
1314         if (adev->dm.hdcp_workqueue) {
1315                 hdcp_destroy(&adev->dev->kobj, adev->dm.hdcp_workqueue);
1316                 adev->dm.hdcp_workqueue = NULL;
1317         }
1318
1319         if (adev->dm.dc)
1320                 dc_deinit_callbacks(adev->dm.dc);
1321 #endif
1322
1323 #if defined(CONFIG_DRM_AMD_DC_DCN)
1324         if (adev->dm.vblank_workqueue) {
1325                 adev->dm.vblank_workqueue->dm = NULL;
1326                 kfree(adev->dm.vblank_workqueue);
1327                 adev->dm.vblank_workqueue = NULL;
1328         }
1329 #endif
1330
1331         if (adev->dm.dc->ctx->dmub_srv) {
1332                 dc_dmub_srv_destroy(&adev->dm.dc->ctx->dmub_srv);
1333                 adev->dm.dc->ctx->dmub_srv = NULL;
1334         }
1335
1336         if (dc_enable_dmub_notifications(adev->dm.dc)) {
1337                 kfree(adev->dm.dmub_notify);
1338                 adev->dm.dmub_notify = NULL;
1339         }
1340
1341         if (adev->dm.dmub_bo)
1342                 amdgpu_bo_free_kernel(&adev->dm.dmub_bo,
1343                                       &adev->dm.dmub_bo_gpu_addr,
1344                                       &adev->dm.dmub_bo_cpu_addr);
1345
1346         /* DC Destroy TODO: Replace destroy DAL */
1347         if (adev->dm.dc)
1348                 dc_destroy(&adev->dm.dc);
1349         /*
1350          * TODO: pageflip, vblank interrupt
1351          *
1352          * amdgpu_dm_irq_fini(adev);
1353          */
1354
1355         if (adev->dm.cgs_device) {
1356                 amdgpu_cgs_destroy_device(adev->dm.cgs_device);
1357                 adev->dm.cgs_device = NULL;
1358         }
1359         if (adev->dm.freesync_module) {
1360                 mod_freesync_destroy(adev->dm.freesync_module);
1361                 adev->dm.freesync_module = NULL;
1362         }
1363
1364         mutex_destroy(&adev->dm.audio_lock);
1365         mutex_destroy(&adev->dm.dc_lock);
1368 }
1369
1370 static int load_dmcu_fw(struct amdgpu_device *adev)
1371 {
1372         const char *fw_name_dmcu = NULL;
1373         int r;
1374         const struct dmcu_firmware_header_v1_0 *hdr;
1375
1376         switch (adev->asic_type) {
1377 #if defined(CONFIG_DRM_AMD_DC_SI)
1378         case CHIP_TAHITI:
1379         case CHIP_PITCAIRN:
1380         case CHIP_VERDE:
1381         case CHIP_OLAND:
1382 #endif
1383         case CHIP_BONAIRE:
1384         case CHIP_HAWAII:
1385         case CHIP_KAVERI:
1386         case CHIP_KABINI:
1387         case CHIP_MULLINS:
1388         case CHIP_TONGA:
1389         case CHIP_FIJI:
1390         case CHIP_CARRIZO:
1391         case CHIP_STONEY:
1392         case CHIP_POLARIS11:
1393         case CHIP_POLARIS10:
1394         case CHIP_POLARIS12:
1395         case CHIP_VEGAM:
1396         case CHIP_VEGA10:
1397         case CHIP_VEGA12:
1398         case CHIP_VEGA20:
1399         case CHIP_NAVI10:
1400         case CHIP_NAVI14:
1401         case CHIP_RENOIR:
1402         case CHIP_SIENNA_CICHLID:
1403         case CHIP_NAVY_FLOUNDER:
1404         case CHIP_DIMGREY_CAVEFISH:
1405         case CHIP_BEIGE_GOBY:
1406         case CHIP_VANGOGH:
1407                 return 0;
1408         case CHIP_NAVI12:
1409                 fw_name_dmcu = FIRMWARE_NAVI12_DMCU;
1410                 break;
        case CHIP_RAVEN:
                if (ASICREV_IS_PICASSO(adev->external_rev_id) ||
                    ASICREV_IS_RAVEN2(adev->external_rev_id))
                        fw_name_dmcu = FIRMWARE_RAVEN_DMCU;
                else
                        return 0;
                break;
1419         default:
1420                 DRM_ERROR("Unsupported ASIC type: 0x%X\n", adev->asic_type);
1421                 return -EINVAL;
1422         }
1423
1424         if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP) {
1425                 DRM_DEBUG_KMS("dm: DMCU firmware not supported on direct or SMU loading\n");
1426                 return 0;
1427         }
1428
1429         r = request_firmware_direct(&adev->dm.fw_dmcu, fw_name_dmcu, adev->dev);
1430         if (r == -ENOENT) {
1431                 /* DMCU firmware is not necessary, so don't raise a fuss if it's missing */
1432                 DRM_DEBUG_KMS("dm: DMCU firmware not found\n");
1433                 adev->dm.fw_dmcu = NULL;
1434                 return 0;
1435         }
1436         if (r) {
1437                 dev_err(adev->dev, "amdgpu_dm: Can't load firmware \"%s\"\n",
1438                         fw_name_dmcu);
1439                 return r;
1440         }
1441
1442         r = amdgpu_ucode_validate(adev->dm.fw_dmcu);
1443         if (r) {
1444                 dev_err(adev->dev, "amdgpu_dm: Can't validate firmware \"%s\"\n",
1445                         fw_name_dmcu);
1446                 release_firmware(adev->dm.fw_dmcu);
1447                 adev->dm.fw_dmcu = NULL;
1448                 return r;
1449         }
1450
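        /*
         * The DMCU blob carries both the ERAM image and the interrupt
         * vectors (INTV); both ucode entries below reference the same
         * firmware, with their sizes split out from the header.
         */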
1451         hdr = (const struct dmcu_firmware_header_v1_0 *)adev->dm.fw_dmcu->data;
1452         adev->firmware.ucode[AMDGPU_UCODE_ID_DMCU_ERAM].ucode_id = AMDGPU_UCODE_ID_DMCU_ERAM;
1453         adev->firmware.ucode[AMDGPU_UCODE_ID_DMCU_ERAM].fw = adev->dm.fw_dmcu;
1454         adev->firmware.fw_size +=
1455                 ALIGN(le32_to_cpu(hdr->header.ucode_size_bytes) - le32_to_cpu(hdr->intv_size_bytes), PAGE_SIZE);
1456
1457         adev->firmware.ucode[AMDGPU_UCODE_ID_DMCU_INTV].ucode_id = AMDGPU_UCODE_ID_DMCU_INTV;
1458         adev->firmware.ucode[AMDGPU_UCODE_ID_DMCU_INTV].fw = adev->dm.fw_dmcu;
1459         adev->firmware.fw_size +=
1460                 ALIGN(le32_to_cpu(hdr->intv_size_bytes), PAGE_SIZE);
1461
1462         adev->dm.dmcu_fw_version = le32_to_cpu(hdr->header.ucode_version);
1463
1464         DRM_DEBUG_KMS("PSP loading DMCU firmware\n");
1465
1466         return 0;
1467 }
1468
1469 static uint32_t amdgpu_dm_dmub_reg_read(void *ctx, uint32_t address)
1470 {
1471         struct amdgpu_device *adev = ctx;
1472
1473         return dm_read_reg(adev->dm.dc->ctx, address);
1474 }
1475
1476 static void amdgpu_dm_dmub_reg_write(void *ctx, uint32_t address,
1477                                      uint32_t value)
1478 {
1479         struct amdgpu_device *adev = ctx;
1480
1481         return dm_write_reg(adev->dm.dc->ctx, address, value);
1482 }
1483
1484 static int dm_dmub_sw_init(struct amdgpu_device *adev)
1485 {
1486         struct dmub_srv_create_params create_params;
1487         struct dmub_srv_region_params region_params;
1488         struct dmub_srv_region_info region_info;
1489         struct dmub_srv_fb_params fb_params;
1490         struct dmub_srv_fb_info *fb_info;
1491         struct dmub_srv *dmub_srv;
1492         const struct dmcub_firmware_header_v1_0 *hdr;
1493         const char *fw_name_dmub;
1494         enum dmub_asic dmub_asic;
1495         enum dmub_status status;
1496         int r;
1497
1498         switch (adev->asic_type) {
1499         case CHIP_RENOIR:
1500                 dmub_asic = DMUB_ASIC_DCN21;
1501                 fw_name_dmub = FIRMWARE_RENOIR_DMUB;
1502                 if (ASICREV_IS_GREEN_SARDINE(adev->external_rev_id))
1503                         fw_name_dmub = FIRMWARE_GREEN_SARDINE_DMUB;
1504                 break;
1505         case CHIP_SIENNA_CICHLID:
1506                 dmub_asic = DMUB_ASIC_DCN30;
1507                 fw_name_dmub = FIRMWARE_SIENNA_CICHLID_DMUB;
1508                 break;
1509         case CHIP_NAVY_FLOUNDER:
1510                 dmub_asic = DMUB_ASIC_DCN30;
1511                 fw_name_dmub = FIRMWARE_NAVY_FLOUNDER_DMUB;
1512                 break;
1513         case CHIP_VANGOGH:
1514                 dmub_asic = DMUB_ASIC_DCN301;
1515                 fw_name_dmub = FIRMWARE_VANGOGH_DMUB;
1516                 break;
1517         case CHIP_DIMGREY_CAVEFISH:
1518                 dmub_asic = DMUB_ASIC_DCN302;
1519                 fw_name_dmub = FIRMWARE_DIMGREY_CAVEFISH_DMUB;
1520                 break;
1521         case CHIP_BEIGE_GOBY:
1522                 dmub_asic = DMUB_ASIC_DCN303;
1523                 fw_name_dmub = FIRMWARE_BEIGE_GOBY_DMUB;
1524                 break;
1526         default:
1527                 /* ASIC doesn't support DMUB. */
1528                 return 0;
1529         }
1530
1531         r = request_firmware_direct(&adev->dm.dmub_fw, fw_name_dmub, adev->dev);
1532         if (r) {
1533                 DRM_ERROR("DMUB firmware loading failed: %d\n", r);
1534                 return 0;
1535         }
1536
1537         r = amdgpu_ucode_validate(adev->dm.dmub_fw);
1538         if (r) {
1539                 DRM_ERROR("Couldn't validate DMUB firmware: %d\n", r);
1540                 return 0;
1541         }
1542
        hdr = (const struct dmcub_firmware_header_v1_0 *)adev->dm.dmub_fw->data;
        adev->dm.dmcub_fw_version = le32_to_cpu(hdr->header.ucode_version);

        if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) {
                adev->firmware.ucode[AMDGPU_UCODE_ID_DMCUB].ucode_id =
                        AMDGPU_UCODE_ID_DMCUB;
                adev->firmware.ucode[AMDGPU_UCODE_ID_DMCUB].fw =
                        adev->dm.dmub_fw;
                adev->firmware.fw_size +=
                        ALIGN(le32_to_cpu(hdr->inst_const_bytes), PAGE_SIZE);

                DRM_INFO("Loading DMUB firmware via PSP: version=0x%08X\n",
                         adev->dm.dmcub_fw_version);
        }
1558
1559         adev->dm.dmub_srv = kzalloc(sizeof(*adev->dm.dmub_srv), GFP_KERNEL);
1560         dmub_srv = adev->dm.dmub_srv;
1561
1562         if (!dmub_srv) {
1563                 DRM_ERROR("Failed to allocate DMUB service!\n");
1564                 return -ENOMEM;
1565         }
1566
1567         memset(&create_params, 0, sizeof(create_params));
1568         create_params.user_ctx = adev;
1569         create_params.funcs.reg_read = amdgpu_dm_dmub_reg_read;
1570         create_params.funcs.reg_write = amdgpu_dm_dmub_reg_write;
1571         create_params.asic = dmub_asic;
1572
1573         /* Create the DMUB service. */
1574         status = dmub_srv_create(dmub_srv, &create_params);
1575         if (status != DMUB_STATUS_OK) {
1576                 DRM_ERROR("Error creating DMUB service: %d\n", status);
1577                 return -EINVAL;
1578         }
1579
1580         /* Calculate the size of all the regions for the DMUB service. */
1581         memset(&region_params, 0, sizeof(region_params));
1582
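        /*
         * inst_const in the firmware blob is wrapped by a PSP header and
         * footer; subtract them so the region size matches what DMUB runs.
         */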
1583         region_params.inst_const_size = le32_to_cpu(hdr->inst_const_bytes) -
1584                                         PSP_HEADER_BYTES - PSP_FOOTER_BYTES;
1585         region_params.bss_data_size = le32_to_cpu(hdr->bss_data_bytes);
1586         region_params.vbios_size = adev->bios_size;
1587         region_params.fw_bss_data = region_params.bss_data_size ?
1588                 adev->dm.dmub_fw->data +
1589                 le32_to_cpu(hdr->header.ucode_array_offset_bytes) +
1590                 le32_to_cpu(hdr->inst_const_bytes) : NULL;
1591         region_params.fw_inst_const =
1592                 adev->dm.dmub_fw->data +
1593                 le32_to_cpu(hdr->header.ucode_array_offset_bytes) +
1594                 PSP_HEADER_BYTES;
1595
1596         status = dmub_srv_calc_region_info(dmub_srv, &region_params,
1597                                            &region_info);
1598
1599         if (status != DMUB_STATUS_OK) {
1600                 DRM_ERROR("Error calculating DMUB region info: %d\n", status);
1601                 return -EINVAL;
1602         }
1603
1604         /*
1605          * Allocate a framebuffer based on the total size of all the regions.
1606          * TODO: Move this into GART.
1607          */
1608         r = amdgpu_bo_create_kernel(adev, region_info.fb_size, PAGE_SIZE,
1609                                     AMDGPU_GEM_DOMAIN_VRAM, &adev->dm.dmub_bo,
1610                                     &adev->dm.dmub_bo_gpu_addr,
1611                                     &adev->dm.dmub_bo_cpu_addr);
1612         if (r)
1613                 return r;
1614
1615         /* Rebase the regions on the framebuffer address. */
1616         memset(&fb_params, 0, sizeof(fb_params));
1617         fb_params.cpu_addr = adev->dm.dmub_bo_cpu_addr;
1618         fb_params.gpu_addr = adev->dm.dmub_bo_gpu_addr;
1619         fb_params.region_info = &region_info;
1620
1621         adev->dm.dmub_fb_info =
1622                 kzalloc(sizeof(*adev->dm.dmub_fb_info), GFP_KERNEL);
1623         fb_info = adev->dm.dmub_fb_info;
1624
1625         if (!fb_info) {
1626                 DRM_ERROR(
1627                         "Failed to allocate framebuffer info for DMUB service!\n");
1628                 return -ENOMEM;
1629         }
1630
1631         status = dmub_srv_calc_fb_info(dmub_srv, &fb_params, fb_info);
1632         if (status != DMUB_STATUS_OK) {
1633                 DRM_ERROR("Error calculating DMUB FB info: %d\n", status);
1634                 return -EINVAL;
1635         }
1636
1637         return 0;
1638 }
1639
1640 static int dm_sw_init(void *handle)
1641 {
1642         struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1643         int r;
1644
1645         r = dm_dmub_sw_init(adev);
1646         if (r)
1647                 return r;
1648
1649         return load_dmcu_fw(adev);
1650 }
1651
1652 static int dm_sw_fini(void *handle)
1653 {
1654         struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1655
1656         kfree(adev->dm.dmub_fb_info);
1657         adev->dm.dmub_fb_info = NULL;
1658
1659         if (adev->dm.dmub_srv) {
1660                 dmub_srv_destroy(adev->dm.dmub_srv);
1661                 adev->dm.dmub_srv = NULL;
1662         }
1663
1664         release_firmware(adev->dm.dmub_fw);
1665         adev->dm.dmub_fw = NULL;
1666
1667         release_firmware(adev->dm.fw_dmcu);
1668         adev->dm.fw_dmcu = NULL;
1669
1670         return 0;
1671 }
1672
1673 static int detect_mst_link_for_all_connectors(struct drm_device *dev)
1674 {
1675         struct amdgpu_dm_connector *aconnector;
1676         struct drm_connector *connector;
1677         struct drm_connector_list_iter iter;
1678         int ret = 0;
1679
1680         drm_connector_list_iter_begin(dev, &iter);
1681         drm_for_each_connector_iter(connector, &iter) {
1682                 aconnector = to_amdgpu_dm_connector(connector);
1683                 if (aconnector->dc_link->type == dc_connection_mst_branch &&
1684                     aconnector->mst_mgr.aux) {
1685                         DRM_DEBUG_DRIVER("DM_MST: starting TM on aconnector: %p [id: %d]\n",
1686                                          aconnector,
1687                                          aconnector->base.base.id);
1688
1689                         ret = drm_dp_mst_topology_mgr_set_mst(&aconnector->mst_mgr, true);
1690                         if (ret < 0) {
1691                                 DRM_ERROR("DM_MST: Failed to start MST\n");
1692                                 aconnector->dc_link->type =
1693                                         dc_connection_single;
1694                                 break;
1695                         }
1696                 }
1697         }
1698         drm_connector_list_iter_end(&iter);
1699
1700         return ret;
1701 }
1702
1703 static int dm_late_init(void *handle)
1704 {
1705         struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1706
1707         struct dmcu_iram_parameters params;
1708         unsigned int linear_lut[16];
1709         int i;
1710         struct dmcu *dmcu = NULL;
1711         bool ret = true;
1712
1713         dmcu = adev->dm.dc->res_pool->dmcu;
1714
1715         for (i = 0; i < 16; i++)
1716                 linear_lut[i] = 0xFFFF * i / 15;
1717
1718         params.set = 0;
1719         params.backlight_ramping_start = 0xCCCC;
1720         params.backlight_ramping_reduction = 0xCCCCCCCC;
1721         params.backlight_lut_array_size = 16;
1722         params.backlight_lut_array = linear_lut;
1723
        /* Min backlight level after ABM reduction; don't allow below 1%:
         * 0xFFFF * 0.01 = 0x28F
         */
1727         params.min_abm_backlight = 0x28F;
1728
        /* If ABM is implemented on dmcub, the dmcu object will be NULL.
         * ABM 2.4 and up are implemented on dmcub.
         */
1733         if (dmcu)
1734                 ret = dmcu_load_iram(dmcu, params);
1735         else if (adev->dm.dc->ctx->dmub_srv)
1736                 ret = dmub_init_abm_config(adev->dm.dc->res_pool, params);
1737
1738         if (!ret)
1739                 return -EINVAL;
1740
1741         return detect_mst_link_for_all_connectors(adev_to_drm(adev));
1742 }
1743
1744 static void s3_handle_mst(struct drm_device *dev, bool suspend)
1745 {
1746         struct amdgpu_dm_connector *aconnector;
1747         struct drm_connector *connector;
1748         struct drm_connector_list_iter iter;
1749         struct drm_dp_mst_topology_mgr *mgr;
1750         int ret;
1751         bool need_hotplug = false;
1752
1753         drm_connector_list_iter_begin(dev, &iter);
1754         drm_for_each_connector_iter(connector, &iter) {
1755                 aconnector = to_amdgpu_dm_connector(connector);
1756                 if (aconnector->dc_link->type != dc_connection_mst_branch ||
1757                     aconnector->mst_port)
1758                         continue;
1759
1760                 mgr = &aconnector->mst_mgr;
1761
1762                 if (suspend) {
1763                         drm_dp_mst_topology_mgr_suspend(mgr);
1764                 } else {
1765                         ret = drm_dp_mst_topology_mgr_resume(mgr, true);
1766                         if (ret < 0) {
1767                                 drm_dp_mst_topology_mgr_set_mst(mgr, false);
1768                                 need_hotplug = true;
1769                         }
1770                 }
1771         }
1772         drm_connector_list_iter_end(&iter);
1773
1774         if (need_hotplug)
1775                 drm_kms_helper_hotplug_event(dev);
1776 }
1777
1778 static int amdgpu_dm_smu_write_watermarks_table(struct amdgpu_device *adev)
1779 {
1780         struct smu_context *smu = &adev->smu;
1781         int ret = 0;
1782
1783         if (!is_support_sw_smu(adev))
1784                 return 0;
1785
        /* This interface is for dGPU Navi1x. The Linux dc-pplib interface
         * depends on the Windows driver dc implementation.
         * For Navi1x, the clock settings of the dcn watermarks are fixed; the
         * settings should be passed to smu during boot up and when resuming
         * from s3.
         * Boot up: dc calculates the dcn watermark clock settings within
         * dc_create / dcn20_resource_construct,
         * then calls the pplib functions below to pass the settings to smu:
         * smu_set_watermarks_for_clock_ranges
         * smu_set_watermarks_table
         * navi10_set_watermarks_table
         * smu_write_watermarks_table
         *
         * For Renoir, the clock settings of the dcn watermarks are also fixed
         * values. dc has implemented a different flow for the Windows driver:
         * dc_hardware_init / dc_set_power_state
         * dcn10_init_hw
         * notify_wm_ranges
         * set_wm_ranges
         * -- Linux
         * smu_set_watermarks_for_clock_ranges
         * renoir_set_watermarks_table
         * smu_write_watermarks_table
         *
         * For Linux,
         * dc_hardware_init -> amdgpu_dm_init
         * dc_set_power_state --> dm_resume
         *
         * Therefore, this function applies to navi10/12/14 but not to Renoir.
         */
        switch (adev->asic_type) {
1817         case CHIP_NAVI10:
1818         case CHIP_NAVI14:
1819         case CHIP_NAVI12:
1820                 break;
1821         default:
1822                 return 0;
1823         }
1824
1825         ret = smu_write_watermarks_table(smu);
1826         if (ret) {
1827                 DRM_ERROR("Failed to update WMTABLE!\n");
1828                 return ret;
1829         }
1830
1831         return 0;
1832 }
1833
1834 /**
1835  * dm_hw_init() - Initialize DC device
1836  * @handle: The base driver device containing the amdgpu_dm device.
1837  *
1838  * Initialize the &struct amdgpu_display_manager device. This involves calling
1839  * the initializers of each DM component, then populating the struct with them.
1840  *
1841  * Although the function implies hardware initialization, both hardware and
1842  * software are initialized here. Splitting them out to their relevant init
1843  * hooks is a future TODO item.
1844  *
1845  * Some notable things that are initialized here:
1846  *
1847  * - Display Core, both software and hardware
1848  * - DC modules that we need (freesync and color management)
1849  * - DRM software states
1850  * - Interrupt sources and handlers
1851  * - Vblank support
1852  * - Debug FS entries, if enabled
1853  */
1854 static int dm_hw_init(void *handle)
1855 {
1856         struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1857         /* Create DAL display manager */
1858         amdgpu_dm_init(adev);
1859         amdgpu_dm_hpd_init(adev);
1860
1861         return 0;
1862 }
1863
1864 /**
1865  * dm_hw_fini() - Teardown DC device
1866  * @handle: The base driver device containing the amdgpu_dm device.
1867  *
1868  * Teardown components within &struct amdgpu_display_manager that require
1869  * cleanup. This involves cleaning up the DRM device, DC, and any modules that
1870  * were loaded. Also flush IRQ workqueues and disable them.
1871  */
1872 static int dm_hw_fini(void *handle)
1873 {
1874         struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1875
1876         amdgpu_dm_hpd_fini(adev);
1877
1878         amdgpu_dm_irq_fini(adev);
1879         amdgpu_dm_fini(adev);
1880         return 0;
1881 }
1882
1884 static int dm_enable_vblank(struct drm_crtc *crtc);
1885 static void dm_disable_vblank(struct drm_crtc *crtc);
1886
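/*
 * Enable or disable the pageflip and vblank interrupts for every CRTC that
 * backs an active stream; used to quiesce and restore IRQs around GPU reset.
 */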
1887 static void dm_gpureset_toggle_interrupts(struct amdgpu_device *adev,
1888                                  struct dc_state *state, bool enable)
1889 {
1890         enum dc_irq_source irq_source;
1891         struct amdgpu_crtc *acrtc;
1892         int rc = -EBUSY;
1893         int i = 0;
1894
1895         for (i = 0; i < state->stream_count; i++) {
1896                 acrtc = get_crtc_by_otg_inst(
1897                                 adev, state->stream_status[i].primary_otg_inst);
1898
1899                 if (acrtc && state->stream_status[i].plane_count != 0) {
1900                         irq_source = IRQ_TYPE_PFLIP + acrtc->otg_inst;
1901                         rc = dc_interrupt_set(adev->dm.dc, irq_source, enable) ? 0 : -EBUSY;
                        DRM_DEBUG_VBL("crtc %d - pflip irq %sabling: r=%d\n",
                                      acrtc->crtc_id, enable ? "en" : "dis", rc);
1904                         if (rc)
1905                                 DRM_WARN("Failed to %s pflip interrupts\n",
1906                                          enable ? "enable" : "disable");
1907
1908                         if (enable) {
1909                                 rc = dm_enable_vblank(&acrtc->base);
1910                                 if (rc)
1911                                         DRM_WARN("Failed to enable vblank interrupts\n");
1912                         } else {
1913                                 dm_disable_vblank(&acrtc->base);
1914                         }
1915
1916                 }
1917         }
1919 }
1920
1921 static enum dc_status amdgpu_dm_commit_zero_streams(struct dc *dc)
1922 {
1923         struct dc_state *context = NULL;
1924         enum dc_status res = DC_ERROR_UNEXPECTED;
1925         int i;
1926         struct dc_stream_state *del_streams[MAX_PIPES];
1927         int del_streams_count = 0;
1928
1929         memset(del_streams, 0, sizeof(del_streams));
1930
1931         context = dc_create_state(dc);
1932         if (context == NULL)
1933                 goto context_alloc_fail;
1934
1935         dc_resource_state_copy_construct_current(dc, context);
1936
        /* First, remove all streams from the context */
1938         for (i = 0; i < context->stream_count; i++) {
1939                 struct dc_stream_state *stream = context->streams[i];
1940
1941                 del_streams[del_streams_count++] = stream;
1942         }
1943
1944         /* Remove all planes for removed streams and then remove the streams */
1945         for (i = 0; i < del_streams_count; i++) {
1946                 if (!dc_rem_all_planes_for_stream(dc, del_streams[i], context)) {
1947                         res = DC_FAIL_DETACH_SURFACES;
1948                         goto fail;
1949                 }
1950
1951                 res = dc_remove_stream_from_ctx(dc, context, del_streams[i]);
1952                 if (res != DC_OK)
1953                         goto fail;
1954         }
1955
1957         res = dc_validate_global_state(dc, context, false);
1958
1959         if (res != DC_OK) {
                DRM_ERROR("%s: resource validation failed, dc_status: %d\n", __func__, res);
1961                 goto fail;
1962         }
1963
1964         res = dc_commit_state(dc, context);
1965
1966 fail:
1967         dc_release_state(context);
1968
1969 context_alloc_fail:
1970         return res;
1971 }
1972
1973 static int dm_suspend(void *handle)
1974 {
1975         struct amdgpu_device *adev = handle;
1976         struct amdgpu_display_manager *dm = &adev->dm;
1977         int ret = 0;
1978
1979         if (amdgpu_in_reset(adev)) {
1980                 mutex_lock(&dm->dc_lock);
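                /*
                 * dc_lock is intentionally held across the reset; it is
                 * released in dm_resume() once the cached state has been
                 * recommitted.
                 */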
1981
1982 #if defined(CONFIG_DRM_AMD_DC_DCN)
1983                 dc_allow_idle_optimizations(adev->dm.dc, false);
1984 #endif
1985
1986                 dm->cached_dc_state = dc_copy_state(dm->dc->current_state);
1987
1988                 dm_gpureset_toggle_interrupts(adev, dm->cached_dc_state, false);
1989
1990                 amdgpu_dm_commit_zero_streams(dm->dc);
1991
1992                 amdgpu_dm_irq_suspend(adev);
1993
1994                 return ret;
1995         }
1996
1997         WARN_ON(adev->dm.cached_state);
1998         adev->dm.cached_state = drm_atomic_helper_suspend(adev_to_drm(adev));
1999
2000         s3_handle_mst(adev_to_drm(adev), true);
2001
2002         amdgpu_dm_irq_suspend(adev);
2003
2005         dc_set_power_state(dm->dc, DC_ACPI_CM_POWER_STATE_D3);
2006
2007         return 0;
2008 }
2009
2010 static struct amdgpu_dm_connector *
2011 amdgpu_dm_find_first_crtc_matching_connector(struct drm_atomic_state *state,
2012                                              struct drm_crtc *crtc)
2013 {
2014         uint32_t i;
2015         struct drm_connector_state *new_con_state;
2016         struct drm_connector *connector;
2017         struct drm_crtc *crtc_from_state;
2018
2019         for_each_new_connector_in_state(state, connector, new_con_state, i) {
2020                 crtc_from_state = new_con_state->crtc;
2021
2022                 if (crtc_from_state == crtc)
2023                         return to_amdgpu_dm_connector(connector);
2024         }
2025
2026         return NULL;
2027 }
2028
2029 static void emulated_link_detect(struct dc_link *link)
2030 {
2031         struct dc_sink_init_data sink_init_data = { 0 };
2032         struct display_sink_capability sink_caps = { 0 };
2033         enum dc_edid_status edid_status;
2034         struct dc_context *dc_ctx = link->ctx;
2035         struct dc_sink *sink = NULL;
2036         struct dc_sink *prev_sink = NULL;
2037
2038         link->type = dc_connection_none;
2039         prev_sink = link->local_sink;
2040
2041         if (prev_sink)
2042                 dc_sink_release(prev_sink);
2043
2044         switch (link->connector_signal) {
2045         case SIGNAL_TYPE_HDMI_TYPE_A: {
2046                 sink_caps.transaction_type = DDC_TRANSACTION_TYPE_I2C;
2047                 sink_caps.signal = SIGNAL_TYPE_HDMI_TYPE_A;
2048                 break;
2049         }
2050
2051         case SIGNAL_TYPE_DVI_SINGLE_LINK: {
2052                 sink_caps.transaction_type = DDC_TRANSACTION_TYPE_I2C;
2053                 sink_caps.signal = SIGNAL_TYPE_DVI_SINGLE_LINK;
2054                 break;
2055         }
2056
2057         case SIGNAL_TYPE_DVI_DUAL_LINK: {
2058                 sink_caps.transaction_type = DDC_TRANSACTION_TYPE_I2C;
2059                 sink_caps.signal = SIGNAL_TYPE_DVI_DUAL_LINK;
2060                 break;
2061         }
2062
2063         case SIGNAL_TYPE_LVDS: {
2064                 sink_caps.transaction_type = DDC_TRANSACTION_TYPE_I2C;
2065                 sink_caps.signal = SIGNAL_TYPE_LVDS;
2066                 break;
2067         }
2068
2069         case SIGNAL_TYPE_EDP: {
2070                 sink_caps.transaction_type =
2071                         DDC_TRANSACTION_TYPE_I2C_OVER_AUX;
2072                 sink_caps.signal = SIGNAL_TYPE_EDP;
2073                 break;
2074         }
2075
2076         case SIGNAL_TYPE_DISPLAY_PORT: {
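                /*
                 * Presumably intentional: emulated detection reports a
                 * virtual signal here, since no physical DP sink is present.
                 */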
2077                 sink_caps.transaction_type =
2078                         DDC_TRANSACTION_TYPE_I2C_OVER_AUX;
2079                 sink_caps.signal = SIGNAL_TYPE_VIRTUAL;
2080                 break;
2081         }
2082
2083         default:
2084                 DC_ERROR("Invalid connector type! signal:%d\n",
2085                         link->connector_signal);
2086                 return;
2087         }
2088
2089         sink_init_data.link = link;
2090         sink_init_data.sink_signal = sink_caps.signal;
2091
2092         sink = dc_sink_create(&sink_init_data);
2093         if (!sink) {
2094                 DC_ERROR("Failed to create sink!\n");
2095                 return;
2096         }
2097
2098         /* dc_sink_create returns a new reference */
2099         link->local_sink = sink;
2100
2101         edid_status = dm_helpers_read_local_edid(
2102                         link->ctx,
2103                         link,
2104                         sink);
2105
2106         if (edid_status != EDID_OK)
                DC_ERROR("Failed to read EDID\n");
2109 }
2110
2111 static void dm_gpureset_commit_state(struct dc_state *dc_state,
2112                                      struct amdgpu_display_manager *dm)
2113 {
2114         struct {
2115                 struct dc_surface_update surface_updates[MAX_SURFACES];
2116                 struct dc_plane_info plane_infos[MAX_SURFACES];
2117                 struct dc_scaling_info scaling_infos[MAX_SURFACES];
2118                 struct dc_flip_addrs flip_addrs[MAX_SURFACES];
2119                 struct dc_stream_update stream_update;
        } *bundle;
2121         int k, m;
2122
2123         bundle = kzalloc(sizeof(*bundle), GFP_KERNEL);
2124
2125         if (!bundle) {
2126                 dm_error("Failed to allocate update bundle\n");
2127                 goto cleanup;
2128         }
2129
        for (k = 0; k < dc_state->stream_count; k++) {
                bundle->stream_update.stream = dc_state->streams[k];

                for (m = 0; m < dc_state->stream_status[k].plane_count; m++) {
                        bundle->surface_updates[m].surface =
                                dc_state->stream_status[k].plane_states[m];
                        bundle->surface_updates[m].surface->force_full_update =
                                true;
                }
                dc_commit_updates_for_stream(
                        dm->dc, bundle->surface_updates,
                        dc_state->stream_status[k].plane_count,
                        dc_state->streams[k], &bundle->stream_update, dc_state);
        }
2144
2145 cleanup:
2146         kfree(bundle);
2149 }
2150
2151 static void dm_set_dpms_off(struct dc_link *link)
2152 {
2153         struct dc_stream_state *stream_state;
2154         struct amdgpu_dm_connector *aconnector = link->priv;
2155         struct amdgpu_device *adev = drm_to_adev(aconnector->base.dev);
2156         struct dc_stream_update stream_update;
2157         bool dpms_off = true;
2158
2159         memset(&stream_update, 0, sizeof(stream_update));
2160         stream_update.dpms_off = &dpms_off;
2161
2162         mutex_lock(&adev->dm.dc_lock);
2163         stream_state = dc_stream_find_from_link(link);
2164
2165         if (stream_state == NULL) {
2166                 DRM_DEBUG_DRIVER("Error finding stream state associated with link!\n");
2167                 mutex_unlock(&adev->dm.dc_lock);
2168                 return;
2169         }
2170
2171         stream_update.stream = stream_state;
2172         dc_commit_updates_for_stream(stream_state->ctx->dc, NULL, 0,
2173                                      stream_state, &stream_update,
2174                                      stream_state->ctx->dc->current_state);
2175         mutex_unlock(&adev->dm.dc_lock);
2176 }
2177
2178 static int dm_resume(void *handle)
2179 {
2180         struct amdgpu_device *adev = handle;
2181         struct drm_device *ddev = adev_to_drm(adev);
2182         struct amdgpu_display_manager *dm = &adev->dm;
2183         struct amdgpu_dm_connector *aconnector;
2184         struct drm_connector *connector;
2185         struct drm_connector_list_iter iter;
2186         struct drm_crtc *crtc;
2187         struct drm_crtc_state *new_crtc_state;
2188         struct dm_crtc_state *dm_new_crtc_state;
2189         struct drm_plane *plane;
2190         struct drm_plane_state *new_plane_state;
2191         struct dm_plane_state *dm_new_plane_state;
2192         struct dm_atomic_state *dm_state = to_dm_atomic_state(dm->atomic_obj.state);
2193         enum dc_connection_type new_connection_type = dc_connection_none;
2194         struct dc_state *dc_state;
2195         int i, r, j;
2196
2197         if (amdgpu_in_reset(adev)) {
2198                 dc_state = dm->cached_dc_state;
2199
2200                 r = dm_dmub_hw_init(adev);
2201                 if (r)
2202                         DRM_ERROR("DMUB interface failed to initialize: status=%d\n", r);
2203
2204                 dc_set_power_state(dm->dc, DC_ACPI_CM_POWER_STATE_D0);
2205                 dc_resume(dm->dc);
2206
2207                 amdgpu_dm_irq_resume_early(adev);
2208
                for (i = 0; i < dc_state->stream_count; i++) {
                        dc_state->streams[i]->mode_changed = true;
                        for (j = 0; j < dc_state->stream_status[i].plane_count; j++) {
                                dc_state->stream_status[i].plane_states[j]->update_flags.raw
                                        = 0xffffffff;
                        }
                }
2216
2217                 WARN_ON(!dc_commit_state(dm->dc, dc_state));
2218
2219                 dm_gpureset_commit_state(dm->cached_dc_state, dm);
2220
2221                 dm_gpureset_toggle_interrupts(adev, dm->cached_dc_state, true);
2222
2223                 dc_release_state(dm->cached_dc_state);
2224                 dm->cached_dc_state = NULL;
2225
2226                 amdgpu_dm_irq_resume_late(adev);
2227
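                /* Drop the dc_lock taken in dm_suspend() on reset entry. */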
2228                 mutex_unlock(&dm->dc_lock);
2229
2230                 return 0;
2231         }
2232         /* Recreate dc_state - DC invalidates it when setting power state to S3. */
2233         dc_release_state(dm_state->context);
2234         dm_state->context = dc_create_state(dm->dc);
2235         /* TODO: Remove dc_state->dccg, use dc->dccg directly. */
2236         dc_resource_state_construct(dm->dc, dm_state->context);
2237
2238         /* Before powering on DC we need to re-initialize DMUB. */
2239         r = dm_dmub_hw_init(adev);
2240         if (r)
2241                 DRM_ERROR("DMUB interface failed to initialize: status=%d\n", r);
2242
2243         /* power on hardware */
2244         dc_set_power_state(dm->dc, DC_ACPI_CM_POWER_STATE_D0);
2245
2246         /* program HPD filter */
2247         dc_resume(dm->dc);
2248
2249         /*
2250          * early enable HPD Rx IRQ, should be done before set mode as short
2251          * pulse interrupts are used for MST
2252          */
2253         amdgpu_dm_irq_resume_early(adev);
2254
        /* On resume we need to rewrite the MSTM control bits to enable MST */
        s3_handle_mst(ddev, false);

        /* Do detection */
2259         drm_connector_list_iter_begin(ddev, &iter);
2260         drm_for_each_connector_iter(connector, &iter) {
2261                 aconnector = to_amdgpu_dm_connector(connector);
2262
                /*
                 * This is the case when traversing through already created
                 * MST connectors; skip them.
                 */
2267                 if (aconnector->mst_port)
2268                         continue;
2269
2270                 mutex_lock(&aconnector->hpd_lock);
2271                 if (!dc_link_detect_sink(aconnector->dc_link, &new_connection_type))
2272                         DRM_ERROR("KMS: Failed to detect connector\n");
2273
2274                 if (aconnector->base.force && new_connection_type == dc_connection_none)
2275                         emulated_link_detect(aconnector->dc_link);
2276                 else
2277                         dc_link_detect(aconnector->dc_link, DETECT_REASON_HPD);
2278
2279                 if (aconnector->fake_enable && aconnector->dc_link->local_sink)
2280                         aconnector->fake_enable = false;
2281
2282                 if (aconnector->dc_sink)
2283                         dc_sink_release(aconnector->dc_sink);
2284                 aconnector->dc_sink = NULL;
2285                 amdgpu_dm_update_connector_after_detect(aconnector);
2286                 mutex_unlock(&aconnector->hpd_lock);
2287         }
2288         drm_connector_list_iter_end(&iter);
2289
2290         /* Force mode set in atomic commit */
2291         for_each_new_crtc_in_state(dm->cached_state, crtc, new_crtc_state, i)
2292                 new_crtc_state->active_changed = true;
2293
2294         /*
2295          * atomic_check is expected to create the dc states. We need to release
2296          * them here, since they were duplicated as part of the suspend
2297          * procedure.
2298          */
2299         for_each_new_crtc_in_state(dm->cached_state, crtc, new_crtc_state, i) {
2300                 dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
2301                 if (dm_new_crtc_state->stream) {
2302                         WARN_ON(kref_read(&dm_new_crtc_state->stream->refcount) > 1);
2303                         dc_stream_release(dm_new_crtc_state->stream);
2304                         dm_new_crtc_state->stream = NULL;
2305                 }
2306         }
2307
2308         for_each_new_plane_in_state(dm->cached_state, plane, new_plane_state, i) {
2309                 dm_new_plane_state = to_dm_plane_state(new_plane_state);
2310                 if (dm_new_plane_state->dc_state) {
2311                         WARN_ON(kref_read(&dm_new_plane_state->dc_state->refcount) > 1);
2312                         dc_plane_state_release(dm_new_plane_state->dc_state);
2313                         dm_new_plane_state->dc_state = NULL;
2314                 }
2315         }
2316
2317         drm_atomic_helper_resume(ddev, dm->cached_state);
2318
2319         dm->cached_state = NULL;
2320
2321         amdgpu_dm_irq_resume_late(adev);
2322
2323         amdgpu_dm_smu_write_watermarks_table(adev);
2324
2325         return 0;
2326 }
2327
2328 /**
2329  * DOC: DM Lifecycle
2330  *
2331  * DM (and consequently DC) is registered in the amdgpu base driver as a IP
2332  * block. When CONFIG_DRM_AMD_DC is enabled, the DM device IP block is added to
2333  * the base driver's device list to be initialized and torn down accordingly.
2334  *
2335  * The functions to do so are provided as hooks in &struct amd_ip_funcs.
2336  */
2337
2338 static const struct amd_ip_funcs amdgpu_dm_funcs = {
2339         .name = "dm",
2340         .early_init = dm_early_init,
2341         .late_init = dm_late_init,
2342         .sw_init = dm_sw_init,
2343         .sw_fini = dm_sw_fini,
2344         .hw_init = dm_hw_init,
2345         .hw_fini = dm_hw_fini,
2346         .suspend = dm_suspend,
2347         .resume = dm_resume,
2348         .is_idle = dm_is_idle,
2349         .wait_for_idle = dm_wait_for_idle,
2350         .check_soft_reset = dm_check_soft_reset,
2351         .soft_reset = dm_soft_reset,
2352         .set_clockgating_state = dm_set_clockgating_state,
2353         .set_powergating_state = dm_set_powergating_state,
2354 };
2355
2356 const struct amdgpu_ip_block_version dm_ip_block =
2357 {
2358         .type = AMD_IP_BLOCK_TYPE_DCE,
2359         .major = 1,
2360         .minor = 0,
2361         .rev = 0,
2362         .funcs = &amdgpu_dm_funcs,
2363 };
2364
2366 /**
2367  * DOC: atomic
2368  *
2369  * *WIP*
2370  */
2371
2372 static const struct drm_mode_config_funcs amdgpu_dm_mode_funcs = {
2373         .fb_create = amdgpu_display_user_framebuffer_create,
2374         .get_format_info = amd_get_format_info,
2375         .output_poll_changed = drm_fb_helper_output_poll_changed,
2376         .atomic_check = amdgpu_dm_atomic_check,
2377         .atomic_commit = drm_atomic_helper_commit,
2378 };
2379
2380 static struct drm_mode_config_helper_funcs amdgpu_dm_mode_config_helperfuncs = {
2381         .atomic_commit_tail = amdgpu_dm_atomic_commit_tail
2382 };
2383
2384 static void update_connector_ext_caps(struct amdgpu_dm_connector *aconnector)
2385 {
2386         u32 max_cll, min_cll, max, min, q, r;
2387         struct amdgpu_dm_backlight_caps *caps;
2388         struct amdgpu_display_manager *dm;
2389         struct drm_connector *conn_base;
2390         struct amdgpu_device *adev;
2391         struct dc_link *link = NULL;
2392         static const u8 pre_computed_values[] = {
2393                 50, 51, 52, 53, 55, 56, 57, 58, 59, 61, 62, 63, 65, 66, 68, 69,
2394                 71, 72, 74, 75, 77, 79, 81, 82, 84, 86, 88, 90, 92, 94, 96, 98};
2395
2396         if (!aconnector || !aconnector->dc_link)
2397                 return;
2398
2399         link = aconnector->dc_link;
2400         if (link->connector_signal != SIGNAL_TYPE_EDP)
2401                 return;
2402
2403         conn_base = &aconnector->base;
2404         adev = drm_to_adev(conn_base->dev);
2405         dm = &adev->dm;
2406         caps = &dm->backlight_caps;
2407         caps->ext_caps = &aconnector->dc_link->dpcd_sink_ext_caps;
2408         caps->aux_support = false;
2409         max_cll = conn_base->hdr_sink_metadata.hdmi_type1.max_cll;
2410         min_cll = conn_base->hdr_sink_metadata.hdmi_type1.min_cll;
2411
2412         if (caps->ext_caps->bits.oled == 1 ||
2413             caps->ext_caps->bits.sdr_aux_backlight_control == 1 ||
2414             caps->ext_caps->bits.hdr_aux_backlight_control == 1)
2415                 caps->aux_support = true;
2416
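        /* The amdgpu_backlight module parameter overrides the sink caps:
         * 0 forces PWM, 1 forces AUX, and any other value keeps the
         * capability computed above.
         */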
2417         if (amdgpu_backlight == 0)
2418                 caps->aux_support = false;
2419         else if (amdgpu_backlight == 1)
2420                 caps->aux_support = true;
2421
        /* From the specification (CTA-861-G), for calculating the maximum
         * luminance we need to use:
         *      Luminance = 50*2**(CV/32)
         * where CV is a one-byte value.
         * Calculating this expression directly would require floating-point
         * precision; to avoid that complexity, we take advantage of the fact
         * that CV is divided by a constant. From Euclid's division algorithm,
         * we know that CV can be written as CV = 32*q + r. Substituting CV in
         * the luminance expression gives 50*(2**q)*(2**(r/32)), so we only
         * need to pre-compute the values of 2**(r/32). For that we used the
         * following Ruby line:
         *      (0...32).each {|cv| puts (50*2**(cv/32.0)).round}
         * The results of the above expression are stored in
         * pre_computed_values.
         */
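        /*
         * Worked example (hypothetical max_cll = 70):
         *      q = 70 >> 5 = 2, r = 70 % 32 = 6
         *      max = (1 << 2) * pre_computed_values[6] = 4 * 57 = 228
         * which matches 50*2**(70/32) ~= 227.8 nits.
         */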
2437         q = max_cll >> 5;
2438         r = max_cll % 32;
2439         max = (1 << q) * pre_computed_values[r];
2440
        /* min luminance: maxLum * (CV/255)^2 / 100 */
2442         q = DIV_ROUND_CLOSEST(min_cll, 255);
2443         min = max * DIV_ROUND_CLOSEST((q * q), 100);
2444
2445         caps->aux_max_input_signal = max;
2446         caps->aux_min_input_signal = min;
2447 }
2448
2449 void amdgpu_dm_update_connector_after_detect(
2450                 struct amdgpu_dm_connector *aconnector)
2451 {
2452         struct drm_connector *connector = &aconnector->base;
2453         struct drm_device *dev = connector->dev;
2454         struct dc_sink *sink;
2455
2456         /* MST handled by drm_mst framework */
        if (aconnector->mst_mgr.mst_state)
2458                 return;
2459
2460         sink = aconnector->dc_link->local_sink;
2461         if (sink)
2462                 dc_sink_retain(sink);
2463
        /*
         * Edid mgmt connector gets its first update only in the mode_valid
         * hook, and then the connector sink is set to either a fake or a
         * physical sink depending on the link status.
         * Skip if this was already done during boot.
         */
2469         if (aconnector->base.force != DRM_FORCE_UNSPECIFIED
2470                         && aconnector->dc_em_sink) {
2471
                /*
                 * For headless S3 resume, use dc_em_sink to fake the stream,
                 * because on resume connector->sink is set to NULL
                 */
2476                 mutex_lock(&dev->mode_config.mutex);
2477
2478                 if (sink) {
2479                         if (aconnector->dc_sink) {
2480                                 amdgpu_dm_update_freesync_caps(connector, NULL);
                                /*
                                 * retain and release below are used to
                                 * bump up the refcount for the sink because
                                 * the link doesn't point to it anymore after
                                 * disconnect, so on the next crtc-to-connector
                                 * reshuffle by UMD we would otherwise get an
                                 * unwanted dc_sink release
                                 */
2487                                 dc_sink_release(aconnector->dc_sink);
2488                         }
2489                         aconnector->dc_sink = sink;
2490                         dc_sink_retain(aconnector->dc_sink);
2491                         amdgpu_dm_update_freesync_caps(connector,
2492                                         aconnector->edid);
2493                 } else {
2494                         amdgpu_dm_update_freesync_caps(connector, NULL);
2495                         if (!aconnector->dc_sink) {
2496                                 aconnector->dc_sink = aconnector->dc_em_sink;
2497                                 dc_sink_retain(aconnector->dc_sink);
2498                         }
2499                 }
2500
2501                 mutex_unlock(&dev->mode_config.mutex);
2502
2503                 if (sink)
2504                         dc_sink_release(sink);
2505                 return;
2506         }
2507
2508         /*
2509          * TODO: temporary guard to look for proper fix
2510          * if this sink is MST sink, we should not do anything
2511          */
2512         if (sink && sink->sink_signal == SIGNAL_TYPE_DISPLAY_PORT_MST) {
2513                 dc_sink_release(sink);
2514                 return;
2515         }
2516
2517         if (aconnector->dc_sink == sink) {
2518                 /*
2519                  * We got a DP short pulse (Link Loss, DP CTS, etc...).
2520                  * Do nothing!!
2521                  */
2522                 DRM_DEBUG_DRIVER("DCHPD: connector_id=%d: dc_sink didn't change.\n",
2523                                 aconnector->connector_id);
2524                 if (sink)
2525                         dc_sink_release(sink);
2526                 return;
2527         }
2528
2529         DRM_DEBUG_DRIVER("DCHPD: connector_id=%d: Old sink=%p New sink=%p\n",
2530                 aconnector->connector_id, aconnector->dc_sink, sink);
2531
2532         mutex_lock(&dev->mode_config.mutex);
2533
2534         /*
2535          * 1. Update status of the drm connector
2536          * 2. Send an event and let userspace tell us what to do
2537          */
2538         if (sink) {
2539                 /*
2540                  * TODO: check if we still need the S3 mode update workaround.
2541                  * If yes, put it here.
2542                  */
2543                 if (aconnector->dc_sink) {
2544                         amdgpu_dm_update_freesync_caps(connector, NULL);
2545                         dc_sink_release(aconnector->dc_sink);
2546                 }
2547
2548                 aconnector->dc_sink = sink;
2549                 dc_sink_retain(aconnector->dc_sink);
2550                 if (sink->dc_edid.length == 0) {
2551                         aconnector->edid = NULL;
2552                         if (aconnector->dc_link->aux_mode) {
2553                                 drm_dp_cec_unset_edid(
2554                                         &aconnector->dm_dp_aux.aux);
2555                         }
2556                 } else {
2557                         aconnector->edid =
2558                                 (struct edid *)sink->dc_edid.raw_edid;
2559
2560                         drm_connector_update_edid_property(connector,
2561                                                            aconnector->edid);
2562                         if (aconnector->dc_link->aux_mode)
2563                                 drm_dp_cec_set_edid(&aconnector->dm_dp_aux.aux,
2564                                                     aconnector->edid);
2565                 }
2566
2567                 amdgpu_dm_update_freesync_caps(connector, aconnector->edid);
2568                 update_connector_ext_caps(aconnector);
2569         } else {
2570                 drm_dp_cec_unset_edid(&aconnector->dm_dp_aux.aux);
2571                 amdgpu_dm_update_freesync_caps(connector, NULL);
2572                 drm_connector_update_edid_property(connector, NULL);
2573                 aconnector->num_modes = 0;
2574                 dc_sink_release(aconnector->dc_sink);
2575                 aconnector->dc_sink = NULL;
2576                 aconnector->edid = NULL;
2577 #ifdef CONFIG_DRM_AMD_DC_HDCP
2578                 /* Set CP to DESIRED if it was ENABLED, so we can re-enable it again on hotplug */
2579                 if (connector->state->content_protection == DRM_MODE_CONTENT_PROTECTION_ENABLED)
2580                         connector->state->content_protection = DRM_MODE_CONTENT_PROTECTION_DESIRED;
2581 #endif
2582         }
2583
2584         mutex_unlock(&dev->mode_config.mutex);
2585
2586         update_subconnector_property(aconnector);
2587
2588         if (sink)
2589                 dc_sink_release(sink);
2590 }
2591
2592 static void handle_hpd_irq(void *param)
2593 {
2594         struct amdgpu_dm_connector *aconnector = (struct amdgpu_dm_connector *)param;
2595         struct drm_connector *connector = &aconnector->base;
2596         struct drm_device *dev = connector->dev;
2597         enum dc_connection_type new_connection_type = dc_connection_none;
2598         struct amdgpu_device *adev = drm_to_adev(dev);
2599 #ifdef CONFIG_DRM_AMD_DC_HDCP
2600         struct dm_connector_state *dm_con_state = to_dm_connector_state(connector->state);
2601 #endif
2602
2603         if (adev->dm.disable_hpd_irq)
2604                 return;
2605
        /*
         * In case of failure or MST there is no need to update the connector
         * status or notify the OS, since (in the MST case) MST does this in
         * its own context.
         */
2610         mutex_lock(&aconnector->hpd_lock);
2611
2612 #ifdef CONFIG_DRM_AMD_DC_HDCP
2613         if (adev->dm.hdcp_workqueue) {
2614                 hdcp_reset_display(adev->dm.hdcp_workqueue, aconnector->dc_link->link_index);
2615                 dm_con_state->update_hdcp = true;
2616         }
2617 #endif
2618         if (aconnector->fake_enable)
2619                 aconnector->fake_enable = false;
2620
2621         if (!dc_link_detect_sink(aconnector->dc_link, &new_connection_type))
2622                 DRM_ERROR("KMS: Failed to detect connector\n");
2623
2624         if (aconnector->base.force && new_connection_type == dc_connection_none) {
2625                 emulated_link_detect(aconnector->dc_link);
2626
2628                 drm_modeset_lock_all(dev);
2629                 dm_restore_drm_connector_state(dev, connector);
2630                 drm_modeset_unlock_all(dev);
2631
2632                 if (aconnector->base.force == DRM_FORCE_UNSPECIFIED)
2633                         drm_kms_helper_hotplug_event(dev);
2634
2635         } else if (dc_link_detect(aconnector->dc_link, DETECT_REASON_HPD)) {
2636                 if (new_connection_type == dc_connection_none &&
2637                     aconnector->dc_link->type == dc_connection_none)
2638                         dm_set_dpms_off(aconnector->dc_link);
2639
2640                 amdgpu_dm_update_connector_after_detect(aconnector);
2641
2642                 drm_modeset_lock_all(dev);
2643                 dm_restore_drm_connector_state(dev, connector);
2644                 drm_modeset_unlock_all(dev);
2645
2646                 if (aconnector->base.force == DRM_FORCE_UNSPECIFIED)
2647                         drm_kms_helper_hotplug_event(dev);
2648         }
2649         mutex_unlock(&aconnector->hpd_lock);
2651 }
2652
2653 static void dm_handle_hpd_rx_irq(struct amdgpu_dm_connector *aconnector)
2654 {
2655         uint8_t esi[DP_PSR_ERROR_STATUS - DP_SINK_COUNT_ESI] = { 0 };
2656         uint8_t dret;
2657         bool new_irq_handled = false;
2658         int dpcd_addr;
2659         int dpcd_bytes_to_read;
2660
2661         const int max_process_count = 30;
2662         int process_count = 0;
2663
2664         const struct dc_link_status *link_status = dc_link_get_status(aconnector->dc_link);
2665
2666         if (link_status->dpcd_caps->dpcd_rev.raw < 0x12) {
2667                 dpcd_bytes_to_read = DP_LANE0_1_STATUS - DP_SINK_COUNT;
2668                 /* DPCD 0x200 - 0x201 for downstream IRQ */
2669                 dpcd_addr = DP_SINK_COUNT;
2670         } else {
2671                 dpcd_bytes_to_read = DP_PSR_ERROR_STATUS - DP_SINK_COUNT_ESI;
2672                 /* DPCD 0x2002 - 0x2005 for downstream IRQ */
2673                 dpcd_addr = DP_SINK_COUNT_ESI;
2674         }
2675
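        /*
         * Read the IRQ/ESI block once up front; the loop below services MST
         * short-pulse IRQs, ACKs them at the sink, and re-reads until no new
         * IRQ is pending or max_process_count is reached.
         */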
2676         dret = drm_dp_dpcd_read(
2677                 &aconnector->dm_dp_aux.aux,
2678                 dpcd_addr,
2679                 esi,
2680                 dpcd_bytes_to_read);
2681
2682         while (dret == dpcd_bytes_to_read &&
2683                 process_count < max_process_count) {
2684                 uint8_t retry;
2685                 dret = 0;
2686
2687                 process_count++;
2688
2689                 DRM_DEBUG_DRIVER("ESI %02x %02x %02x\n", esi[0], esi[1], esi[2]);
2690                 /* handle HPD short pulse irq */
2691                 if (aconnector->mst_mgr.mst_state)
2692                         drm_dp_mst_hpd_irq(
2693                                 &aconnector->mst_mgr,
2694                                 esi,
2695                                 &new_irq_handled);
2696
2697                 if (new_irq_handled) {
2698                         /* ACK at DPCD to notify down stream */
2699                         const int ack_dpcd_bytes_to_write =
2700                                 dpcd_bytes_to_read - 1;
2701
2702                         for (retry = 0; retry < 3; retry++) {
2703                                 uint8_t wret;
2704
2705                                 wret = drm_dp_dpcd_write(
2706                                         &aconnector->dm_dp_aux.aux,
2707                                         dpcd_addr + 1,
2708                                         &esi[1],
2709                                         ack_dpcd_bytes_to_write);
2710                                 if (wret == ack_dpcd_bytes_to_write)
2711                                         break;
2712                         }
2713
2714                         /* check if there is new irq to be handled */
2715                         dret = drm_dp_dpcd_read(
2716                                 &aconnector->dm_dp_aux.aux,
2717                                 dpcd_addr,
2718                                 esi,
2719                                 dpcd_bytes_to_read);
2720
2721                         new_irq_handled = false;
2722                 } else {
2723                         break;
2724                 }
2725         }
2726
2727         if (process_count == max_process_count)
2728                 DRM_DEBUG_DRIVER("Loop exceeded max iterations\n");
2729 }
2730
2731 static void handle_hpd_rx_irq(void *param)
2732 {
2733         struct amdgpu_dm_connector *aconnector = (struct amdgpu_dm_connector *)param;
2734         struct drm_connector *connector = &aconnector->base;
2735         struct drm_device *dev = connector->dev;
2736         struct dc_link *dc_link = aconnector->dc_link;
2737         bool is_mst_root_connector = aconnector->mst_mgr.mst_state;
2738         bool result = false;
2739         enum dc_connection_type new_connection_type = dc_connection_none;
2740         struct amdgpu_device *adev = drm_to_adev(dev);
2741         union hpd_irq_data hpd_irq_data;
2742
2743         memset(&hpd_irq_data, 0, sizeof(hpd_irq_data));
2744
2745         if (adev->dm.disable_hpd_irq)
2746                 return;
2747
        /*
         * TODO: Temporary mutex to keep the hpd interrupt from hitting a
         * gpio conflict; once an i2c helper is implemented, this mutex
         * should be retired.
         */
2754         mutex_lock(&aconnector->hpd_lock);
2755
2756         read_hpd_rx_irq_data(dc_link, &hpd_irq_data);
2757
2758         if ((dc_link->cur_link_settings.lane_count != LANE_COUNT_UNKNOWN) ||
2759                 (dc_link->type == dc_connection_mst_branch)) {
2760                 if (hpd_irq_data.bytes.device_service_irq.bits.UP_REQ_MSG_RDY) {
2761                         result = true;
2762                         dm_handle_hpd_rx_irq(aconnector);
2763                         goto out;
2764                 } else if (hpd_irq_data.bytes.device_service_irq.bits.DOWN_REP_MSG_RDY) {
2765                         result = false;
2766                         dm_handle_hpd_rx_irq(aconnector);
2767                         goto out;
2768                 }
2769         }
2770
        if (!amdgpu_in_reset(adev)) {
                mutex_lock(&adev->dm.dc_lock);
#ifdef CONFIG_DRM_AMD_DC_HDCP
                result = dc_link_handle_hpd_rx_irq(dc_link, &hpd_irq_data, NULL);
#else
                result = dc_link_handle_hpd_rx_irq(dc_link, NULL, NULL);
#endif
                mutex_unlock(&adev->dm.dc_lock);
        }
2780
2781 out:
2782         if (result && !is_mst_root_connector) {
2783                 /* Downstream Port status changed. */
2784                 if (!dc_link_detect_sink(dc_link, &new_connection_type))
2785                         DRM_ERROR("KMS: Failed to detect connector\n");
2786
2787                 if (aconnector->base.force && new_connection_type == dc_connection_none) {
2788                         emulated_link_detect(dc_link);
2789
2790                         if (aconnector->fake_enable)
2791                                 aconnector->fake_enable = false;
2792
2793                         amdgpu_dm_update_connector_after_detect(aconnector);
2794
2796                         drm_modeset_lock_all(dev);
2797                         dm_restore_drm_connector_state(dev, connector);
2798                         drm_modeset_unlock_all(dev);
2799
2800                         drm_kms_helper_hotplug_event(dev);
2801                 } else if (dc_link_detect(dc_link, DETECT_REASON_HPDRX)) {
2802
2803                         if (aconnector->fake_enable)
2804                                 aconnector->fake_enable = false;
2805
2806                         amdgpu_dm_update_connector_after_detect(aconnector);
2807
2808
2809                         drm_modeset_lock_all(dev);
2810                         dm_restore_drm_connector_state(dev, connector);
2811                         drm_modeset_unlock_all(dev);
2812
2813                         drm_kms_helper_hotplug_event(dev);
2814                 }
2815         }
2816 #ifdef CONFIG_DRM_AMD_DC_HDCP
2817         if (hpd_irq_data.bytes.device_service_irq.bits.CP_IRQ) {
2818                 if (adev->dm.hdcp_workqueue)
2819                         hdcp_handle_cpirq(adev->dm.hdcp_workqueue,  aconnector->base.index);
2820         }
2821 #endif
2822
2823         if (dc_link->type != dc_connection_mst_branch)
2824                 drm_dp_cec_irq(&aconnector->dm_dp_aux.aux);
2825
2826         mutex_unlock(&aconnector->hpd_lock);
2827 }
2828
2829 static void register_hpd_handlers(struct amdgpu_device *adev)
2830 {
2831         struct drm_device *dev = adev_to_drm(adev);
2832         struct drm_connector *connector;
2833         struct amdgpu_dm_connector *aconnector;
2834         const struct dc_link *dc_link;
2835         struct dc_interrupt_params int_params = {0};
2836
2837         int_params.requested_polarity = INTERRUPT_POLARITY_DEFAULT;
2838         int_params.current_polarity = INTERRUPT_POLARITY_DEFAULT;
2839
2840         list_for_each_entry(connector,
2841                         &dev->mode_config.connector_list, head) {
2842
2843                 aconnector = to_amdgpu_dm_connector(connector);
2844                 dc_link = aconnector->dc_link;
2845
2846                 if (DC_IRQ_SOURCE_INVALID != dc_link->irq_source_hpd) {
2847                         int_params.int_context = INTERRUPT_LOW_IRQ_CONTEXT;
2848                         int_params.irq_source = dc_link->irq_source_hpd;
2849
2850                         amdgpu_dm_irq_register_interrupt(adev, &int_params,
2851                                         handle_hpd_irq,
2852                                         (void *) aconnector);
2853                 }
2854
2855                 if (DC_IRQ_SOURCE_INVALID != dc_link->irq_source_hpd_rx) {
2856
2857                         /* Also register for DP short pulse (hpd_rx). */
2858                         int_params.int_context = INTERRUPT_LOW_IRQ_CONTEXT;
2859                         int_params.irq_source = dc_link->irq_source_hpd_rx;
2860
2861                         amdgpu_dm_irq_register_interrupt(adev, &int_params,
2862                                         handle_hpd_rx_irq,
2863                                         (void *) aconnector);
2864                 }
2865         }
2866 }
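/*
 * Note on contexts: the HPD handlers above are registered with
 * INTERRUPT_LOW_IRQ_CONTEXT because they take mutexes and issue DPCD/AUX
 * transactions, so amdgpu_dm_irq defers them to a work queue. The
 * latency-sensitive sources below (vblank, vupdate, pflip) use
 * INTERRUPT_HIGH_IRQ_CONTEXT and run straight from the interrupt handler.
 */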
2867
2868 #if defined(CONFIG_DRM_AMD_DC_SI)
2869 /* Register IRQ sources and initialize IRQ callbacks */
2870 static int dce60_register_irq_handlers(struct amdgpu_device *adev)
2871 {
2872         struct dc *dc = adev->dm.dc;
2873         struct common_irq_params *c_irq_params;
2874         struct dc_interrupt_params int_params = {0};
2875         int r;
2876         int i;
2877         unsigned int client_id = AMDGPU_IRQ_CLIENTID_LEGACY;
2878
2879         int_params.requested_polarity = INTERRUPT_POLARITY_DEFAULT;
2880         int_params.current_polarity = INTERRUPT_POLARITY_DEFAULT;
2881
2882         /*
2883          * Actions of amdgpu_irq_add_id():
2884          * 1. Register a set() function with base driver.
2885          *    Base driver will call set() function to enable/disable an
2886          *    interrupt in DC hardware.
2887          * 2. Register amdgpu_dm_irq_handler().
2888          *    Base driver will call amdgpu_dm_irq_handler() for ALL interrupts
2889          *    coming from DC hardware.
2890          *    amdgpu_dm_irq_handler() will re-direct the interrupt to DC
2891          *    for acknowledging and handling. */
2892
2893         /* Use VBLANK interrupt */
2894         for (i = 0; i < adev->mode_info.num_crtc; i++) {
2895                 r = amdgpu_irq_add_id(adev, client_id, i + 1, &adev->crtc_irq);
2896                 if (r) {
2897                         DRM_ERROR("Failed to add crtc irq id!\n");
2898                         return r;
2899                 }
2900
2901                 int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
2902                 int_params.irq_source =
2903                         dc_interrupt_to_irq_source(dc, i + 1, 0);
2904
2905                 c_irq_params = &adev->dm.vblank_params[int_params.irq_source - DC_IRQ_SOURCE_VBLANK1];
2906
2907                 c_irq_params->adev = adev;
2908                 c_irq_params->irq_src = int_params.irq_source;
2909
2910                 amdgpu_dm_irq_register_interrupt(adev, &int_params,
2911                                 dm_crtc_high_irq, c_irq_params);
2912         }
2913
2914         /* Use GRPH_PFLIP interrupt */
2915         for (i = VISLANDS30_IV_SRCID_D1_GRPH_PFLIP;
2916                         i <= VISLANDS30_IV_SRCID_D6_GRPH_PFLIP; i += 2) {
2917                 r = amdgpu_irq_add_id(adev, client_id, i, &adev->pageflip_irq);
2918                 if (r) {
2919                         DRM_ERROR("Failed to add page flip irq id!\n");
2920                         return r;
2921                 }
2922
2923                 int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
2924                 int_params.irq_source =
2925                         dc_interrupt_to_irq_source(dc, i, 0);
2926
2927                 c_irq_params = &adev->dm.pflip_params[int_params.irq_source - DC_IRQ_SOURCE_PFLIP_FIRST];
2928
2929                 c_irq_params->adev = adev;
2930                 c_irq_params->irq_src = int_params.irq_source;
2931
2932                 amdgpu_dm_irq_register_interrupt(adev, &int_params,
2933                                 dm_pflip_high_irq, c_irq_params);
2934
2935         }
2936
2937         /* HPD */
2938         r = amdgpu_irq_add_id(adev, client_id,
2939                         VISLANDS30_IV_SRCID_HOTPLUG_DETECT_A, &adev->hpd_irq);
2940         if (r) {
2941                 DRM_ERROR("Failed to add hpd irq id!\n");
2942                 return r;
2943         }
2944
2945         register_hpd_handlers(adev);
2946
2947         return 0;
2948 }
2949 #endif
2950
2951 /* Register IRQ sources and initialize IRQ callbacks */
2952 static int dce110_register_irq_handlers(struct amdgpu_device *adev)
2953 {
2954         struct dc *dc = adev->dm.dc;
2955         struct common_irq_params *c_irq_params;
2956         struct dc_interrupt_params int_params = {0};
2957         int r;
2958         int i;
2959         unsigned int client_id = AMDGPU_IRQ_CLIENTID_LEGACY;
2960
2961         if (adev->asic_type >= CHIP_VEGA10)
2962                 client_id = SOC15_IH_CLIENTID_DCE;
2963
2964         int_params.requested_polarity = INTERRUPT_POLARITY_DEFAULT;
2965         int_params.current_polarity = INTERRUPT_POLARITY_DEFAULT;
2966
2967         /*
2968          * Actions of amdgpu_irq_add_id():
2969          * 1. Register a set() function with base driver.
2970          *    Base driver will call set() function to enable/disable an
2971          *    interrupt in DC hardware.
2972          * 2. Register amdgpu_dm_irq_handler().
2973          *    Base driver will call amdgpu_dm_irq_handler() for ALL interrupts
2974          *    coming from DC hardware.
2975          *    amdgpu_dm_irq_handler() will re-direct the interrupt to DC
2976          *    for acknowledging and handling. */
2977
2978         /* Use VBLANK interrupt */
2979         for (i = VISLANDS30_IV_SRCID_D1_VERTICAL_INTERRUPT0; i <= VISLANDS30_IV_SRCID_D6_VERTICAL_INTERRUPT0; i++) {
2980                 r = amdgpu_irq_add_id(adev, client_id, i, &adev->crtc_irq);
2981                 if (r) {
2982                         DRM_ERROR("Failed to add crtc irq id!\n");
2983                         return r;
2984                 }
2985
2986                 int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
2987                 int_params.irq_source =
2988                         dc_interrupt_to_irq_source(dc, i, 0);
2989
2990                 c_irq_params = &adev->dm.vblank_params[int_params.irq_source - DC_IRQ_SOURCE_VBLANK1];
2991
2992                 c_irq_params->adev = adev;
2993                 c_irq_params->irq_src = int_params.irq_source;
2994
2995                 amdgpu_dm_irq_register_interrupt(adev, &int_params,
2996                                 dm_crtc_high_irq, c_irq_params);
2997         }
2998
2999         /* Use VUPDATE interrupt */
3000         for (i = VISLANDS30_IV_SRCID_D1_V_UPDATE_INT; i <= VISLANDS30_IV_SRCID_D6_V_UPDATE_INT; i += 2) {
3001                 r = amdgpu_irq_add_id(adev, client_id, i, &adev->vupdate_irq);
3002                 if (r) {
3003                         DRM_ERROR("Failed to add vupdate irq id!\n");
3004                         return r;
3005                 }
3006
3007                 int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
3008                 int_params.irq_source =
3009                         dc_interrupt_to_irq_source(dc, i, 0);
3010
3011                 c_irq_params = &adev->dm.vupdate_params[int_params.irq_source - DC_IRQ_SOURCE_VUPDATE1];
3012
3013                 c_irq_params->adev = adev;
3014                 c_irq_params->irq_src = int_params.irq_source;
3015
3016                 amdgpu_dm_irq_register_interrupt(adev, &int_params,
3017                                 dm_vupdate_high_irq, c_irq_params);
3018         }
3019
3020         /* Use GRPH_PFLIP interrupt */
3021         for (i = VISLANDS30_IV_SRCID_D1_GRPH_PFLIP;
3022                         i <= VISLANDS30_IV_SRCID_D6_GRPH_PFLIP; i += 2) {
3023                 r = amdgpu_irq_add_id(adev, client_id, i, &adev->pageflip_irq);
3024                 if (r) {
3025                         DRM_ERROR("Failed to add page flip irq id!\n");
3026                         return r;
3027                 }
3028
3029                 int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
3030                 int_params.irq_source =
3031                         dc_interrupt_to_irq_source(dc, i, 0);
3032
3033                 c_irq_params = &adev->dm.pflip_params[int_params.irq_source - DC_IRQ_SOURCE_PFLIP_FIRST];
3034
3035                 c_irq_params->adev = adev;
3036                 c_irq_params->irq_src = int_params.irq_source;
3037
3038                 amdgpu_dm_irq_register_interrupt(adev, &int_params,
3039                                 dm_pflip_high_irq, c_irq_params);
3040
3041         }
3042
3043         /* HPD */
3044         r = amdgpu_irq_add_id(adev, client_id,
3045                         VISLANDS30_IV_SRCID_HOTPLUG_DETECT_A, &adev->hpd_irq);
3046         if (r) {
3047                 DRM_ERROR("Failed to add hpd irq id!\n");
3048                 return r;
3049         }
3050
3051         register_hpd_handlers(adev);
3052
3053         return 0;
3054 }
3055
3056 #if defined(CONFIG_DRM_AMD_DC_DCN)
3057 /* Register IRQ sources and initialize IRQ callbacks */
3058 static int dcn10_register_irq_handlers(struct amdgpu_device *adev)
3059 {
3060         struct dc *dc = adev->dm.dc;
3061         struct common_irq_params *c_irq_params;
3062         struct dc_interrupt_params int_params = {0};
3063         int r;
3064         int i;
3065 #if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
3066         static const unsigned int vrtl_int_srcid[] = {
3067                 DCN_1_0__SRCID__OTG1_VERTICAL_INTERRUPT0_CONTROL,
3068                 DCN_1_0__SRCID__OTG2_VERTICAL_INTERRUPT0_CONTROL,
3069                 DCN_1_0__SRCID__OTG3_VERTICAL_INTERRUPT0_CONTROL,
3070                 DCN_1_0__SRCID__OTG4_VERTICAL_INTERRUPT0_CONTROL,
3071                 DCN_1_0__SRCID__OTG5_VERTICAL_INTERRUPT0_CONTROL,
3072                 DCN_1_0__SRCID__OTG6_VERTICAL_INTERRUPT0_CONTROL
3073         };
3074 #endif
3075
3076         int_params.requested_polarity = INTERRUPT_POLARITY_DEFAULT;
3077         int_params.current_polarity = INTERRUPT_POLARITY_DEFAULT;
3078
3079         /*
3080          * Actions of amdgpu_irq_add_id():
3081          * 1. Register a set() function with base driver.
3082          *    Base driver will call set() function to enable/disable an
3083          *    interrupt in DC hardware.
3084          * 2. Register amdgpu_dm_irq_handler().
3085          *    Base driver will call amdgpu_dm_irq_handler() for ALL interrupts
3086          *    coming from DC hardware.
3087          *    amdgpu_dm_irq_handler() will re-direct the interrupt to DC
3088          *    for acknowledging and handling.
3089          */
3090
3091         /* Use VSTARTUP interrupt */
3092         for (i = DCN_1_0__SRCID__DC_D1_OTG_VSTARTUP;
3093                         i <= DCN_1_0__SRCID__DC_D1_OTG_VSTARTUP + adev->mode_info.num_crtc - 1;
3094                         i++) {
3095                 r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_DCE, i, &adev->crtc_irq);
3096
3097                 if (r) {
3098                         DRM_ERROR("Failed to add crtc irq id!\n");
3099                         return r;
3100                 }
3101
3102                 int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
3103                 int_params.irq_source =
3104                         dc_interrupt_to_irq_source(dc, i, 0);
3105
3106                 c_irq_params = &adev->dm.vblank_params[int_params.irq_source - DC_IRQ_SOURCE_VBLANK1];
3107
3108                 c_irq_params->adev = adev;
3109                 c_irq_params->irq_src = int_params.irq_source;
3110
3111                 amdgpu_dm_irq_register_interrupt(
3112                         adev, &int_params, dm_crtc_high_irq, c_irq_params);
3113         }
3114
3115         /* Use otg vertical line interrupt */
3116 #if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
3117         for (i = 0; i <= adev->mode_info.num_crtc - 1; i++) {
3118                 r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_DCE,
3119                                 vrtl_int_srcid[i], &adev->vline0_irq);
3120
3121                 if (r) {
3122                         DRM_ERROR("Failed to add vline0 irq id!\n");
3123                         return r;
3124                 }
3125
3126                 int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
3127                 int_params.irq_source =
3128                         dc_interrupt_to_irq_source(dc, vrtl_int_srcid[i], 0);
3129
3130                 if (int_params.irq_source == DC_IRQ_SOURCE_INVALID) {
3131                         DRM_ERROR("Failed to register vline0 irq %d!\n", vrtl_int_srcid[i]);
3132                         break;
3133                 }
3134
3135                 c_irq_params = &adev->dm.vline0_params[int_params.irq_source
3136                                         - DC_IRQ_SOURCE_DC1_VLINE0];
3137
3138                 c_irq_params->adev = adev;
3139                 c_irq_params->irq_src = int_params.irq_source;
3140
3141                 amdgpu_dm_irq_register_interrupt(adev, &int_params,
3142                                 dm_dcn_vertical_interrupt0_high_irq, c_irq_params);
3143         }
3144 #endif
3145
3146         /* Use VUPDATE_NO_LOCK interrupt on DCN, which seems to correspond to
3147          * the regular VUPDATE interrupt on DCE. We want DC_IRQ_SOURCE_VUPDATEx
3148          * to trigger at end of each vblank, regardless of state of the lock,
3149          * matching DCE behaviour.
3150          */
3151         for (i = DCN_1_0__SRCID__OTG0_IHC_V_UPDATE_NO_LOCK_INTERRUPT;
3152              i <= DCN_1_0__SRCID__OTG0_IHC_V_UPDATE_NO_LOCK_INTERRUPT + adev->mode_info.num_crtc - 1;
3153              i++) {
3154                 r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_DCE, i, &adev->vupdate_irq);
3155
3156                 if (r) {
3157                         DRM_ERROR("Failed to add vupdate irq id!\n");
3158                         return r;
3159                 }
3160
3161                 int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
3162                 int_params.irq_source =
3163                         dc_interrupt_to_irq_source(dc, i, 0);
3164
3165                 c_irq_params = &adev->dm.vupdate_params[int_params.irq_source - DC_IRQ_SOURCE_VUPDATE1];
3166
3167                 c_irq_params->adev = adev;
3168                 c_irq_params->irq_src = int_params.irq_source;
3169
3170                 amdgpu_dm_irq_register_interrupt(adev, &int_params,
3171                                 dm_vupdate_high_irq, c_irq_params);
3172         }
3173
3174         /* Use GRPH_PFLIP interrupt */
3175         for (i = DCN_1_0__SRCID__HUBP0_FLIP_INTERRUPT;
3176                         i <= DCN_1_0__SRCID__HUBP0_FLIP_INTERRUPT + adev->mode_info.num_crtc - 1;
3177                         i++) {
3178                 r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_DCE, i, &adev->pageflip_irq);
3179                 if (r) {
3180                         DRM_ERROR("Failed to add page flip irq id!\n");
3181                         return r;
3182                 }
3183
3184                 int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
3185                 int_params.irq_source =
3186                         dc_interrupt_to_irq_source(dc, i, 0);
3187
3188                 c_irq_params = &adev->dm.pflip_params[int_params.irq_source - DC_IRQ_SOURCE_PFLIP_FIRST];
3189
3190                 c_irq_params->adev = adev;
3191                 c_irq_params->irq_src = int_params.irq_source;
3192
3193                 amdgpu_dm_irq_register_interrupt(adev, &int_params,
3194                                 dm_pflip_high_irq, c_irq_params);
3195
3196         }
3197
3198         /* HPD */
3199         r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_DCE, DCN_1_0__SRCID__DC_HPD1_INT,
3200                         &adev->hpd_irq);
3201         if (r) {
3202                 DRM_ERROR("Failed to add hpd irq id!\n");
3203                 return r;
3204         }
3205
3206         register_hpd_handlers(adev);
3207
3208         return 0;
3209 }
3210 /* Register Outbox IRQ sources and initialize IRQ callbacks */
3211 static int register_outbox_irq_handlers(struct amdgpu_device *adev)
3212 {
3213         struct dc *dc = adev->dm.dc;
3214         struct common_irq_params *c_irq_params;
3215         struct dc_interrupt_params int_params = {0};
3216         int r, i;
3217
3218         int_params.requested_polarity = INTERRUPT_POLARITY_DEFAULT;
3219         int_params.current_polarity = INTERRUPT_POLARITY_DEFAULT;
3220
3221         r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_DCE, DCN_1_0__SRCID__DMCUB_OUTBOX_LOW_PRIORITY_READY_INT,
3222                         &adev->dmub_outbox_irq);
3223         if (r) {
3224                 DRM_ERROR("Failed to add outbox irq id!\n");
3225                 return r;
3226         }
3227
3228         if (dc->ctx->dmub_srv) {
3229                 i = DCN_1_0__SRCID__DMCUB_OUTBOX_LOW_PRIORITY_READY_INT;
3230                 int_params.int_context = INTERRUPT_LOW_IRQ_CONTEXT;
3231                 int_params.irq_source =
3232                         dc_interrupt_to_irq_source(dc, i, 0);
3233
3234                 c_irq_params = &adev->dm.dmub_outbox_params[0];
3235
3236                 c_irq_params->adev = adev;
3237                 c_irq_params->irq_src = int_params.irq_source;
3238
3239                 amdgpu_dm_irq_register_interrupt(adev, &int_params,
3240                                 dm_dmub_outbox1_low_irq, c_irq_params);
3241         }
3242
3243         return 0;
3244 }
3245 #endif
3246
3247 /*
3248  * Acquires the lock for the atomic state object and returns
3249  * the new atomic state.
3250  *
3251  * This should only be called during atomic check.
3252  */
3253 static int dm_atomic_get_state(struct drm_atomic_state *state,
3254                                struct dm_atomic_state **dm_state)
3255 {
3256         struct drm_device *dev = state->dev;
3257         struct amdgpu_device *adev = drm_to_adev(dev);
3258         struct amdgpu_display_manager *dm = &adev->dm;
3259         struct drm_private_state *priv_state;
3260
3261         if (*dm_state)
3262                 return 0;
3263
3264         priv_state = drm_atomic_get_private_obj_state(state, &dm->atomic_obj);
3265         if (IS_ERR(priv_state))
3266                 return PTR_ERR(priv_state);
3267
3268         *dm_state = to_dm_atomic_state(priv_state);
3269
3270         return 0;
3271 }
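/*
 * Illustrative sketch (not code from this driver) of how an atomic check
 * hook would pull in the global DC state on first use:
 *
 *	struct dm_atomic_state *dm_state = NULL;
 *	int ret = dm_atomic_get_state(state, &dm_state);
 *
 *	if (ret)
 *		return ret;
 *	// dm_state->context is now the writable global DC state
 */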
3272
3273 static struct dm_atomic_state *
3274 dm_atomic_get_new_state(struct drm_atomic_state *state)
3275 {
3276         struct drm_device *dev = state->dev;
3277         struct amdgpu_device *adev = drm_to_adev(dev);
3278         struct amdgpu_display_manager *dm = &adev->dm;
3279         struct drm_private_obj *obj;
3280         struct drm_private_state *new_obj_state;
3281         int i;
3282
3283         for_each_new_private_obj_in_state(state, obj, new_obj_state, i) {
3284                 if (obj->funcs == dm->atomic_obj.funcs)
3285                         return to_dm_atomic_state(new_obj_state);
3286         }
3287
3288         return NULL;
3289 }
3290
3291 static struct drm_private_state *
3292 dm_atomic_duplicate_state(struct drm_private_obj *obj)
3293 {
3294         struct dm_atomic_state *old_state, *new_state;
3295
3296         new_state = kzalloc(sizeof(*new_state), GFP_KERNEL);
3297         if (!new_state)
3298                 return NULL;
3299
3300         __drm_atomic_helper_private_obj_duplicate_state(obj, &new_state->base);
3301
3302         old_state = to_dm_atomic_state(obj->state);
3303
3304         if (old_state && old_state->context)
3305                 new_state->context = dc_copy_state(old_state->context);
3306
3307         if (!new_state->context) {
3308                 kfree(new_state);
3309                 return NULL;
3310         }
3311
3312         return &new_state->base;
3313 }
3314
3315 static void dm_atomic_destroy_state(struct drm_private_obj *obj,
3316                                     struct drm_private_state *state)
3317 {
3318         struct dm_atomic_state *dm_state = to_dm_atomic_state(state);
3319
3320         if (dm_state && dm_state->context)
3321                 dc_release_state(dm_state->context);
3322
3323         kfree(dm_state);
3324 }
3325
3326 static struct drm_private_state_funcs dm_atomic_state_funcs = {
3327         .atomic_duplicate_state = dm_atomic_duplicate_state,
3328         .atomic_destroy_state = dm_atomic_destroy_state,
3329 };
3330
3331 static int amdgpu_dm_mode_config_init(struct amdgpu_device *adev)
3332 {
3333         struct dm_atomic_state *state;
3334         int r;
3335
3336         adev->mode_info.mode_config_initialized = true;
3337
3338         adev_to_drm(adev)->mode_config.funcs = (void *)&amdgpu_dm_mode_funcs;
3339         adev_to_drm(adev)->mode_config.helper_private = &amdgpu_dm_mode_config_helperfuncs;
3340
3341         adev_to_drm(adev)->mode_config.max_width = 16384;
3342         adev_to_drm(adev)->mode_config.max_height = 16384;
3343
3344         adev_to_drm(adev)->mode_config.preferred_depth = 24;
3345         adev_to_drm(adev)->mode_config.prefer_shadow = 1;
3346         /* indicates support for immediate flip */
3347         adev_to_drm(adev)->mode_config.async_page_flip = true;
3348
3349         adev_to_drm(adev)->mode_config.fb_base = adev->gmc.aper_base;
3350
3351         state = kzalloc(sizeof(*state), GFP_KERNEL);
3352         if (!state)
3353                 return -ENOMEM;
3354
3355         state->context = dc_create_state(adev->dm.dc);
3356         if (!state->context) {
3357                 kfree(state);
3358                 return -ENOMEM;
3359         }
3360
3361         dc_resource_state_copy_construct_current(adev->dm.dc, state->context);
3362
3363         drm_atomic_private_obj_init(adev_to_drm(adev),
3364                                     &adev->dm.atomic_obj,
3365                                     &state->base,
3366                                     &dm_atomic_state_funcs);
3367
3368         r = amdgpu_display_modeset_create_props(adev);
3369         if (r) {
3370                 dc_release_state(state->context);
3371                 kfree(state);
3372                 return r;
3373         }
3374
3375         r = amdgpu_dm_audio_init(adev);
3376         if (r) {
3377                 dc_release_state(state->context);
3378                 kfree(state);
3379                 return r;
3380         }
3381
3382         return 0;
3383 }
3384
3385 #define AMDGPU_DM_DEFAULT_MIN_BACKLIGHT 12
3386 #define AMDGPU_DM_DEFAULT_MAX_BACKLIGHT 255
3387 #define AUX_BL_DEFAULT_TRANSITION_TIME_MS 50
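/*
 * The min/max values are 8-bit input-signal defaults applied when ACPI
 * does not report backlight caps; the transition time is the brightness
 * ramp duration, in milliseconds, handed to
 * dc_link_set_backlight_level_nits() on AUX-controlled panels.
 */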
3388
3389 #if defined(CONFIG_BACKLIGHT_CLASS_DEVICE) ||\
3390         defined(CONFIG_BACKLIGHT_CLASS_DEVICE_MODULE)
3391
3392 static void amdgpu_dm_update_backlight_caps(struct amdgpu_display_manager *dm)
3393 {
3394 #if defined(CONFIG_ACPI)
3395         struct amdgpu_dm_backlight_caps caps;
3396
3397         memset(&caps, 0, sizeof(caps));
3398
3399         if (dm->backlight_caps.caps_valid)
3400                 return;
3401
3402         amdgpu_acpi_get_backlight_caps(dm->adev, &caps);
3403         if (caps.caps_valid) {
3404                 dm->backlight_caps.caps_valid = true;
3405                 if (caps.aux_support)
3406                         return;
3407                 dm->backlight_caps.min_input_signal = caps.min_input_signal;
3408                 dm->backlight_caps.max_input_signal = caps.max_input_signal;
3409         } else {
3410                 dm->backlight_caps.min_input_signal =
3411                                 AMDGPU_DM_DEFAULT_MIN_BACKLIGHT;
3412                 dm->backlight_caps.max_input_signal =
3413                                 AMDGPU_DM_DEFAULT_MAX_BACKLIGHT;
3414         }
3415 #else
3416         if (dm->backlight_caps.aux_support)
3417                 return;
3418
3419         dm->backlight_caps.min_input_signal = AMDGPU_DM_DEFAULT_MIN_BACKLIGHT;
3420         dm->backlight_caps.max_input_signal = AMDGPU_DM_DEFAULT_MAX_BACKLIGHT;
3421 #endif
3422 }
3423
3424 static int get_brightness_range(const struct amdgpu_dm_backlight_caps *caps,
3425                                 unsigned int *min, unsigned int *max)
3426 {
3427         if (!caps)
3428                 return 0;
3429
3430         if (caps->aux_support) {
3431                 // Firmware limits are in nits, DC API wants millinits.
3432                 *max = 1000 * caps->aux_max_input_signal;
3433                 *min = 1000 * caps->aux_min_input_signal;
3434         } else {
3435                 // Firmware limits are 8-bit, PWM control is 16-bit.
3436                 *max = 0x101 * caps->max_input_signal;
3437                 *min = 0x101 * caps->min_input_signal;
3438         }
3439         return 1;
3440 }
3441
3442 static u32 convert_brightness_from_user(const struct amdgpu_dm_backlight_caps *caps,
3443                                         uint32_t brightness)
3444 {
3445         unsigned int min, max;
3446
3447         if (!get_brightness_range(caps, &min, &max))
3448                 return brightness;
3449
3450         // Rescale 0..255 to min..max
3451         return min + DIV_ROUND_CLOSEST((max - min) * brightness,
3452                                        AMDGPU_MAX_BL_LEVEL);
3453 }
3454
3455 static u32 convert_brightness_to_user(const struct amdgpu_dm_backlight_caps *caps,
3456                                       uint32_t brightness)
3457 {
3458         unsigned int min, max;
3459
3460         if (!get_brightness_range(caps, &min, &max))
3461                 return brightness;
3462
3463         if (brightness < min)
3464                 return 0;
3465         // Rescale min..max to 0..255
3466         return DIV_ROUND_CLOSEST(AMDGPU_MAX_BL_LEVEL * (brightness - min),
3467                                  max - min);
3468 }
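/*
 * Worked example for the PWM path, assuming the default caps above
 * (min_input_signal = 12, max_input_signal = 255): the 16-bit range is
 * min = 0x101 * 12 = 3084 and max = 0x101 * 255 = 65535, so a user level
 * of 128 maps to 3084 + (65535 - 3084) * 128 / 255 ~= 34432, and
 * converting 34432 back to user space rounds to 128 again.
 */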
3469
3470 static int amdgpu_dm_backlight_set_level(struct amdgpu_display_manager *dm,
3471                                          u32 user_brightness)
3472 {
3473         struct amdgpu_dm_backlight_caps caps;
3474         struct dc_link *link[AMDGPU_DM_MAX_NUM_EDP];
3475         u32 brightness[AMDGPU_DM_MAX_NUM_EDP];
3476         bool rc = false; /* remains false if there are no eDP links */
3477         int i;
3478
3479         amdgpu_dm_update_backlight_caps(dm);
3480         caps = dm->backlight_caps;
3481
3482         for (i = 0; i < dm->num_of_edps; i++) {
3483                 dm->brightness[i] = user_brightness;
3484                 brightness[i] = convert_brightness_from_user(&caps, dm->brightness[i]);
3485                 link[i] = (struct dc_link *)dm->backlight_link[i];
3486         }
3487
3488         /* Change brightness based on AUX property */
3489         if (caps.aux_support) {
3490                 for (i = 0; i < dm->num_of_edps; i++) {
3491                         rc = dc_link_set_backlight_level_nits(link[i], true, brightness[i],
3492                                 AUX_BL_DEFAULT_TRANSITION_TIME_MS);
3493                         if (!rc) {
3494                                 DRM_ERROR("DM: Failed to update backlight via AUX on eDP[%d]\n", i);
3495                                 break;
3496                         }
3497                 }
3498         } else {
3499                 for (i = 0; i < dm->num_of_edps; i++) {
3500                         rc = dc_link_set_backlight_level(dm->backlight_link[i], brightness[i], 0);
3501                         if (!rc) {
3502                                 DRM_ERROR("DM: Failed to update backlight on eDP[%d]\n", i);
3503                                 break;
3504                         }
3505                 }
3506         }
3507
3508         return rc ? 0 : 1;
3509 }
3510
3511 static int amdgpu_dm_backlight_update_status(struct backlight_device *bd)
3512 {
3513         struct amdgpu_display_manager *dm = bl_get_data(bd);
3514
3515         amdgpu_dm_backlight_set_level(dm, bd->props.brightness);
3516
3517         return 0;
3518 }
3519
3520 static u32 amdgpu_dm_backlight_get_level(struct amdgpu_display_manager *dm)
3521 {
3522         struct amdgpu_dm_backlight_caps caps;
3523
3524         amdgpu_dm_update_backlight_caps(dm);
3525         caps = dm->backlight_caps;
3526
3527         if (caps.aux_support) {
3528                 struct dc_link *link = (struct dc_link *)dm->backlight_link[0];
3529                 u32 avg, peak;
3530                 bool rc;
3531
3532                 rc = dc_link_get_backlight_level_nits(link, &avg, &peak);
3533                 if (!rc)
3534                         return dm->brightness[0];
3535                 return convert_brightness_to_user(&caps, avg);
3536         } else {
3537                 int ret = dc_link_get_backlight_level(dm->backlight_link[0]);
3538
3539                 if (ret == DC_ERROR_UNEXPECTED)
3540                         return dm->brightness[0];
3541                 return convert_brightness_to_user(&caps, ret);
3542         }
3543 }
3544
3545 static int amdgpu_dm_backlight_get_brightness(struct backlight_device *bd)
3546 {
3547         struct amdgpu_display_manager *dm = bl_get_data(bd);
3548
3549         return amdgpu_dm_backlight_get_level(dm);
3550 }
3551
3552 static const struct backlight_ops amdgpu_dm_backlight_ops = {
3553         .options = BL_CORE_SUSPENDRESUME,
3554         .get_brightness = amdgpu_dm_backlight_get_brightness,
3555         .update_status  = amdgpu_dm_backlight_update_status,
3556 };
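/*
 * These ops are driven through the standard backlight class interface:
 * writing /sys/class/backlight/amdgpu_bl<N>/brightness ends up in
 * amdgpu_dm_backlight_update_status(), and reading actual_brightness
 * calls amdgpu_dm_backlight_get_brightness(). BL_CORE_SUSPENDRESUME
 * tells the backlight core to turn the backlight off and back on
 * automatically across suspend/resume.
 */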
3557
3558 static void
3559 amdgpu_dm_register_backlight_device(struct amdgpu_display_manager *dm)
3560 {
3561         char bl_name[16];
3562         struct backlight_properties props = { 0 };
3563         int i;
3564
3565         amdgpu_dm_update_backlight_caps(dm);
3566         for (i = 0; i < dm->num_of_edps; i++)
3567                 dm->brightness[i] = AMDGPU_MAX_BL_LEVEL;
3568
3569         props.max_brightness = AMDGPU_MAX_BL_LEVEL;
3570         props.brightness = AMDGPU_MAX_BL_LEVEL;
3571         props.type = BACKLIGHT_RAW;
3572
3573         snprintf(bl_name, sizeof(bl_name), "amdgpu_bl%d",
3574                  adev_to_drm(dm->adev)->primary->index);
3575
3576         dm->backlight_dev = backlight_device_register(bl_name,
3577                                                       adev_to_drm(dm->adev)->dev,
3578                                                       dm,
3579                                                       &amdgpu_dm_backlight_ops,
3580                                                       &props);
3581
3582         if (IS_ERR(dm->backlight_dev))
3583                 DRM_ERROR("DM: Backlight registration failed!\n");
3584         else
3585                 DRM_DEBUG_DRIVER("DM: Registered Backlight device: %s\n", bl_name);
3586 }
3587
3588 #endif
3589
3590 static int initialize_plane(struct amdgpu_display_manager *dm,
3591                             struct amdgpu_mode_info *mode_info, int plane_id,
3592                             enum drm_plane_type plane_type,
3593                             const struct dc_plane_cap *plane_cap)
3594 {
3595         struct drm_plane *plane;
3596         unsigned long possible_crtcs;
3597         int ret = 0;
3598
3599         plane = kzalloc(sizeof(struct drm_plane), GFP_KERNEL);
3600         if (!plane) {
3601                 DRM_ERROR("KMS: Failed to allocate plane\n");
3602                 return -ENOMEM;
3603         }
3604         plane->type = plane_type;
3605
3606         /*
3607          * HACK: IGT tests expect that the primary plane for a CRTC
3608          * can only have one possible CRTC. Only expose support for
3609          * any CRTC on planes that will not be used as a primary plane
3610          * for a CRTC - i.e. overlay or underlay planes.
3611          */
3612         possible_crtcs = 1 << plane_id;
3613         if (plane_id >= dm->dc->caps.max_streams)
3614                 possible_crtcs = 0xff;
3615
3616         ret = amdgpu_dm_plane_init(dm, plane, possible_crtcs, plane_cap);
3617
3618         if (ret) {
3619                 DRM_ERROR("KMS: Failed to initialize plane\n");
3620                 kfree(plane);
3621                 return ret;
3622         }
3623
3624         if (mode_info)
3625                 mode_info->planes[plane_id] = plane;
3626
3627         return ret;
3628 }
3629
3630
3631 static void register_backlight_device(struct amdgpu_display_manager *dm,
3632                                       struct dc_link *link)
3633 {
3634 #if defined(CONFIG_BACKLIGHT_CLASS_DEVICE) ||\
3635         defined(CONFIG_BACKLIGHT_CLASS_DEVICE_MODULE)
3636
3637         if ((link->connector_signal & (SIGNAL_TYPE_EDP | SIGNAL_TYPE_LVDS)) &&
3638             link->type != dc_connection_none) {
3639                 /*
3640                  * Even if registration fails, we should continue with
3641                  * DM initialization because not having backlight control
3642                  * is better than a black screen.
3643                  */
3644                 if (!dm->backlight_dev)
3645                         amdgpu_dm_register_backlight_device(dm);
3646
3647                 if (dm->backlight_dev) {
3648                         dm->backlight_link[dm->num_of_edps] = link;
3649                         dm->num_of_edps++;
3650                 }
3651         }
3652 #endif
3653 }
3654
3655
3656 /*
3657  * In this architecture, the association
3658  * connector -> encoder -> crtc
3659  * is not really required. The crtc and connector will hold the
3660  * display_index as an abstraction to use with the DAL component.
3661  *
3662  * Returns 0 on success
3663  */
3664 static int amdgpu_dm_initialize_drm_device(struct amdgpu_device *adev)
3665 {
3666         struct amdgpu_display_manager *dm = &adev->dm;
3667         int32_t i;
3668         struct amdgpu_dm_connector *aconnector = NULL;
3669         struct amdgpu_encoder *aencoder = NULL;
3670         struct amdgpu_mode_info *mode_info = &adev->mode_info;
3671         uint32_t link_cnt;
3672         int32_t primary_planes;
3673         enum dc_connection_type new_connection_type = dc_connection_none;
3674         const struct dc_plane_cap *plane;
3675
3676         dm->display_indexes_num = dm->dc->caps.max_streams;
3677         /* Update the number of crtcs actually in use */
3678         adev->mode_info.num_crtc = adev->dm.display_indexes_num;
3679
3680         link_cnt = dm->dc->caps.max_links;
3681         if (amdgpu_dm_mode_config_init(dm->adev)) {
3682                 DRM_ERROR("DM: Failed to initialize mode config\n");
3683                 return -EINVAL;
3684         }
3685
3686         /* There is one primary plane per CRTC */
3687         primary_planes = dm->dc->caps.max_streams;
3688         ASSERT(primary_planes <= AMDGPU_MAX_PLANES);
3689
3690         /*
3691          * Initialize primary planes, the implicit planes used by legacy IOCTLs.
3692          * Order is reversed to match iteration order in atomic check.
3693          */
3694         for (i = (primary_planes - 1); i >= 0; i--) {
3695                 plane = &dm->dc->caps.planes[i];
3696
3697                 if (initialize_plane(dm, mode_info, i,
3698                                      DRM_PLANE_TYPE_PRIMARY, plane)) {
3699                         DRM_ERROR("KMS: Failed to initialize primary plane\n");
3700                         goto fail;
3701                 }
3702         }
3703
3704         /*
3705          * Initialize overlay planes, index starting after primary planes.
3706          * These planes have a higher DRM index than the primary planes since
3707          * they should be considered as having a higher z-order.
3708          * Order is reversed to match iteration order in atomic check.
3709          *
3710          * Only support DCN for now, and only expose one so we don't encourage
3711          * userspace to use up all the pipes.
3712          */
3713         for (i = 0; i < dm->dc->caps.max_planes; ++i) {
3714                 struct dc_plane_cap *plane = &dm->dc->caps.planes[i];
3715
3716                 if (plane->type != DC_PLANE_TYPE_DCN_UNIVERSAL)
3717                         continue;
3718
3719                 if (!plane->blends_with_above || !plane->blends_with_below)
3720                         continue;
3721
3722                 if (!plane->pixel_format_support.argb8888)
3723                         continue;
3724
3725                 if (initialize_plane(dm, NULL, primary_planes + i,
3726                                      DRM_PLANE_TYPE_OVERLAY, plane)) {
3727                         DRM_ERROR("KMS: Failed to initialize overlay plane\n");
3728                         goto fail;
3729                 }
3730
3731                 /* Only create one overlay plane. */
3732                 break;
3733         }
3734
3735         for (i = 0; i < dm->dc->caps.max_streams; i++)
3736                 if (amdgpu_dm_crtc_init(dm, mode_info->planes[i], i)) {
3737                         DRM_ERROR("KMS: Failed to initialize crtc\n");
3738                         goto fail;
3739                 }
3740
3741 #if defined(CONFIG_DRM_AMD_DC_DCN)
3742         /* Use Outbox interrupt */
3743         switch (adev->asic_type) {
3744         case CHIP_SIENNA_CICHLID:
3745         case CHIP_NAVY_FLOUNDER:
3746         case CHIP_RENOIR:
3747                 if (register_outbox_irq_handlers(dm->adev)) {
3748                         DRM_ERROR("DM: Failed to initialize IRQ\n");
3749                         goto fail;
3750                 }
3751                 break;
3752         default:
3753                 DRM_DEBUG_KMS("Unsupported ASIC type for outbox: 0x%X\n", adev->asic_type);
3754         }
3755 #endif
3756
3757         /* loops over all connectors on the board */
3758         for (i = 0; i < link_cnt; i++) {
3759                 struct dc_link *link = NULL;
3760
3761                 if (i > AMDGPU_DM_MAX_DISPLAY_INDEX) {
3762                         DRM_ERROR(
3763                                 "KMS: Cannot support more than %d display indexes\n",
3764                                         AMDGPU_DM_MAX_DISPLAY_INDEX);
3765                         continue;
3766                 }
3767
3768                 aconnector = kzalloc(sizeof(*aconnector), GFP_KERNEL);
3769                 if (!aconnector)
3770                         goto fail;
3771
3772                 aencoder = kzalloc(sizeof(*aencoder), GFP_KERNEL);
3773                 if (!aencoder)
3774                         goto fail;
3775
3776                 if (amdgpu_dm_encoder_init(dm->ddev, aencoder, i)) {
3777                         DRM_ERROR("KMS: Failed to initialize encoder\n");
3778                         goto fail;
3779                 }
3780
3781                 if (amdgpu_dm_connector_init(dm, aconnector, i, aencoder)) {
3782                         DRM_ERROR("KMS: Failed to initialize connector\n");
3783                         goto fail;
3784                 }
3785
3786                 link = dc_get_link_at_index(dm->dc, i);
3787
3788                 if (!dc_link_detect_sink(link, &new_connection_type))
3789                         DRM_ERROR("KMS: Failed to detect connector\n");
3790
3791                 if (aconnector->base.force && new_connection_type == dc_connection_none) {
3792                         emulated_link_detect(link);
3793                         amdgpu_dm_update_connector_after_detect(aconnector);
3794
3795                 } else if (dc_link_detect(link, DETECT_REASON_BOOT)) {
3796                         amdgpu_dm_update_connector_after_detect(aconnector);
3797                         register_backlight_device(dm, link);
3798                         if (amdgpu_dc_feature_mask & DC_PSR_MASK)
3799                                 amdgpu_dm_set_psr_caps(link);
3800                 }
3801
3802
3803         }
3804
3805         /* Software is initialized. Now we can register interrupt handlers. */
3806         switch (adev->asic_type) {
3807 #if defined(CONFIG_DRM_AMD_DC_SI)
3808         case CHIP_TAHITI:
3809         case CHIP_PITCAIRN:
3810         case CHIP_VERDE:
3811         case CHIP_OLAND:
3812                 if (dce60_register_irq_handlers(dm->adev)) {
3813                         DRM_ERROR("DM: Failed to initialize IRQ\n");
3814                         goto fail;
3815                 }
3816                 break;
3817 #endif
3818         case CHIP_BONAIRE:
3819         case CHIP_HAWAII:
3820         case CHIP_KAVERI:
3821         case CHIP_KABINI:
3822         case CHIP_MULLINS:
3823         case CHIP_TONGA:
3824         case CHIP_FIJI:
3825         case CHIP_CARRIZO:
3826         case CHIP_STONEY:
3827         case CHIP_POLARIS11:
3828         case CHIP_POLARIS10:
3829         case CHIP_POLARIS12:
3830         case CHIP_VEGAM:
3831         case CHIP_VEGA10:
3832         case CHIP_VEGA12:
3833         case CHIP_VEGA20:
3834                 if (dce110_register_irq_handlers(dm->adev)) {
3835                         DRM_ERROR("DM: Failed to initialize IRQ\n");
3836                         goto fail;
3837                 }
3838                 break;
3839 #if defined(CONFIG_DRM_AMD_DC_DCN)
3840         case CHIP_RAVEN:
3841         case CHIP_NAVI12:
3842         case CHIP_NAVI10:
3843         case CHIP_NAVI14:
3844         case CHIP_RENOIR:
3845         case CHIP_SIENNA_CICHLID:
3846         case CHIP_NAVY_FLOUNDER:
3847         case CHIP_DIMGREY_CAVEFISH:
3848         case CHIP_BEIGE_GOBY:
3849         case CHIP_VANGOGH:
3850                 if (dcn10_register_irq_handlers(dm->adev)) {
3851                         DRM_ERROR("DM: Failed to initialize IRQ\n");
3852                         goto fail;
3853                 }
3854                 break;
3855 #endif
3856         default:
3857                 DRM_ERROR("Unsupported ASIC type: 0x%X\n", adev->asic_type);
3858                 goto fail;
3859         }
3860
3861         return 0;
3862 fail:
3863         kfree(aencoder);
3864         kfree(aconnector);
3865
3866         return -EINVAL;
3867 }
3868
3869 static void amdgpu_dm_destroy_drm_device(struct amdgpu_display_manager *dm)
3870 {
3871         drm_mode_config_cleanup(dm->ddev);
3872         drm_atomic_private_obj_fini(&dm->atomic_obj);
3873         return;
3874 }
3875
3876 /******************************************************************************
3877  * amdgpu_display_funcs functions
3878  *****************************************************************************/
3879
3880 /*
3881  * dm_bandwidth_update - program display watermarks
3882  *
3883  * @adev: amdgpu_device pointer
3884  *
3885  * Calculate and program the display watermarks and line buffer allocation.
3886  */
3887 static void dm_bandwidth_update(struct amdgpu_device *adev)
3888 {
3889         /* TODO: implement later */
3890 }
3891
3892 static const struct amdgpu_display_funcs dm_display_funcs = {
3893         .bandwidth_update = dm_bandwidth_update, /* called unconditionally */
3894         .vblank_get_counter = dm_vblank_get_counter,/* called unconditionally */
3895         .backlight_set_level = NULL, /* never called for DC */
3896         .backlight_get_level = NULL, /* never called for DC */
3897         .hpd_sense = NULL,/* called unconditionally */
3898         .hpd_set_polarity = NULL, /* called unconditionally */
3899         .hpd_get_gpio_reg = NULL, /* VBIOS parsing. DAL does it. */
3900         .page_flip_get_scanoutpos =
3901                 dm_crtc_get_scanoutpos,/* called unconditionally */
3902         .add_encoder = NULL, /* VBIOS parsing. DAL does it. */
3903         .add_connector = NULL, /* VBIOS parsing. DAL does it. */
3904 };
3905
3906 #if defined(CONFIG_DEBUG_KERNEL_DC)
3907
3908 static ssize_t s3_debug_store(struct device *device,
3909                               struct device_attribute *attr,
3910                               const char *buf,
3911                               size_t count)
3912 {
3913         int ret;
3914         int s3_state;
3915         struct drm_device *drm_dev = dev_get_drvdata(device);
3916         struct amdgpu_device *adev = drm_to_adev(drm_dev);
3917
3918         ret = kstrtoint(buf, 0, &s3_state);
3919
3920         if (ret == 0) {
3921                 if (s3_state) {
3922                         dm_resume(adev);
3923                         drm_kms_helper_hotplug_event(adev_to_drm(adev));
3924                 } else
3925                         dm_suspend(adev);
3926         }
3927
3928         return ret == 0 ? count : 0;
3929 }
3930
3931 DEVICE_ATTR_WO(s3_debug);
3932
3933 #endif
3934
3935 static int dm_early_init(void *handle)
3936 {
3937         struct amdgpu_device *adev = (struct amdgpu_device *)handle;
3938
3939         switch (adev->asic_type) {
3940 #if defined(CONFIG_DRM_AMD_DC_SI)
3941         case CHIP_TAHITI:
3942         case CHIP_PITCAIRN:
3943         case CHIP_VERDE:
3944                 adev->mode_info.num_crtc = 6;
3945                 adev->mode_info.num_hpd = 6;
3946                 adev->mode_info.num_dig = 6;
3947                 break;
3948         case CHIP_OLAND:
3949                 adev->mode_info.num_crtc = 2;
3950                 adev->mode_info.num_hpd = 2;
3951                 adev->mode_info.num_dig = 2;
3952                 break;
3953 #endif
3954         case CHIP_BONAIRE:
3955         case CHIP_HAWAII:
3956                 adev->mode_info.num_crtc = 6;
3957                 adev->mode_info.num_hpd = 6;
3958                 adev->mode_info.num_dig = 6;
3959                 break;
3960         case CHIP_KAVERI:
3961                 adev->mode_info.num_crtc = 4;
3962                 adev->mode_info.num_hpd = 6;
3963                 adev->mode_info.num_dig = 7;
3964                 break;
3965         case CHIP_KABINI:
3966         case CHIP_MULLINS:
3967                 adev->mode_info.num_crtc = 2;
3968                 adev->mode_info.num_hpd = 6;
3969                 adev->mode_info.num_dig = 6;
3970                 break;
3971         case CHIP_FIJI:
3972         case CHIP_TONGA:
3973                 adev->mode_info.num_crtc = 6;
3974                 adev->mode_info.num_hpd = 6;
3975                 adev->mode_info.num_dig = 7;
3976                 break;
3977         case CHIP_CARRIZO:
3978                 adev->mode_info.num_crtc = 3;
3979                 adev->mode_info.num_hpd = 6;
3980                 adev->mode_info.num_dig = 9;
3981                 break;
3982         case CHIP_STONEY:
3983                 adev->mode_info.num_crtc = 2;
3984                 adev->mode_info.num_hpd = 6;
3985                 adev->mode_info.num_dig = 9;
3986                 break;
3987         case CHIP_POLARIS11:
3988         case CHIP_POLARIS12:
3989                 adev->mode_info.num_crtc = 5;
3990                 adev->mode_info.num_hpd = 5;
3991                 adev->mode_info.num_dig = 5;
3992                 break;
3993         case CHIP_POLARIS10:
3994         case CHIP_VEGAM:
3995                 adev->mode_info.num_crtc = 6;
3996                 adev->mode_info.num_hpd = 6;
3997                 adev->mode_info.num_dig = 6;
3998                 break;
3999         case CHIP_VEGA10:
4000         case CHIP_VEGA12:
4001         case CHIP_VEGA20:
4002                 adev->mode_info.num_crtc = 6;
4003                 adev->mode_info.num_hpd = 6;
4004                 adev->mode_info.num_dig = 6;
4005                 break;
4006 #if defined(CONFIG_DRM_AMD_DC_DCN)
4007         case CHIP_RAVEN:
4008         case CHIP_RENOIR:
4009         case CHIP_VANGOGH:
4010                 adev->mode_info.num_crtc = 4;
4011                 adev->mode_info.num_hpd = 4;
4012                 adev->mode_info.num_dig = 4;
4013                 break;
4014         case CHIP_NAVI10:
4015         case CHIP_NAVI12:
4016         case CHIP_SIENNA_CICHLID:
4017         case CHIP_NAVY_FLOUNDER:
4018                 adev->mode_info.num_crtc = 6;
4019                 adev->mode_info.num_hpd = 6;
4020                 adev->mode_info.num_dig = 6;
4021                 break;
4022         case CHIP_NAVI14:
4023         case CHIP_DIMGREY_CAVEFISH:
4024                 adev->mode_info.num_crtc = 5;
4025                 adev->mode_info.num_hpd = 5;
4026                 adev->mode_info.num_dig = 5;
4027                 break;
4028         case CHIP_BEIGE_GOBY:
4029                 adev->mode_info.num_crtc = 2;
4030                 adev->mode_info.num_hpd = 2;
4031                 adev->mode_info.num_dig = 2;
4032                 break;
4033 #endif
4034         default:
4035                 DRM_ERROR("Unsupported ASIC type: 0x%X\n", adev->asic_type);
4036                 return -EINVAL;
4037         }
4038
4039         amdgpu_dm_set_irq_funcs(adev);
4040
4041         if (adev->mode_info.funcs == NULL)
4042                 adev->mode_info.funcs = &dm_display_funcs;
4043
4044         /*
4045          * Note: Do NOT change adev->audio_endpt_rreg and
4046          * adev->audio_endpt_wreg because they are initialised in
4047          * amdgpu_device_init()
4048          */
4049 #if defined(CONFIG_DEBUG_KERNEL_DC)
4050         device_create_file(
4051                 adev_to_drm(adev)->dev,
4052                 &dev_attr_s3_debug);
4053 #endif
4054
4055         return 0;
4056 }
4057
4058 static bool modeset_required(struct drm_crtc_state *crtc_state,
4059                              struct dc_stream_state *new_stream,
4060                              struct dc_stream_state *old_stream)
4061 {
4062         return crtc_state->active && drm_atomic_crtc_needs_modeset(crtc_state);
4063 }
4064
4065 static bool modereset_required(struct drm_crtc_state *crtc_state)
4066 {
4067         return !crtc_state->active && drm_atomic_crtc_needs_modeset(crtc_state);
4068 }
4069
4070 static void amdgpu_dm_encoder_destroy(struct drm_encoder *encoder)
4071 {
4072         drm_encoder_cleanup(encoder);
4073         kfree(encoder);
4074 }
4075
4076 static const struct drm_encoder_funcs amdgpu_dm_encoder_funcs = {
4077         .destroy = amdgpu_dm_encoder_destroy,
4078 };
4079
4080
4081 static void get_min_max_dc_plane_scaling(struct drm_device *dev,
4082                                          struct drm_framebuffer *fb,
4083                                          int *min_downscale, int *max_upscale)
4084 {
4085         struct amdgpu_device *adev = drm_to_adev(dev);
4086         struct dc *dc = adev->dm.dc;
4087         /* Caps for all supported planes are the same on DCE and DCN 1 - 3 */
4088         struct dc_plane_cap *plane_cap = &dc->caps.planes[0];
4089
4090         switch (fb->format->format) {
4091         case DRM_FORMAT_P010:
4092         case DRM_FORMAT_NV12:
4093         case DRM_FORMAT_NV21:
4094                 *max_upscale = plane_cap->max_upscale_factor.nv12;
4095                 *min_downscale = plane_cap->max_downscale_factor.nv12;
4096                 break;
4097
4098         case DRM_FORMAT_XRGB16161616F:
4099         case DRM_FORMAT_ARGB16161616F:
4100         case DRM_FORMAT_XBGR16161616F:
4101         case DRM_FORMAT_ABGR16161616F:
4102                 *max_upscale = plane_cap->max_upscale_factor.fp16;
4103                 *min_downscale = plane_cap->max_downscale_factor.fp16;
4104                 break;
4105
4106         default:
4107                 *max_upscale = plane_cap->max_upscale_factor.argb8888;
4108                 *min_downscale = plane_cap->max_downscale_factor.argb8888;
4109                 break;
4110         }
4111
4112         /*
4113          * A factor of 1 in the plane_cap means scaling is not allowed, i.e. use a
4114          * scaling factor of 1.0 == 1000 units.
4115          */
4116         if (*max_upscale == 1)
4117                 *max_upscale = 1000;
4118
4119         if (*min_downscale == 1)
4120                 *min_downscale = 1000;
4121 }
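/*
 * Scaling factors are in thousandths of the dst/src ratio: e.g. a
 * max_upscale of 16000 permits up to 16x upscaling, while a
 * min_downscale of 250 permits shrinking to a quarter of the source
 * (i.e. at most 4x downscale).
 */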
4122
4123
4124 static int fill_dc_scaling_info(const struct drm_plane_state *state,
4125                                 struct dc_scaling_info *scaling_info)
4126 {
4127         int scale_w, scale_h, min_downscale, max_upscale;
4128
4129         memset(scaling_info, 0, sizeof(*scaling_info));
4130
4131         /* Source coordinates are 16.16 fixed point; ignore the fractional part for now. */
4132         scaling_info->src_rect.x = state->src_x >> 16;
4133         scaling_info->src_rect.y = state->src_y >> 16;
4134
4135         /*
4136          * For reasons we don't (yet) fully understand a non-zero
4137          * src_y coordinate into an NV12 buffer can cause a
4138          * system hang. To avoid hangs (and maybe be overly cautious)
4139          * let's reject both non-zero src_x and src_y.
4140          *
4141          * We currently know of only one use-case to reproduce a
4142          * scenario with non-zero src_x and src_y for NV12, which
4143          * is to gesture the YouTube Android app into full screen
4144          * on ChromeOS.
4145          */
4146         if (state->fb &&
4147             state->fb->format->format == DRM_FORMAT_NV12 &&
4148             (scaling_info->src_rect.x != 0 ||
4149              scaling_info->src_rect.y != 0))
4150                 return -EINVAL;
4151
4152         scaling_info->src_rect.width = state->src_w >> 16;
4153         if (scaling_info->src_rect.width == 0)
4154                 return -EINVAL;
4155
4156         scaling_info->src_rect.height = state->src_h >> 16;
4157         if (scaling_info->src_rect.height == 0)
4158                 return -EINVAL;
4159
4160         scaling_info->dst_rect.x = state->crtc_x;
4161         scaling_info->dst_rect.y = state->crtc_y;
4162
4163         if (state->crtc_w == 0)
4164                 return -EINVAL;
4165
4166         scaling_info->dst_rect.width = state->crtc_w;
4167
4168         if (state->crtc_h == 0)
4169                 return -EINVAL;
4170
4171         scaling_info->dst_rect.height = state->crtc_h;
4172
4173         /* DRM doesn't specify clipping on destination output. */
4174         scaling_info->clip_rect = scaling_info->dst_rect;
4175
4176         /* Validate scaling per-format with DC plane caps */
4177         if (state->plane && state->plane->dev && state->fb) {
4178                 get_min_max_dc_plane_scaling(state->plane->dev, state->fb,
4179                                              &min_downscale, &max_upscale);
4180         } else {
4181                 min_downscale = 250;
4182                 max_upscale = 16000;
4183         }
4184
4185         scale_w = scaling_info->dst_rect.width * 1000 /
4186                   scaling_info->src_rect.width;
4187
4188         if (scale_w < min_downscale || scale_w > max_upscale)
4189                 return -EINVAL;
4190
4191         scale_h = scaling_info->dst_rect.height * 1000 /
4192                   scaling_info->src_rect.height;
4193
4194         if (scale_h < min_downscale || scale_h > max_upscale)
4195                 return -EINVAL;
4196
4197         /*
4198          * The "scaling_quality" can be ignored for now, quality = 0 has DC
4199          * assume reasonable defaults based on the format.
4200          */
4201
4202         return 0;
4203 }
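/*
 * Example: a 1920x1080 source (state->src_w = 1920 << 16, state->src_h =
 * 1080 << 16) placed on a 3840x2160 CRTC rectangle yields scale_w =
 * scale_h = 2000, a 2.0x upscale, which passes whenever the plane's
 * max_upscale is at least 2000.
 */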
4204
4205 static void
4206 fill_gfx8_tiling_info_from_flags(union dc_tiling_info *tiling_info,
4207                                  uint64_t tiling_flags)
4208 {
4209         /* Fill GFX8 params */
4210         if (AMDGPU_TILING_GET(tiling_flags, ARRAY_MODE) == DC_ARRAY_2D_TILED_THIN1) {
4211                 unsigned int bankw, bankh, mtaspect, tile_split, num_banks;
4212
4213                 bankw = AMDGPU_TILING_GET(tiling_flags, BANK_WIDTH);
4214                 bankh = AMDGPU_TILING_GET(tiling_flags, BANK_HEIGHT);
4215                 mtaspect = AMDGPU_TILING_GET(tiling_flags, MACRO_TILE_ASPECT);
4216                 tile_split = AMDGPU_TILING_GET(tiling_flags, TILE_SPLIT);
4217                 num_banks = AMDGPU_TILING_GET(tiling_flags, NUM_BANKS);
4218
4219                 /* XXX fix me for VI */
4220                 tiling_info->gfx8.num_banks = num_banks;
4221                 tiling_info->gfx8.array_mode =
4222                                 DC_ARRAY_2D_TILED_THIN1;
4223                 tiling_info->gfx8.tile_split = tile_split;
4224                 tiling_info->gfx8.bank_width = bankw;
4225                 tiling_info->gfx8.bank_height = bankh;
4226                 tiling_info->gfx8.tile_aspect = mtaspect;
4227                 tiling_info->gfx8.tile_mode =
4228                                 DC_ADDR_SURF_MICRO_TILING_DISPLAY;
4229         } else if (AMDGPU_TILING_GET(tiling_flags, ARRAY_MODE)
4230                         == DC_ARRAY_1D_TILED_THIN1) {
4231                 tiling_info->gfx8.array_mode = DC_ARRAY_1D_TILED_THIN1;
4232         }
4233
4234         tiling_info->gfx8.pipe_config =
4235                         AMDGPU_TILING_GET(tiling_flags, PIPE_CONFIG);
4236 }
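
/*
 * Note: AMDGPU_TILING_GET(flags, FIELD) is the bitfield extraction helper
 * from the amdgpu UAPI header, roughly
 * (flags >> AMDGPU_TILING_FIELD_SHIFT) & AMDGPU_TILING_FIELD_MASK,
 * so the GFX8 parameters above are simply unpacked from the packed 64-bit
 * tiling_flags word.
 */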
4237
4238 static void
4239 fill_gfx9_tiling_info_from_device(const struct amdgpu_device *adev,
4240                                   union dc_tiling_info *tiling_info)
4241 {
4242         tiling_info->gfx9.num_pipes =
4243                 adev->gfx.config.gb_addr_config_fields.num_pipes;
4244         tiling_info->gfx9.num_banks =
4245                 adev->gfx.config.gb_addr_config_fields.num_banks;
4246         tiling_info->gfx9.pipe_interleave =
4247                 adev->gfx.config.gb_addr_config_fields.pipe_interleave_size;
4248         tiling_info->gfx9.num_shader_engines =
4249                 adev->gfx.config.gb_addr_config_fields.num_se;
4250         tiling_info->gfx9.max_compressed_frags =
4251                 adev->gfx.config.gb_addr_config_fields.max_compress_frags;
4252         tiling_info->gfx9.num_rb_per_se =
4253                 adev->gfx.config.gb_addr_config_fields.num_rb_per_se;
4254         tiling_info->gfx9.shaderEnable = 1;
4255         if (adev->asic_type == CHIP_SIENNA_CICHLID ||
4256             adev->asic_type == CHIP_NAVY_FLOUNDER ||
4257             adev->asic_type == CHIP_DIMGREY_CAVEFISH ||
4258             adev->asic_type == CHIP_BEIGE_GOBY ||
4259             adev->asic_type == CHIP_VANGOGH)
4260                 tiling_info->gfx9.num_pkrs = adev->gfx.config.gb_addr_config_fields.num_pkrs;
4261 }
4262
4263 static int
4264 validate_dcc(struct amdgpu_device *adev,
4265              const enum surface_pixel_format format,
4266              const enum dc_rotation_angle rotation,
4267              const union dc_tiling_info *tiling_info,
4268              const struct dc_plane_dcc_param *dcc,
4269              const struct dc_plane_address *address,
4270              const struct plane_size *plane_size)
4271 {
4272         struct dc *dc = adev->dm.dc;
4273         struct dc_dcc_surface_param input;
4274         struct dc_surface_dcc_cap output;
4275
4276         memset(&input, 0, sizeof(input));
4277         memset(&output, 0, sizeof(output));
4278
4279         if (!dcc->enable)
4280                 return 0;
4281
4282         if (format >= SURFACE_PIXEL_FORMAT_VIDEO_BEGIN ||
4283             !dc->cap_funcs.get_dcc_compression_cap)
4284                 return -EINVAL;
4285
4286         input.format = format;
4287         input.surface_size.width = plane_size->surface_size.width;
4288         input.surface_size.height = plane_size->surface_size.height;
4289         input.swizzle_mode = tiling_info->gfx9.swizzle;
4290
4291         if (rotation == ROTATION_ANGLE_0 || rotation == ROTATION_ANGLE_180)
4292                 input.scan = SCAN_DIRECTION_HORIZONTAL;
4293         else if (rotation == ROTATION_ANGLE_90 || rotation == ROTATION_ANGLE_270)
4294                 input.scan = SCAN_DIRECTION_VERTICAL;
4295
4296         if (!dc->cap_funcs.get_dcc_compression_cap(dc, &input, &output))
4297                 return -EINVAL;
4298
4299         if (!output.capable)
4300                 return -EINVAL;
4301
4302         if (dcc->independent_64b_blks == 0 &&
4303             output.grph.rgb.independent_64b_blks != 0)
4304                 return -EINVAL;
4305
4306         return 0;
4307 }
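
/*
 * In short: DCC is only validated for non-video formats, and the surface is
 * rejected unless DC reports the swizzle/size combination as compressible
 * via get_dcc_compression_cap(). A modifier requesting non-independent 64B
 * blocks on hardware that requires independent blocks is likewise refused.
 */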
4308
4309 static bool
4310 modifier_has_dcc(uint64_t modifier)
4311 {
4312         return IS_AMD_FMT_MOD(modifier) && AMD_FMT_MOD_GET(DCC, modifier);
4313 }
4314
4315 static unsigned
4316 modifier_gfx9_swizzle_mode(uint64_t modifier)
4317 {
4318         if (modifier == DRM_FORMAT_MOD_LINEAR)
4319                 return 0;
4320
4321         return AMD_FMT_MOD_GET(TILE, modifier);
4322 }
4323
4324 static const struct drm_format_info *
4325 amd_get_format_info(const struct drm_mode_fb_cmd2 *cmd)
4326 {
4327         return amdgpu_lookup_format_info(cmd->pixel_format, cmd->modifier[0]);
4328 }
4329
4330 static void
4331 fill_gfx9_tiling_info_from_modifier(const struct amdgpu_device *adev,
4332                                     union dc_tiling_info *tiling_info,
4333                                     uint64_t modifier)
4334 {
4335         unsigned int mod_bank_xor_bits = AMD_FMT_MOD_GET(BANK_XOR_BITS, modifier);
4336         unsigned int mod_pipe_xor_bits = AMD_FMT_MOD_GET(PIPE_XOR_BITS, modifier);
4337         unsigned int pkrs_log2 = AMD_FMT_MOD_GET(PACKERS, modifier);
4338         unsigned int pipes_log2 = min(4u, mod_pipe_xor_bits);
4339
4340         fill_gfx9_tiling_info_from_device(adev, tiling_info);
4341
4342         if (!IS_AMD_FMT_MOD(modifier))
4343                 return;
4344
4345         tiling_info->gfx9.num_pipes = 1u << pipes_log2;
4346         tiling_info->gfx9.num_shader_engines = 1u << (mod_pipe_xor_bits - pipes_log2);
4347
4348         if (adev->family >= AMDGPU_FAMILY_NV) {
4349                 tiling_info->gfx9.num_pkrs = 1u << pkrs_log2;
4350         } else {
4351                 tiling_info->gfx9.num_banks = 1u << mod_bank_xor_bits;
4352
4353                 /* for DCC we know it isn't rb aligned, so rb_per_se doesn't matter. */
4354         }
4355 }
4356
4357 enum dm_micro_swizzle {
4358         MICRO_SWIZZLE_Z = 0,
4359         MICRO_SWIZZLE_S = 1,
4360         MICRO_SWIZZLE_D = 2,
4361         MICRO_SWIZZLE_R = 3
4362 };
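
/*
 * The micro swizzle is encoded in the low two bits of the GFX9+ swizzle
 * mode. For example, using the AMD_FMT_MOD_TILE_GFX9_* values from
 * drm_fourcc.h: 64K_S_X = 25 -> 25 & 3 = 1 = MICRO_SWIZZLE_S,
 * 64K_D_X = 26 -> 2 = MICRO_SWIZZLE_D, 64K_R_X = 27 -> 3 = MICRO_SWIZZLE_R.
 * This is why dm_plane_format_mod_supported() below masks the swizzle mode
 * with 3.
 */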
4363
4364 static bool dm_plane_format_mod_supported(struct drm_plane *plane,
4365                                           uint32_t format,
4366                                           uint64_t modifier)
4367 {
4368         struct amdgpu_device *adev = drm_to_adev(plane->dev);
4369         const struct drm_format_info *info = drm_format_info(format);
4370         int i;
4371
4372         enum dm_micro_swizzle microtile = modifier_gfx9_swizzle_mode(modifier) & 3;
4373
4374         if (!info)
4375                 return false;
4376
4377         /*
4378          * We always have to allow these modifiers:
4379          * 1. Core DRM checks for LINEAR support if userspace does not provide modifiers.
4380          * 2. Not passing any modifiers is the same as explicitly passing INVALID.
4381          */
4382         if (modifier == DRM_FORMAT_MOD_LINEAR ||
4383             modifier == DRM_FORMAT_MOD_INVALID) {
4384                 return true;
4385         }
4386
4387         /* Check that the modifier is on the list of the plane's supported modifiers. */
4388         for (i = 0; i < plane->modifier_count; i++) {
4389                 if (modifier == plane->modifiers[i])
4390                         break;
4391         }
4392         if (i == plane->modifier_count)
4393                 return false;
4394
4395         /*
4396          * For D swizzle the canonical modifier depends on the bpp, so check
4397          * it here.
4398          */
4399         if (AMD_FMT_MOD_GET(TILE_VERSION, modifier) == AMD_FMT_MOD_TILE_VER_GFX9 &&
4400             adev->family >= AMDGPU_FAMILY_NV) {
4401                 if (microtile == MICRO_SWIZZLE_D && info->cpp[0] == 4)
4402                         return false;
4403         }
4404
4405         if (adev->family >= AMDGPU_FAMILY_RV && microtile == MICRO_SWIZZLE_D &&
4406             info->cpp[0] < 8)
4407                 return false;
4408
4409         if (modifier_has_dcc(modifier)) {
4410                 /* Per radeonsi comments, 16/64 bpp are more complicated. */
4411                 if (info->cpp[0] != 4)
4412                         return false;
4413                 /* We support multi-planar formats, but not when combined with
4414                  * additional DCC metadata planes. */
4415                 if (info->num_planes > 1)
4416                         return false;
4417         }
4418
4419         return true;
4420 }
4421
4422 static void
4423 add_modifier(uint64_t **mods, uint64_t *size, uint64_t *cap, uint64_t mod)
4424 {
4425         if (!*mods)
4426                 return;
4427
4428         if (*cap - *size < 1) {
4429                 uint64_t new_cap = *cap * 2;
4430                 uint64_t *new_mods = kmalloc_array(new_cap, sizeof(uint64_t), GFP_KERNEL);
4431
4432                 if (!new_mods) {
4433                         kfree(*mods);
4434                         *mods = NULL;
4435                         return;
4436                 }
4437
4438                 memcpy(new_mods, *mods, sizeof(uint64_t) * *size);
4439                 kfree(*mods);
4440                 *mods = new_mods;
4441                 *cap = new_cap;
4442         }
4443
4444         (*mods)[*size] = mod;
4445         *size += 1;
4446 }
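
/*
 * Usage sketch (illustrative only): build a list, then make one NULL check
 * at the end instead of checking every insertion.
 *
 *      uint64_t size = 0, cap = 4;
 *      uint64_t *mods = kmalloc_array(cap, sizeof(uint64_t), GFP_KERNEL);
 *
 *      add_modifier(&mods, &size, &cap, DRM_FORMAT_MOD_LINEAR);
 *      add_modifier(&mods, &size, &cap, DRM_FORMAT_MOD_INVALID);
 *      if (!mods)
 *              return -ENOMEM;
 *
 * On allocation failure add_modifier() frees the list and leaves *mods ==
 * NULL, turning every later call into a no-op; that is what makes the
 * single trailing check sufficient.
 */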
4447
4448 static void
4449 add_gfx9_modifiers(const struct amdgpu_device *adev,
4450                    uint64_t **mods, uint64_t *size, uint64_t *capacity)
4451 {
4452         int pipes = ilog2(adev->gfx.config.gb_addr_config_fields.num_pipes);
4453         int pipe_xor_bits = min(8, pipes +
4454                                 ilog2(adev->gfx.config.gb_addr_config_fields.num_se));
4455         int bank_xor_bits = min(8 - pipe_xor_bits,
4456                                 ilog2(adev->gfx.config.gb_addr_config_fields.num_banks));
4457         int rb = ilog2(adev->gfx.config.gb_addr_config_fields.num_se) +
4458                  ilog2(adev->gfx.config.gb_addr_config_fields.num_rb_per_se);
4459
4461         if (adev->family == AMDGPU_FAMILY_RV) {
4462                 /* Raven2 and later */
4463                 bool has_constant_encode = adev->asic_type > CHIP_RAVEN || adev->external_rev_id >= 0x81;
4464
4465                 /*
4466                  * No _D DCC swizzles yet because we only allow 32bpp, which
4467                  * doesn't support _D on DCN
4468                  */
4469
4470                 if (has_constant_encode) {
4471                         add_modifier(mods, size, capacity, AMD_FMT_MOD |
4472                                     AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S_X) |
4473                                     AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9) |
4474                                     AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
4475                                     AMD_FMT_MOD_SET(BANK_XOR_BITS, bank_xor_bits) |
4476                                     AMD_FMT_MOD_SET(DCC, 1) |
4477                                     AMD_FMT_MOD_SET(DCC_INDEPENDENT_64B, 1) |
4478                                     AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_64B) |
4479                                     AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 1));
4480                 }
4481
4482                 add_modifier(mods, size, capacity, AMD_FMT_MOD |
4483                             AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S_X) |
4484                             AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9) |
4485                             AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
4486                             AMD_FMT_MOD_SET(BANK_XOR_BITS, bank_xor_bits) |
4487                             AMD_FMT_MOD_SET(DCC, 1) |
4488                             AMD_FMT_MOD_SET(DCC_INDEPENDENT_64B, 1) |
4489                             AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_64B) |
4490                             AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 0));
4491
4492                 if (has_constant_encode) {
4493                         add_modifier(mods, size, capacity, AMD_FMT_MOD |
4494                                     AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S_X) |
4495                                     AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9) |
4496                                     AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
4497                                     AMD_FMT_MOD_SET(BANK_XOR_BITS, bank_xor_bits) |
4498                                     AMD_FMT_MOD_SET(DCC, 1) |
4499                                     AMD_FMT_MOD_SET(DCC_RETILE, 1) |
4500                                     AMD_FMT_MOD_SET(DCC_INDEPENDENT_64B, 1) |
4501                                     AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_64B) |
4503                                     AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 1) |
4504                                     AMD_FMT_MOD_SET(RB, rb) |
4505                                     AMD_FMT_MOD_SET(PIPE, pipes));
4506                 }
4507
4508                 add_modifier(mods, size, capacity, AMD_FMT_MOD |
4509                             AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S_X) |
4510                             AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9) |
4511                             AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
4512                             AMD_FMT_MOD_SET(BANK_XOR_BITS, bank_xor_bits) |
4513                             AMD_FMT_MOD_SET(DCC, 1) |
4514                             AMD_FMT_MOD_SET(DCC_RETILE, 1) |
4515                             AMD_FMT_MOD_SET(DCC_INDEPENDENT_64B, 1) |
4516                             AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_64B) |
4517                             AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 0) |
4518                             AMD_FMT_MOD_SET(RB, rb) |
4519                             AMD_FMT_MOD_SET(PIPE, pipes));
4520         }
4521
4522         /*
4523          * Only supported for 64bpp on Raven, will be filtered on format in
4524          * dm_plane_format_mod_supported.
4525          */
4526         add_modifier(mods, size, capacity, AMD_FMT_MOD |
4527                     AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_D_X) |
4528                     AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9) |
4529                     AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
4530                     AMD_FMT_MOD_SET(BANK_XOR_BITS, bank_xor_bits));
4531
4532         if (adev->family == AMDGPU_FAMILY_RV) {
4533                 add_modifier(mods, size, capacity, AMD_FMT_MOD |
4534                             AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S_X) |
4535                             AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9) |
4536                             AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
4537                             AMD_FMT_MOD_SET(BANK_XOR_BITS, bank_xor_bits));
4538         }
4539
4540         /*
4541          * Only supported for 64bpp on Raven, will be filtered on format in
4542          * dm_plane_format_mod_supported.
4543          */
4544         add_modifier(mods, size, capacity, AMD_FMT_MOD |
4545                     AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_D) |
4546                     AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9));
4547
4548         if (adev->family == AMDGPU_FAMILY_RV) {
4549                 add_modifier(mods, size, capacity, AMD_FMT_MOD |
4550                             AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S) |
4551                             AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9));
4552         }
4553 }
4554
4555 static void
4556 add_gfx10_1_modifiers(const struct amdgpu_device *adev,
4557                       uint64_t **mods, uint64_t *size, uint64_t *capacity)
4558 {
4559         int pipe_xor_bits = ilog2(adev->gfx.config.gb_addr_config_fields.num_pipes);
4560
4561         add_modifier(mods, size, capacity, AMD_FMT_MOD |
4562                     AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_R_X) |
4563                     AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10) |
4564                     AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
4565                     AMD_FMT_MOD_SET(DCC, 1) |
4566                     AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 1) |
4567                     AMD_FMT_MOD_SET(DCC_INDEPENDENT_64B, 1) |
4568                     AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_64B));
4569
4570         add_modifier(mods, size, capacity, AMD_FMT_MOD |
4571                     AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_R_X) |
4572                     AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10) |
4573                     AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
4574                     AMD_FMT_MOD_SET(DCC, 1) |
4575                     AMD_FMT_MOD_SET(DCC_RETILE, 1) |
4576                     AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 1) |
4577                     AMD_FMT_MOD_SET(DCC_INDEPENDENT_64B, 1) |
4578                     AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_64B));
4579
4580         add_modifier(mods, size, capacity, AMD_FMT_MOD |
4581                     AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_R_X) |
4582                     AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10) |
4583                     AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits));
4584
4585         add_modifier(mods, size, capacity, AMD_FMT_MOD |
4586                     AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S_X) |
4587                     AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10) |
4588                     AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits));
4589
4591         /* Only supported for 64bpp, will be filtered in dm_plane_format_mod_supported */
4592         add_modifier(mods, size, capacity, AMD_FMT_MOD |
4593                     AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_D) |
4594                     AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9));
4595
4596         add_modifier(mods, size, capacity, AMD_FMT_MOD |
4597                     AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S) |
4598                     AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9));
4599 }
4600
4601 static void
4602 add_gfx10_3_modifiers(const struct amdgpu_device *adev,
4603                       uint64_t **mods, uint64_t *size, uint64_t *capacity)
4604 {
4605         int pipe_xor_bits = ilog2(adev->gfx.config.gb_addr_config_fields.num_pipes);
4606         int pkrs = ilog2(adev->gfx.config.gb_addr_config_fields.num_pkrs);
4607
4608         add_modifier(mods, size, capacity, AMD_FMT_MOD |
4609                     AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_R_X) |
4610                     AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10_RBPLUS) |
4611                     AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
4612                     AMD_FMT_MOD_SET(PACKERS, pkrs) |
4613                     AMD_FMT_MOD_SET(DCC, 1) |
4614                     AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 1) |
4615                     AMD_FMT_MOD_SET(DCC_INDEPENDENT_64B, 1) |
4616                     AMD_FMT_MOD_SET(DCC_INDEPENDENT_128B, 1) |
4617                     AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_64B));
4618
4619         add_modifier(mods, size, capacity, AMD_FMT_MOD |
4620                     AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_R_X) |
4621                     AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10_RBPLUS) |
4622                     AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
4623                     AMD_FMT_MOD_SET(PACKERS, pkrs) |
4624                     AMD_FMT_MOD_SET(DCC, 1) |
4625                     AMD_FMT_MOD_SET(DCC_RETILE, 1) |
4626                     AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 1) |
4627                     AMD_FMT_MOD_SET(DCC_INDEPENDENT_64B, 1) |
4628                     AMD_FMT_MOD_SET(DCC_INDEPENDENT_128B, 1) |
4629                     AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_64B));
4630
4631         add_modifier(mods, size, capacity, AMD_FMT_MOD |
4632                     AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_R_X) |
4633                     AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10_RBPLUS) |
4634                     AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
4635                     AMD_FMT_MOD_SET(PACKERS, pkrs));
4636
4637         add_modifier(mods, size, capacity, AMD_FMT_MOD |
4638                     AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S_X) |
4639                     AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10_RBPLUS) |
4640                     AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
4641                     AMD_FMT_MOD_SET(PACKERS, pkrs));
4642
4643         /* Only supported for 64bpp, will be filtered in dm_plane_format_mod_supported */
4644         add_modifier(mods, size, capacity, AMD_FMT_MOD |
4645                     AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_D) |
4646                     AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9));
4647
4648         add_modifier(mods, size, capacity, AMD_FMT_MOD |
4649                     AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S) |
4650                     AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9));
4651 }
4652
4653 static int
4654 get_plane_modifiers(const struct amdgpu_device *adev, unsigned int plane_type, uint64_t **mods)
4655 {
4656         uint64_t size = 0, capacity = 128;
4657         *mods = NULL;
4658
4659         /* We have not hooked up any pre-GFX9 modifiers. */
4660         if (adev->family < AMDGPU_FAMILY_AI)
4661                 return 0;
4662
4663         *mods = kmalloc_array(capacity, sizeof(uint64_t), GFP_KERNEL);
4664
4665         if (plane_type == DRM_PLANE_TYPE_CURSOR) {
4666                 add_modifier(mods, &size, &capacity, DRM_FORMAT_MOD_LINEAR);
4667                 add_modifier(mods, &size, &capacity, DRM_FORMAT_MOD_INVALID);
4668                 return *mods ? 0 : -ENOMEM;
4669         }
4670
4671         switch (adev->family) {
4672         case AMDGPU_FAMILY_AI:
4673         case AMDGPU_FAMILY_RV:
4674                 add_gfx9_modifiers(adev, mods, &size, &capacity);
4675                 break;
4676         case AMDGPU_FAMILY_NV:
4677         case AMDGPU_FAMILY_VGH:
4678                 if (adev->asic_type >= CHIP_SIENNA_CICHLID)
4679                         add_gfx10_3_modifiers(adev, mods, &size, &capacity);
4680                 else
4681                         add_gfx10_1_modifiers(adev, mods, &size, &capacity);
4682                 break;
4683         }
4684
4685         add_modifier(mods, &size, &capacity, DRM_FORMAT_MOD_LINEAR);
4686
4687         /* INVALID marks the end of the list. */
4688         add_modifier(mods, &size, &capacity, DRM_FORMAT_MOD_INVALID);
4689
4690         if (!*mods)
4691                 return -ENOMEM;
4692
4693         return 0;
4694 }
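
/*
 * Illustrative result (not exhaustive): for a cursor plane the returned list
 * is just { LINEAR, INVALID }, while for a GFX9 primary plane it is the set
 * built by add_gfx9_modifiers() followed by LINEAR and the INVALID
 * terminator. Assuming the usual drm_universal_plane_init() flow, DRM core
 * consumes this list and advertises it to userspace via the IN_FORMATS blob.
 */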
4695
4696 static int
4697 fill_gfx9_plane_attributes_from_modifiers(struct amdgpu_device *adev,
4698                                           const struct amdgpu_framebuffer *afb,
4699                                           const enum surface_pixel_format format,
4700                                           const enum dc_rotation_angle rotation,
4701                                           const struct plane_size *plane_size,
4702                                           union dc_tiling_info *tiling_info,
4703                                           struct dc_plane_dcc_param *dcc,
4704                                           struct dc_plane_address *address,
4705                                           const bool force_disable_dcc)
4706 {
4707         const uint64_t modifier = afb->base.modifier;
4708         int ret;
4709
4710         fill_gfx9_tiling_info_from_modifier(adev, tiling_info, modifier);
4711         tiling_info->gfx9.swizzle = modifier_gfx9_swizzle_mode(modifier);
4712
4713         if (modifier_has_dcc(modifier) && !force_disable_dcc) {
4714                 uint64_t dcc_address = afb->address + afb->base.offsets[1];
4715
4716                 dcc->enable = 1;
4717                 dcc->meta_pitch = afb->base.pitches[1];
4718                 dcc->independent_64b_blks = AMD_FMT_MOD_GET(DCC_INDEPENDENT_64B, modifier);
4719
4720                 address->grph.meta_addr.low_part = lower_32_bits(dcc_address);
4721                 address->grph.meta_addr.high_part = upper_32_bits(dcc_address);
4722         }
4723
4724         ret = validate_dcc(adev, format, rotation, tiling_info, dcc, address, plane_size);
4725         if (ret)
4726                 return ret;
4727
4728         return 0;
4729 }
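
/*
 * Layout assumed by the code above for DCC-enabled framebuffers: plane 0 of
 * the GEM object holds the pixel data and plane 1 holds the DCC metadata,
 * so offsets[1]/pitches[1] describe where the metadata surface lives
 * relative to the start of the buffer.
 */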
4730
4731 static int
4732 fill_plane_buffer_attributes(struct amdgpu_device *adev,
4733                              const struct amdgpu_framebuffer *afb,
4734                              const enum surface_pixel_format format,
4735                              const enum dc_rotation_angle rotation,
4736                              const uint64_t tiling_flags,
4737                              union dc_tiling_info *tiling_info,
4738                              struct plane_size *plane_size,
4739                              struct dc_plane_dcc_param *dcc,
4740                              struct dc_plane_address *address,
4741                              bool tmz_surface,
4742                              bool force_disable_dcc)
4743 {
4744         const struct drm_framebuffer *fb = &afb->base;
4745         int ret;
4746
4747         memset(tiling_info, 0, sizeof(*tiling_info));
4748         memset(plane_size, 0, sizeof(*plane_size));
4749         memset(dcc, 0, sizeof(*dcc));
4750         memset(address, 0, sizeof(*address));
4751
4752         address->tmz_surface = tmz_surface;
4753
4754         if (format < SURFACE_PIXEL_FORMAT_VIDEO_BEGIN) {
4755                 uint64_t addr = afb->address + fb->offsets[0];
4756
4757                 plane_size->surface_size.x = 0;
4758                 plane_size->surface_size.y = 0;
4759                 plane_size->surface_size.width = fb->width;
4760                 plane_size->surface_size.height = fb->height;
4761                 plane_size->surface_pitch =
4762                         fb->pitches[0] / fb->format->cpp[0];
4763
4764                 address->type = PLN_ADDR_TYPE_GRAPHICS;
4765                 address->grph.addr.low_part = lower_32_bits(addr);
4766                 address->grph.addr.high_part = upper_32_bits(addr);
4767         } else if (format < SURFACE_PIXEL_FORMAT_INVALID) {
4768                 uint64_t luma_addr = afb->address + fb->offsets[0];
4769                 uint64_t chroma_addr = afb->address + fb->offsets[1];
4770
4771                 plane_size->surface_size.x = 0;
4772                 plane_size->surface_size.y = 0;
4773                 plane_size->surface_size.width = fb->width;
4774                 plane_size->surface_size.height = fb->height;
4775                 plane_size->surface_pitch =
4776                         fb->pitches[0] / fb->format->cpp[0];
4777
4778                 plane_size->chroma_size.x = 0;
4779                 plane_size->chroma_size.y = 0;
4780                 /* TODO: set these based on surface format */
4781                 plane_size->chroma_size.width = fb->width / 2;
4782                 plane_size->chroma_size.height = fb->height / 2;
4783
4784                 plane_size->chroma_pitch =
4785                         fb->pitches[1] / fb->format->cpp[1];
4786
4787                 address->type = PLN_ADDR_TYPE_VIDEO_PROGRESSIVE;
4788                 address->video_progressive.luma_addr.low_part =
4789                         lower_32_bits(luma_addr);
4790                 address->video_progressive.luma_addr.high_part =
4791                         upper_32_bits(luma_addr);
4792                 address->video_progressive.chroma_addr.low_part =
4793                         lower_32_bits(chroma_addr);
4794                 address->video_progressive.chroma_addr.high_part =
4795                         upper_32_bits(chroma_addr);
4796         }
4797
4798         if (adev->family >= AMDGPU_FAMILY_AI) {
4799                 ret = fill_gfx9_plane_attributes_from_modifiers(adev, afb, format,
4800                                                                 rotation, plane_size,
4801                                                                 tiling_info, dcc,
4802                                                                 address,
4803                                                                 force_disable_dcc);
4804                 if (ret)
4805                         return ret;
4806         } else {
4807                 fill_gfx8_tiling_info_from_flags(tiling_info, tiling_flags);
4808         }
4809
4810         return 0;
4811 }
4812
4813 static void
4814 fill_blending_from_plane_state(const struct drm_plane_state *plane_state,
4815                                bool *per_pixel_alpha, bool *global_alpha,
4816                                int *global_alpha_value)
4817 {
4818         *per_pixel_alpha = false;
4819         *global_alpha = false;
4820         *global_alpha_value = 0xff;
4821
4822         if (plane_state->plane->type != DRM_PLANE_TYPE_OVERLAY)
4823                 return;
4824
4825         if (plane_state->pixel_blend_mode == DRM_MODE_BLEND_PREMULTI) {
4826                 static const uint32_t alpha_formats[] = {
4827                         DRM_FORMAT_ARGB8888,
4828                         DRM_FORMAT_RGBA8888,
4829                         DRM_FORMAT_ABGR8888,
4830                 };
4831                 uint32_t format = plane_state->fb->format->format;
4832                 unsigned int i;
4833
4834                 for (i = 0; i < ARRAY_SIZE(alpha_formats); ++i) {
4835                         if (format == alpha_formats[i]) {
4836                                 *per_pixel_alpha = true;
4837                                 break;
4838                         }
4839                 }
4840         }
4841
4842         if (plane_state->alpha < 0xffff) {
4843                 *global_alpha = true;
4844                 *global_alpha_value = plane_state->alpha >> 8;
4845         }
4846 }
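
/*
 * Worked example: the DRM "alpha" property is 16-bit, so a plane alpha of
 * 0x8000 (~50%) is below the opaque value 0xffff; the code above then sets
 * *global_alpha = true and *global_alpha_value = 0x8000 >> 8 = 0x80, the
 * 8-bit value DC expects.
 */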
4847
4848 static int
4849 fill_plane_color_attributes(const struct drm_plane_state *plane_state,
4850                             const enum surface_pixel_format format,
4851                             enum dc_color_space *color_space)
4852 {
4853         bool full_range;
4854
4855         *color_space = COLOR_SPACE_SRGB;
4856
4857         /* DRM color properties only affect non-RGB formats. */
4858         if (format < SURFACE_PIXEL_FORMAT_VIDEO_BEGIN)
4859                 return 0;
4860
4861         full_range = (plane_state->color_range == DRM_COLOR_YCBCR_FULL_RANGE);
4862
4863         switch (plane_state->color_encoding) {
4864         case DRM_COLOR_YCBCR_BT601:
4865                 if (full_range)
4866                         *color_space = COLOR_SPACE_YCBCR601;
4867                 else
4868                         *color_space = COLOR_SPACE_YCBCR601_LIMITED;
4869                 break;
4870
4871         case DRM_COLOR_YCBCR_BT709:
4872                 if (full_range)
4873                         *color_space = COLOR_SPACE_YCBCR709;
4874                 else
4875                         *color_space = COLOR_SPACE_YCBCR709_LIMITED;
4876                 break;
4877
4878         case DRM_COLOR_YCBCR_BT2020:
4879                 if (full_range)
4880                         *color_space = COLOR_SPACE_2020_YCBCR;
4881                 else
4882                         return -EINVAL;
4883                 break;
4884
4885         default:
4886                 return -EINVAL;
4887         }
4888
4889         return 0;
4890 }
4891
4892 static int
4893 fill_dc_plane_info_and_addr(struct amdgpu_device *adev,
4894                             const struct drm_plane_state *plane_state,
4895                             const uint64_t tiling_flags,
4896                             struct dc_plane_info *plane_info,
4897                             struct dc_plane_address *address,
4898                             bool tmz_surface,
4899                             bool force_disable_dcc)
4900 {
4901         const struct drm_framebuffer *fb = plane_state->fb;
4902         const struct amdgpu_framebuffer *afb =
4903                 to_amdgpu_framebuffer(plane_state->fb);
4904         int ret;
4905
4906         memset(plane_info, 0, sizeof(*plane_info));
4907
4908         switch (fb->format->format) {
4909         case DRM_FORMAT_C8:
4910                 plane_info->format =
4911                         SURFACE_PIXEL_FORMAT_GRPH_PALETA_256_COLORS;
4912                 break;
4913         case DRM_FORMAT_RGB565:
4914                 plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_RGB565;
4915                 break;
4916         case DRM_FORMAT_XRGB8888:
4917         case DRM_FORMAT_ARGB8888:
4918                 plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ARGB8888;
4919                 break;
4920         case DRM_FORMAT_XRGB2101010:
4921         case DRM_FORMAT_ARGB2101010:
4922                 plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ARGB2101010;
4923                 break;
4924         case DRM_FORMAT_XBGR2101010:
4925         case DRM_FORMAT_ABGR2101010:
4926                 plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ABGR2101010;
4927                 break;
4928         case DRM_FORMAT_XBGR8888:
4929         case DRM_FORMAT_ABGR8888:
4930                 plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ABGR8888;
4931                 break;
4932         case DRM_FORMAT_NV21:
4933                 plane_info->format = SURFACE_PIXEL_FORMAT_VIDEO_420_YCbCr;
4934                 break;
4935         case DRM_FORMAT_NV12:
4936                 plane_info->format = SURFACE_PIXEL_FORMAT_VIDEO_420_YCrCb;
4937                 break;
4938         case DRM_FORMAT_P010:
4939                 plane_info->format = SURFACE_PIXEL_FORMAT_VIDEO_420_10bpc_YCrCb;
4940                 break;
4941         case DRM_FORMAT_XRGB16161616F:
4942         case DRM_FORMAT_ARGB16161616F:
4943                 plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ARGB16161616F;
4944                 break;
4945         case DRM_FORMAT_XBGR16161616F:
4946         case DRM_FORMAT_ABGR16161616F:
4947                 plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ABGR16161616F;
4948                 break;
4949         default:
4950                 DRM_ERROR(
4951                         "Unsupported screen format %p4cc\n",
4952                         &fb->format->format);
4953                 return -EINVAL;
4954         }
4955
4956         switch (plane_state->rotation & DRM_MODE_ROTATE_MASK) {
4957         case DRM_MODE_ROTATE_0:
4958                 plane_info->rotation = ROTATION_ANGLE_0;
4959                 break;
4960         case DRM_MODE_ROTATE_90:
4961                 plane_info->rotation = ROTATION_ANGLE_90;
4962                 break;
4963         case DRM_MODE_ROTATE_180:
4964                 plane_info->rotation = ROTATION_ANGLE_180;
4965                 break;
4966         case DRM_MODE_ROTATE_270:
4967                 plane_info->rotation = ROTATION_ANGLE_270;
4968                 break;
4969         default:
4970                 plane_info->rotation = ROTATION_ANGLE_0;
4971                 break;
4972         }
4973
4974         plane_info->visible = true;
4975         plane_info->stereo_format = PLANE_STEREO_FORMAT_NONE;
4976
4977         plane_info->layer_index = 0;
4978
4979         ret = fill_plane_color_attributes(plane_state, plane_info->format,
4980                                           &plane_info->color_space);
4981         if (ret)
4982                 return ret;
4983
4984         ret = fill_plane_buffer_attributes(adev, afb, plane_info->format,
4985                                            plane_info->rotation, tiling_flags,
4986                                            &plane_info->tiling_info,
4987                                            &plane_info->plane_size,
4988                                            &plane_info->dcc, address, tmz_surface,
4989                                            force_disable_dcc);
4990         if (ret)
4991                 return ret;
4992
4993         fill_blending_from_plane_state(
4994                 plane_state, &plane_info->per_pixel_alpha,
4995                 &plane_info->global_alpha, &plane_info->global_alpha_value);
4996
4997         return 0;
4998 }
4999
5000 static int fill_dc_plane_attributes(struct amdgpu_device *adev,
5001                                     struct dc_plane_state *dc_plane_state,
5002                                     struct drm_plane_state *plane_state,
5003                                     struct drm_crtc_state *crtc_state)
5004 {
5005         struct dm_crtc_state *dm_crtc_state = to_dm_crtc_state(crtc_state);
5006         struct amdgpu_framebuffer *afb = (struct amdgpu_framebuffer *)plane_state->fb;
5007         struct dc_scaling_info scaling_info;
5008         struct dc_plane_info plane_info;
5009         int ret;
5010         bool force_disable_dcc = false;
5011
5012         ret = fill_dc_scaling_info(plane_state, &scaling_info);
5013         if (ret)
5014                 return ret;
5015
5016         dc_plane_state->src_rect = scaling_info.src_rect;
5017         dc_plane_state->dst_rect = scaling_info.dst_rect;
5018         dc_plane_state->clip_rect = scaling_info.clip_rect;
5019         dc_plane_state->scaling_quality = scaling_info.scaling_quality;
5020
5021         force_disable_dcc = adev->asic_type == CHIP_RAVEN && adev->in_suspend;
5022         ret = fill_dc_plane_info_and_addr(adev, plane_state,
5023                                           afb->tiling_flags,
5024                                           &plane_info,
5025                                           &dc_plane_state->address,
5026                                           afb->tmz_surface,
5027                                           force_disable_dcc);
5028         if (ret)
5029                 return ret;
5030
5031         dc_plane_state->format = plane_info.format;
5032         dc_plane_state->color_space = plane_info.color_space;
5034         dc_plane_state->plane_size = plane_info.plane_size;
5035         dc_plane_state->rotation = plane_info.rotation;
5036         dc_plane_state->horizontal_mirror = plane_info.horizontal_mirror;
5037         dc_plane_state->stereo_format = plane_info.stereo_format;
5038         dc_plane_state->tiling_info = plane_info.tiling_info;
5039         dc_plane_state->visible = plane_info.visible;
5040         dc_plane_state->per_pixel_alpha = plane_info.per_pixel_alpha;
5041         dc_plane_state->global_alpha = plane_info.global_alpha;
5042         dc_plane_state->global_alpha_value = plane_info.global_alpha_value;
5043         dc_plane_state->dcc = plane_info.dcc;
5044         dc_plane_state->layer_index = plane_info.layer_index; /* always returns 0 */
5045         dc_plane_state->flip_int_enabled = true;
5046
5047         /*
5048          * Always set input transfer function, since plane state is refreshed
5049          * every time.
5050          */
5051         ret = amdgpu_dm_update_plane_color_mgmt(dm_crtc_state, dc_plane_state);
5052         if (ret)
5053                 return ret;
5054
5055         return 0;
5056 }
5057
5058 static void update_stream_scaling_settings(const struct drm_display_mode *mode,
5059                                            const struct dm_connector_state *dm_state,
5060                                            struct dc_stream_state *stream)
5061 {
5062         enum amdgpu_rmx_type rmx_type;
5063
5064         struct rect src = { 0 }; /* viewport in composition space */
5065         struct rect dst = { 0 }; /* stream addressable area */
5066
5067         /* no mode; nothing to be done */
5068         if (!mode)
5069                 return;
5070
5071         /* Full screen scaling by default */
5072         src.width = mode->hdisplay;
5073         src.height = mode->vdisplay;
5074         dst.width = stream->timing.h_addressable;
5075         dst.height = stream->timing.v_addressable;
5076
5077         if (dm_state) {
5078                 rmx_type = dm_state->scaling;
5079                 if (rmx_type == RMX_ASPECT || rmx_type == RMX_OFF) {
5080                         if (src.width * dst.height <
5081                                         src.height * dst.width) {
5082                                 /* height needs less upscaling/more downscaling */
5083                                 dst.width = src.width *
5084                                                 dst.height / src.height;
5085                         } else {
5086                                 /* width needs less upscaling/more downscaling */
5087                                 dst.height = src.height *
5088                                                 dst.width / src.width;
5089                         }
5090                 } else if (rmx_type == RMX_CENTER) {
5091                         dst = src;
5092                 }
5093
5094                 dst.x = (stream->timing.h_addressable - dst.width) / 2;
5095                 dst.y = (stream->timing.v_addressable - dst.height) / 2;
5096
5097                 if (dm_state->underscan_enable) {
5098                         dst.x += dm_state->underscan_hborder / 2;
5099                         dst.y += dm_state->underscan_vborder / 2;
5100                         dst.width -= dm_state->underscan_hborder;
5101                         dst.height -= dm_state->underscan_vborder;
5102                 }
5103         }
5104
5105         stream->src = src;
5106         stream->dst = dst;
5107
5108         DRM_DEBUG_KMS("Destination Rectangle x:%d  y:%d  width:%d  height:%d\n",
5109                       dst.x, dst.y, dst.width, dst.height);
5111 }
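
/*
 * Worked example for RMX_ASPECT (hypothetical numbers): a 1280x1024 mode on
 * a 1920x1080 panel compares 1280 * 1080 (1382400) against 1024 * 1920
 * (1966080); since the former is smaller, the full height is used and the
 * width becomes 1280 * 1080 / 1024 = 1350. The 1350x1080 destination is
 * then centered at x = (1920 - 1350) / 2 = 285, y = 0.
 */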
5112
5113 static enum dc_color_depth
5114 convert_color_depth_from_display_info(const struct drm_connector *connector,
5115                                       bool is_y420, int requested_bpc)
5116 {
5117         uint8_t bpc;
5118
5119         if (is_y420) {
5120                 bpc = 8;
5121
5122                 /* Cap display bpc based on HDMI 2.0 HF-VSDB */
5123                 if (connector->display_info.hdmi.y420_dc_modes & DRM_EDID_YCBCR420_DC_48)
5124                         bpc = 16;
5125                 else if (connector->display_info.hdmi.y420_dc_modes & DRM_EDID_YCBCR420_DC_36)
5126                         bpc = 12;
5127                 else if (connector->display_info.hdmi.y420_dc_modes & DRM_EDID_YCBCR420_DC_30)
5128                         bpc = 10;
5129         } else {
5130                 bpc = (uint8_t)connector->display_info.bpc;
5131                 /* Assume 8 bpc by default if no bpc is specified. */
5132                 bpc = bpc ? bpc : 8;
5133         }
5134
5135         if (requested_bpc > 0) {
5136                 /*
5137                  * Cap display bpc based on the user requested value.
5138                  *
5139                  * The value for state->max_bpc may not be correctly updated
5140                  * depending on when the connector gets added to the state
5141                  * or if this was called outside of atomic check, so it
5142                  * can't be used directly.
5143                  */
5144                 bpc = min_t(u8, bpc, requested_bpc);
5145
5146                 /* Round down to the nearest even number. */
5147                 bpc = bpc - (bpc & 1);
5148         }
5149
5150         switch (bpc) {
5151         case 0:
5152                 /*
5153                  * Temporary workaround: DRM doesn't parse color depth for
5154                  * EDID revisions before 1.4.
5155                  * TODO: Fix edid parsing
5156                  */
5157                 return COLOR_DEPTH_888;
5158         case 6:
5159                 return COLOR_DEPTH_666;
5160         case 8:
5161                 return COLOR_DEPTH_888;
5162         case 10:
5163                 return COLOR_DEPTH_101010;
5164         case 12:
5165                 return COLOR_DEPTH_121212;
5166         case 14:
5167                 return COLOR_DEPTH_141414;
5168         case 16:
5169                 return COLOR_DEPTH_161616;
5170         default:
5171                 return COLOR_DEPTH_UNDEFINED;
5172         }
5173 }
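
/*
 * Worked example (hypothetical sink): a display advertising 12 bpc with a
 * user-requested max of 10 yields min(12, 10) = 10 -> COLOR_DEPTH_101010.
 * An odd request such as 11 is first rounded down to the even value 10,
 * since the supported colour depths are even.
 */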
5174
5175 static enum dc_aspect_ratio
5176 get_aspect_ratio(const struct drm_display_mode *mode_in)
5177 {
5178         /* 1-1 mapping, since both enums follow the HDMI spec. */
5179         return (enum dc_aspect_ratio) mode_in->picture_aspect_ratio;
5180 }
5181
5182 static enum dc_color_space
5183 get_output_color_space(const struct dc_crtc_timing *dc_crtc_timing)
5184 {
5185         enum dc_color_space color_space = COLOR_SPACE_SRGB;
5186
5187         switch (dc_crtc_timing->pixel_encoding) {
5188         case PIXEL_ENCODING_YCBCR422:
5189         case PIXEL_ENCODING_YCBCR444:
5190         case PIXEL_ENCODING_YCBCR420:
5191         {
5192                 /*
5193                  * 27.03 MHz is the separation point between HDTV and SDTV
5194                  * according to the HDMI spec; we use YCbCr709 above it and
5195                  * YCbCr601 at or below it.
5196                  */
5197                 if (dc_crtc_timing->pix_clk_100hz > 270300) {
5198                         if (dc_crtc_timing->flags.Y_ONLY)
5199                                 color_space =
5200                                         COLOR_SPACE_YCBCR709_LIMITED;
5201                         else
5202                                 color_space = COLOR_SPACE_YCBCR709;
5203                 } else {
5204                         if (dc_crtc_timing->flags.Y_ONLY)
5205                                 color_space =
5206                                         COLOR_SPACE_YCBCR601_LIMITED;
5207                         else
5208                                 color_space = COLOR_SPACE_YCBCR601;
5209                 }
5210
5211         }
5212         break;
5213         case PIXEL_ENCODING_RGB:
5214                 color_space = COLOR_SPACE_SRGB;
5215                 break;
5216
5217         default:
5218                 WARN_ON(1);
5219                 break;
5220         }
5221
5222         return color_space;
5223 }
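
/*
 * Example: 1080p60 has a pixel clock of 148.5 MHz (pix_clk_100hz == 1485000),
 * well above the 27.03 MHz threshold, so YCbCr output gets BT.709
 * colorimetry; a 480p mode at 27.0 MHz (pix_clk_100hz == 270000) falls at or
 * below the threshold and gets BT.601.
 */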
5224
5225 static bool adjust_colour_depth_from_display_info(
5226         struct dc_crtc_timing *timing_out,
5227         const struct drm_display_info *info)
5228 {
5229         enum dc_color_depth depth = timing_out->display_color_depth;
5230         int normalized_clk;

5231         do {
5232                 normalized_clk = timing_out->pix_clk_100hz / 10;
5233                 /* YCbCr 4:2:0 requires additional adjustment of 1/2 */
5234                 if (timing_out->pixel_encoding == PIXEL_ENCODING_YCBCR420)
5235                         normalized_clk /= 2;
5236                 /* Adjust the pixel clock per the HDMI spec, based on colour depth */
5237                 switch (depth) {
5238                 case COLOR_DEPTH_888:
5239                         break;
5240                 case COLOR_DEPTH_101010:
5241                         normalized_clk = (normalized_clk * 30) / 24;
5242                         break;
5243                 case COLOR_DEPTH_121212:
5244                         normalized_clk = (normalized_clk * 36) / 24;
5245                         break;
5246                 case COLOR_DEPTH_161616:
5247                         normalized_clk = (normalized_clk * 48) / 24;
5248                         break;
5249                 default:
5250                         /* The above depths are the only ones valid for HDMI. */
5251                         return false;
5252                 }
5253                 if (normalized_clk <= info->max_tmds_clock) {
5254                         timing_out->display_color_depth = depth;
5255                         return true;
5256                 }
5257         } while (--depth > COLOR_DEPTH_666);
5258         return false;
5259 }
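
/*
 * Worked example (hypothetical link): a 594 MHz 4K mode at 12 bpc needs
 * 594000 * 36 / 24 = 891000 kHz of TMDS bandwidth; if the sink reports
 * max_tmds_clock = 600000 kHz, the loop drops to 10 bpc (742500, still too
 * high) and then to 8 bpc (594000), which fits, so COLOR_DEPTH_888 is kept.
 */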
5260
5261 static void fill_stream_properties_from_drm_display_mode(
5262         struct dc_stream_state *stream,
5263         const struct drm_display_mode *mode_in,
5264         const struct drm_connector *connector,
5265         const struct drm_connector_state *connector_state,
5266         const struct dc_stream_state *old_stream,
5267         int requested_bpc)
5268 {
5269         struct dc_crtc_timing *timing_out = &stream->timing;
5270         const struct drm_display_info *info = &connector->display_info;
5271         struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
5272         struct hdmi_vendor_infoframe hv_frame;
5273         struct hdmi_avi_infoframe avi_frame;
5274
5275         memset(&hv_frame, 0, sizeof(hv_frame));
5276         memset(&avi_frame, 0, sizeof(avi_frame));
5277
5278         timing_out->h_border_left = 0;
5279         timing_out->h_border_right = 0;
5280         timing_out->v_border_top = 0;
5281         timing_out->v_border_bottom = 0;
5282         /* TODO: un-hardcode */
5283         if (drm_mode_is_420_only(info, mode_in)
5284                         && stream->signal == SIGNAL_TYPE_HDMI_TYPE_A)
5285                 timing_out->pixel_encoding = PIXEL_ENCODING_YCBCR420;
5286         else if (drm_mode_is_420_also(info, mode_in)
5287                         && aconnector->force_yuv420_output)
5288                 timing_out->pixel_encoding = PIXEL_ENCODING_YCBCR420;
5289         else if ((connector->display_info.color_formats & DRM_COLOR_FORMAT_YCRCB444)
5290                         && stream->signal == SIGNAL_TYPE_HDMI_TYPE_A)
5291                 timing_out->pixel_encoding = PIXEL_ENCODING_YCBCR444;
5292         else
5293                 timing_out->pixel_encoding = PIXEL_ENCODING_RGB;
5294
5295         timing_out->timing_3d_format = TIMING_3D_FORMAT_NONE;
5296         timing_out->display_color_depth = convert_color_depth_from_display_info(
5297                 connector,
5298                 (timing_out->pixel_encoding == PIXEL_ENCODING_YCBCR420),
5299                 requested_bpc);
5300         timing_out->scan_type = SCANNING_TYPE_NODATA;
5301         timing_out->hdmi_vic = 0;
5302
5303         if (old_stream) {
5304                 timing_out->vic = old_stream->timing.vic;
5305                 timing_out->flags.HSYNC_POSITIVE_POLARITY = old_stream->timing.flags.HSYNC_POSITIVE_POLARITY;
5306                 timing_out->flags.VSYNC_POSITIVE_POLARITY = old_stream->timing.flags.VSYNC_POSITIVE_POLARITY;
5307         } else {
5308                 timing_out->vic = drm_match_cea_mode(mode_in);
5309                 if (mode_in->flags & DRM_MODE_FLAG_PHSYNC)
5310                         timing_out->flags.HSYNC_POSITIVE_POLARITY = 1;
5311                 if (mode_in->flags & DRM_MODE_FLAG_PVSYNC)
5312                         timing_out->flags.VSYNC_POSITIVE_POLARITY = 1;
5313         }
5314
5315         if (stream->signal == SIGNAL_TYPE_HDMI_TYPE_A) {
5316                 drm_hdmi_avi_infoframe_from_display_mode(&avi_frame, (struct drm_connector *)connector, mode_in);
5317                 timing_out->vic = avi_frame.video_code;
5318                 drm_hdmi_vendor_infoframe_from_display_mode(&hv_frame, (struct drm_connector *)connector, mode_in);
5319                 timing_out->hdmi_vic = hv_frame.vic;
5320         }
5321
5322         if (is_freesync_video_mode(mode_in, aconnector)) {
5323                 timing_out->h_addressable = mode_in->hdisplay;
5324                 timing_out->h_total = mode_in->htotal;
5325                 timing_out->h_sync_width = mode_in->hsync_end - mode_in->hsync_start;
5326                 timing_out->h_front_porch = mode_in->hsync_start - mode_in->hdisplay;
5327                 timing_out->v_total = mode_in->vtotal;
5328                 timing_out->v_addressable = mode_in->vdisplay;
5329                 timing_out->v_front_porch = mode_in->vsync_start - mode_in->vdisplay;
5330                 timing_out->v_sync_width = mode_in->vsync_end - mode_in->vsync_start;
5331                 timing_out->pix_clk_100hz = mode_in->clock * 10;
5332         } else {
5333                 timing_out->h_addressable = mode_in->crtc_hdisplay;
5334                 timing_out->h_total = mode_in->crtc_htotal;
5335                 timing_out->h_sync_width = mode_in->crtc_hsync_end - mode_in->crtc_hsync_start;
5336                 timing_out->h_front_porch = mode_in->crtc_hsync_start - mode_in->crtc_hdisplay;
5337                 timing_out->v_total = mode_in->crtc_vtotal;
5338                 timing_out->v_addressable = mode_in->crtc_vdisplay;
5339                 timing_out->v_front_porch = mode_in->crtc_vsync_start - mode_in->crtc_vdisplay;
5340                 timing_out->v_sync_width = mode_in->crtc_vsync_end - mode_in->crtc_vsync_start;
5341                 timing_out->pix_clk_100hz = mode_in->crtc_clock * 10;
5342         }
5343
5344         timing_out->aspect_ratio = get_aspect_ratio(mode_in);
5345
5346         stream->output_color_space = get_output_color_space(timing_out);
5347
5348         stream->out_transfer_func->type = TF_TYPE_PREDEFINED;
5349         stream->out_transfer_func->tf = TRANSFER_FUNCTION_SRGB;
5350         if (stream->signal == SIGNAL_TYPE_HDMI_TYPE_A) {
5351                 if (!adjust_colour_depth_from_display_info(timing_out, info) &&
5352                     drm_mode_is_420_also(info, mode_in) &&
5353                     timing_out->pixel_encoding != PIXEL_ENCODING_YCBCR420) {
5354                         timing_out->pixel_encoding = PIXEL_ENCODING_YCBCR420;
5355                         adjust_colour_depth_from_display_info(timing_out, info);
5356                 }
5357         }
5358 }
5359
5360 static void fill_audio_info(struct audio_info *audio_info,
5361                             const struct drm_connector *drm_connector,
5362                             const struct dc_sink *dc_sink)
5363 {
5364         int i = 0;
5365         int cea_revision = 0;
5366         const struct dc_edid_caps *edid_caps = &dc_sink->edid_caps;
5367
5368         audio_info->manufacture_id = edid_caps->manufacturer_id;
5369         audio_info->product_id = edid_caps->product_id;
5370
5371         cea_revision = drm_connector->display_info.cea_rev;
5372
5373         strscpy(audio_info->display_name,
5374                 edid_caps->display_name,
5375                 AUDIO_INFO_DISPLAY_NAME_SIZE_IN_CHARS);
5376
5377         if (cea_revision >= 3) {
5378                 audio_info->mode_count = edid_caps->audio_mode_count;
5379
5380                 for (i = 0; i < audio_info->mode_count; ++i) {
5381                         audio_info->modes[i].format_code =
5382                                         (enum audio_format_code)
5383                                         (edid_caps->audio_modes[i].format_code);
5384                         audio_info->modes[i].channel_count =
5385                                         edid_caps->audio_modes[i].channel_count;
5386                         audio_info->modes[i].sample_rates.all =
5387                                         edid_caps->audio_modes[i].sample_rate;
5388                         audio_info->modes[i].sample_size =
5389                                         edid_caps->audio_modes[i].sample_size;
5390                 }
5391         }
5392
5393         audio_info->flags.all = edid_caps->speaker_flags;
5394
5395         /* TODO: We only check for progressive mode; check for interlaced mode too */
5396         if (drm_connector->latency_present[0]) {
5397                 audio_info->video_latency = drm_connector->video_latency[0];
5398                 audio_info->audio_latency = drm_connector->audio_latency[0];
5399         }
5400
5401         /* TODO: For DP, video and audio latency should be calculated from DPCD caps */
5403 }
5404
5405 static void
5406 copy_crtc_timing_for_drm_display_mode(const struct drm_display_mode *src_mode,
5407                                       struct drm_display_mode *dst_mode)
5408 {
5409         dst_mode->crtc_hdisplay = src_mode->crtc_hdisplay;
5410         dst_mode->crtc_vdisplay = src_mode->crtc_vdisplay;
5411         dst_mode->crtc_clock = src_mode->crtc_clock;
5412         dst_mode->crtc_hblank_start = src_mode->crtc_hblank_start;
5413         dst_mode->crtc_hblank_end = src_mode->crtc_hblank_end;
5414         dst_mode->crtc_hsync_start =  src_mode->crtc_hsync_start;
5415         dst_mode->crtc_hsync_end = src_mode->crtc_hsync_end;
5416         dst_mode->crtc_htotal = src_mode->crtc_htotal;
5417         dst_mode->crtc_hskew = src_mode->crtc_hskew;
5418         dst_mode->crtc_vblank_start = src_mode->crtc_vblank_start;
5419         dst_mode->crtc_vblank_end = src_mode->crtc_vblank_end;
5420         dst_mode->crtc_vsync_start = src_mode->crtc_vsync_start;
5421         dst_mode->crtc_vsync_end = src_mode->crtc_vsync_end;
5422         dst_mode->crtc_vtotal = src_mode->crtc_vtotal;
5423 }
5424
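/*
 * Patch the mode's crtc_* timing fields with the native mode's timing when
 * scaling is enabled, or when the requested mode already matches the native
 * clock and totals; otherwise the mode is left untouched.
 */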
5425 static void
5426 decide_crtc_timing_for_drm_display_mode(struct drm_display_mode *drm_mode,
5427                                         const struct drm_display_mode *native_mode,
5428                                         bool scale_enabled)
5429 {
5430         if (scale_enabled) {
5431                 copy_crtc_timing_for_drm_display_mode(native_mode, drm_mode);
5432         } else if (native_mode->clock == drm_mode->clock &&
5433                         native_mode->htotal == drm_mode->htotal &&
5434                         native_mode->vtotal == drm_mode->vtotal) {
5435                 copy_crtc_timing_for_drm_display_mode(native_mode, drm_mode);
5436         } else {
5437                 /* no scaling and no amdgpu-inserted mode; nothing to patch */
5438         }
5439 }
5440
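/*
 * Create a virtual sink on the connector's link so a stream can still be
 * built and validated when no physical sink is attached (see
 * create_stream_for_sink() below, which falls back to this for connectors
 * without a dc_sink).
 */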
5441 static struct dc_sink *
5442 create_fake_sink(struct amdgpu_dm_connector *aconnector)
5443 {
5444         struct dc_sink_init_data sink_init_data = { 0 };
5445         struct dc_sink *sink = NULL;
5446         sink_init_data.link = aconnector->dc_link;
5447         sink_init_data.sink_signal = aconnector->dc_link->connector_signal;
5448
5449         sink = dc_sink_create(&sink_init_data);
5450         if (!sink) {
5451                 DRM_ERROR("Failed to create sink!\n");
5452                 return NULL;
5453         }
5454         sink->sink_signal = SIGNAL_TYPE_VIRTUAL;
5455
5456         return sink;
5457 }
5458
5459 static void set_multisync_trigger_params(
5460                 struct dc_stream_state *stream)
5461 {
5462         struct dc_stream_state *master = NULL;
5463
5464         if (stream->triggered_crtc_reset.enabled) {
5465                 master = stream->triggered_crtc_reset.event_source;
5466                 stream->triggered_crtc_reset.event =
5467                         master->timing.flags.VSYNC_POSITIVE_POLARITY ?
5468                         CRTC_EVENT_VSYNC_RISING : CRTC_EVENT_VSYNC_FALLING;
5469                 stream->triggered_crtc_reset.delay = TRIGGER_DELAY_NEXT_PIXEL;
5470         }
5471 }
5472
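/*
 * Pick the stream with the highest refresh rate as the multisync master.
 * The refresh rate follows from the timing as
 *
 *	refresh = (pix_clk_100hz * 100) / (h_total * v_total)
 *
 * e.g. a 1080p60 CEA timing (pix_clk_100hz = 1485000, h_total = 2200,
 * v_total = 1125) gives 148500000 / 2475000 = 60 Hz.
 */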
5473 static void set_master_stream(struct dc_stream_state *stream_set[],
5474                               int stream_count)
5475 {
5476         int j, highest_rfr = 0, master_stream = 0;
5477
5478         for (j = 0;  j < stream_count; j++) {
5479                 if (stream_set[j] && stream_set[j]->triggered_crtc_reset.enabled) {
5480                         int refresh_rate = 0;
5481
5482                         refresh_rate = (stream_set[j]->timing.pix_clk_100hz*100)/
5483                                 (stream_set[j]->timing.h_total*stream_set[j]->timing.v_total);
5484                         if (refresh_rate > highest_rfr) {
5485                                 highest_rfr = refresh_rate;
5486                                 master_stream = j;
5487                         }
5488                 }
5489         }
5490         for (j = 0;  j < stream_count; j++) {
5491                 if (stream_set[j])
5492                         stream_set[j]->triggered_crtc_reset.event_source = stream_set[master_stream];
5493         }
5494 }
5495
5496 static void dm_enable_per_frame_crtc_master_sync(struct dc_state *context)
5497 {
5498         int i = 0;
5499         struct dc_stream_state *stream;
5500
5501         if (context->stream_count < 2)
5502                 return;
5503         for (i = 0; i < context->stream_count ; i++) {
5504                 if (!context->streams[i])
5505                         continue;
5506                 /*
5507                  * TODO: add a function to read AMD VSDB bits and set
5508                  * crtc_sync_master.multi_sync_enabled flag
5509                  * For now it's set to false
5510                  */
5511         }
5512
5513         set_master_stream(context->streams, context->stream_count);
5514
5515         for (i = 0; i < context->stream_count ; i++) {
5516                 stream = context->streams[i];
5517
5518                 if (!stream)
5519                         continue;
5520
5521                 set_multisync_trigger_params(stream);
5522         }
5523 }
5524
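/*
 * Return the mode with the highest refresh rate at the preferred mode's
 * resolution. The result is cached in aconnector->freesync_vid_base (a
 * non-zero clock marks the cache as valid) so repeated calls skip the
 * mode-list walk.
 */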
5525 static struct drm_display_mode *
5526 get_highest_refresh_rate_mode(struct amdgpu_dm_connector *aconnector,
5527                           bool use_probed_modes)
5528 {
5529         struct drm_display_mode *m, *m_pref = NULL;
5530         u16 current_refresh, highest_refresh;
5531         struct list_head *list_head = use_probed_modes ?
5532                                                     &aconnector->base.probed_modes :
5533                                                     &aconnector->base.modes;
5534
5535         if (aconnector->freesync_vid_base.clock != 0)
5536                 return &aconnector->freesync_vid_base;
5537
5538         /* Find the preferred mode */
5539         list_for_each_entry(m, list_head, head) {
5540                 if (m->type & DRM_MODE_TYPE_PREFERRED) {
5541                         m_pref = m;
5542                         break;
5543                 }
5544         }
5545
5546         if (!m_pref) {
5547                 /* Probably an EDID with no preferred mode. Fall back to the first entry */
5548                 m_pref = list_first_entry_or_null(
5549                         &aconnector->base.modes, struct drm_display_mode, head);
5550                 if (!m_pref) {
5551                         DRM_DEBUG_DRIVER("No preferred mode found in EDID\n");
5552                         return NULL;
5553                 }
5554         }
5555
5556         highest_refresh = drm_mode_vrefresh(m_pref);
5557
5558         /*
5559          * Find the mode with highest refresh rate with same resolution.
5560          * For some monitors, preferred mode is not the mode with highest
5561          * supported refresh rate.
5562          */
5563         list_for_each_entry(m, list_head, head) {
5564                 current_refresh  = drm_mode_vrefresh(m);
5565
5566                 if (m->hdisplay == m_pref->hdisplay &&
5567                     m->vdisplay == m_pref->vdisplay &&
5568                     highest_refresh < current_refresh) {
5569                         highest_refresh = current_refresh;
5570                         m_pref = m;
5571                 }
5572         }
5573
5574         aconnector->freesync_vid_base = *m_pref;
5575         return m_pref;
5576 }
5577
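/*
 * A mode counts as a freesync video mode when it differs from the cached
 * high-refresh base mode only in the vertical front porch: identical clock
 * and horizontal timing, with the whole vtotal delta absorbed by shifting
 * vsync_start/vsync_end.
 */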
5578 static bool is_freesync_video_mode(const struct drm_display_mode *mode,
5579                                    struct amdgpu_dm_connector *aconnector)
5580 {
5581         struct drm_display_mode *high_mode;
5582         int timing_diff;
5583
5584         high_mode = get_highest_refresh_rate_mode(aconnector, false);
5585         if (!high_mode || !mode)
5586                 return false;
5587
5588         timing_diff = high_mode->vtotal - mode->vtotal;
5589
5590         if (high_mode->clock == 0 || high_mode->clock != mode->clock ||
5591             high_mode->hdisplay != mode->hdisplay ||
5592             high_mode->vdisplay != mode->vdisplay ||
5593             high_mode->hsync_start != mode->hsync_start ||
5594             high_mode->hsync_end != mode->hsync_end ||
5595             high_mode->htotal != mode->htotal ||
5596             high_mode->hskew != mode->hskew ||
5597             high_mode->vscan != mode->vscan ||
5598             high_mode->vsync_start - mode->vsync_start != timing_diff ||
5599             high_mode->vsync_end - mode->vsync_end != timing_diff)
5600                 return false;
5601         else
5602                 return true;
5603 }
5604
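/*
 * Build a dc_stream_state for this connector: choose the (possibly
 * freesync-recalculated) mode, fill timing and color properties, configure
 * DSC from the DPCD caps where supported, and attach audio and infopacket
 * data. Returns NULL on failure; on success the caller owns a reference to
 * the stream.
 */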
5605 static struct dc_stream_state *
5606 create_stream_for_sink(struct amdgpu_dm_connector *aconnector,
5607                        const struct drm_display_mode *drm_mode,
5608                        const struct dm_connector_state *dm_state,
5609                        const struct dc_stream_state *old_stream,
5610                        int requested_bpc)
5611 {
5612         struct drm_display_mode *preferred_mode = NULL;
5613         struct drm_connector *drm_connector;
5614         const struct drm_connector_state *con_state =
5615                 dm_state ? &dm_state->base : NULL;
5616         struct dc_stream_state *stream = NULL;
5617         struct drm_display_mode mode = *drm_mode;
5618         struct drm_display_mode saved_mode;
5619         struct drm_display_mode *freesync_mode = NULL;
5620         bool native_mode_found = false;
5621         bool recalculate_timing = dm_state ? (dm_state->scaling != RMX_OFF) : false;
5622         int mode_refresh;
5623         int preferred_refresh = 0;
5624 #if defined(CONFIG_DRM_AMD_DC_DCN)
5625         struct dsc_dec_dpcd_caps dsc_caps;
5626         uint32_t link_bandwidth_kbps;
5627 #endif
5628         struct dc_sink *sink = NULL;
5629
5630         memset(&saved_mode, 0, sizeof(saved_mode));
5631
5632         if (aconnector == NULL) {
5633                 DRM_ERROR("aconnector is NULL!\n");
5634                 return stream;
5635         }
5636
5637         drm_connector = &aconnector->base;
5638
5639         if (!aconnector->dc_sink) {
5640                 sink = create_fake_sink(aconnector);
5641                 if (!sink)
5642                         return stream;
5643         } else {
5644                 sink = aconnector->dc_sink;
5645                 dc_sink_retain(sink);
5646         }
5647
5648         stream = dc_create_stream_for_sink(sink);
5649
5650         if (stream == NULL) {
5651                 DRM_ERROR("Failed to create stream for sink!\n");
5652                 goto finish;
5653         }
5654
5655         stream->dm_stream_context = aconnector;
5656
5657         stream->timing.flags.LTE_340MCSC_SCRAMBLE =
5658                 drm_connector->display_info.hdmi.scdc.scrambling.low_rates;
5659
5660         list_for_each_entry(preferred_mode, &aconnector->base.modes, head) {
5661                 /* Search for preferred mode */
5662                 if (preferred_mode->type & DRM_MODE_TYPE_PREFERRED) {
5663                         native_mode_found = true;
5664                         break;
5665                 }
5666         }
5667         if (!native_mode_found)
5668                 preferred_mode = list_first_entry_or_null(
5669                                 &aconnector->base.modes,
5670                                 struct drm_display_mode,
5671                                 head);
5672
5673         mode_refresh = drm_mode_vrefresh(&mode);
5674
5675         if (preferred_mode == NULL) {
5676                 /*
5677                  * This may not be an error: the use case is when we have no
5678                  * usermode calls to reset and set mode upon hotplug. In that
5679                  * case we call set mode ourselves to restore the previous mode,
5680                  * and the mode list may not be filled in yet.
5681                  */
5682                 DRM_DEBUG_DRIVER("No preferred mode found\n");
5683         } else {
5684                 recalculate_timing |= amdgpu_freesync_vid_mode &&
5685                                  is_freesync_video_mode(&mode, aconnector);
5686                 if (recalculate_timing) {
5687                         freesync_mode = get_highest_refresh_rate_mode(aconnector, false);
5688                         saved_mode = mode;
5689                         mode = *freesync_mode;
5690                 } else {
5691                         decide_crtc_timing_for_drm_display_mode(
5692                                 &mode, preferred_mode,
5693                                 dm_state ? (dm_state->scaling != RMX_OFF) : false);
5694                 }
5695
5696                 preferred_refresh = drm_mode_vrefresh(preferred_mode);
5697         }
5698
5699         if (recalculate_timing)
5700                 drm_mode_set_crtcinfo(&saved_mode, 0);
5701         else if (!dm_state)
5702                 drm_mode_set_crtcinfo(&mode, 0);
5703
5704         /*
5705          * If scaling is enabled and the refresh rate didn't change,
5706          * copy the VIC and polarities from the old timings.
5707          */
5708         if (!recalculate_timing || mode_refresh != preferred_refresh)
5709                 fill_stream_properties_from_drm_display_mode(
5710                         stream, &mode, &aconnector->base, con_state, NULL,
5711                         requested_bpc);
5712         else
5713                 fill_stream_properties_from_drm_display_mode(
5714                         stream, &mode, &aconnector->base, con_state, old_stream,
5715                         requested_bpc);
5716
5717         stream->timing.flags.DSC = 0;
5718
5719         if (aconnector->dc_link && sink->sink_signal == SIGNAL_TYPE_DISPLAY_PORT) {
5720 #if defined(CONFIG_DRM_AMD_DC_DCN)
5721                 dc_dsc_parse_dsc_dpcd(aconnector->dc_link->ctx->dc,
5722                                       aconnector->dc_link->dpcd_caps.dsc_caps.dsc_basic_caps.raw,
5723                                       aconnector->dc_link->dpcd_caps.dsc_caps.dsc_branch_decoder_caps.raw,
5724                                       &dsc_caps);
5725                 link_bandwidth_kbps = dc_link_bandwidth_kbps(aconnector->dc_link,
5726                                                              dc_link_get_link_cap(aconnector->dc_link));
5727
5728                 if (aconnector->dsc_settings.dsc_force_enable != DSC_CLK_FORCE_DISABLE && dsc_caps.is_dsc_supported) {
5729                         /* Set DSC policy according to dsc_clock_en */
5730                         dc_dsc_policy_set_enable_dsc_when_not_needed(
5731                                 aconnector->dsc_settings.dsc_force_enable == DSC_CLK_FORCE_ENABLE);
5732
5733                         if (dc_dsc_compute_config(aconnector->dc_link->ctx->dc->res_pool->dscs[0],
5734                                                   &dsc_caps,
5735                                                   aconnector->dc_link->ctx->dc->debug.dsc_min_slice_height_override,
5736                                                   0,
5737                                                   link_bandwidth_kbps,
5738                                                   &stream->timing,
5739                                                   &stream->timing.dsc_cfg))
5740                                 stream->timing.flags.DSC = 1;
5741                         /* Overwrite the stream flag if DSC is enabled through debugfs */
5742                         if (aconnector->dsc_settings.dsc_force_enable == DSC_CLK_FORCE_ENABLE)
5743                                 stream->timing.flags.DSC = 1;
5744
5745                         if (stream->timing.flags.DSC && aconnector->dsc_settings.dsc_num_slices_h)
5746                                 stream->timing.dsc_cfg.num_slices_h = aconnector->dsc_settings.dsc_num_slices_h;
5747
5748                         if (stream->timing.flags.DSC && aconnector->dsc_settings.dsc_num_slices_v)
5749                                 stream->timing.dsc_cfg.num_slices_v = aconnector->dsc_settings.dsc_num_slices_v;
5750
5751                         if (stream->timing.flags.DSC && aconnector->dsc_settings.dsc_bits_per_pixel)
5752                                 stream->timing.dsc_cfg.bits_per_pixel = aconnector->dsc_settings.dsc_bits_per_pixel;
5753                 }
5754 #endif
5755         }
5756
5757         update_stream_scaling_settings(&mode, dm_state, stream);
5758
5759         fill_audio_info(
5760                 &stream->audio_info,
5761                 drm_connector,
5762                 sink);
5763
5764         update_stream_signal(stream, sink);
5765
5766         if (stream->signal == SIGNAL_TYPE_HDMI_TYPE_A)
5767                 mod_build_hf_vsif_infopacket(stream, &stream->vsp_infopacket);
5768
5769         if (stream->link->psr_settings.psr_feature_enabled) {
5770                 /*
5771                  * Decide whether the stream supports VSC SDP colorimetry
5772                  * before building the VSC info packet.
5773                  */
5774                 stream->use_vsc_sdp_for_colorimetry = false;
5775                 if (aconnector->dc_sink->sink_signal == SIGNAL_TYPE_DISPLAY_PORT_MST) {
5776                         stream->use_vsc_sdp_for_colorimetry =
5777                                 aconnector->dc_sink->is_vsc_sdp_colorimetry_supported;
5778                 } else {
5779                         if (stream->link->dpcd_caps.dprx_feature.bits.VSC_SDP_COLORIMETRY_SUPPORTED)
5780                                 stream->use_vsc_sdp_for_colorimetry = true;
5781                 }
5782                 mod_build_vsc_infopacket(stream, &stream->vsc_infopacket);
5783         }
5784 finish:
5785         dc_sink_release(sink);
5786
5787         return stream;
5788 }
5789
5790 static void amdgpu_dm_crtc_destroy(struct drm_crtc *crtc)
5791 {
5792         drm_crtc_cleanup(crtc);
5793         kfree(crtc);
5794 }
5795
5796 static void dm_crtc_destroy_state(struct drm_crtc *crtc,
5797                                   struct drm_crtc_state *state)
5798 {
5799         struct dm_crtc_state *cur = to_dm_crtc_state(state);
5800
5801         /* TODO: Destroy dc_stream objects once the stream object is flattened */
5802         if (cur->stream)
5803                 dc_stream_release(cur->stream);
5804
5805
5806         __drm_atomic_helper_crtc_destroy_state(state);
5807
5808
5809         kfree(state);
5810 }
5811
5812 static void dm_crtc_reset_state(struct drm_crtc *crtc)
5813 {
5814         struct dm_crtc_state *state;
5815
5816         if (crtc->state)
5817                 dm_crtc_destroy_state(crtc, crtc->state);
5818
5819         state = kzalloc(sizeof(*state), GFP_KERNEL);
5820         if (WARN_ON(!state))
5821                 return;
5822
5823         __drm_atomic_helper_crtc_reset(crtc, &state->base);
5824 }
5825
5826 static struct drm_crtc_state *
5827 dm_crtc_duplicate_state(struct drm_crtc *crtc)
5828 {
5829         struct dm_crtc_state *state, *cur;
5830
5831         if (WARN_ON(!crtc->state))
5832                 return NULL;
5833
5834         cur = to_dm_crtc_state(crtc->state);
5835
5836         state = kzalloc(sizeof(*state), GFP_KERNEL);
5837         if (!state)
5838                 return NULL;
5839
5840         __drm_atomic_helper_crtc_duplicate_state(crtc, &state->base);
5841
5842         if (cur->stream) {
5843                 state->stream = cur->stream;
5844                 dc_stream_retain(state->stream);
5845         }
5846
5847         state->active_planes = cur->active_planes;
5848         state->vrr_infopacket = cur->vrr_infopacket;
5849         state->abm_level = cur->abm_level;
5850         state->vrr_supported = cur->vrr_supported;
5851         state->freesync_config = cur->freesync_config;
5852         state->cm_has_degamma = cur->cm_has_degamma;
5853         state->cm_is_degamma_srgb = cur->cm_is_degamma_srgb;
5854         /* TODO: Duplicate dc_stream once the stream object is flattened */
5855
5856         return &state->base;
5857 }
5858
5859 #ifdef CONFIG_DRM_AMD_SECURE_DISPLAY
5860 static int amdgpu_dm_crtc_late_register(struct drm_crtc *crtc)
5861 {
5862         crtc_debugfs_init(crtc);
5863
5864         return 0;
5865 }
5866 #endif
5867
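/*
 * Enable/disable the per-CRTC VUPDATE interrupt; it is only needed while
 * VRR is active (see dm_set_vblank() below). Returns -EBUSY if DC rejects
 * the interrupt state change.
 */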
5868 static inline int dm_set_vupdate_irq(struct drm_crtc *crtc, bool enable)
5869 {
5870         enum dc_irq_source irq_source;
5871         struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
5872         struct amdgpu_device *adev = drm_to_adev(crtc->dev);
5873         int rc;
5874
5875         irq_source = IRQ_TYPE_VUPDATE + acrtc->otg_inst;
5876
5877         rc = dc_interrupt_set(adev->dm.dc, irq_source, enable) ? 0 : -EBUSY;
5878
5879         DRM_DEBUG_VBL("crtc %d - vupdate irq %sabling: r=%d\n",
5880                       acrtc->crtc_id, enable ? "en" : "dis", rc);
5881         return rc;
5882 }
5883
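/*
 * Common helper behind dm_enable_vblank()/dm_disable_vblank(): couple the
 * VUPDATE interrupt to the vblank state when VRR is active, program the
 * VBLANK interrupt itself, and on DCN hand the new state to the vblank
 * workqueue (mall_work) outside of interrupt context.
 */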
5884 static inline int dm_set_vblank(struct drm_crtc *crtc, bool enable)
5885 {
5886         enum dc_irq_source irq_source;
5887         struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
5888         struct amdgpu_device *adev = drm_to_adev(crtc->dev);
5889         struct dm_crtc_state *acrtc_state = to_dm_crtc_state(crtc->state);
5890 #if defined(CONFIG_DRM_AMD_DC_DCN)
5891         struct amdgpu_display_manager *dm = &adev->dm;
5892         unsigned long flags;
5893 #endif
5894         int rc = 0;
5895
5896         if (enable) {
5897                 /* vblank irq on -> Only need vupdate irq in vrr mode */
5898                 if (amdgpu_dm_vrr_active(acrtc_state))
5899                         rc = dm_set_vupdate_irq(crtc, true);
5900         } else {
5901                 /* vblank irq off -> vupdate irq off */
5902                 rc = dm_set_vupdate_irq(crtc, false);
5903         }
5904
5905         if (rc)
5906                 return rc;
5907
5908         irq_source = IRQ_TYPE_VBLANK + acrtc->otg_inst;
5909
5910         if (!dc_interrupt_set(adev->dm.dc, irq_source, enable))
5911                 return -EBUSY;
5912
5913         if (amdgpu_in_reset(adev))
5914                 return 0;
5915
5916 #if defined(CONFIG_DRM_AMD_DC_DCN)
5917         spin_lock_irqsave(&dm->vblank_lock, flags);
5918         dm->vblank_workqueue->dm = dm;
5919         dm->vblank_workqueue->otg_inst = acrtc->otg_inst;
5920         dm->vblank_workqueue->enable = enable;
5921         spin_unlock_irqrestore(&dm->vblank_lock, flags);
5922         schedule_work(&dm->vblank_workqueue->mall_work);
5923 #endif
5924
5925         return 0;
5926 }
5927
5928 static int dm_enable_vblank(struct drm_crtc *crtc)
5929 {
5930         return dm_set_vblank(crtc, true);
5931 }
5932
5933 static void dm_disable_vblank(struct drm_crtc *crtc)
5934 {
5935         dm_set_vblank(crtc, false);
5936 }
5937
5938 /* Only the options currently available to the driver are implemented */
5939 static const struct drm_crtc_funcs amdgpu_dm_crtc_funcs = {
5940         .reset = dm_crtc_reset_state,
5941         .destroy = amdgpu_dm_crtc_destroy,
5942         .set_config = drm_atomic_helper_set_config,
5943         .page_flip = drm_atomic_helper_page_flip,
5944         .atomic_duplicate_state = dm_crtc_duplicate_state,
5945         .atomic_destroy_state = dm_crtc_destroy_state,
5946         .set_crc_source = amdgpu_dm_crtc_set_crc_source,
5947         .verify_crc_source = amdgpu_dm_crtc_verify_crc_source,
5948         .get_crc_sources = amdgpu_dm_crtc_get_crc_sources,
5949         .get_vblank_counter = amdgpu_get_vblank_counter_kms,
5950         .enable_vblank = dm_enable_vblank,
5951         .disable_vblank = dm_disable_vblank,
5952         .get_vblank_timestamp = drm_crtc_vblank_helper_get_vblank_timestamp,
5953 #if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
5954         .late_register = amdgpu_dm_crtc_late_register,
5955 #endif
5956 };
5957
5958 static enum drm_connector_status
5959 amdgpu_dm_connector_detect(struct drm_connector *connector, bool force)
5960 {
5961         bool connected;
5962         struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
5963
5964         /*
5965          * Notes:
5966          * 1. This interface is NOT called in context of HPD irq.
5967          * 2. This interface *is called* in context of user-mode ioctl. Which
5968          * makes it a bad place for *any* MST-related activity.
5969          */
5970
5971         if (aconnector->base.force == DRM_FORCE_UNSPECIFIED &&
5972             !aconnector->fake_enable)
5973                 connected = (aconnector->dc_sink != NULL);
5974         else
5975                 connected = (aconnector->base.force == DRM_FORCE_ON);
5976
5977         update_subconnector_property(aconnector);
5978
5979         return (connected ? connector_status_connected :
5980                         connector_status_disconnected);
5981 }
5982
5983 int amdgpu_dm_connector_atomic_set_property(struct drm_connector *connector,
5984                                             struct drm_connector_state *connector_state,
5985                                             struct drm_property *property,
5986                                             uint64_t val)
5987 {
5988         struct drm_device *dev = connector->dev;
5989         struct amdgpu_device *adev = drm_to_adev(dev);
5990         struct dm_connector_state *dm_old_state =
5991                 to_dm_connector_state(connector->state);
5992         struct dm_connector_state *dm_new_state =
5993                 to_dm_connector_state(connector_state);
5994
5995         int ret = -EINVAL;
5996
5997         if (property == dev->mode_config.scaling_mode_property) {
5998                 enum amdgpu_rmx_type rmx_type;
5999
6000                 switch (val) {
6001                 case DRM_MODE_SCALE_CENTER:
6002                         rmx_type = RMX_CENTER;
6003                         break;
6004                 case DRM_MODE_SCALE_ASPECT:
6005                         rmx_type = RMX_ASPECT;
6006                         break;
6007                 case DRM_MODE_SCALE_FULLSCREEN:
6008                         rmx_type = RMX_FULL;
6009                         break;
6010                 case DRM_MODE_SCALE_NONE:
6011                 default:
6012                         rmx_type = RMX_OFF;
6013                         break;
6014                 }
6015
6016                 if (dm_old_state->scaling == rmx_type)
6017                         return 0;
6018
6019                 dm_new_state->scaling = rmx_type;
6020                 ret = 0;
6021         } else if (property == adev->mode_info.underscan_hborder_property) {
6022                 dm_new_state->underscan_hborder = val;
6023                 ret = 0;
6024         } else if (property == adev->mode_info.underscan_vborder_property) {
6025                 dm_new_state->underscan_vborder = val;
6026                 ret = 0;
6027         } else if (property == adev->mode_info.underscan_property) {
6028                 dm_new_state->underscan_enable = val;
6029                 ret = 0;
6030         } else if (property == adev->mode_info.abm_level_property) {
6031                 dm_new_state->abm_level = val;
6032                 ret = 0;
6033         }
6034
6035         return ret;
6036 }
6037
6038 int amdgpu_dm_connector_atomic_get_property(struct drm_connector *connector,
6039                                             const struct drm_connector_state *state,
6040                                             struct drm_property *property,
6041                                             uint64_t *val)
6042 {
6043         struct drm_device *dev = connector->dev;
6044         struct amdgpu_device *adev = drm_to_adev(dev);
6045         struct dm_connector_state *dm_state =
6046                 to_dm_connector_state(state);
6047         int ret = -EINVAL;
6048
6049         if (property == dev->mode_config.scaling_mode_property) {
6050                 switch (dm_state->scaling) {
6051                 case RMX_CENTER:
6052                         *val = DRM_MODE_SCALE_CENTER;
6053                         break;
6054                 case RMX_ASPECT:
6055                         *val = DRM_MODE_SCALE_ASPECT;
6056                         break;
6057                 case RMX_FULL:
6058                         *val = DRM_MODE_SCALE_FULLSCREEN;
6059                         break;
6060                 case RMX_OFF:
6061                 default:
6062                         *val = DRM_MODE_SCALE_NONE;
6063                         break;
6064                 }
6065                 ret = 0;
6066         } else if (property == adev->mode_info.underscan_hborder_property) {
6067                 *val = dm_state->underscan_hborder;
6068                 ret = 0;
6069         } else if (property == adev->mode_info.underscan_vborder_property) {
6070                 *val = dm_state->underscan_vborder;
6071                 ret = 0;
6072         } else if (property == adev->mode_info.underscan_property) {
6073                 *val = dm_state->underscan_enable;
6074                 ret = 0;
6075         } else if (property == adev->mode_info.abm_level_property) {
6076                 *val = dm_state->abm_level;
6077                 ret = 0;
6078         }
6079
6080         return ret;
6081 }
6082
6083 static void amdgpu_dm_connector_unregister(struct drm_connector *connector)
6084 {
6085         struct amdgpu_dm_connector *amdgpu_dm_connector = to_amdgpu_dm_connector(connector);
6086
6087         drm_dp_aux_unregister(&amdgpu_dm_connector->dm_dp_aux.aux);
6088 }
6089
6090 static void amdgpu_dm_connector_destroy(struct drm_connector *connector)
6091 {
6092         struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
6093         const struct dc_link *link = aconnector->dc_link;
6094         struct amdgpu_device *adev = drm_to_adev(connector->dev);
6095         struct amdgpu_display_manager *dm = &adev->dm;
6096
6097         /*
6098          * Call only if mst_mgr was initialized before, since it's not done
6099          * for all connector types.
6100          */
6101         if (aconnector->mst_mgr.dev)
6102                 drm_dp_mst_topology_mgr_destroy(&aconnector->mst_mgr);
6103
6104 #if defined(CONFIG_BACKLIGHT_CLASS_DEVICE) ||\
6105         defined(CONFIG_BACKLIGHT_CLASS_DEVICE_MODULE)
6106
6107         if ((link->connector_signal & (SIGNAL_TYPE_EDP | SIGNAL_TYPE_LVDS)) &&
6108             link->type != dc_connection_none &&
6109             dm->backlight_dev) {
6110                 backlight_device_unregister(dm->backlight_dev);
6111                 dm->backlight_dev = NULL;
6112         }
6113 #endif
6114
6115         if (aconnector->dc_em_sink)
6116                 dc_sink_release(aconnector->dc_em_sink);
6117         aconnector->dc_em_sink = NULL;
6118         if (aconnector->dc_sink)
6119                 dc_sink_release(aconnector->dc_sink);
6120         aconnector->dc_sink = NULL;
6121
6122         drm_dp_cec_unregister_connector(&aconnector->dm_dp_aux.aux);
6123         drm_connector_unregister(connector);
6124         drm_connector_cleanup(connector);
6125         if (aconnector->i2c) {
6126                 i2c_del_adapter(&aconnector->i2c->base);
6127                 kfree(aconnector->i2c);
6128         }
6129         kfree(aconnector->dm_dp_aux.aux.name);
6130
6131         kfree(connector);
6132 }
6133
6134 void amdgpu_dm_connector_funcs_reset(struct drm_connector *connector)
6135 {
6136         struct dm_connector_state *state =
6137                 to_dm_connector_state(connector->state);
6138
6139         if (connector->state)
6140                 __drm_atomic_helper_connector_destroy_state(connector->state);
6141
6142         kfree(state);
6143
6144         state = kzalloc(sizeof(*state), GFP_KERNEL);
6145
6146         if (state) {
6147                 state->scaling = RMX_OFF;
6148                 state->underscan_enable = false;
6149                 state->underscan_hborder = 0;
6150                 state->underscan_vborder = 0;
6151                 state->base.max_requested_bpc = 8;
6152                 state->vcpi_slots = 0;
6153                 state->pbn = 0;
6154                 if (connector->connector_type == DRM_MODE_CONNECTOR_eDP)
6155                         state->abm_level = amdgpu_dm_abm_level;
6156
6157                 __drm_atomic_helper_connector_reset(connector, &state->base);
6158         }
6159 }
6160
6161 struct drm_connector_state *
6162 amdgpu_dm_connector_atomic_duplicate_state(struct drm_connector *connector)
6163 {
6164         struct dm_connector_state *state =
6165                 to_dm_connector_state(connector->state);
6166
6167         struct dm_connector_state *new_state =
6168                         kmemdup(state, sizeof(*state), GFP_KERNEL);
6169
6170         if (!new_state)
6171                 return NULL;
6172
6173         __drm_atomic_helper_connector_duplicate_state(connector, &new_state->base);
6174
6175         new_state->freesync_capable = state->freesync_capable;
6176         new_state->abm_level = state->abm_level;
6177         new_state->scaling = state->scaling;
6178         new_state->underscan_enable = state->underscan_enable;
6179         new_state->underscan_hborder = state->underscan_hborder;
6180         new_state->underscan_vborder = state->underscan_vborder;
6181         new_state->vcpi_slots = state->vcpi_slots;
6182         new_state->pbn = state->pbn;
6183         return &new_state->base;
6184 }
6185
6186 static int
6187 amdgpu_dm_connector_late_register(struct drm_connector *connector)
6188 {
6189         struct amdgpu_dm_connector *amdgpu_dm_connector =
6190                 to_amdgpu_dm_connector(connector);
6191         int r;
6192
6193         if ((connector->connector_type == DRM_MODE_CONNECTOR_DisplayPort) ||
6194             (connector->connector_type == DRM_MODE_CONNECTOR_eDP)) {
6195                 amdgpu_dm_connector->dm_dp_aux.aux.dev = connector->kdev;
6196                 r = drm_dp_aux_register(&amdgpu_dm_connector->dm_dp_aux.aux);
6197                 if (r)
6198                         return r;
6199         }
6200
6201 #if defined(CONFIG_DEBUG_FS)
6202         connector_debugfs_init(amdgpu_dm_connector);
6203 #endif
6204
6205         return 0;
6206 }
6207
6208 static const struct drm_connector_funcs amdgpu_dm_connector_funcs = {
6209         .reset = amdgpu_dm_connector_funcs_reset,
6210         .detect = amdgpu_dm_connector_detect,
6211         .fill_modes = drm_helper_probe_single_connector_modes,
6212         .destroy = amdgpu_dm_connector_destroy,
6213         .atomic_duplicate_state = amdgpu_dm_connector_atomic_duplicate_state,
6214         .atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
6215         .atomic_set_property = amdgpu_dm_connector_atomic_set_property,
6216         .atomic_get_property = amdgpu_dm_connector_atomic_get_property,
6217         .late_register = amdgpu_dm_connector_late_register,
6218         .early_unregister = amdgpu_dm_connector_unregister
6219 };
6220
6221 static int get_modes(struct drm_connector *connector)
6222 {
6223         return amdgpu_dm_connector_get_modes(connector);
6224 }
6225
6226 static void create_eml_sink(struct amdgpu_dm_connector *aconnector)
6227 {
6228         struct dc_sink_init_data init_params = {
6229                         .link = aconnector->dc_link,
6230                         .sink_signal = SIGNAL_TYPE_VIRTUAL
6231         };
6232         struct edid *edid;
6233
6234         if (!aconnector->base.edid_blob_ptr) {
6235                 DRM_ERROR("No EDID firmware found on connector: %s, forcing to OFF!\n",
6236                                 aconnector->base.name);
6237
6238                 aconnector->base.force = DRM_FORCE_OFF;
6239                 aconnector->base.override_edid = false;
6240                 return;
6241         }
6242
6243         edid = (struct edid *) aconnector->base.edid_blob_ptr->data;
6244
6245         aconnector->edid = edid;
6246
6247         aconnector->dc_em_sink = dc_link_add_remote_sink(
6248                 aconnector->dc_link,
6249                 (uint8_t *)edid,
6250                 (edid->extensions + 1) * EDID_LENGTH,
6251                 &init_params);
6252
6253         if (aconnector->base.force == DRM_FORCE_ON) {
6254                 aconnector->dc_sink = aconnector->dc_link->local_sink ?
6255                 aconnector->dc_link->local_sink :
6256                 aconnector->dc_em_sink;
6257                 dc_sink_retain(aconnector->dc_sink);
6258         }
6259 }
6260
6261 static void handle_edid_mgmt(struct amdgpu_dm_connector *aconnector)
6262 {
6263         struct dc_link *link = (struct dc_link *)aconnector->dc_link;
6264
6265         /*
6266          * In case of headless boot with force on for a DP-managed connector,
6267          * those settings have to be != 0 to get an initial modeset.
6268          */
6269         if (link->connector_signal == SIGNAL_TYPE_DISPLAY_PORT) {
6270                 link->verified_link_cap.lane_count = LANE_COUNT_FOUR;
6271                 link->verified_link_cap.link_rate = LINK_RATE_HIGH2;
6272         }
6273
6274
6275         aconnector->base.override_edid = true;
6276         create_eml_sink(aconnector);
6277 }
6278
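/*
 * Wrapper around create_stream_for_sink() that also runs DC validation on
 * the result. On validation failure the requested bpc is lowered in steps
 * of two (e.g. 10 -> 8 -> 6) and the stream is rebuilt; as a last resort,
 * an encoder validation failure is retried once with YCbCr420 forced.
 */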
6279 static struct dc_stream_state *
6280 create_validate_stream_for_sink(struct amdgpu_dm_connector *aconnector,
6281                                 const struct drm_display_mode *drm_mode,
6282                                 const struct dm_connector_state *dm_state,
6283                                 const struct dc_stream_state *old_stream)
6284 {
6285         struct drm_connector *connector = &aconnector->base;
6286         struct amdgpu_device *adev = drm_to_adev(connector->dev);
6287         struct dc_stream_state *stream;
6288         const struct drm_connector_state *drm_state = dm_state ? &dm_state->base : NULL;
6289         int requested_bpc = drm_state ? drm_state->max_requested_bpc : 8;
6290         enum dc_status dc_result = DC_OK;
6291
6292         do {
6293                 stream = create_stream_for_sink(aconnector, drm_mode,
6294                                                 dm_state, old_stream,
6295                                                 requested_bpc);
6296                 if (stream == NULL) {
6297                         DRM_ERROR("Failed to create stream for sink!\n");
6298                         break;
6299                 }
6300
6301                 dc_result = dc_validate_stream(adev->dm.dc, stream);
6302
6303                 if (dc_result != DC_OK) {
6304                         DRM_DEBUG_KMS("Mode %dx%d (clk %d) failed DC validation with error %d (%s)\n",
6305                                       drm_mode->hdisplay,
6306                                       drm_mode->vdisplay,
6307                                       drm_mode->clock,
6308                                       dc_result,
6309                                       dc_status_to_str(dc_result));
6310
6311                         dc_stream_release(stream);
6312                         stream = NULL;
6313                         requested_bpc -= 2; /* lower bpc to retry validation */
6314                 }
6315
6316         } while (stream == NULL && requested_bpc >= 6);
6317
6318         if (dc_result == DC_FAIL_ENC_VALIDATE && !aconnector->force_yuv420_output) {
6319                 DRM_DEBUG_KMS("Retry forcing YCbCr420 encoding\n");
6320
6321                 aconnector->force_yuv420_output = true;
6322                 stream = create_validate_stream_for_sink(aconnector, drm_mode,
6323                                                 dm_state, old_stream);
6324                 aconnector->force_yuv420_output = false;
6325         }
6326
6327         return stream;
6328 }
6329
6330 enum drm_mode_status amdgpu_dm_connector_mode_valid(struct drm_connector *connector,
6331                                    struct drm_display_mode *mode)
6332 {
6333         int result = MODE_ERROR;
6334         struct dc_sink *dc_sink;
6335         /* TODO: Unhardcode stream count */
6336         struct dc_stream_state *stream;
6337         struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
6338
6339         if ((mode->flags & DRM_MODE_FLAG_INTERLACE) ||
6340                         (mode->flags & DRM_MODE_FLAG_DBLSCAN))
6341                 return result;
6342
6343         /*
6344          * Only run this the first time mode_valid is called to initialize
6345          * EDID mgmt
6346          */
6347         if (aconnector->base.force != DRM_FORCE_UNSPECIFIED &&
6348                 !aconnector->dc_em_sink)
6349                 handle_edid_mgmt(aconnector);
6350
6351         dc_sink = to_amdgpu_dm_connector(connector)->dc_sink;
6352
6353         if (dc_sink == NULL && aconnector->base.force != DRM_FORCE_ON_DIGITAL &&
6354                                 aconnector->base.force != DRM_FORCE_ON) {
6355                 DRM_ERROR("dc_sink is NULL!\n");
6356                 goto fail;
6357         }
6358
6359         stream = create_validate_stream_for_sink(aconnector, mode, NULL, NULL);
6360         if (stream) {
6361                 dc_stream_release(stream);
6362                 result = MODE_OK;
6363         }
6364
6365 fail:
6366         /* TODO: error handling */
6367         return result;
6368 }
6369
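/*
 * Pack the connector's HDR output metadata into a DC info packet. The HDMI
 * Dynamic Range and Mastering infoframe is a fixed 26-byte payload behind a
 * 4-byte header (type 0x87, version 0x01, length 0x1A); for DP/eDP the same
 * payload is carried in an SDP with its own 4-byte header instead.
 */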
6370 static int fill_hdr_info_packet(const struct drm_connector_state *state,
6371                                 struct dc_info_packet *out)
6372 {
6373         struct hdmi_drm_infoframe frame;
6374         unsigned char buf[30]; /* 26 + 4 */
6375         ssize_t len;
6376         int ret, i;
6377
6378         memset(out, 0, sizeof(*out));
6379
6380         if (!state->hdr_output_metadata)
6381                 return 0;
6382
6383         ret = drm_hdmi_infoframe_set_hdr_metadata(&frame, state);
6384         if (ret)
6385                 return ret;
6386
6387         len = hdmi_drm_infoframe_pack_only(&frame, buf, sizeof(buf));
6388         if (len < 0)
6389                 return (int)len;
6390
6391         /* Static metadata is a fixed 26 bytes + 4 byte header. */
6392         if (len != 30)
6393                 return -EINVAL;
6394
6395         /* Prepare the infopacket for DC. */
6396         switch (state->connector->connector_type) {
6397         case DRM_MODE_CONNECTOR_HDMIA:
6398                 out->hb0 = 0x87; /* type */
6399                 out->hb1 = 0x01; /* version */
6400                 out->hb2 = 0x1A; /* length */
6401                 out->sb[0] = buf[3]; /* checksum */
6402                 i = 1;
6403                 break;
6404
6405         case DRM_MODE_CONNECTOR_DisplayPort:
6406         case DRM_MODE_CONNECTOR_eDP:
6407                 out->hb0 = 0x00; /* sdp id, zero */
6408                 out->hb1 = 0x87; /* type */
6409                 out->hb2 = 0x1D; /* payload len - 1 */
6410                 out->hb3 = (0x13 << 2); /* sdp version */
6411                 out->sb[0] = 0x01; /* version */
6412                 out->sb[1] = 0x1A; /* length */
6413                 i = 2;
6414                 break;
6415
6416         default:
6417                 return -EINVAL;
6418         }
6419
6420         memcpy(&out->sb[i], &buf[4], 26);
6421         out->valid = true;
6422
6423         print_hex_dump(KERN_DEBUG, "HDR SB:", DUMP_PREFIX_NONE, 16, 1, out->sb,
6424                        sizeof(out->sb), false);
6425
6426         return 0;
6427 }
6428
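/*
 * HDR metadata blobs are compared by pointer first, then by content when
 * both are present with equal length; any other pointer mismatch counts as
 * different.
 */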
6429 static bool
6430 is_hdr_metadata_different(const struct drm_connector_state *old_state,
6431                           const struct drm_connector_state *new_state)
6432 {
6433         struct drm_property_blob *old_blob = old_state->hdr_output_metadata;
6434         struct drm_property_blob *new_blob = new_state->hdr_output_metadata;
6435
6436         if (old_blob != new_blob) {
6437                 if (old_blob && new_blob &&
6438                     old_blob->length == new_blob->length)
6439                         return memcmp(old_blob->data, new_blob->data,
6440                                       old_blob->length);
6441
6442                 return true;
6443         }
6444
6445         return false;
6446 }
6447
6448 static int
6449 amdgpu_dm_connector_atomic_check(struct drm_connector *conn,
6450                                  struct drm_atomic_state *state)
6451 {
6452         struct drm_connector_state *new_con_state =
6453                 drm_atomic_get_new_connector_state(state, conn);
6454         struct drm_connector_state *old_con_state =
6455                 drm_atomic_get_old_connector_state(state, conn);
6456         struct drm_crtc *crtc = new_con_state->crtc;
6457         struct drm_crtc_state *new_crtc_state;
6458         int ret;
6459
6460         trace_amdgpu_dm_connector_atomic_check(new_con_state);
6461
6462         if (!crtc)
6463                 return 0;
6464
6465         if (is_hdr_metadata_different(old_con_state, new_con_state)) {
6466                 struct dc_info_packet hdr_infopacket;
6467
6468                 ret = fill_hdr_info_packet(new_con_state, &hdr_infopacket);
6469                 if (ret)
6470                         return ret;
6471
6472                 new_crtc_state = drm_atomic_get_crtc_state(state, crtc);
6473                 if (IS_ERR(new_crtc_state))
6474                         return PTR_ERR(new_crtc_state);
6475
6476                 /*
6477                  * DC considers the stream backends changed if the
6478                  * static metadata changes. Forcing the modeset also
6479                  * gives a simple way for userspace to switch from
6480                  * 8bpc to 10bpc when setting the metadata to enter
6481                  * or exit HDR.
6482                  *
6483                  * Changing the static metadata after it's been
6484                  * set is permissible, however. So only force a
6485                  * modeset if we're entering or exiting HDR.
6486                  */
6487                 new_crtc_state->mode_changed =
6488                         !old_con_state->hdr_output_metadata ||
6489                         !new_con_state->hdr_output_metadata;
6490         }
6491
6492         return 0;
6493 }
6494
6495 static const struct drm_connector_helper_funcs
6496 amdgpu_dm_connector_helper_funcs = {
6497         /*
6498          * When hotplugging a second, bigger display in FB console mode, higher
6499          * resolution modes will be filtered out by drm_mode_validate_size() and
6500          * go missing after the user starts lightdm. So we need to renew the mode
6501          * list in the get_modes callback, not just return the mode count.
6502          */
6503         .get_modes = get_modes,
6504         .mode_valid = amdgpu_dm_connector_mode_valid,
6505         .atomic_check = amdgpu_dm_connector_atomic_check,
6506 };
6507
6508 static void dm_crtc_helper_disable(struct drm_crtc *crtc)
6509 {
6510 }
6511
6512 static int count_crtc_active_planes(struct drm_crtc_state *new_crtc_state)
6513 {
6514         struct drm_atomic_state *state = new_crtc_state->state;
6515         struct drm_plane *plane;
6516         int num_active = 0;
6517
6518         drm_for_each_plane_mask(plane, state->dev, new_crtc_state->plane_mask) {
6519                 struct drm_plane_state *new_plane_state;
6520
6521                 /* Cursor planes are "fake". */
6522                 if (plane->type == DRM_PLANE_TYPE_CURSOR)
6523                         continue;
6524
6525                 new_plane_state = drm_atomic_get_new_plane_state(state, plane);
6526
6527                 if (!new_plane_state) {
6528                         /*
6529                          * The plane is enabled on the CRTC and hasn't changed
6530                          * state. This means that it previously passed
6531                          * validation and is therefore enabled.
6532                          */
6533                         num_active += 1;
6534                         continue;
6535                 }
6536
6537                 /* We need a framebuffer to be considered enabled. */
6538                 num_active += (new_plane_state->fb != NULL);
6539         }
6540
6541         return num_active;
6542 }
6543
6544 static void dm_update_crtc_active_planes(struct drm_crtc *crtc,
6545                                          struct drm_crtc_state *new_crtc_state)
6546 {
6547         struct dm_crtc_state *dm_new_crtc_state =
6548                 to_dm_crtc_state(new_crtc_state);
6549
6550         dm_new_crtc_state->active_planes = 0;
6551
6552         if (!dm_new_crtc_state->stream)
6553                 return;
6554
6555         dm_new_crtc_state->active_planes =
6556                 count_crtc_active_planes(new_crtc_state);
6557 }
6558
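/*
 * Atomic check for DM CRTCs: refresh the active plane count, require the
 * primary plane whenever the CRTC is enabled (cursor-only configurations
 * are not supported by the hardware), and validate any attached stream
 * with DC.
 */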
6559 static int dm_crtc_helper_atomic_check(struct drm_crtc *crtc,
6560                                        struct drm_atomic_state *state)
6561 {
6562         struct drm_crtc_state *crtc_state = drm_atomic_get_new_crtc_state(state,
6563                                                                           crtc);
6564         struct amdgpu_device *adev = drm_to_adev(crtc->dev);
6565         struct dc *dc = adev->dm.dc;
6566         struct dm_crtc_state *dm_crtc_state = to_dm_crtc_state(crtc_state);
6567         int ret = -EINVAL;
6568
6569         trace_amdgpu_dm_crtc_atomic_check(crtc_state);
6570
6571         dm_update_crtc_active_planes(crtc, crtc_state);
6572
6573         if (unlikely(!dm_crtc_state->stream &&
6574                      modeset_required(crtc_state, NULL, dm_crtc_state->stream))) {
6575                 WARN_ON(1);
6576                 return ret;
6577         }
6578
6579         /*
6580          * We require the primary plane to be enabled whenever the CRTC is, otherwise
6581          * drm_mode_cursor_universal may end up trying to enable the cursor plane while all other
6582          * planes are disabled, which is not supported by the hardware. And there is legacy
6583          * userspace which stops using the HW cursor altogether in response to the resulting EINVAL.
6584          */
6585         if (crtc_state->enable &&
6586             !(crtc_state->plane_mask & drm_plane_mask(crtc->primary))) {
6587                 DRM_DEBUG_ATOMIC("Can't enable a CRTC without enabling the primary plane\n");
6588                 return -EINVAL;
6589         }
6590
6591         /* In some use cases, like reset, no stream is attached */
6592         if (!dm_crtc_state->stream)
6593                 return 0;
6594
6595         if (dc_validate_stream(dc, dm_crtc_state->stream) == DC_OK)
6596                 return 0;
6597
6598         DRM_DEBUG_ATOMIC("Failed DC stream validation\n");
6599         return ret;
6600 }
6601
6602 static bool dm_crtc_helper_mode_fixup(struct drm_crtc *crtc,
6603                                       const struct drm_display_mode *mode,
6604                                       struct drm_display_mode *adjusted_mode)
6605 {
6606         return true;
6607 }
6608
6609 static const struct drm_crtc_helper_funcs amdgpu_dm_crtc_helper_funcs = {
6610         .disable = dm_crtc_helper_disable,
6611         .atomic_check = dm_crtc_helper_atomic_check,
6612         .mode_fixup = dm_crtc_helper_mode_fixup,
6613         .get_scanout_position = amdgpu_crtc_get_scanout_position,
6614 };
6615
6616 static void dm_encoder_helper_disable(struct drm_encoder *encoder)
6617 {
6618
6619 }
6620
6621 static int convert_dc_color_depth_into_bpc(enum dc_color_depth display_color_depth)
6622 {
6623         switch (display_color_depth) {
6624         case COLOR_DEPTH_666:
6625                 return 6;
6626         case COLOR_DEPTH_888:
6627                 return 8;
6628         case COLOR_DEPTH_101010:
6629                 return 10;
6630         case COLOR_DEPTH_121212:
6631                 return 12;
6632         case COLOR_DEPTH_141414:
6633                 return 14;
6634         case COLOR_DEPTH_161616:
6635                 return 16;
6636         default:
6637                 break;
6638         }
6639         return 0;
6640 }
6641
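/*
 * For MST connectors, recompute the payload bandwidth number (PBN) from
 * the adjusted mode's pixel clock and effective bpp (3 * bpc), then
 * reserve VCPI time slots on the topology manager. Connectors without an
 * MST port return early.
 */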
6642 static int dm_encoder_helper_atomic_check(struct drm_encoder *encoder,
6643                                           struct drm_crtc_state *crtc_state,
6644                                           struct drm_connector_state *conn_state)
6645 {
6646         struct drm_atomic_state *state = crtc_state->state;
6647         struct drm_connector *connector = conn_state->connector;
6648         struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
6649         struct dm_connector_state *dm_new_connector_state = to_dm_connector_state(conn_state);
6650         const struct drm_display_mode *adjusted_mode = &crtc_state->adjusted_mode;
6651         struct drm_dp_mst_topology_mgr *mst_mgr;
6652         struct drm_dp_mst_port *mst_port;
6653         enum dc_color_depth color_depth;
6654         int clock, bpp = 0;
6655         bool is_y420 = false;
6656
6657         if (!aconnector->port || !aconnector->dc_sink)
6658                 return 0;
6659
6660         mst_port = aconnector->port;
6661         mst_mgr = &aconnector->mst_port->mst_mgr;
6662
6663         if (!crtc_state->connectors_changed && !crtc_state->mode_changed)
6664                 return 0;
6665
6666         if (!state->duplicated) {
6667                 int max_bpc = conn_state->max_requested_bpc;
6668                 is_y420 = drm_mode_is_420_also(&connector->display_info, adjusted_mode) &&
6669                                 aconnector->force_yuv420_output;
6670                 color_depth = convert_color_depth_from_display_info(connector,
6671                                                                     is_y420,
6672                                                                     max_bpc);
6673                 bpp = convert_dc_color_depth_into_bpc(color_depth) * 3;
6674                 clock = adjusted_mode->clock;
6675                 dm_new_connector_state->pbn = drm_dp_calc_pbn_mode(clock, bpp, false);
6676         }
6677         dm_new_connector_state->vcpi_slots = drm_dp_atomic_find_vcpi_slots(state,
6678                                                                            mst_mgr,
6679                                                                            mst_port,
6680                                                                            dm_new_connector_state->pbn,
6681                                                                            dm_mst_get_pbn_divider(aconnector->dc_link));
6682         if (dm_new_connector_state->vcpi_slots < 0) {
6683                 DRM_DEBUG_ATOMIC("failed finding vcpi slots: %d\n", (int)dm_new_connector_state->vcpi_slots);
6684                 return dm_new_connector_state->vcpi_slots;
6685         }
6686         return 0;
6687 }
6688
6689 const struct drm_encoder_helper_funcs amdgpu_dm_encoder_helper_funcs = {
6690         .disable = dm_encoder_helper_disable,
6691         .atomic_check = dm_encoder_helper_atomic_check
6692 };
6693
6694 #if defined(CONFIG_DRM_AMD_DC_DCN)
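/*
 * Once DSC has been decided per stream, walk the new connector states and
 * enable/disable DSC on each MST port accordingly, recomputing PBN and
 * VCPI slots from the DSC target bits_per_pixel (drm_dp_calc_pbn_mode()
 * is called with dsc = true, which treats bpp in units of 1/16 bpp).
 */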
6695 static int dm_update_mst_vcpi_slots_for_dsc(struct drm_atomic_state *state,
6696                                             struct dc_state *dc_state)
6697 {
6698         struct dc_stream_state *stream = NULL;
6699         struct drm_connector *connector;
6700         struct drm_connector_state *new_con_state;
6701         struct amdgpu_dm_connector *aconnector;
6702         struct dm_connector_state *dm_conn_state;
6703         int i, j, clock, bpp;
6704         int vcpi, pbn_div, pbn = 0;
6705
6706         for_each_new_connector_in_state(state, connector, new_con_state, i) {
6707
6708                 aconnector = to_amdgpu_dm_connector(connector);
6709
6710                 if (!aconnector->port)
6711                         continue;
6712
6713                 if (!new_con_state || !new_con_state->crtc)
6714                         continue;
6715
6716                 dm_conn_state = to_dm_connector_state(new_con_state);
6717
6718                 for (j = 0; j < dc_state->stream_count; j++) {
6719                         stream = dc_state->streams[j];
6720                         if (!stream)
6721                                 continue;
6722
6723                         if ((struct amdgpu_dm_connector*)stream->dm_stream_context == aconnector)
6724                                 break;
6725
6726                         stream = NULL;
6727                 }
6728
6729                 if (!stream)
6730                         continue;
6731
6732                 if (stream->timing.flags.DSC != 1) {
6733                         drm_dp_mst_atomic_enable_dsc(state,
6734                                                      aconnector->port,
6735                                                      dm_conn_state->pbn,
6736                                                      0,
6737                                                      false);
6738                         continue;
6739                 }
6740
6741                 pbn_div = dm_mst_get_pbn_divider(stream->link);
6742                 bpp = stream->timing.dsc_cfg.bits_per_pixel;
6743                 clock = stream->timing.pix_clk_100hz / 10;
6744                 pbn = drm_dp_calc_pbn_mode(clock, bpp, true);
6745                 vcpi = drm_dp_mst_atomic_enable_dsc(state,
6746                                                     aconnector->port,
6747                                                     pbn, pbn_div,
6748                                                     true);
6749                 if (vcpi < 0)
6750                         return vcpi;
6751
6752                 dm_conn_state->pbn = pbn;
6753                 dm_conn_state->vcpi_slots = vcpi;
6754         }
6755         return 0;
6756 }
6757 #endif
6758
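/*
 * Release any existing plane state and install a freshly zeroed
 * dm_plane_state as the plane's baseline atomic state.
 */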
6759 static void dm_drm_plane_reset(struct drm_plane *plane)
6760 {
6761         struct dm_plane_state *amdgpu_state = NULL;
6762
6763         if (plane->state)
6764                 plane->funcs->atomic_destroy_state(plane, plane->state);
6765
6766         amdgpu_state = kzalloc(sizeof(*amdgpu_state), GFP_KERNEL);
6767         WARN_ON(amdgpu_state == NULL);
6768
6769         if (amdgpu_state)
6770                 __drm_atomic_helper_plane_reset(plane, &amdgpu_state->base);
6771 }
6772
6773 static struct drm_plane_state *
6774 dm_drm_plane_duplicate_state(struct drm_plane *plane)
6775 {
6776         struct dm_plane_state *dm_plane_state, *old_dm_plane_state;
6777
6778         old_dm_plane_state = to_dm_plane_state(plane->state);
6779         dm_plane_state = kzalloc(sizeof(*dm_plane_state), GFP_KERNEL);
6780         if (!dm_plane_state)
6781                 return NULL;
6782
6783         __drm_atomic_helper_plane_duplicate_state(plane, &dm_plane_state->base);
6784
6785         if (old_dm_plane_state->dc_state) {
6786                 dm_plane_state->dc_state = old_dm_plane_state->dc_state;
6787                 dc_plane_state_retain(dm_plane_state->dc_state);
6788         }
6789
6790         return &dm_plane_state->base;
6791 }
6792
6793 static void dm_drm_plane_destroy_state(struct drm_plane *plane,
6794                                 struct drm_plane_state *state)
6795 {
6796         struct dm_plane_state *dm_plane_state = to_dm_plane_state(state);
6797
6798         if (dm_plane_state->dc_state)
6799                 dc_plane_state_release(dm_plane_state->dc_state);
6800
6801         drm_atomic_helper_plane_destroy_state(plane, state);
6802 }
6803
6804 static const struct drm_plane_funcs dm_plane_funcs = {
6805         .update_plane   = drm_atomic_helper_update_plane,
6806         .disable_plane  = drm_atomic_helper_disable_plane,
6807         .destroy        = drm_primary_helper_destroy,
6808         .reset = dm_drm_plane_reset,
6809         .atomic_duplicate_state = dm_drm_plane_duplicate_state,
6810         .atomic_destroy_state = dm_drm_plane_destroy_state,
6811         .format_mod_supported = dm_plane_format_mod_supported,
6812 };
6813
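/*
 * Pin the framebuffer BO into a scan-out capable domain and map it into
 * GART so a valid DC address is available. The reservation is only held
 * across the pin; the pin itself is released in cleanup_fb().
 */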
6814 static int dm_plane_helper_prepare_fb(struct drm_plane *plane,
6815                                       struct drm_plane_state *new_state)
6816 {
6817         struct amdgpu_framebuffer *afb;
6818         struct drm_gem_object *obj;
6819         struct amdgpu_device *adev;
6820         struct amdgpu_bo *rbo;
6821         struct dm_plane_state *dm_plane_state_new, *dm_plane_state_old;
6822         struct list_head list;
6823         struct ttm_validate_buffer tv;
6824         struct ww_acquire_ctx ticket;
6825         uint32_t domain;
6826         int r;
6827
6828         if (!new_state->fb) {
6829                 DRM_DEBUG_KMS("No FB bound\n");
6830                 return 0;
6831         }
6832
6833         afb = to_amdgpu_framebuffer(new_state->fb);
6834         obj = new_state->fb->obj[0];
6835         rbo = gem_to_amdgpu_bo(obj);
6836         adev = amdgpu_ttm_adev(rbo->tbo.bdev);
6837         INIT_LIST_HEAD(&list);
6838
6839         tv.bo = &rbo->tbo;
6840         tv.num_shared = 1;
6841         list_add(&tv.head, &list);
6842
6843         r = ttm_eu_reserve_buffers(&ticket, &list, false, NULL);
6844         if (r) {
6845                 dev_err(adev->dev, "fail to reserve bo (%d)\n", r);
6846                 return r;
6847         }
6848
6849         if (plane->type != DRM_PLANE_TYPE_CURSOR)
6850                 domain = amdgpu_display_supported_domains(adev, rbo->flags);
6851         else
6852                 domain = AMDGPU_GEM_DOMAIN_VRAM;
6853
6854         r = amdgpu_bo_pin(rbo, domain);
6855         if (unlikely(r != 0)) {
6856                 if (r != -ERESTARTSYS)
6857                         DRM_ERROR("Failed to pin framebuffer with error %d\n", r);
6858                 ttm_eu_backoff_reservation(&ticket, &list);
6859                 return r;
6860         }
6861
6862         r = amdgpu_ttm_alloc_gart(&rbo->tbo);
6863         if (unlikely(r != 0)) {
6864                 amdgpu_bo_unpin(rbo);
6865                 ttm_eu_backoff_reservation(&ticket, &list);
6866                 DRM_ERROR("%p bind failed\n", rbo);
6867                 return r;
6868         }
6869
6870         ttm_eu_backoff_reservation(&ticket, &list);
6871
6872         afb->address = amdgpu_bo_gpu_offset(rbo);
6873
6874         amdgpu_bo_ref(rbo);
6875
6876         /*
6877          * We don't do surface updates on planes that have been newly created,
6878          * but we also don't have the afb->address during atomic check.
6879          *
6880          * Fill in buffer attributes depending on the address here, but only on
6881          * newly created planes since they're not being used by DC yet and this
6882          * won't modify global state.
6883          */
6884         dm_plane_state_old = to_dm_plane_state(plane->state);
6885         dm_plane_state_new = to_dm_plane_state(new_state);
6886
6887         if (dm_plane_state_new->dc_state &&
6888             dm_plane_state_old->dc_state != dm_plane_state_new->dc_state) {
6889                 struct dc_plane_state *plane_state =
6890                         dm_plane_state_new->dc_state;
6891                 bool force_disable_dcc = !plane_state->dcc.enable;
6892
6893                 fill_plane_buffer_attributes(
6894                         adev, afb, plane_state->format, plane_state->rotation,
6895                         afb->tiling_flags,
6896                         &plane_state->tiling_info, &plane_state->plane_size,
6897                         &plane_state->dcc, &plane_state->address,
6898                         afb->tmz_surface, force_disable_dcc);
6899         }
6900
6901         return 0;
6902 }
6903
6904 static void dm_plane_helper_cleanup_fb(struct drm_plane *plane,
6905                                        struct drm_plane_state *old_state)
6906 {
6907         struct amdgpu_bo *rbo;
6908         int r;
6909
6910         if (!old_state->fb)
6911                 return;
6912
6913         rbo = gem_to_amdgpu_bo(old_state->fb->obj[0]);
6914         r = amdgpu_bo_reserve(rbo, false);
6915         if (unlikely(r)) {
6916                 DRM_ERROR("failed to reserve rbo before unpin\n");
6917                 return;
6918         }
6919
6920         amdgpu_bo_unpin(rbo);
6921         amdgpu_bo_unreserve(rbo);
6922         amdgpu_bo_unref(&rbo);
6923 }
6924
6925 static int dm_plane_helper_check_state(struct drm_plane_state *state,
6926                                        struct drm_crtc_state *new_crtc_state)
6927 {
6928         struct drm_framebuffer *fb = state->fb;
6929         int min_downscale, max_upscale;
6930         int min_scale = 0;
6931         int max_scale = INT_MAX;
6932
6933         /* Plane enabled? Validate viewport and get scaling factors from plane caps. */
6934         if (fb && state->crtc) {
6935                 /* Validate viewport to cover the case when only the position changes */
6936                 if (state->plane->type != DRM_PLANE_TYPE_CURSOR) {
6937                         int viewport_width = state->crtc_w;
6938                         int viewport_height = state->crtc_h;
6939
6940                         if (state->crtc_x < 0)
6941                                 viewport_width += state->crtc_x;
6942                         else if (state->crtc_x + state->crtc_w > new_crtc_state->mode.crtc_hdisplay)
6943                                 viewport_width = new_crtc_state->mode.crtc_hdisplay - state->crtc_x;
6944
6945                         if (state->crtc_y < 0)
6946                                 viewport_height += state->crtc_y;
6947                         else if (state->crtc_y + state->crtc_h > new_crtc_state->mode.crtc_vdisplay)
6948                                 viewport_height = new_crtc_state->mode.crtc_vdisplay - state->crtc_y;
6949
6950                         if (viewport_width < 0 || viewport_height < 0) {
6951                                 DRM_DEBUG_ATOMIC("Plane completely outside of screen\n");
6952                                 return -EINVAL;
6953                         } else if (viewport_width < MIN_VIEWPORT_SIZE*2) { /* x2 for width is because of pipe-split. */
6954                                 DRM_DEBUG_ATOMIC("Viewport width %d smaller than %d\n", viewport_width, MIN_VIEWPORT_SIZE*2);
6955                                 return -EINVAL;
6956                         } else if (viewport_height < MIN_VIEWPORT_SIZE) {
6957                                 DRM_DEBUG_ATOMIC("Viewport height %d smaller than %d\n", viewport_height, MIN_VIEWPORT_SIZE);
6958                                 return -EINVAL;
6959                         }
6960
6961                 }
6962
6963                 /* Get min/max allowed scaling factors from plane caps. */
6964                 get_min_max_dc_plane_scaling(state->crtc->dev, fb,
6965                                              &min_downscale, &max_upscale);
6966                 /*
6967                  * Convert to drm convention: 16.16 fixed point, instead of dc's
6968                  * 1.0 == 1000. Also drm scaling is src/dst instead of dc's
6969                  * dst/src, so min_scale = 1.0 / max_upscale, etc.
6970                  */
6971                 min_scale = (1000 << 16) / max_upscale;
6972                 max_scale = (1000 << 16) / min_downscale;
6973         }
6974
6975         return drm_atomic_helper_check_plane_state(
6976                 state, new_crtc_state, min_scale, max_scale, true, true);
6977 }
6978
6979 static int dm_plane_atomic_check(struct drm_plane *plane,
6980                                  struct drm_atomic_state *state)
6981 {
6982         struct drm_plane_state *new_plane_state = drm_atomic_get_new_plane_state(state,
6983                                                                                  plane);
6984         struct amdgpu_device *adev = drm_to_adev(plane->dev);
6985         struct dc *dc = adev->dm.dc;
6986         struct dm_plane_state *dm_plane_state;
6987         struct dc_scaling_info scaling_info;
6988         struct drm_crtc_state *new_crtc_state;
6989         int ret;
6990
6991         trace_amdgpu_dm_plane_atomic_check(new_plane_state);
6992
6993         dm_plane_state = to_dm_plane_state(new_plane_state);
6994
6995         if (!dm_plane_state->dc_state)
6996                 return 0;
6997
6998         new_crtc_state =
6999                 drm_atomic_get_new_crtc_state(state,
7000                                               new_plane_state->crtc);
7001         if (!new_crtc_state)
7002                 return -EINVAL;
7003
7004         ret = dm_plane_helper_check_state(new_plane_state, new_crtc_state);
7005         if (ret)
7006                 return ret;
7007
7008         ret = fill_dc_scaling_info(new_plane_state, &scaling_info);
7009         if (ret)
7010                 return ret;
7011
7012         if (dc_validate_plane(dc, dm_plane_state->dc_state) == DC_OK)
7013                 return 0;
7014
7015         return -EINVAL;
7016 }
7017
7018 static int dm_plane_atomic_async_check(struct drm_plane *plane,
7019                                        struct drm_atomic_state *state)
7020 {
7021         /* Only support async updates on cursor planes. */
7022         if (plane->type != DRM_PLANE_TYPE_CURSOR)
7023                 return -EINVAL;
7024
7025         return 0;
7026 }
7027
7028 static void dm_plane_atomic_async_update(struct drm_plane *plane,
7029                                          struct drm_atomic_state *state)
7030 {
7031         struct drm_plane_state *new_state = drm_atomic_get_new_plane_state(state,
7032                                                                            plane);
7033         struct drm_plane_state *old_state =
7034                 drm_atomic_get_old_plane_state(state, plane);
7035
7036         trace_amdgpu_dm_atomic_update_cursor(new_state);
7037
7038         swap(plane->state->fb, new_state->fb);
7039
7040         plane->state->src_x = new_state->src_x;
7041         plane->state->src_y = new_state->src_y;
7042         plane->state->src_w = new_state->src_w;
7043         plane->state->src_h = new_state->src_h;
7044         plane->state->crtc_x = new_state->crtc_x;
7045         plane->state->crtc_y = new_state->crtc_y;
7046         plane->state->crtc_w = new_state->crtc_w;
7047         plane->state->crtc_h = new_state->crtc_h;
7048
7049         handle_cursor_update(plane, old_state);
7050 }
7051
7052 static const struct drm_plane_helper_funcs dm_plane_helper_funcs = {
7053         .prepare_fb = dm_plane_helper_prepare_fb,
7054         .cleanup_fb = dm_plane_helper_cleanup_fb,
7055         .atomic_check = dm_plane_atomic_check,
7056         .atomic_async_check = dm_plane_atomic_async_check,
7057         .atomic_async_update = dm_plane_atomic_async_update
7058 };
7059
7060 /*
7061  * TODO: these are currently initialized to RGB formats only.
7062  * For future use cases we should either initialize them dynamically based on
7063  * plane capabilities, or initialize this array to all formats, so the internal
7064  * drm check will succeed, and let DC implement the proper check.
7065  */
7066 static const uint32_t rgb_formats[] = {
7067         DRM_FORMAT_XRGB8888,
7068         DRM_FORMAT_ARGB8888,
7069         DRM_FORMAT_RGBA8888,
7070         DRM_FORMAT_XRGB2101010,
7071         DRM_FORMAT_XBGR2101010,
7072         DRM_FORMAT_ARGB2101010,
7073         DRM_FORMAT_ABGR2101010,
7074         DRM_FORMAT_XBGR8888,
7075         DRM_FORMAT_ABGR8888,
7076         DRM_FORMAT_RGB565,
7077 };
7078
7079 static const uint32_t overlay_formats[] = {
7080         DRM_FORMAT_XRGB8888,
7081         DRM_FORMAT_ARGB8888,
7082         DRM_FORMAT_RGBA8888,
7083         DRM_FORMAT_XBGR8888,
7084         DRM_FORMAT_ABGR8888,
7085         DRM_FORMAT_RGB565
7086 };
7087
7088 static const u32 cursor_formats[] = {
7089         DRM_FORMAT_ARGB8888
7090 };
7091
7092 static int get_plane_formats(const struct drm_plane *plane,
7093                              const struct dc_plane_cap *plane_cap,
7094                              uint32_t *formats, int max_formats)
7095 {
7096         int i, num_formats = 0;
7097
7098         /*
7099          * TODO: Query support for each group of formats directly from
7100          * DC plane caps. This will require adding more formats to the
7101          * caps list.
7102          */
7103
7104         switch (plane->type) {
7105         case DRM_PLANE_TYPE_PRIMARY:
7106                 for (i = 0; i < ARRAY_SIZE(rgb_formats); ++i) {
7107                         if (num_formats >= max_formats)
7108                                 break;
7109
7110                         formats[num_formats++] = rgb_formats[i];
7111                 }
7112
7113                 if (plane_cap && plane_cap->pixel_format_support.nv12)
7114                         formats[num_formats++] = DRM_FORMAT_NV12;
7115                 if (plane_cap && plane_cap->pixel_format_support.p010)
7116                         formats[num_formats++] = DRM_FORMAT_P010;
7117                 if (plane_cap && plane_cap->pixel_format_support.fp16) {
7118                         formats[num_formats++] = DRM_FORMAT_XRGB16161616F;
7119                         formats[num_formats++] = DRM_FORMAT_ARGB16161616F;
7120                         formats[num_formats++] = DRM_FORMAT_XBGR16161616F;
7121                         formats[num_formats++] = DRM_FORMAT_ABGR16161616F;
7122                 }
7123                 break;
7124
7125         case DRM_PLANE_TYPE_OVERLAY:
7126                 for (i = 0; i < ARRAY_SIZE(overlay_formats); ++i) {
7127                         if (num_formats >= max_formats)
7128                                 break;
7129
7130                         formats[num_formats++] = overlay_formats[i];
7131                 }
7132                 break;
7133
7134         case DRM_PLANE_TYPE_CURSOR:
7135                 for (i = 0; i < ARRAY_SIZE(cursor_formats); ++i) {
7136                         if (num_formats >= max_formats)
7137                                 break;
7138
7139                         formats[num_formats++] = cursor_formats[i];
7140                 }
7141                 break;
7142         }
7143
7144         return num_formats;
7145 }
7146
7147 static int amdgpu_dm_plane_init(struct amdgpu_display_manager *dm,
7148                                 struct drm_plane *plane,
7149                                 unsigned long possible_crtcs,
7150                                 const struct dc_plane_cap *plane_cap)
7151 {
7152         uint32_t formats[32];
7153         int num_formats;
7154         int res;
7155         unsigned int supported_rotations;
7156         uint64_t *modifiers = NULL;
7157
7158         num_formats = get_plane_formats(plane, plane_cap, formats,
7159                                         ARRAY_SIZE(formats));
7160
7161         res = get_plane_modifiers(dm->adev, plane->type, &modifiers);
7162         if (res)
7163                 return res;
7164
7165         res = drm_universal_plane_init(adev_to_drm(dm->adev), plane, possible_crtcs,
7166                                        &dm_plane_funcs, formats, num_formats,
7167                                        modifiers, plane->type, NULL);
7168         kfree(modifiers);
7169         if (res)
7170                 return res;
7171
7172         if (plane->type == DRM_PLANE_TYPE_OVERLAY &&
7173             plane_cap && plane_cap->per_pixel_alpha) {
7174                 unsigned int blend_caps = BIT(DRM_MODE_BLEND_PIXEL_NONE) |
7175                                           BIT(DRM_MODE_BLEND_PREMULTI);
7176
7177                 drm_plane_create_alpha_property(plane);
7178                 drm_plane_create_blend_mode_property(plane, blend_caps);
7179         }
7180
7181         if (plane->type == DRM_PLANE_TYPE_PRIMARY &&
7182             plane_cap &&
7183             (plane_cap->pixel_format_support.nv12 ||
7184              plane_cap->pixel_format_support.p010)) {
7185                 /* This only affects YUV formats. */
7186                 drm_plane_create_color_properties(
7187                         plane,
7188                         BIT(DRM_COLOR_YCBCR_BT601) |
7189                         BIT(DRM_COLOR_YCBCR_BT709) |
7190                         BIT(DRM_COLOR_YCBCR_BT2020),
7191                         BIT(DRM_COLOR_YCBCR_LIMITED_RANGE) |
7192                         BIT(DRM_COLOR_YCBCR_FULL_RANGE),
7193                         DRM_COLOR_YCBCR_BT709, DRM_COLOR_YCBCR_LIMITED_RANGE);
7194         }
7195
7196         supported_rotations =
7197                 DRM_MODE_ROTATE_0 | DRM_MODE_ROTATE_90 |
7198                 DRM_MODE_ROTATE_180 | DRM_MODE_ROTATE_270;
7199
7200         if (dm->adev->asic_type >= CHIP_BONAIRE &&
7201             plane->type != DRM_PLANE_TYPE_CURSOR)
7202                 drm_plane_create_rotation_property(plane, DRM_MODE_ROTATE_0,
7203                                                    supported_rotations);
7204
7205         drm_plane_helper_add(plane, &dm_plane_helper_funcs);
7206
7207         /* Create (reset) the plane state */
7208         if (plane->funcs->reset)
7209                 plane->funcs->reset(plane);
7210
7211         return 0;
7212 }
7213
7214 static int amdgpu_dm_crtc_init(struct amdgpu_display_manager *dm,
7215                                struct drm_plane *plane,
7216                                uint32_t crtc_index)
7217 {
7218         struct amdgpu_crtc *acrtc = NULL;
7219         struct drm_plane *cursor_plane;
7220
7221         int res = -ENOMEM;
7222
7223         cursor_plane = kzalloc(sizeof(*cursor_plane), GFP_KERNEL);
7224         if (!cursor_plane)
7225                 goto fail;
7226
7227         cursor_plane->type = DRM_PLANE_TYPE_CURSOR;
7228         res = amdgpu_dm_plane_init(dm, cursor_plane, 0, NULL);
             if (res)
                     goto fail;
7229
7230         acrtc = kzalloc(sizeof(struct amdgpu_crtc), GFP_KERNEL);
7231         if (!acrtc)
7232                 goto fail;
7233
7234         res = drm_crtc_init_with_planes(
7235                         dm->ddev,
7236                         &acrtc->base,
7237                         plane,
7238                         cursor_plane,
7239                         &amdgpu_dm_crtc_funcs, NULL);
7240
7241         if (res)
7242                 goto fail;
7243
7244         drm_crtc_helper_add(&acrtc->base, &amdgpu_dm_crtc_helper_funcs);
7245
7246         /* Create (reset) the CRTC state */
7247         if (acrtc->base.funcs->reset)
7248                 acrtc->base.funcs->reset(&acrtc->base);
7249
7250         acrtc->max_cursor_width = dm->adev->dm.dc->caps.max_cursor_size;
7251         acrtc->max_cursor_height = dm->adev->dm.dc->caps.max_cursor_size;
7252
7253         acrtc->crtc_id = crtc_index;
7254         acrtc->base.enabled = false;
7255         acrtc->otg_inst = -1;
7256
7257         dm->adev->mode_info.crtcs[crtc_index] = acrtc;
7258         drm_crtc_enable_color_mgmt(&acrtc->base, MAX_COLOR_LUT_ENTRIES,
7259                                    true, MAX_COLOR_LUT_ENTRIES);
7260         drm_mode_crtc_set_gamma_size(&acrtc->base, MAX_COLOR_LEGACY_LUT_ENTRIES);
7261
7262         return 0;
7263
7264 fail:
7265         kfree(acrtc);
7266         kfree(cursor_plane);
7267         return res;
7268 }
7269
7270
7271 static int to_drm_connector_type(enum signal_type st)
7272 {
7273         switch (st) {
7274         case SIGNAL_TYPE_HDMI_TYPE_A:
7275                 return DRM_MODE_CONNECTOR_HDMIA;
7276         case SIGNAL_TYPE_EDP:
7277                 return DRM_MODE_CONNECTOR_eDP;
7278         case SIGNAL_TYPE_LVDS:
7279                 return DRM_MODE_CONNECTOR_LVDS;
7280         case SIGNAL_TYPE_RGB:
7281                 return DRM_MODE_CONNECTOR_VGA;
7282         case SIGNAL_TYPE_DISPLAY_PORT:
7283         case SIGNAL_TYPE_DISPLAY_PORT_MST:
7284                 return DRM_MODE_CONNECTOR_DisplayPort;
7285         case SIGNAL_TYPE_DVI_DUAL_LINK:
7286         case SIGNAL_TYPE_DVI_SINGLE_LINK:
7287                 return DRM_MODE_CONNECTOR_DVID;
7288         case SIGNAL_TYPE_VIRTUAL:
7289                 return DRM_MODE_CONNECTOR_VIRTUAL;
7290
7291         default:
7292                 return DRM_MODE_CONNECTOR_Unknown;
7293         }
7294 }
7295
7296 static struct drm_encoder *amdgpu_dm_connector_to_encoder(struct drm_connector *connector)
7297 {
7298         struct drm_encoder *encoder;
7299
7300         /* There is only one encoder per connector */
7301         drm_connector_for_each_possible_encoder(connector, encoder)
7302                 return encoder;
7303
7304         return NULL;
7305 }
7306
7307 static void amdgpu_dm_get_native_mode(struct drm_connector *connector)
7308 {
7309         struct drm_encoder *encoder;
7310         struct amdgpu_encoder *amdgpu_encoder;
7311
7312         encoder = amdgpu_dm_connector_to_encoder(connector);
7313
7314         if (encoder == NULL)
7315                 return;
7316
7317         amdgpu_encoder = to_amdgpu_encoder(encoder);
7318
7319         amdgpu_encoder->native_mode.clock = 0;
7320
7321         if (!list_empty(&connector->probed_modes)) {
7322                 struct drm_display_mode *preferred_mode = NULL;
7323
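                /*
                 * probed_modes was sorted by amdgpu_dm_connector_ddc_get_modes(),
                 * so a preferred mode, if any, sits at the head of the list;
                 * the unconditional break below only inspects the first entry.
                 */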
7324                 list_for_each_entry(preferred_mode,
7325                                     &connector->probed_modes,
7326                                     head) {
7327                         if (preferred_mode->type & DRM_MODE_TYPE_PREFERRED)
7328                                 amdgpu_encoder->native_mode = *preferred_mode;
7329
7330                         break;
7331                 }
7332
7333         }
7334 }
7335
7336 static struct drm_display_mode *
7337 amdgpu_dm_create_common_mode(struct drm_encoder *encoder,
7338                              char *name,
7339                              int hdisplay, int vdisplay)
7340 {
7341         struct drm_device *dev = encoder->dev;
7342         struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
7343         struct drm_display_mode *mode = NULL;
7344         struct drm_display_mode *native_mode = &amdgpu_encoder->native_mode;
7345
7346         mode = drm_mode_duplicate(dev, native_mode);
7347
7348         if (mode == NULL)
7349                 return NULL;
7350
7351         mode->hdisplay = hdisplay;
7352         mode->vdisplay = vdisplay;
7353         mode->type &= ~DRM_MODE_TYPE_PREFERRED;
7354         strscpy(mode->name, name, DRM_DISPLAY_MODE_LEN);
7355
7356         return mode;
7358 }
7359
7360 static void amdgpu_dm_connector_add_common_modes(struct drm_encoder *encoder,
7361                                                  struct drm_connector *connector)
7362 {
7363         struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
7364         struct drm_display_mode *mode = NULL;
7365         struct drm_display_mode *native_mode = &amdgpu_encoder->native_mode;
7366         struct amdgpu_dm_connector *amdgpu_dm_connector =
7367                                 to_amdgpu_dm_connector(connector);
7368         int i;
7369         int n;
7370         struct mode_size {
7371                 char name[DRM_DISPLAY_MODE_LEN];
7372                 int w;
7373                 int h;
7374         } common_modes[] = {
7375                 {  "640x480",  640,  480},
7376                 {  "800x600",  800,  600},
7377                 { "1024x768", 1024,  768},
7378                 { "1280x720", 1280,  720},
7379                 { "1280x800", 1280,  800},
7380                 {"1280x1024", 1280, 1024},
7381                 { "1440x900", 1440,  900},
7382                 {"1680x1050", 1680, 1050},
7383                 {"1600x1200", 1600, 1200},
7384                 {"1920x1080", 1920, 1080},
7385                 {"1920x1200", 1920, 1200}
7386         };
7387
7388         n = ARRAY_SIZE(common_modes);
7389
7390         for (i = 0; i < n; i++) {
7391                 struct drm_display_mode *curmode = NULL;
7392                 bool mode_existed = false;
7393
7394                 if (common_modes[i].w > native_mode->hdisplay ||
7395                     common_modes[i].h > native_mode->vdisplay ||
7396                    (common_modes[i].w == native_mode->hdisplay &&
7397                     common_modes[i].h == native_mode->vdisplay))
7398                         continue;
7399
7400                 list_for_each_entry(curmode, &connector->probed_modes, head) {
7401                         if (common_modes[i].w == curmode->hdisplay &&
7402                             common_modes[i].h == curmode->vdisplay) {
7403                                 mode_existed = true;
7404                                 break;
7405                         }
7406                 }
7407
7408                 if (mode_existed)
7409                         continue;
7410
7411                 mode = amdgpu_dm_create_common_mode(encoder,
7412                                 common_modes[i].name, common_modes[i].w,
7413                                 common_modes[i].h);
                     if (!mode)
                             continue;
7414                 drm_mode_probed_add(connector, mode);
7415                 amdgpu_dm_connector->num_modes++;
7416         }
7417 }
7418
7419 static void amdgpu_dm_connector_ddc_get_modes(struct drm_connector *connector,
7420                                               struct edid *edid)
7421 {
7422         struct amdgpu_dm_connector *amdgpu_dm_connector =
7423                         to_amdgpu_dm_connector(connector);
7424
7425         if (edid) {
7426                 /* empty probed_modes */
7427                 INIT_LIST_HEAD(&connector->probed_modes);
7428                 amdgpu_dm_connector->num_modes =
7429                                 drm_add_edid_modes(connector, edid);
7430
7431                 /* Sort the probed modes before calling
7432                  * amdgpu_dm_get_native_mode(), since an EDID can contain
7433                  * more than one preferred mode, and a preferred mode of
7434                  * higher resolution may appear later in the probed list.
7435                  * For example, the base EDID preferred timing may be
7436                  * 3840x2160 while a DisplayID extension block later
7437                  * advertises a preferred 4096x2160 mode.
7438                  */
7439                 drm_mode_sort(&connector->probed_modes);
7440                 amdgpu_dm_get_native_mode(connector);
7441
7442                 /* Freesync capabilities are reset by calling
7443                  * drm_add_edid_modes() and need to be
7444                  * restored here.
7445                  */
7446                 amdgpu_dm_update_freesync_caps(connector, edid);
7447         } else {
7448                 amdgpu_dm_connector->num_modes = 0;
7449         }
7450 }
7451
7452 static bool is_duplicate_mode(struct amdgpu_dm_connector *aconnector,
7453                               struct drm_display_mode *mode)
7454 {
7455         struct drm_display_mode *m;
7456
7457         list_for_each_entry(m, &aconnector->base.probed_modes, head) {
7458                 if (drm_mode_equal(m, mode))
7459                         return true;
7460         }
7461
7462         return false;
7463 }
7464
7465 static uint add_fs_modes(struct amdgpu_dm_connector *aconnector)
7466 {
7467         const struct drm_display_mode *m;
7468         struct drm_display_mode *new_mode;
7469         uint i;
7470         uint32_t new_modes_count = 0;
7471
7472         /* Standard FPS values
7473          *
7474          * 23.976   - TV/NTSC
7475          * 24       - Cinema
7476          * 25       - TV/PAL
7477          * 29.97    - TV/NTSC
7478          * 30       - TV/NTSC
7479          * 48       - Cinema HFR
7480          * 50       - TV/PAL
7481          * 60       - Commonly used
7482          * 48,72,96 - Multiples of 24
7483          */
7484         const uint32_t common_rates[] = { 23976, 24000, 25000, 29970, 30000,
7485                                          48000, 50000, 60000, 72000, 96000 };
7486
7487         /*
7488          * Find mode with highest refresh rate with the same resolution
7489          * as the preferred mode. Some monitors report a preferred mode
7490          * with lower resolution than the highest refresh rate supported.
7491          */
7492
7493         m = get_highest_refresh_rate_mode(aconnector, true);
7494         if (!m)
7495                 return 0;
7496
7497         for (i = 0; i < ARRAY_SIZE(common_rates); i++) {
7498                 uint64_t target_vtotal, target_vtotal_diff;
7499                 uint64_t num, den;
7500
7501                 if (drm_mode_vrefresh(m) * 1000 < common_rates[i])
7502                         continue;
7503
7504                 if (common_rates[i] < aconnector->min_vfreq * 1000 ||
7505                     common_rates[i] > aconnector->max_vfreq * 1000)
7506                         continue;
7507
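                /*
                 * vrefresh = clock / (htotal * vtotal), so the vtotal that
                 * yields the target rate is
                 * clock * 1000 * 1000 / (rate_mHz * htotal). E.g. a
                 * 148500 kHz, htotal 2200 mode targeting 60000 mHz gives
                 * 148500000000 / 132000000 = 1125, the familiar 1080p60
                 * vtotal.
                 */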
7508                 num = (unsigned long long)m->clock * 1000 * 1000;
7509                 den = common_rates[i] * (unsigned long long)m->htotal;
7510                 target_vtotal = div_u64(num, den);
7511                 target_vtotal_diff = target_vtotal - m->vtotal;
7512
7513                 /* Check for illegal modes */
7514                 if (m->vsync_start + target_vtotal_diff < m->vdisplay ||
7515                     m->vsync_end + target_vtotal_diff < m->vsync_start ||
7516                     m->vtotal + target_vtotal_diff < m->vsync_end)
7517                         continue;
7518
7519                 new_mode = drm_mode_duplicate(aconnector->base.dev, m);
7520                 if (!new_mode)
7521                         goto out;
7522
7523                 new_mode->vtotal += (u16)target_vtotal_diff;
7524                 new_mode->vsync_start += (u16)target_vtotal_diff;
7525                 new_mode->vsync_end += (u16)target_vtotal_diff;
7526                 new_mode->type &= ~DRM_MODE_TYPE_PREFERRED;
7527                 new_mode->type |= DRM_MODE_TYPE_DRIVER;
7528
7529                 if (!is_duplicate_mode(aconnector, new_mode)) {
7530                         drm_mode_probed_add(&aconnector->base, new_mode);
7531                         new_modes_count += 1;
7532                 } else {
7533                         drm_mode_destroy(aconnector->base.dev, new_mode);
                     }
7534         }
7535  out:
7536         return new_modes_count;
7537 }
7538
7539 static void amdgpu_dm_connector_add_freesync_modes(struct drm_connector *connector,
7540                                                    struct edid *edid)
7541 {
7542         struct amdgpu_dm_connector *amdgpu_dm_connector =
7543                 to_amdgpu_dm_connector(connector);
7544
7545         if (!(amdgpu_freesync_vid_mode && edid))
7546                 return;
7547
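        /*
         * Only synthesize fixed-rate modes when the panel reports a usable
         * VRR window (more than 10 Hz between min and max refresh).
         */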
7548         if (amdgpu_dm_connector->max_vfreq - amdgpu_dm_connector->min_vfreq > 10)
7549                 amdgpu_dm_connector->num_modes +=
7550                         add_fs_modes(amdgpu_dm_connector);
7551 }
7552
7553 static int amdgpu_dm_connector_get_modes(struct drm_connector *connector)
7554 {
7555         struct amdgpu_dm_connector *amdgpu_dm_connector =
7556                         to_amdgpu_dm_connector(connector);
7557         struct drm_encoder *encoder;
7558         struct edid *edid = amdgpu_dm_connector->edid;
7559
7560         encoder = amdgpu_dm_connector_to_encoder(connector);
7561
7562         if (!drm_edid_is_valid(edid)) {
7563                 amdgpu_dm_connector->num_modes =
7564                                 drm_add_modes_noedid(connector, 640, 480);
7565         } else {
7566                 amdgpu_dm_connector_ddc_get_modes(connector, edid);
7567                 amdgpu_dm_connector_add_common_modes(encoder, connector);
7568                 amdgpu_dm_connector_add_freesync_modes(connector, edid);
7569         }
7570         amdgpu_dm_fbc_init(connector);
7571
7572         return amdgpu_dm_connector->num_modes;
7573 }
7574
7575 void amdgpu_dm_connector_init_helper(struct amdgpu_display_manager *dm,
7576                                      struct amdgpu_dm_connector *aconnector,
7577                                      int connector_type,
7578                                      struct dc_link *link,
7579                                      int link_index)
7580 {
7581         struct amdgpu_device *adev = drm_to_adev(dm->ddev);
7582
7583         /*
7584          * Some of the properties below require access to state, like bpc.
7585          * Allocate some default initial connector state with our reset helper.
7586          */
7587         if (aconnector->base.funcs->reset)
7588                 aconnector->base.funcs->reset(&aconnector->base);
7589
7590         aconnector->connector_id = link_index;
7591         aconnector->dc_link = link;
7592         aconnector->base.interlace_allowed = false;
7593         aconnector->base.doublescan_allowed = false;
7594         aconnector->base.stereo_allowed = false;
7595         aconnector->base.dpms = DRM_MODE_DPMS_OFF;
7596         aconnector->hpd.hpd = AMDGPU_HPD_NONE; /* not used */
7597         aconnector->audio_inst = -1;
7598         mutex_init(&aconnector->hpd_lock);
7599
7600         /*
7601          * Configure HPD hot-plug support. connector->polled defaults to 0,
7602          * which means HPD hot plug is not supported.
7603          */
7604         switch (connector_type) {
7605         case DRM_MODE_CONNECTOR_HDMIA:
7606                 aconnector->base.polled = DRM_CONNECTOR_POLL_HPD;
7607                 aconnector->base.ycbcr_420_allowed =
7608                         link->link_enc->features.hdmi_ycbcr420_supported ? true : false;
7609                 break;
7610         case DRM_MODE_CONNECTOR_DisplayPort:
7611                 aconnector->base.polled = DRM_CONNECTOR_POLL_HPD;
7612                 aconnector->base.ycbcr_420_allowed =
7613                         link->link_enc->features.dp_ycbcr420_supported ? true : false;
7614                 break;
7615         case DRM_MODE_CONNECTOR_DVID:
7616                 aconnector->base.polled = DRM_CONNECTOR_POLL_HPD;
7617                 break;
7618         default:
7619                 break;
7620         }
7621
7622         drm_object_attach_property(&aconnector->base.base,
7623                                 dm->ddev->mode_config.scaling_mode_property,
7624                                 DRM_MODE_SCALE_NONE);
7625
7626         drm_object_attach_property(&aconnector->base.base,
7627                                 adev->mode_info.underscan_property,
7628                                 UNDERSCAN_OFF);
7629         drm_object_attach_property(&aconnector->base.base,
7630                                 adev->mode_info.underscan_hborder_property,
7631                                 0);
7632         drm_object_attach_property(&aconnector->base.base,
7633                                 adev->mode_info.underscan_vborder_property,
7634                                 0);
7635
7636         if (!aconnector->mst_port)
7637                 drm_connector_attach_max_bpc_property(&aconnector->base, 8, 16);
7638
7639         /* This defaults to the max in the range, but we want 8 bpc for non-eDP. */
7640         aconnector->base.state->max_bpc = (connector_type == DRM_MODE_CONNECTOR_eDP) ? 16 : 8;
7641         aconnector->base.state->max_requested_bpc = aconnector->base.state->max_bpc;
7642
7643         if (connector_type == DRM_MODE_CONNECTOR_eDP &&
7644             (dc_is_dmcu_initialized(adev->dm.dc) || adev->dm.dc->ctx->dmub_srv)) {
7645                 drm_object_attach_property(&aconnector->base.base,
7646                                 adev->mode_info.abm_level_property, 0);
7647         }
7648
7649         if (connector_type == DRM_MODE_CONNECTOR_HDMIA ||
7650             connector_type == DRM_MODE_CONNECTOR_DisplayPort ||
7651             connector_type == DRM_MODE_CONNECTOR_eDP) {
7652                 drm_object_attach_property(
7653                         &aconnector->base.base,
7654                         dm->ddev->mode_config.hdr_output_metadata_property, 0);
7655
7656                 if (!aconnector->mst_port)
7657                         drm_connector_attach_vrr_capable_property(&aconnector->base);
7658
7659 #ifdef CONFIG_DRM_AMD_DC_HDCP
7660                 if (adev->dm.hdcp_workqueue)
7661                         drm_connector_attach_content_protection_property(&aconnector->base, true);
7662 #endif
7663         }
7664 }
7665
7666 static int amdgpu_dm_i2c_xfer(struct i2c_adapter *i2c_adap,
7667                               struct i2c_msg *msgs, int num)
7668 {
7669         struct amdgpu_i2c_adapter *i2c = i2c_get_adapdata(i2c_adap);
7670         struct ddc_service *ddc_service = i2c->ddc_service;
7671         struct i2c_command cmd;
7672         int i;
7673         int result = -EIO;
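        /*
         * Per the i2c_algorithm contract, master_xfer returns the number of
         * messages transferred on success or a negative errno; start from
         * -EIO so a failed DC submission reads back as an I/O error.
         */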
7674
7675         cmd.payloads = kcalloc(num, sizeof(struct i2c_payload), GFP_KERNEL);
7676
7677         if (!cmd.payloads)
7678                 return result;
7679
7680         cmd.number_of_payloads = num;
7681         cmd.engine = I2C_COMMAND_ENGINE_DEFAULT;
7682         cmd.speed = 100;
7683
7684         for (i = 0; i < num; i++) {
7685                 cmd.payloads[i].write = !(msgs[i].flags & I2C_M_RD);
7686                 cmd.payloads[i].address = msgs[i].addr;
7687                 cmd.payloads[i].length = msgs[i].len;
7688                 cmd.payloads[i].data = msgs[i].buf;
7689         }
7690
7691         if (dc_submit_i2c(
7692                         ddc_service->ctx->dc,
7693                         ddc_service->ddc_pin->hw_info.ddc_channel,
7694                         &cmd))
7695                 result = num;
7696
7697         kfree(cmd.payloads);
7698         return result;
7699 }
7700
7701 static u32 amdgpu_dm_i2c_func(struct i2c_adapter *adap)
7702 {
7703         return I2C_FUNC_I2C | I2C_FUNC_SMBUS_EMUL;
7704 }
7705
7706 static const struct i2c_algorithm amdgpu_dm_i2c_algo = {
7707         .master_xfer = amdgpu_dm_i2c_xfer,
7708         .functionality = amdgpu_dm_i2c_func,
7709 };
7710
7711 static struct amdgpu_i2c_adapter *
7712 create_i2c(struct ddc_service *ddc_service,
7713            int link_index,
7714            int *res)
7715 {
7716         struct amdgpu_device *adev = ddc_service->ctx->driver_context;
7717         struct amdgpu_i2c_adapter *i2c;
7718
7719         i2c = kzalloc(sizeof(struct amdgpu_i2c_adapter), GFP_KERNEL);
7720         if (!i2c)
7721                 return NULL;
7722         i2c->base.owner = THIS_MODULE;
7723         i2c->base.class = I2C_CLASS_DDC;
7724         i2c->base.dev.parent = &adev->pdev->dev;
7725         i2c->base.algo = &amdgpu_dm_i2c_algo;
7726         snprintf(i2c->base.name, sizeof(i2c->base.name), "AMDGPU DM i2c hw bus %d", link_index);
7727         i2c_set_adapdata(&i2c->base, i2c);
7728         i2c->ddc_service = ddc_service;
7729         i2c->ddc_service->ddc_pin->hw_info.ddc_channel = link_index;
7730
7731         return i2c;
7732 }
7733
7735 /*
7736  * Note: this function assumes that dc_link_detect() was called for the
7737  * dc_link which will be represented by this aconnector.
7738  */
7739 static int amdgpu_dm_connector_init(struct amdgpu_display_manager *dm,
7740                                     struct amdgpu_dm_connector *aconnector,
7741                                     uint32_t link_index,
7742                                     struct amdgpu_encoder *aencoder)
7743 {
7744         int res = 0;
7745         int connector_type;
7746         struct dc *dc = dm->dc;
7747         struct dc_link *link = dc_get_link_at_index(dc, link_index);
7748         struct amdgpu_i2c_adapter *i2c;
7749
7750         link->priv = aconnector;
7751
7752         DRM_DEBUG_DRIVER("%s()\n", __func__);
7753
7754         i2c = create_i2c(link->ddc, link->link_index, &res);
7755         if (!i2c) {
7756                 DRM_ERROR("Failed to create i2c adapter data\n");
7757                 return -ENOMEM;
7758         }
7759
7760         aconnector->i2c = i2c;
7761         res = i2c_add_adapter(&i2c->base);
7762
7763         if (res) {
7764                 DRM_ERROR("Failed to register hw i2c %d\n", link->link_index);
7765                 goto out_free;
7766         }
7767
7768         connector_type = to_drm_connector_type(link->connector_signal);
7769
7770         res = drm_connector_init_with_ddc(
7771                         dm->ddev,
7772                         &aconnector->base,
7773                         &amdgpu_dm_connector_funcs,
7774                         connector_type,
7775                         &i2c->base);
7776
7777         if (res) {
7778                 DRM_ERROR("connector_init failed\n");
7779                 aconnector->connector_id = -1;
7780                 goto out_free;
7781         }
7782
7783         drm_connector_helper_add(
7784                         &aconnector->base,
7785                         &amdgpu_dm_connector_helper_funcs);
7786
7787         amdgpu_dm_connector_init_helper(
7788                 dm,
7789                 aconnector,
7790                 connector_type,
7791                 link,
7792                 link_index);
7793
7794         drm_connector_attach_encoder(
7795                 &aconnector->base, &aencoder->base);
7796
7797         if (connector_type == DRM_MODE_CONNECTOR_DisplayPort
7798                 || connector_type == DRM_MODE_CONNECTOR_eDP)
7799                 amdgpu_dm_initialize_dp_connector(dm, aconnector, link->link_index);
7800
7801 out_free:
7802         if (res) {
7803                 kfree(i2c);
7804                 aconnector->i2c = NULL;
7805         }
7806         return res;
7807 }
7808
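/*
 * Build encoder->possible_crtcs: a bitmask with one bit set per
 * instantiated CRTC, capped at six CRTCs (0x3f).
 */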
7809 int amdgpu_dm_get_encoder_crtc_mask(struct amdgpu_device *adev)
7810 {
7811         switch (adev->mode_info.num_crtc) {
7812         case 1:
7813                 return 0x1;
7814         case 2:
7815                 return 0x3;
7816         case 3:
7817                 return 0x7;
7818         case 4:
7819                 return 0xf;
7820         case 5:
7821                 return 0x1f;
7822         case 6:
7823         default:
7824                 return 0x3f;
7825         }
7826 }
7827
7828 static int amdgpu_dm_encoder_init(struct drm_device *dev,
7829                                   struct amdgpu_encoder *aencoder,
7830                                   uint32_t link_index)
7831 {
7832         struct amdgpu_device *adev = drm_to_adev(dev);
7833
7834         int res = drm_encoder_init(dev,
7835                                    &aencoder->base,
7836                                    &amdgpu_dm_encoder_funcs,
7837                                    DRM_MODE_ENCODER_TMDS,
7838                                    NULL);
7839
7840         aencoder->base.possible_crtcs = amdgpu_dm_get_encoder_crtc_mask(adev);
7841
7842         if (!res)
7843                 aencoder->encoder_id = link_index;
7844         else
7845                 aencoder->encoder_id = -1;
7846
7847         drm_encoder_helper_add(&aencoder->base, &amdgpu_dm_encoder_helper_funcs);
7848
7849         return res;
7850 }
7851
7852 static void manage_dm_interrupts(struct amdgpu_device *adev,
7853                                  struct amdgpu_crtc *acrtc,
7854                                  bool enable)
7855 {
7856         /*
7857          * We have no guarantee that the frontend index maps to the same
7858          * backend index - some even map to more than one.
7859          *
7860          * TODO: Use a different interrupt or check DC itself for the mapping.
7861          */
7862         int irq_type =
7863                 amdgpu_display_crtc_idx_to_irq_type(
7864                         adev,
7865                         acrtc->crtc_id);
7866
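        /*
         * Enable vblank handling before taking the pageflip (and vline0)
         * IRQ references; the disable path tears down in reverse order.
         */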
7867         if (enable) {
7868                 drm_crtc_vblank_on(&acrtc->base);
7869                 amdgpu_irq_get(
7870                         adev,
7871                         &adev->pageflip_irq,
7872                         irq_type);
7873 #if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
7874                 amdgpu_irq_get(
7875                         adev,
7876                         &adev->vline0_irq,
7877                         irq_type);
7878 #endif
7879         } else {
7880 #if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
7881                 amdgpu_irq_put(
7882                         adev,
7883                         &adev->vline0_irq,
7884                         irq_type);
7885 #endif
7886                 amdgpu_irq_put(
7887                         adev,
7888                         &adev->pageflip_irq,
7889                         irq_type);
7890                 drm_crtc_vblank_off(&acrtc->base);
7891         }
7892 }
7893
7894 static void dm_update_pflip_irq_state(struct amdgpu_device *adev,
7895                                       struct amdgpu_crtc *acrtc)
7896 {
7897         int irq_type =
7898                 amdgpu_display_crtc_idx_to_irq_type(adev, acrtc->crtc_id);
7899
7900         /*
7901          * Read the current state of the IRQ and forcibly reapply
7902          * the setting to hardware.
7903          */
7904         amdgpu_irq_update(adev, &adev->pageflip_irq, irq_type);
7905 }
7906
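/*
 * Return true when the scaling mode or underscan configuration differs
 * between the two states; an underscan enable/disable transition only
 * counts as a change when non-zero borders are involved.
 */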
7907 static bool
7908 is_scaling_state_different(const struct dm_connector_state *dm_state,
7909                            const struct dm_connector_state *old_dm_state)
7910 {
7911         if (dm_state->scaling != old_dm_state->scaling)
7912                 return true;
7913         if (!dm_state->underscan_enable && old_dm_state->underscan_enable) {
7914                 if (old_dm_state->underscan_hborder != 0 && old_dm_state->underscan_vborder != 0)
7915                         return true;
7916         } else  if (dm_state->underscan_enable && !old_dm_state->underscan_enable) {
7917                 if (dm_state->underscan_hborder != 0 && dm_state->underscan_vborder != 0)
7918                         return true;
7919         } else if (dm_state->underscan_hborder != old_dm_state->underscan_hborder ||
7920                    dm_state->underscan_vborder != old_dm_state->underscan_vborder)
7921                 return true;
7922         return false;
7923 }
7924
7925 #ifdef CONFIG_DRM_AMD_DC_HDCP
7926 static bool is_content_protection_different(struct drm_connector_state *state,
7927                                             const struct drm_connector_state *old_state,
7928                                             const struct drm_connector *connector, struct hdcp_workqueue *hdcp_w)
7929 {
7930         struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
7931         struct dm_connector_state *dm_con_state = to_dm_connector_state(connector->state);
7932
7933         /* Handle: Type0/1 change */
7934         if (old_state->hdcp_content_type != state->hdcp_content_type &&
7935             state->content_protection != DRM_MODE_CONTENT_PROTECTION_UNDESIRED) {
7936                 state->content_protection = DRM_MODE_CONTENT_PROTECTION_DESIRED;
7937                 return true;
7938         }
7939
7940         /* CP is being re-enabled; ignore this.
7941          *
7942          * Handles:     ENABLED -> DESIRED
7943          */
7944         if (old_state->content_protection == DRM_MODE_CONTENT_PROTECTION_ENABLED &&
7945             state->content_protection == DRM_MODE_CONTENT_PROTECTION_DESIRED) {
7946                 state->content_protection = DRM_MODE_CONTENT_PROTECTION_ENABLED;
7947                 return false;
7948         }
7949
7950         /* S3 resume case: the old state will always be 0 (UNDESIRED) and the restored state will be ENABLED.
7951          *
7952          * Handles:     UNDESIRED -> ENABLED
7953          */
7954         if (old_state->content_protection == DRM_MODE_CONTENT_PROTECTION_UNDESIRED &&
7955             state->content_protection == DRM_MODE_CONTENT_PROTECTION_ENABLED)
7956                 state->content_protection = DRM_MODE_CONTENT_PROTECTION_DESIRED;
7957
7958         /* Check that something is actually connected and enabled; otherwise we would
7959          * start HDCP with nothing connected/enabled (hot-plug, headless S3, DPMS).
7960          *
7961          * Handles:     DESIRED -> DESIRED (Special case)
7962          */
7963         if (dm_con_state->update_hdcp && state->content_protection == DRM_MODE_CONTENT_PROTECTION_DESIRED &&
7964             connector->dpms == DRM_MODE_DPMS_ON && aconnector->dc_sink != NULL) {
7965                 dm_con_state->update_hdcp = false;
7966                 return true;
7967         }
7968
7969         /*
7970          * Handles:     UNDESIRED -> UNDESIRED
7971          *              DESIRED -> DESIRED
7972          *              ENABLED -> ENABLED
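                /*
                 * E.g. max_upscale = 16000 (16x in dc units) gives
                 * min_scale = 65536000 / 16000 = 4096 (0.0625 in 16.16),
                 * and min_downscale = 250 (1/4x) gives
                 * max_scale = 65536000 / 250 = 262144 (4.0 in 16.16).
                 */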
7973          */
7974         if (old_state->content_protection == state->content_protection)
7975                 return false;
7976
7977         /*
7978          * Handles:     UNDESIRED -> DESIRED
7979          *              DESIRED -> UNDESIRED
7980          *              ENABLED -> UNDESIRED
7981          */
7982         if (state->content_protection != DRM_MODE_CONTENT_PROTECTION_ENABLED)
7983                 return true;
7984
7985         /*
7986          * Handles:     DESIRED -> ENABLED
7987          */
7988         return false;
7989 }
7990
7991 #endif

7992 static void remove_stream(struct amdgpu_device *adev,
7993                           struct amdgpu_crtc *acrtc,
7994                           struct dc_stream_state *stream)
7995 {
7996         /* This is the mode-update case: detach the crtc from its OTG and mark it disabled. */
7997
7998         acrtc->otg_inst = -1;
7999         acrtc->enabled = false;
8000 }
8001
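/*
 * Translate the cursor plane's CRTC coordinates into a dc_cursor_position.
 * Negative coordinates are clamped to zero and folded into the hotspot so
 * DC clips the cursor at the screen edge; a cursor entirely off-screen
 * leaves position->enable false.
 */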
8002 static int get_cursor_position(struct drm_plane *plane, struct drm_crtc *crtc,
8003                                struct dc_cursor_position *position)
8004 {
8005         struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
8006         int x, y;
8007         int xorigin = 0, yorigin = 0;
8008
8009         if (!crtc || !plane->state->fb)
8010                 return 0;
8011
8012         if ((plane->state->crtc_w > amdgpu_crtc->max_cursor_width) ||
8013             (plane->state->crtc_h > amdgpu_crtc->max_cursor_height)) {
8014                 DRM_ERROR("%s: bad cursor width or height %d x %d\n",
8015                           __func__,
8016                           plane->state->crtc_w,
8017                           plane->state->crtc_h);
8018                 return -EINVAL;
8019         }
8020
8021         x = plane->state->crtc_x;
8022         y = plane->state->crtc_y;
8023
8024         if (x <= -amdgpu_crtc->max_cursor_width ||
8025             y <= -amdgpu_crtc->max_cursor_height)
8026                 return 0;
8027
8028         if (x < 0) {
8029                 xorigin = min(-x, amdgpu_crtc->max_cursor_width - 1);
8030                 x = 0;
8031         }
8032         if (y < 0) {
8033                 yorigin = min(-y, amdgpu_crtc->max_cursor_height - 1);
8034                 y = 0;
8035         }
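        /*
         * E.g. a 64x64 cursor at crtc_x = -10 becomes x = 0 with
         * x_hotspot = 10, drawing only the visible right-hand 54 columns.
         */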
8036         position->enable = true;
8037         position->translate_by_source = true;
8038         position->x = x;
8039         position->y = y;
8040         position->x_hotspot = xorigin;
8041         position->y_hotspot = yorigin;
8042
8043         return 0;
8044 }
8045
8046 static void handle_cursor_update(struct drm_plane *plane,
8047                                  struct drm_plane_state *old_plane_state)
8048 {
8049         struct amdgpu_device *adev = drm_to_adev(plane->dev);
8050         struct amdgpu_framebuffer *afb = to_amdgpu_framebuffer(plane->state->fb);
8051         struct drm_crtc *crtc = afb ? plane->state->crtc : old_plane_state->crtc;
8052         struct dm_crtc_state *crtc_state = crtc ? to_dm_crtc_state(crtc->state) : NULL;
8053         struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
8054         uint64_t address = afb ? afb->address : 0;
8055         struct dc_cursor_position position = {0};
8056         struct dc_cursor_attributes attributes;
8057         int ret;
8058
8059         if (!plane->state->fb && !old_plane_state->fb)
8060                 return;
8061
8062         DC_LOG_CURSOR("%s: crtc_id=%d with size %d to %d\n",
8063                       __func__,
8064                       amdgpu_crtc->crtc_id,
8065                       plane->state->crtc_w,
8066                       plane->state->crtc_h);
8067
8068         ret = get_cursor_position(plane, crtc, &position);
8069         if (ret)
8070                 return;
8071
8072         if (!position.enable) {
8073                 /* turn off cursor */
8074                 if (crtc_state && crtc_state->stream) {
8075                         mutex_lock(&adev->dm.dc_lock);
8076                         dc_stream_set_cursor_position(crtc_state->stream,
8077                                                       &position);
8078                         mutex_unlock(&adev->dm.dc_lock);
8079                 }
8080                 return;
8081         }
8082
8083         amdgpu_crtc->cursor_width = plane->state->crtc_w;
8084         amdgpu_crtc->cursor_height = plane->state->crtc_h;
8085
8086         memset(&attributes, 0, sizeof(attributes));
8087         attributes.address.high_part = upper_32_bits(address);
8088         attributes.address.low_part  = lower_32_bits(address);
8089         attributes.width             = plane->state->crtc_w;
8090         attributes.height            = plane->state->crtc_h;
8091         attributes.color_format      = CURSOR_MODE_COLOR_PRE_MULTIPLIED_ALPHA;
8092         attributes.rotation_angle    = 0;
8093         attributes.attribute_flags.value = 0;
8094
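        /*
         * base.pitches[] is in bytes; DC takes the cursor pitch in pixels,
         * hence the division by bytes-per-pixel (cpp).
         */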
8095         attributes.pitch = afb->base.pitches[0] / afb->base.format->cpp[0];
8096
8097         if (crtc_state->stream) {
8098                 mutex_lock(&adev->dm.dc_lock);
8099                 if (!dc_stream_set_cursor_attributes(crtc_state->stream,
8100                                                          &attributes))
8101                         DRM_ERROR("DC failed to set cursor attributes\n");
8102
8103                 if (!dc_stream_set_cursor_position(crtc_state->stream,
8104                                                    &position))
8105                         DRM_ERROR("DC failed to set cursor position\n");
8106                 mutex_unlock(&adev->dm.dc_lock);
8107         }
8108 }
8109
8110 static void prepare_flip_isr(struct amdgpu_crtc *acrtc)
8111 {
8113         assert_spin_locked(&acrtc->base.dev->event_lock);
8114         WARN_ON(acrtc->event);
8115
8116         acrtc->event = acrtc->base.state->event;
8117
8118         /* Set the flip status */
8119         acrtc->pflip_status = AMDGPU_FLIP_SUBMITTED;
8120
8121         /* Mark this event as consumed */
8122         acrtc->base.state->event = NULL;
8123
8124         DC_LOG_PFLIP("crtc:%d, pflip_stat:AMDGPU_FLIP_SUBMITTED\n",
8125                      acrtc->crtc_id);
8126 }
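/*
 * prepare_flip_isr() must run under event_lock (asserted above); the
 * pageflip interrupt handler is then expected to send acrtc->event
 * and move pflip_status back to AMDGPU_FLIP_NONE once the flip
 * actually completes.
 */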
8127
8128 static void update_freesync_state_on_stream(
8129         struct amdgpu_display_manager *dm,
8130         struct dm_crtc_state *new_crtc_state,
8131         struct dc_stream_state *new_stream,
8132         struct dc_plane_state *surface,
8133         u32 flip_timestamp_in_us)
8134 {
8135         struct mod_vrr_params vrr_params;
8136         struct dc_info_packet vrr_infopacket = {0};
8137         struct amdgpu_device *adev = dm->adev;
8138         struct amdgpu_crtc *acrtc = to_amdgpu_crtc(new_crtc_state->base.crtc);
8139         unsigned long flags;
8140         bool pack_sdp_v1_3 = false;
8141
8142         if (!new_stream)
8143                 return;
8144
8145         /*
8146          * TODO: Determine why min/max totals and vrefresh can be 0 here.
8147          * For now it's sufficient to just guard against these conditions.
8148          */
8149
8150         if (!new_stream->timing.h_total || !new_stream->timing.v_total)
8151                 return;
8152
8153         spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);
8154         vrr_params = acrtc->dm_irq_params.vrr_params;
8155
8156         if (surface) {
8157                 mod_freesync_handle_preflip(
8158                         dm->freesync_module,
8159                         surface,
8160                         new_stream,
8161                         flip_timestamp_in_us,
8162                         &vrr_params);
8163
8164                 if (adev->family < AMDGPU_FAMILY_AI &&
8165                     amdgpu_dm_vrr_active(new_crtc_state)) {
8166                         mod_freesync_handle_v_update(dm->freesync_module,
8167                                                      new_stream, &vrr_params);
8168
8169                         /* Need to call this before the frame ends. */
8170                         dc_stream_adjust_vmin_vmax(dm->dc,
8171                                                    new_crtc_state->stream,
8172                                                    &vrr_params.adjust);
8173                 }
8174         }
8175
8176         mod_freesync_build_vrr_infopacket(
8177                 dm->freesync_module,
8178                 new_stream,
8179                 &vrr_params,
8180                 PACKET_TYPE_VRR,
8181                 TRANSFER_FUNC_UNKNOWN,
8182                 &vrr_infopacket,
8183                 pack_sdp_v1_3);
8184
8185         new_crtc_state->freesync_timing_changed |=
8186                 (memcmp(&acrtc->dm_irq_params.vrr_params.adjust,
8187                         &vrr_params.adjust,
8188                         sizeof(vrr_params.adjust)) != 0);
8189
8190         new_crtc_state->freesync_vrr_info_changed |=
8191                 (memcmp(&new_crtc_state->vrr_infopacket,
8192                         &vrr_infopacket,
8193                         sizeof(vrr_infopacket)) != 0);
8194
8195         acrtc->dm_irq_params.vrr_params = vrr_params;
8196         new_crtc_state->vrr_infopacket = vrr_infopacket;
8197
8198         new_stream->adjust = acrtc->dm_irq_params.vrr_params.adjust;
8199         new_stream->vrr_infopacket = vrr_infopacket;
8200
8201         if (new_crtc_state->freesync_vrr_info_changed)
8202                 DRM_DEBUG_KMS("VRR packet update: crtc=%u enabled=%d state=%d",
8203                               new_crtc_state->base.crtc->base.id,
8204                               (int)new_crtc_state->base.vrr_enabled,
8205                               (int)vrr_params.state);
8206
8207         spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
8208 }
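/*
 * Note the |= on the *_changed flags above: freesync state is
 * evaluated more than once per commit (see also
 * update_stream_irq_parameters() below), so a change detected by an
 * earlier evaluation must not be cleared by a later, unchanged one.
 */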
8209
8210 static void update_stream_irq_parameters(
8211         struct amdgpu_display_manager *dm,
8212         struct dm_crtc_state *new_crtc_state)
8213 {
8214         struct dc_stream_state *new_stream = new_crtc_state->stream;
8215         struct mod_vrr_params vrr_params;
8216         struct mod_freesync_config config = new_crtc_state->freesync_config;
8217         struct amdgpu_device *adev = dm->adev;
8218         struct amdgpu_crtc *acrtc = to_amdgpu_crtc(new_crtc_state->base.crtc);
8219         unsigned long flags;
8220
8221         if (!new_stream)
8222                 return;
8223
8224         /*
8225          * TODO: Determine why min/max totals and vrefresh can be 0 here.
8226          * For now it's sufficient to just guard against these conditions.
8227          */
8228         if (!new_stream->timing.h_total || !new_stream->timing.v_total)
8229                 return;
8230
8231         spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);
8232         vrr_params = acrtc->dm_irq_params.vrr_params;
8233
8234         if (new_crtc_state->vrr_supported &&
8235             config.min_refresh_in_uhz &&
8236             config.max_refresh_in_uhz) {
8237                 /*
8238                  * if freesync compatible mode was set, config.state will be set
8239                  * in atomic check
8240                  */
8241                 if (config.state == VRR_STATE_ACTIVE_FIXED && config.fixed_refresh_in_uhz &&
8242                     (!drm_atomic_crtc_needs_modeset(&new_crtc_state->base) ||
8243                      new_crtc_state->freesync_config.state == VRR_STATE_ACTIVE_FIXED)) {
8244                         vrr_params.max_refresh_in_uhz = config.max_refresh_in_uhz;
8245                         vrr_params.min_refresh_in_uhz = config.min_refresh_in_uhz;
8246                         vrr_params.fixed_refresh_in_uhz = config.fixed_refresh_in_uhz;
8247                         vrr_params.state = VRR_STATE_ACTIVE_FIXED;
8248                 } else {
8249                         config.state = new_crtc_state->base.vrr_enabled ?
8250                                                      VRR_STATE_ACTIVE_VARIABLE :
8251                                                      VRR_STATE_INACTIVE;
8252                 }
8253         } else {
8254                 config.state = VRR_STATE_UNSUPPORTED;
8255         }
8256
8257         mod_freesync_build_vrr_params(dm->freesync_module,
8258                                       new_stream,
8259                                       &config, &vrr_params);
8260
8261         new_crtc_state->freesync_timing_changed |=
8262                 (memcmp(&acrtc->dm_irq_params.vrr_params.adjust,
8263                         &vrr_params.adjust, sizeof(vrr_params.adjust)) != 0);
8264
8265         new_crtc_state->freesync_config = config;
8266         /* Copy state for access from DM IRQ handler */
8267         acrtc->dm_irq_params.freesync_config = config;
8268         acrtc->dm_irq_params.active_planes = new_crtc_state->active_planes;
8269         acrtc->dm_irq_params.vrr_params = vrr_params;
8270         spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
8271 }
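/*
 * Summary of the config.state decision above (illustrative):
 *
 *   vrr_supported + fixed-refresh conditions met -> VRR_STATE_ACTIVE_FIXED
 *   vrr_supported + base.vrr_enabled             -> VRR_STATE_ACTIVE_VARIABLE
 *   vrr_supported + !base.vrr_enabled            -> VRR_STATE_INACTIVE
 *   !vrr_supported or missing refresh bounds     -> VRR_STATE_UNSUPPORTED
 */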
8272
8273 static void amdgpu_dm_handle_vrr_transition(struct dm_crtc_state *old_state,
8274                                             struct dm_crtc_state *new_state)
8275 {
8276         bool old_vrr_active = amdgpu_dm_vrr_active(old_state);
8277         bool new_vrr_active = amdgpu_dm_vrr_active(new_state);
8278
8279         if (!old_vrr_active && new_vrr_active) {
8280                 /* Transition VRR inactive -> active:
8281                  * While VRR is active, we must not disable vblank irq, as a
8282                  * reenable after disable would compute bogus vblank/pflip
8283                  * timestamps if it happened inside the display front porch.
8284                  *
8285                  * We also need vupdate irq for the actual core vblank handling
8286                  * at end of vblank.
8287                  */
8288                 dm_set_vupdate_irq(new_state->base.crtc, true);
8289                 drm_crtc_vblank_get(new_state->base.crtc);
8290                 DRM_DEBUG_DRIVER("%s: crtc=%u VRR off->on: Get vblank ref\n",
8291                                  __func__, new_state->base.crtc->base.id);
8292         } else if (old_vrr_active && !new_vrr_active) {
8293                 /* Transition VRR active -> inactive:
8294                  * Allow vblank irq disable again for fixed refresh rate.
8295                  */
8296                 dm_set_vupdate_irq(new_state->base.crtc, false);
8297                 drm_crtc_vblank_put(new_state->base.crtc);
8298                 DRM_DEBUG_DRIVER("%s: crtc=%u VRR on->off: Drop vblank ref\n",
8299                                  __func__, new_state->base.crtc->base.id);
8300         }
8301 }
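/*
 * The drm_crtc_vblank_get() taken on the inactive -> active
 * transition above is balanced by the drm_crtc_vblank_put() on the
 * active -> inactive transition, so a CRTC in VRR holds exactly one
 * extra vblank reference for the duration of the VRR session.
 */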
8302
8303 static void amdgpu_dm_commit_cursors(struct drm_atomic_state *state)
8304 {
8305         struct drm_plane *plane;
8306         struct drm_plane_state *old_plane_state;
8307         int i;
8308
8309         /*
8310          * TODO: Make this per-stream so we don't issue redundant updates for
8311          * commits with multiple streams.
8312          */
8313         for_each_old_plane_in_state(state, plane, old_plane_state, i)
8314                 if (plane->type == DRM_PLANE_TYPE_CURSOR)
8315                         handle_cursor_update(plane, old_plane_state);
8316 }
8317
8318 static void amdgpu_dm_commit_planes(struct drm_atomic_state *state,
8319                                     struct dc_state *dc_state,
8320                                     struct drm_device *dev,
8321                                     struct amdgpu_display_manager *dm,
8322                                     struct drm_crtc *pcrtc,
8323                                     bool wait_for_vblank)
8324 {
8325         uint32_t i;
8326         uint64_t timestamp_ns;
8327         struct drm_plane *plane;
8328         struct drm_plane_state *old_plane_state, *new_plane_state;
8329         struct amdgpu_crtc *acrtc_attach = to_amdgpu_crtc(pcrtc);
8330         struct drm_crtc_state *new_pcrtc_state =
8331                         drm_atomic_get_new_crtc_state(state, pcrtc);
8332         struct dm_crtc_state *acrtc_state = to_dm_crtc_state(new_pcrtc_state);
8333         struct dm_crtc_state *dm_old_crtc_state =
8334                         to_dm_crtc_state(drm_atomic_get_old_crtc_state(state, pcrtc));
8335         int planes_count = 0, vpos, hpos;
8336         long r;
8337         unsigned long flags;
8338         struct amdgpu_bo *abo;
8339         uint32_t target_vblank, last_flip_vblank;
8340         bool vrr_active = amdgpu_dm_vrr_active(acrtc_state);
8341         bool pflip_present = false;
8342         struct {
8343                 struct dc_surface_update surface_updates[MAX_SURFACES];
8344                 struct dc_plane_info plane_infos[MAX_SURFACES];
8345                 struct dc_scaling_info scaling_infos[MAX_SURFACES];
8346                 struct dc_flip_addrs flip_addrs[MAX_SURFACES];
8347                 struct dc_stream_update stream_update;
8348         } *bundle;
8349
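        /*
         * The bundle is allocated on the heap rather than the stack: with
         * MAX_SURFACES entries each of surface updates, plane infos, scaling
         * infos and flip addresses, it is far too large for the kernel stack.
         */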
8350         bundle = kzalloc(sizeof(*bundle), GFP_KERNEL);
8351
8352         if (!bundle) {
8353                 dm_error("Failed to allocate update bundle\n");
8354                 goto cleanup;
8355         }
8356
8357         /*
8358          * Disable the cursor first if we're disabling all the planes.
8359          * It'll remain on the screen after the planes are re-enabled
8360          * if we don't.
8361          */
8362         if (acrtc_state->active_planes == 0)
8363                 amdgpu_dm_commit_cursors(state);
8364
8365         /* update planes when needed */
8366         for_each_oldnew_plane_in_state(state, plane, old_plane_state, new_plane_state, i) {
8367                 struct drm_crtc *crtc = new_plane_state->crtc;
8368                 struct drm_crtc_state *new_crtc_state;
8369                 struct drm_framebuffer *fb = new_plane_state->fb;
8370                 struct amdgpu_framebuffer *afb = (struct amdgpu_framebuffer *)fb;
8371                 bool plane_needs_flip;
8372                 struct dc_plane_state *dc_plane;
8373                 struct dm_plane_state *dm_new_plane_state = to_dm_plane_state(new_plane_state);
8374
8375                 /* Cursor plane is handled after stream updates */
8376                 if (plane->type == DRM_PLANE_TYPE_CURSOR)
8377                         continue;
8378
8379                 if (!fb || !crtc || pcrtc != crtc)
8380                         continue;
8381
8382                 new_crtc_state = drm_atomic_get_new_crtc_state(state, crtc);
8383                 if (!new_crtc_state->active)
8384                         continue;
8385
8386                 dc_plane = dm_new_plane_state->dc_state;
8387
8388                 bundle->surface_updates[planes_count].surface = dc_plane;
8389                 if (new_pcrtc_state->color_mgmt_changed) {
8390                         bundle->surface_updates[planes_count].gamma = dc_plane->gamma_correction;
8391                         bundle->surface_updates[planes_count].in_transfer_func = dc_plane->in_transfer_func;
8392                         bundle->surface_updates[planes_count].gamut_remap_matrix = &dc_plane->gamut_remap_matrix;
8393                 }
8394
8395                 fill_dc_scaling_info(new_plane_state,
8396                                      &bundle->scaling_infos[planes_count]);
8397
8398                 bundle->surface_updates[planes_count].scaling_info =
8399                         &bundle->scaling_infos[planes_count];
8400
8401                 plane_needs_flip = old_plane_state->fb && new_plane_state->fb;
8402
8403                 pflip_present = pflip_present || plane_needs_flip;
8404
8405                 if (!plane_needs_flip) {
8406                         planes_count += 1;
8407                         continue;
8408                 }
8409
8410                 abo = gem_to_amdgpu_bo(fb->obj[0]);
8411
8412                 /*
8413                  * Wait for all fences on this FB. Do limited wait to avoid
8414                  * deadlock during GPU reset when this fence will not signal
8415                  * but we hold reservation lock for the BO.
8416                  */
8417                 r = dma_resv_wait_timeout_rcu(abo->tbo.base.resv, true,
8418                                                         false,
8419                                                         msecs_to_jiffies(5000));
8420                 if (unlikely(r <= 0))
8421                         DRM_ERROR("Waiting for fences timed out!\n");
8422
8423                 fill_dc_plane_info_and_addr(
8424                         dm->adev, new_plane_state,
8425                         afb->tiling_flags,
8426                         &bundle->plane_infos[planes_count],
8427                         &bundle->flip_addrs[planes_count].address,
8428                         afb->tmz_surface, false);
8429
8430                 DRM_DEBUG_ATOMIC("plane: id=%d dcc_en=%d\n",
8431                                  new_plane_state->plane->index,
8432                                  bundle->plane_infos[planes_count].dcc.enable);
8433
8434                 bundle->surface_updates[planes_count].plane_info =
8435                         &bundle->plane_infos[planes_count];
8436
8437                 /*
8438                  * Only allow immediate flips for fast updates that don't
8439                  * change FB pitch, DCC state, rotation or mirroring.
8440                  */
8441                 bundle->flip_addrs[planes_count].flip_immediate =
8442                         crtc->state->async_flip &&
8443                         acrtc_state->update_type == UPDATE_TYPE_FAST;
8444
8445                 timestamp_ns = ktime_get_ns();
8446                 bundle->flip_addrs[planes_count].flip_timestamp_in_us = div_u64(timestamp_ns, 1000);
8447                 bundle->surface_updates[planes_count].flip_addr = &bundle->flip_addrs[planes_count];
8448                 bundle->surface_updates[planes_count].surface = dc_plane;
8449
8450                 if (!bundle->surface_updates[planes_count].surface) {
8451                         DRM_ERROR("No surface for CRTC: id=%d\n",
8452                                         acrtc_attach->crtc_id);
8453                         continue;
8454                 }
8455
8456                 if (plane == pcrtc->primary)
8457                         update_freesync_state_on_stream(
8458                                 dm,
8459                                 acrtc_state,
8460                                 acrtc_state->stream,
8461                                 dc_plane,
8462                                 bundle->flip_addrs[planes_count].flip_timestamp_in_us);
8463
8464                 DRM_DEBUG_ATOMIC("%s Flipping to hi: 0x%x, low: 0x%x\n",
8465                                  __func__,
8466                                  bundle->flip_addrs[planes_count].address.grph.addr.high_part,
8467                                  bundle->flip_addrs[planes_count].address.grph.addr.low_part);
8468
8469                 planes_count += 1;
8470
8471         }
8472
8473         if (pflip_present) {
8474                 if (!vrr_active) {
8475                         /* Use old throttling in non-vrr fixed refresh rate mode
8476                          * to keep flip scheduling based on target vblank counts
8477                          * working in a backwards compatible way, e.g., for
8478                          * clients using the GLX_OML_sync_control extension or
8479                          * DRI3/Present extension with defined target_msc.
8480                          */
8481                         last_flip_vblank = amdgpu_get_vblank_counter_kms(pcrtc);
8482                 } else {
8484                         /* For variable refresh rate mode only:
8485                          * Get vblank of last completed flip to avoid > 1 vrr
8486                          * flips per video frame by use of throttling, but allow
8487                          * flip programming anywhere in the possibly large
8488                          * variable vrr vblank interval for fine-grained flip
8489                          * timing control and more opportunity to avoid stutter
8490                          * on late submission of flips.
8491                          */
8492                         spin_lock_irqsave(&pcrtc->dev->event_lock, flags);
8493                         last_flip_vblank = acrtc_attach->dm_irq_params.last_flip_vblank;
8494                         spin_unlock_irqrestore(&pcrtc->dev->event_lock, flags);
8495                 }
8496
8497                 target_vblank = last_flip_vblank + wait_for_vblank;
8498
8499                 /*
8500                  * Wait until we're out of the vertical blank period before the one
8501                  * targeted by the flip
8502                  */
8503                 while ((acrtc_attach->enabled &&
8504                         (amdgpu_display_get_crtc_scanoutpos(dm->ddev, acrtc_attach->crtc_id,
8505                                                             0, &vpos, &hpos, NULL,
8506                                                             NULL, &pcrtc->hwmode)
8507                          & (DRM_SCANOUTPOS_VALID | DRM_SCANOUTPOS_IN_VBLANK)) ==
8508                         (DRM_SCANOUTPOS_VALID | DRM_SCANOUTPOS_IN_VBLANK) &&
8509                         (int)(target_vblank -
8510                           amdgpu_get_vblank_counter_kms(pcrtc)) > 0)) {
8511                         usleep_range(1000, 1100);
8512                 }
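                /*
                 * Example (illustrative): with wait_for_vblank == true the
                 * target is last_flip_vblank + 1, so the loop above sleeps in
                 * ~1 ms steps until scanout has left the vblank period
                 * preceding the target, and only then programs the flip.
                 */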
8513
8514                 /*
8515                  * Prepare the flip event for the pageflip interrupt to handle.
8516                  *
8517                  * This only works in the case where we've already turned on the
8518                  * appropriate hardware blocks (e.g. HUBP), so in the transition case
8519                  * from 0 -> n planes we have to skip a hardware generated event
8520                  * and rely on sending it from software.
8521                  */
8522                 if (acrtc_attach->base.state->event &&
8523                     acrtc_state->active_planes > 0) {
8524                         drm_crtc_vblank_get(pcrtc);
8525
8526                         spin_lock_irqsave(&pcrtc->dev->event_lock, flags);
8527
8528                         WARN_ON(acrtc_attach->pflip_status != AMDGPU_FLIP_NONE);
8529                         prepare_flip_isr(acrtc_attach);
8530
8531                         spin_unlock_irqrestore(&pcrtc->dev->event_lock, flags);
8532                 }
8533
8534                 if (acrtc_state->stream) {
8535                         if (acrtc_state->freesync_vrr_info_changed)
8536                                 bundle->stream_update.vrr_infopacket =
8537                                         &acrtc_state->stream->vrr_infopacket;
8538                 }
8539         }
8540
8541         /* Update the planes if changed or disable if we don't have any. */
8542         if ((planes_count || acrtc_state->active_planes == 0) &&
8543                 acrtc_state->stream) {
8544                 bundle->stream_update.stream = acrtc_state->stream;
8545                 if (new_pcrtc_state->mode_changed) {
8546                         bundle->stream_update.src = acrtc_state->stream->src;
8547                         bundle->stream_update.dst = acrtc_state->stream->dst;
8548                 }
8549
8550                 if (new_pcrtc_state->color_mgmt_changed) {
8551                         /*
8552                          * TODO: This isn't fully correct since we've actually
8553                          * already modified the stream in place.
8554                          */
8555                         bundle->stream_update.gamut_remap =
8556                                 &acrtc_state->stream->gamut_remap_matrix;
8557                         bundle->stream_update.output_csc_transform =
8558                                 &acrtc_state->stream->csc_color_matrix;
8559                         bundle->stream_update.out_transfer_func =
8560                                 acrtc_state->stream->out_transfer_func;
8561                 }
8562
8563                 acrtc_state->stream->abm_level = acrtc_state->abm_level;
8564                 if (acrtc_state->abm_level != dm_old_crtc_state->abm_level)
8565                         bundle->stream_update.abm_level = &acrtc_state->abm_level;
8566
8567                 /*
8568                  * If FreeSync state on the stream has changed then we need to
8569                  * re-adjust the min/max bounds now that DC doesn't handle this
8570                  * as part of commit.
8571                  */
8572                 if (is_dc_timing_adjust_needed(dm_old_crtc_state, acrtc_state)) {
8573                         spin_lock_irqsave(&pcrtc->dev->event_lock, flags);
8574                         dc_stream_adjust_vmin_vmax(
8575                                 dm->dc, acrtc_state->stream,
8576                                 &acrtc_attach->dm_irq_params.vrr_params.adjust);
8577                         spin_unlock_irqrestore(&pcrtc->dev->event_lock, flags);
8578                 }
8579                 mutex_lock(&dm->dc_lock);
8580                 if ((acrtc_state->update_type > UPDATE_TYPE_FAST) &&
8581                                 acrtc_state->stream->link->psr_settings.psr_allow_active)
8582                         amdgpu_dm_psr_disable(acrtc_state->stream);
8583
8584                 dc_commit_updates_for_stream(dm->dc,
8585                                                      bundle->surface_updates,
8586                                                      planes_count,
8587                                                      acrtc_state->stream,
8588                                                      &bundle->stream_update,
8589                                                      dc_state);
8590
8591                 /*
8592                  * Enable or disable the interrupts on the backend.
8593                  *
8594                  * Most pipes are put into power gating when unused.
8595                  *
8596                  * When power gating is enabled on a pipe we lose the
8597                  * interrupt enablement state when power gating is disabled.
8598                  *
8599                  * So we need to update the IRQ control state in hardware
8600                  * whenever the pipe turns on (since it could be previously
8601                  * power gated) or off (since some pipes can't be power gated
8602                  * on some ASICs).
8603                  */
8604                 if (dm_old_crtc_state->active_planes != acrtc_state->active_planes)
8605                         dm_update_pflip_irq_state(drm_to_adev(dev),
8606                                                   acrtc_attach);
8607
8608                 if ((acrtc_state->update_type > UPDATE_TYPE_FAST) &&
8609                                 acrtc_state->stream->link->psr_settings.psr_version != DC_PSR_VERSION_UNSUPPORTED &&
8610                                 !acrtc_state->stream->link->psr_settings.psr_feature_enabled)
8611                         amdgpu_dm_link_setup_psr(acrtc_state->stream);
8612                 else if ((acrtc_state->update_type == UPDATE_TYPE_FAST) &&
8613                                 acrtc_state->stream->link->psr_settings.psr_feature_enabled &&
8614                                 !acrtc_state->stream->link->psr_settings.psr_allow_active) {
8615                         amdgpu_dm_psr_enable(acrtc_state->stream);
8616                 }
8617
8618                 mutex_unlock(&dm->dc_lock);
8619         }
8620
8621         /*
8622          * Update cursor state *after* programming all the planes.
8623          * This avoids redundant programming when we're disabling a single
8624          * plane, since those pipes are being disabled anyway.
8625          */
8626         if (acrtc_state->active_planes)
8627                 amdgpu_dm_commit_cursors(state);
8628
8629 cleanup:
8630         kfree(bundle);
8631 }
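/*
 * Note the two amdgpu_dm_commit_cursors() call sites above: cursors
 * are committed *before* plane programming when every plane is being
 * disabled (so the cursor does not linger on screen), and *after* it
 * otherwise.
 */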
8632
8633 static void amdgpu_dm_commit_audio(struct drm_device *dev,
8634                                    struct drm_atomic_state *state)
8635 {
8636         struct amdgpu_device *adev = drm_to_adev(dev);
8637         struct amdgpu_dm_connector *aconnector;
8638         struct drm_connector *connector;
8639         struct drm_connector_state *old_con_state, *new_con_state;
8640         struct drm_crtc_state *new_crtc_state;
8641         struct dm_crtc_state *new_dm_crtc_state;
8642         const struct dc_stream_status *status;
8643         int i, inst;
8644
8645         /* Notify device removals. */
8646         for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) {
8647                 if (old_con_state->crtc != new_con_state->crtc) {
8648                         /* CRTC changes require notification. */
8649                         goto notify;
8650                 }
8651
8652                 if (!new_con_state->crtc)
8653                         continue;
8654
8655                 new_crtc_state = drm_atomic_get_new_crtc_state(
8656                         state, new_con_state->crtc);
8657
8658                 if (!new_crtc_state)
8659                         continue;
8660
8661                 if (!drm_atomic_crtc_needs_modeset(new_crtc_state))
8662                         continue;
8663
8664         notify:
8665                 aconnector = to_amdgpu_dm_connector(connector);
8666
8667                 mutex_lock(&adev->dm.audio_lock);
8668                 inst = aconnector->audio_inst;
8669                 aconnector->audio_inst = -1;
8670                 mutex_unlock(&adev->dm.audio_lock);
8671
8672                 amdgpu_dm_audio_eld_notify(adev, inst);
8673         }
8674
8675         /* Notify audio device additions. */
8676         for_each_new_connector_in_state(state, connector, new_con_state, i) {
8677                 if (!new_con_state->crtc)
8678                         continue;
8679
8680                 new_crtc_state = drm_atomic_get_new_crtc_state(
8681                         state, new_con_state->crtc);
8682
8683                 if (!new_crtc_state)
8684                         continue;
8685
8686                 if (!drm_atomic_crtc_needs_modeset(new_crtc_state))
8687                         continue;
8688
8689                 new_dm_crtc_state = to_dm_crtc_state(new_crtc_state);
8690                 if (!new_dm_crtc_state->stream)
8691                         continue;
8692
8693                 status = dc_stream_get_status(new_dm_crtc_state->stream);
8694                 if (!status)
8695                         continue;
8696
8697                 aconnector = to_amdgpu_dm_connector(connector);
8698
8699                 mutex_lock(&adev->dm.audio_lock);
8700                 inst = status->audio_inst;
8701                 aconnector->audio_inst = inst;
8702                 mutex_unlock(&adev->dm.audio_lock);
8703
8704                 amdgpu_dm_audio_eld_notify(adev, inst);
8705         }
8706 }
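/*
 * The audio_inst handshake above uses -1 as the "no endpoint"
 * sentinel: removals publish -1 before notifying, additions publish
 * the instance from the stream status, and amdgpu_dm_audio_eld_notify()
 * lets the audio component pick up the new ELD for that instance.
 */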
8707
8708 /*
8709  * amdgpu_dm_crtc_copy_transient_flags - copy mirrored flags from DRM to DC
8710  * @crtc_state: the DRM CRTC state
8711  * @stream_state: the DC stream state.
8712  *
8713  * Copy the mirrored transient state flags from DRM, to DC. It is used to bring
8714  * a dc_stream_state's flags in sync with a drm_crtc_state's flags.
8715  */
8716 static void amdgpu_dm_crtc_copy_transient_flags(struct drm_crtc_state *crtc_state,
8717                                                 struct dc_stream_state *stream_state)
8718 {
8719         stream_state->mode_changed = drm_atomic_crtc_needs_modeset(crtc_state);
8720 }
8721
8722 /**
8723  * amdgpu_dm_atomic_commit_tail() - AMDgpu DM's commit tail implementation.
8724  * @state: The atomic state to commit
8725  *
8726  * This will tell DC to commit the constructed DC state from atomic_check,
8728  * programming the hardware. Any failure here implies a hardware failure, since
8728  * atomic check should have filtered anything non-kosher.
8729  */
8730 static void amdgpu_dm_atomic_commit_tail(struct drm_atomic_state *state)
8731 {
8732         struct drm_device *dev = state->dev;
8733         struct amdgpu_device *adev = drm_to_adev(dev);
8734         struct amdgpu_display_manager *dm = &adev->dm;
8735         struct dm_atomic_state *dm_state;
8736         struct dc_state *dc_state = NULL, *dc_state_temp = NULL;
8737         uint32_t i, j;
8738         struct drm_crtc *crtc;
8739         struct drm_crtc_state *old_crtc_state, *new_crtc_state;
8740         unsigned long flags;
8741         bool wait_for_vblank = true;
8742         struct drm_connector *connector;
8743         struct drm_connector_state *old_con_state, *new_con_state;
8744         struct dm_crtc_state *dm_old_crtc_state, *dm_new_crtc_state;
8745         int crtc_disable_count = 0;
8746         bool mode_set_reset_required = false;
8747
8748         trace_amdgpu_dm_atomic_commit_tail_begin(state);
8749
8750         drm_atomic_helper_update_legacy_modeset_state(dev, state);
8751
8752         dm_state = dm_atomic_get_new_state(state);
8753         if (dm_state && dm_state->context) {
8754                 dc_state = dm_state->context;
8755         } else {
8756                 /* No state changes, retain current state. */
8757                 dc_state_temp = dc_create_state(dm->dc);
8758                 ASSERT(dc_state_temp);
8759                 dc_state = dc_state_temp;
8760                 dc_resource_state_copy_construct_current(dm->dc, dc_state);
8761         }
8762
8763         for_each_oldnew_crtc_in_state (state, crtc, old_crtc_state,
8764                                        new_crtc_state, i) {
8765                 struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
8766
8767                 dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
8768
8769                 if (old_crtc_state->active &&
8770                     (!new_crtc_state->active ||
8771                      drm_atomic_crtc_needs_modeset(new_crtc_state))) {
8772                         manage_dm_interrupts(adev, acrtc, false);
8773                         dc_stream_release(dm_old_crtc_state->stream);
8774                 }
8775         }
8776
8777         drm_atomic_helper_calc_timestamping_constants(state);
8778
8779         /* update changed items */
8780         for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
8781                 struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
8782
8783                 dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
8784                 dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
8785
8786                 DRM_DEBUG_ATOMIC(
8787                         "amdgpu_crtc id:%d crtc_state_flags: enable:%d, active:%d, "
8788                         "planes_changed:%d, mode_changed:%d,active_changed:%d,"
8789                         "connectors_changed:%d\n",
8790                         acrtc->crtc_id,
8791                         new_crtc_state->enable,
8792                         new_crtc_state->active,
8793                         new_crtc_state->planes_changed,
8794                         new_crtc_state->mode_changed,
8795                         new_crtc_state->active_changed,
8796                         new_crtc_state->connectors_changed);
8797
8798                 /* Disable cursor if disabling crtc */
8799                 if (old_crtc_state->active && !new_crtc_state->active) {
8800                         struct dc_cursor_position position;
8801
8802                         memset(&position, 0, sizeof(position));
8803                         mutex_lock(&dm->dc_lock);
8804                         dc_stream_set_cursor_position(dm_old_crtc_state->stream, &position);
8805                         mutex_unlock(&dm->dc_lock);
8806                 }
8807
8808                 /* Copy all transient state flags into dc state */
8809                 if (dm_new_crtc_state->stream) {
8810                         amdgpu_dm_crtc_copy_transient_flags(&dm_new_crtc_state->base,
8811                                                             dm_new_crtc_state->stream);
8812                 }
8813
8814                 /* handles headless hotplug case, updating new_state and
8815                  * aconnector as needed
8816                  */
8817
8818                 if (modeset_required(new_crtc_state, dm_new_crtc_state->stream, dm_old_crtc_state->stream)) {
8819
8820                         DRM_DEBUG_ATOMIC("Atomic commit: SET crtc id %d: [%p]\n", acrtc->crtc_id, acrtc);
8821
8822                         if (!dm_new_crtc_state->stream) {
8823                                 /*
8824                                  * This could happen because of issues with
8825                                  * delivery of userspace notifications.
8826                                  * In that case userspace tries to set a mode
8827                                  * on a display which is in fact disconnected,
8828                                  * so dc_sink is NULL on the aconnector and we
8829                                  * expect a mode reset to come soon.
8830                                  *
8831                                  * This can also happen when an unplug occurs
8832                                  * while the resume sequence is still running.
8833                                  *
8834                                  * In this case, we want to pretend we still
8835                                  * have a sink to keep the pipe running so that
8836                                  * hw state stays consistent with the sw state.
8837                                  */
8838                                 DRM_DEBUG_DRIVER("%s: Failed to create new stream for crtc %d\n",
8839                                                 __func__, acrtc->base.base.id);
8840                                 continue;
8841                         }
8842
8843                         if (dm_old_crtc_state->stream)
8844                                 remove_stream(adev, acrtc, dm_old_crtc_state->stream);
8845
8846                         pm_runtime_get_noresume(dev->dev);
8847
8848                         acrtc->enabled = true;
8849                         acrtc->hw_mode = new_crtc_state->mode;
8850                         crtc->hwmode = new_crtc_state->mode;
8851                         mode_set_reset_required = true;
8852                 } else if (modereset_required(new_crtc_state)) {
8853                         DRM_DEBUG_ATOMIC("Atomic commit: RESET. crtc id %d:[%p]\n", acrtc->crtc_id, acrtc);
8854                         /* i.e. reset mode */
8855                         if (dm_old_crtc_state->stream)
8856                                 remove_stream(adev, acrtc, dm_old_crtc_state->stream);
8857
8858                         mode_set_reset_required = true;
8859                 }
8860         } /* for_each_crtc_in_state() */
8861
8862         if (dc_state) {
8863                 /* if there is a mode set or reset, disable eDP PSR */
8864                 if (mode_set_reset_required)
8865                         amdgpu_dm_psr_disable_all(dm);
8866
8867                 dm_enable_per_frame_crtc_master_sync(dc_state);
8868                 mutex_lock(&dm->dc_lock);
8869                 WARN_ON(!dc_commit_state(dm->dc, dc_state));
8870 #if defined(CONFIG_DRM_AMD_DC_DCN)
8871                 /* Allow idle optimization when vblank count is 0 for display off */
8872                 if (dm->active_vblank_irq_count == 0)
8873                         dc_allow_idle_optimizations(dm->dc, true);
8874 #endif
8875                 mutex_unlock(&dm->dc_lock);
8876         }
8877
8878         for_each_new_crtc_in_state(state, crtc, new_crtc_state, i) {
8879                 struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
8880
8881                 dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
8882
8883                 if (dm_new_crtc_state->stream != NULL) {
8884                         const struct dc_stream_status *status =
8885                                         dc_stream_get_status(dm_new_crtc_state->stream);
8886
8887                         if (!status)
8888                                 status = dc_stream_get_status_from_state(dc_state,
8889                                                                          dm_new_crtc_state->stream);
8890                         if (!status)
8891                                 DC_ERR("got no status for stream %p on acrtc%p\n", dm_new_crtc_state->stream, acrtc);
8892                         else
8893                                 acrtc->otg_inst = status->primary_otg_inst;
8894                 }
8895         }
8896 #ifdef CONFIG_DRM_AMD_DC_HDCP
8897         for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) {
8898                 struct dm_connector_state *dm_new_con_state = to_dm_connector_state(new_con_state);
8899                 struct amdgpu_crtc *acrtc = to_amdgpu_crtc(dm_new_con_state->base.crtc);
8900                 struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
8901
8902                 new_crtc_state = NULL;
8903
8904                 if (acrtc)
8905                         new_crtc_state = drm_atomic_get_new_crtc_state(state, &acrtc->base);
8906
8907                 dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
8908
8909                 if (dm_new_crtc_state && dm_new_crtc_state->stream == NULL &&
8910                     connector->state->content_protection == DRM_MODE_CONTENT_PROTECTION_ENABLED) {
8911                         hdcp_reset_display(adev->dm.hdcp_workqueue, aconnector->dc_link->link_index);
8912                         new_con_state->content_protection = DRM_MODE_CONTENT_PROTECTION_DESIRED;
8913                         dm_new_con_state->update_hdcp = true;
8914                         continue;
8915                 }
8916
8917                 if (is_content_protection_different(new_con_state, old_con_state, connector, adev->dm.hdcp_workqueue))
8918                         hdcp_update_display(
8919                                 adev->dm.hdcp_workqueue, aconnector->dc_link->link_index, aconnector,
8920                                 new_con_state->hdcp_content_type,
8921                                 new_con_state->content_protection == DRM_MODE_CONTENT_PROTECTION_DESIRED);
8922         }
8923 #endif
8924
8925         /* Handle connector state changes */
8926         for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) {
8927                 struct dm_connector_state *dm_new_con_state = to_dm_connector_state(new_con_state);
8928                 struct dm_connector_state *dm_old_con_state = to_dm_connector_state(old_con_state);
8929                 struct amdgpu_crtc *acrtc = to_amdgpu_crtc(dm_new_con_state->base.crtc);
8930                 struct dc_surface_update dummy_updates[MAX_SURFACES];
8931                 struct dc_stream_update stream_update;
8932                 struct dc_info_packet hdr_packet;
8933                 struct dc_stream_status *status = NULL;
8934                 bool abm_changed, hdr_changed, scaling_changed;
8935
8936                 memset(&dummy_updates, 0, sizeof(dummy_updates));
8937                 memset(&stream_update, 0, sizeof(stream_update));
8938
8939                 if (acrtc) {
8940                         new_crtc_state = drm_atomic_get_new_crtc_state(state, &acrtc->base);
8941                         old_crtc_state = drm_atomic_get_old_crtc_state(state, &acrtc->base);
8942                 }
8943
8944                 /* Skip any modesets/resets */
8945                 if (!acrtc || drm_atomic_crtc_needs_modeset(new_crtc_state))
8946                         continue;
8947
8948                 dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
8949                 dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
8950
8951                 scaling_changed = is_scaling_state_different(dm_new_con_state,
8952                                                              dm_old_con_state);
8953
8954                 abm_changed = dm_new_crtc_state->abm_level !=
8955                               dm_old_crtc_state->abm_level;
8956
8957                 hdr_changed =
8958                         is_hdr_metadata_different(old_con_state, new_con_state);
8959
8960                 if (!scaling_changed && !abm_changed && !hdr_changed)
8961                         continue;
8962
8963                 stream_update.stream = dm_new_crtc_state->stream;
8964                 if (scaling_changed) {
8965                         update_stream_scaling_settings(&dm_new_con_state->base.crtc->mode,
8966                                         dm_new_con_state, dm_new_crtc_state->stream);
8967
8968                         stream_update.src = dm_new_crtc_state->stream->src;
8969                         stream_update.dst = dm_new_crtc_state->stream->dst;
8970                 }
8971
8972                 if (abm_changed) {
8973                         dm_new_crtc_state->stream->abm_level = dm_new_crtc_state->abm_level;
8974
8975                         stream_update.abm_level = &dm_new_crtc_state->abm_level;
8976                 }
8977
8978                 if (hdr_changed) {
8979                         fill_hdr_info_packet(new_con_state, &hdr_packet);
8980                         stream_update.hdr_static_metadata = &hdr_packet;
8981                 }
8982
8983                 status = dc_stream_get_status(dm_new_crtc_state->stream);
8984                 if (WARN_ON(!status))
8985                         continue;
8986                 WARN_ON(!status->plane_count);
8986
8987                 /*
8988                  * TODO: DC refuses to perform stream updates without a dc_surface_update.
8989                  * Here we create an empty update on each plane.
8990                  * To fix this, DC should permit updating only stream properties.
8991                  */
8992                 for (j = 0; j < status->plane_count; j++)
8993                         dummy_updates[j].surface = status->plane_states[0];
8994
8996                 mutex_lock(&dm->dc_lock);
8997                 dc_commit_updates_for_stream(dm->dc,
8998                                                      dummy_updates,
8999                                                      status->plane_count,
9000                                                      dm_new_crtc_state->stream,
9001                                                      &stream_update,
9002                                                      dc_state);
9003                 mutex_unlock(&dm->dc_lock);
9004         }
9005
9006         /* Count number of newly disabled CRTCs for dropping PM refs later. */
9007         for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state,
9008                                       new_crtc_state, i) {
9009                 if (old_crtc_state->active && !new_crtc_state->active)
9010                         crtc_disable_count++;
9011
9012                 dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
9013                 dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
9014
9015                 /* For freesync config update on crtc state and params for irq */
9016                 update_stream_irq_parameters(dm, dm_new_crtc_state);
9017
9018                 /* Handle vrr on->off / off->on transitions */
9019                 amdgpu_dm_handle_vrr_transition(dm_old_crtc_state,
9020                                                 dm_new_crtc_state);
9021         }
9022
9023         /*
9024          * Enable interrupts for CRTCs that are newly enabled or went through
9025          * a modeset. This is intentionally deferred until after the front-end
9026          * state has been modified, so that the OTG is already on and the IRQ
9027          * handlers never see stale or invalid state.
9028          */
9029         for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
9030                 struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
9031 #ifdef CONFIG_DEBUG_FS
9032                 bool configure_crc = false;
9033                 enum amdgpu_dm_pipe_crc_source cur_crc_src;
9034 #if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
9035                 struct crc_rd_work *crc_rd_wrk = dm->crc_rd_wrk;
9036 #endif
9037                 spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);
9038                 cur_crc_src = acrtc->dm_irq_params.crc_src;
9039                 spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
9040 #endif
9041                 dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
9042
9043                 if (new_crtc_state->active &&
9044                     (!old_crtc_state->active ||
9045                      drm_atomic_crtc_needs_modeset(new_crtc_state))) {
9046                         dc_stream_retain(dm_new_crtc_state->stream);
9047                         acrtc->dm_irq_params.stream = dm_new_crtc_state->stream;
9048                         manage_dm_interrupts(adev, acrtc, true);
9049
9050 #ifdef CONFIG_DEBUG_FS
9051                         /*
9052                          * Frontend may have changed so reapply the CRC capture
9053                          * settings for the stream.
9054                          */
9055                         dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
9056
9057                         if (amdgpu_dm_is_valid_crc_source(cur_crc_src)) {
9058                                 configure_crc = true;
9059 #if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
9060                                 if (amdgpu_dm_crc_window_is_activated(crtc)) {
9061                                         spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);
9062                                         acrtc->dm_irq_params.crc_window.update_win = true;
9063                                         acrtc->dm_irq_params.crc_window.skip_frame_cnt = 2;
9064                                         spin_lock_irq(&crc_rd_wrk->crc_rd_work_lock);
9065                                         crc_rd_wrk->crtc = crtc;
9066                                         spin_unlock_irq(&crc_rd_wrk->crc_rd_work_lock);
9067                                         spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
9068                                 }
9069 #endif
9070                         }
9071
9072                         if (configure_crc)
9073                                 if (amdgpu_dm_crtc_configure_crc_source(
9074                                         crtc, dm_new_crtc_state, cur_crc_src))
9075                                         DRM_DEBUG_DRIVER("Failed to configure crc source\n");
9076 #endif
9077                 }
9078         }
9079
9080         for_each_new_crtc_in_state(state, crtc, new_crtc_state, j)
9081                 if (new_crtc_state->async_flip)
9082                         wait_for_vblank = false;
9083
9084         /* Update planes when needed, per CRTC */
9085         for_each_new_crtc_in_state(state, crtc, new_crtc_state, j) {
9086                 dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
9087
9088                 if (dm_new_crtc_state->stream)
9089                         amdgpu_dm_commit_planes(state, dc_state, dev,
9090                                                 dm, crtc, wait_for_vblank);
9091         }
9092
9093         /* Update audio instances for each connector. */
9094         amdgpu_dm_commit_audio(dev, state);
9095
9096         /*
9097          * Send a vblank event for every event not handled in the flip path,
9098          * and mark each event consumed for drm_atomic_helper_commit_hw_done().
9099          */
9100         spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);
9101         for_each_new_crtc_in_state(state, crtc, new_crtc_state, i) {
9102
9103                 if (new_crtc_state->event)
9104                         drm_send_event_locked(dev, &new_crtc_state->event->base);
9105
9106                 new_crtc_state->event = NULL;
9107         }
9108         spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
9109
9110         /* Signal HW programming completion */
9111         drm_atomic_helper_commit_hw_done(state);
9112
9113         if (wait_for_vblank)
9114                 drm_atomic_helper_wait_for_flip_done(dev, state);
9115
9116         drm_atomic_helper_cleanup_planes(dev, state);
9117
9118         /* return the stolen vga memory back to VRAM */
9119         if (!adev->mman.keep_stolen_vga_memory)
9120                 amdgpu_bo_free_kernel(&adev->mman.stolen_vga_memory, NULL, NULL);
9121         amdgpu_bo_free_kernel(&adev->mman.stolen_extended_memory, NULL, NULL);
9122
9123         /*
9124          * Finally, drop a runtime PM reference for each newly disabled CRTC,
9125          * so we can put the GPU into runtime suspend if we're not driving any
9126          * displays anymore
9127          */
9128         for (i = 0; i < crtc_disable_count; i++)
9129                 pm_runtime_put_autosuspend(dev->dev);
9130         pm_runtime_mark_last_busy(dev->dev);
9131
9132         if (dc_state_temp)
9133                 dc_release_state(dc_state_temp);
9134 }
9135
9136
9137 static int dm_force_atomic_commit(struct drm_connector *connector)
9138 {
9139         int ret = 0;
9140         struct drm_device *ddev = connector->dev;
9141         struct drm_atomic_state *state = drm_atomic_state_alloc(ddev);
9142         struct amdgpu_crtc *disconnected_acrtc = to_amdgpu_crtc(connector->encoder->crtc);
9143         struct drm_plane *plane = disconnected_acrtc->base.primary;
9144         struct drm_connector_state *conn_state;
9145         struct drm_crtc_state *crtc_state;
9146         struct drm_plane_state *plane_state;
9147
9148         if (!state)
9149                 return -ENOMEM;
9150
9151         state->acquire_ctx = ddev->mode_config.acquire_ctx;
9152
9153         /* Construct an atomic state to restore the previous display settings */
9154
9155         /*
9156          * Attach connectors to drm_atomic_state
9157          */
9158         conn_state = drm_atomic_get_connector_state(state, connector);
9159
9160         ret = PTR_ERR_OR_ZERO(conn_state);
9161         if (ret)
9162                 goto out;
9163
9164         /* Attach CRTC to drm_atomic_state */
9165         crtc_state = drm_atomic_get_crtc_state(state, &disconnected_acrtc->base);
9166
9167         ret = PTR_ERR_OR_ZERO(crtc_state);
9168         if (ret)
9169                 goto out;
9170
9171         /* force a restore */
9172         crtc_state->mode_changed = true;
9173
9174         /* Attach plane to drm_atomic_state */
9175         plane_state = drm_atomic_get_plane_state(state, plane);
9176
9177         ret = PTR_ERR_OR_ZERO(plane_state);
9178         if (ret)
9179                 goto out;
9180
9181         /* Call commit internally with the state we just constructed */
9182         ret = drm_atomic_commit(state);
9183
9184 out:
9185         drm_atomic_state_put(state);
9186         if (ret)
9187                 DRM_ERROR("Restoring old state failed with %i\n", ret);
9188
9189         return ret;
9190 }
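/*
 * dm_force_atomic_commit() above is effectively a minimal in-kernel
 * atomic commit: one connector, its CRTC and its primary plane are
 * pulled into a fresh drm_atomic_state and committed. Forcing
 * crtc_state->mode_changed = true is what turns the otherwise empty
 * commit into a full modeset restore.
 */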
9191
9192 /*
9193  * This function handles all cases where a set-mode call does not come after
9194  * a hotplug event. This includes a display being unplugged and then plugged
9195  * back into the same port, and running without usermode desktop manager support.
9196  */
9197 void dm_restore_drm_connector_state(struct drm_device *dev,
9198                                     struct drm_connector *connector)
9199 {
9200         struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
9201         struct amdgpu_crtc *disconnected_acrtc;
9202         struct dm_crtc_state *acrtc_state;
9203
9204         if (!aconnector->dc_sink || !connector->state || !connector->encoder)
9205                 return;
9206
9207         disconnected_acrtc = to_amdgpu_crtc(connector->encoder->crtc);
9208         if (!disconnected_acrtc)
9209                 return;
9210
9211         acrtc_state = to_dm_crtc_state(disconnected_acrtc->base.state);
9212         if (!acrtc_state->stream)
9213                 return;
9214
9215         /*
9216          * If the previous sink is not released and different from the current,
9217          * we deduce we are in a state where we cannot rely on a usermode call
9218          * to turn on the display, so we do it here.
9219          */
9220         if (acrtc_state->stream->sink != aconnector->dc_sink)
9221                 dm_force_atomic_commit(&aconnector->base);
9222 }
9223
9224 /*
9225  * Grabs all modesetting locks to serialize against any blocking commits,
9226  * and waits for completion of all non-blocking commits.
9227  */
9228 static int do_aquire_global_lock(struct drm_device *dev,
9229                                  struct drm_atomic_state *state)
9230 {
9231         struct drm_crtc *crtc;
9232         struct drm_crtc_commit *commit;
9233         long ret;
9234
9235         /*
9236          * Adding all modeset locks to acquire_ctx ensures that when the
9237          * framework releases it, the extra locks we are taking here will
9238          * also get released.
9239          */
9240         ret = drm_modeset_lock_all_ctx(dev, state->acquire_ctx);
9241         if (ret)
9242                 return ret;
9243
9244         list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
9245                 spin_lock(&crtc->commit_lock);
9246                 commit = list_first_entry_or_null(&crtc->commit_list,
9247                                 struct drm_crtc_commit, commit_entry);
9248                 if (commit)
9249                         drm_crtc_commit_get(commit);
9250                 spin_unlock(&crtc->commit_lock);
9251
9252                 if (!commit)
9253                         continue;
9254
9255                 /*
9256                  * Make sure all pending HW programming has completed and all
9257                  * page flips are done (ret == 0 means timeout, ret < 0 interrupted)
9258                  */
9259                 ret = wait_for_completion_interruptible_timeout(&commit->hw_done, 10*HZ);
9260
9261                 if (ret > 0)
9262                         ret = wait_for_completion_interruptible_timeout(
9263                                         &commit->flip_done, 10*HZ);
9264
9265                 if (ret == 0)
9266                         DRM_ERROR("[CRTC:%d:%s] hw_done or flip_done timed out\n",
9267                                   crtc->base.id, crtc->name);
9268
9269                 drm_crtc_commit_put(commit);
9270         }
9271
9272         return ret < 0 ? ret : 0;
9273 }
9274
9275 static void get_freesync_config_for_crtc(
9276         struct dm_crtc_state *new_crtc_state,
9277         struct dm_connector_state *new_con_state)
9278 {
9279         struct mod_freesync_config config = {0};
9280         struct amdgpu_dm_connector *aconnector =
9281                         to_amdgpu_dm_connector(new_con_state->base.connector);
9282         struct drm_display_mode *mode = &new_crtc_state->base.mode;
9283         int vrefresh = drm_mode_vrefresh(mode);
9284         bool fs_vid_mode = false;
9285
9286         new_crtc_state->vrr_supported = new_con_state->freesync_capable &&
9287                                         vrefresh >= aconnector->min_vfreq &&
9288                                         vrefresh <= aconnector->max_vfreq;
9289
9290         if (new_crtc_state->vrr_supported) {
9291                 new_crtc_state->stream->ignore_msa_timing_param = true;
9292                 fs_vid_mode = new_crtc_state->freesync_config.state == VRR_STATE_ACTIVE_FIXED;
9293
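                /*
                 * min_vfreq/max_vfreq are in Hz while the freesync module
                 * expects micro-Hz (uHz), hence the multiplication by
                 * 1000000 below; e.g. a 48-144 Hz range becomes
                 * 48000000-144000000 uHz.
                 */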
9294                 config.min_refresh_in_uhz = aconnector->min_vfreq * 1000000;
9295                 config.max_refresh_in_uhz = aconnector->max_vfreq * 1000000;
9296                 config.vsif_supported = true;
9297                 config.btr = true;
9298
9299                 if (fs_vid_mode) {
9300                         config.state = VRR_STATE_ACTIVE_FIXED;
9301                         config.fixed_refresh_in_uhz = new_crtc_state->freesync_config.fixed_refresh_in_uhz;
9302                         goto out;
9303                 } else if (new_crtc_state->base.vrr_enabled) {
9304                         config.state = VRR_STATE_ACTIVE_VARIABLE;
9305                 } else {
9306                         config.state = VRR_STATE_INACTIVE;
9307                 }
9308         }
9309 out:
9310         new_crtc_state->freesync_config = config;
9311 }
9312
9313 static void reset_freesync_config_for_crtc(
9314         struct dm_crtc_state *new_crtc_state)
9315 {
9316         new_crtc_state->vrr_supported = false;
9317
9318         memset(&new_crtc_state->vrr_infopacket, 0,
9319                sizeof(new_crtc_state->vrr_infopacket));
9320 }
9321
9322 static bool
9323 is_timing_unchanged_for_freesync(struct drm_crtc_state *old_crtc_state,
9324                                  struct drm_crtc_state *new_crtc_state)
9325 {
9326         struct drm_display_mode old_mode, new_mode;
9327
9328         if (!old_crtc_state || !new_crtc_state)
9329                 return false;
9330
9331         old_mode = old_crtc_state->mode;
9332         new_mode = new_crtc_state->mode;
9333
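        /*
         * Every timing parameter must match except for the three compared
         * with '!=': vtotal, vsync_start and vsync_end must all differ
         * while the vsync pulse width (vsync_end - vsync_start) stays the
         * same. In other words, only the vertical front porch may move,
         * which is how freesync video modes derive different refresh rates
         * from a common base timing.
         */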
9334         if (old_mode.clock       == new_mode.clock &&
9335             old_mode.hdisplay    == new_mode.hdisplay &&
9336             old_mode.vdisplay    == new_mode.vdisplay &&
9337             old_mode.htotal      == new_mode.htotal &&
9338             old_mode.vtotal      != new_mode.vtotal &&
9339             old_mode.hsync_start == new_mode.hsync_start &&
9340             old_mode.vsync_start != new_mode.vsync_start &&
9341             old_mode.hsync_end   == new_mode.hsync_end &&
9342             old_mode.vsync_end   != new_mode.vsync_end &&
9343             old_mode.hskew       == new_mode.hskew &&
9344             old_mode.vscan       == new_mode.vscan &&
9345             (old_mode.vsync_end - old_mode.vsync_start) ==
9346             (new_mode.vsync_end - new_mode.vsync_start))
9347                 return true;
9348
9349         return false;
9350 }
9351
9352 static void set_freesync_fixed_config(struct dm_crtc_state *dm_new_crtc_state)
{
9353         uint64_t num, den, res;
9354         struct drm_crtc_state *new_crtc_state = &dm_new_crtc_state->base;
9355
9356         dm_new_crtc_state->freesync_config.state = VRR_STATE_ACTIVE_FIXED;
9357
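        /*
         * mode.clock is the pixel clock in kHz, so num is the pixel clock
         * scaled to uHz and den is the number of pixels per frame; the
         * quotient is the fixed refresh rate in uHz. For example, a
         * 148500 kHz clock with htotal 2200 and vtotal 1125 yields
         * 148500000000000 / 2475000 = 60000000 uHz, i.e. 60 Hz.
         */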
9358         num = (unsigned long long)new_crtc_state->mode.clock * 1000 * 1000000;
9359         den = (unsigned long long)new_crtc_state->mode.htotal *
9360               (unsigned long long)new_crtc_state->mode.vtotal;
9361
9362         res = div_u64(num, den);
9363         dm_new_crtc_state->freesync_config.fixed_refresh_in_uhz = res;
9364 }
9365
9366 static int dm_update_crtc_state(struct amdgpu_display_manager *dm,
9367                                 struct drm_atomic_state *state,
9368                                 struct drm_crtc *crtc,
9369                                 struct drm_crtc_state *old_crtc_state,
9370                                 struct drm_crtc_state *new_crtc_state,
9371                                 bool enable,
9372                                 bool *lock_and_validation_needed)
9373 {
9374         struct dm_atomic_state *dm_state = NULL;
9375         struct dm_crtc_state *dm_old_crtc_state, *dm_new_crtc_state;
9376         struct dc_stream_state *new_stream;
9377         int ret = 0;
9378
9379         /*
9380          * TODO Move this code into dm_crtc_atomic_check once we get rid of dc_validation_set
9381          * update changed items
9382          */
9383         struct amdgpu_crtc *acrtc = NULL;
9384         struct amdgpu_dm_connector *aconnector = NULL;
9385         struct drm_connector_state *drm_new_conn_state = NULL, *drm_old_conn_state = NULL;
9386         struct dm_connector_state *dm_new_conn_state = NULL, *dm_old_conn_state = NULL;
9387
9388         new_stream = NULL;
9389
9390         dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
9391         dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
9392         acrtc = to_amdgpu_crtc(crtc);
9393         aconnector = amdgpu_dm_find_first_crtc_matching_connector(state, crtc);
9394
9395         /* TODO This hack should go away */
9396         if (aconnector && enable) {
9397                 /* Make sure fake sink is created in plug-in scenario */
9398                 drm_new_conn_state = drm_atomic_get_new_connector_state(state,
9399                                                             &aconnector->base);
9400                 drm_old_conn_state = drm_atomic_get_old_connector_state(state,
9401                                                             &aconnector->base);
9402
9403                 if (IS_ERR(drm_new_conn_state)) {
9404                         ret = PTR_ERR_OR_ZERO(drm_new_conn_state);
9405                         goto fail;
9406                 }
9407
9408                 dm_new_conn_state = to_dm_connector_state(drm_new_conn_state);
9409                 dm_old_conn_state = to_dm_connector_state(drm_old_conn_state);
9410
9411                 if (!drm_atomic_crtc_needs_modeset(new_crtc_state))
9412                         goto skip_modeset;
9413
9414                 new_stream = create_validate_stream_for_sink(aconnector,
9415                                                              &new_crtc_state->mode,
9416                                                              dm_new_conn_state,
9417                                                              dm_old_crtc_state->stream);
9418
9419                 /*
9420                  * We can have no stream on ACTION_SET if a display
9421                  * was disconnected during S3. In this case it is not an
9422                  * error: the OS will be updated after detection and
9423                  * will do the right thing on the next atomic commit.
9424                  */
9425
9426                 if (!new_stream) {
9427                         DRM_DEBUG_DRIVER("%s: Failed to create new stream for crtc %d\n",
9428                                         __func__, acrtc->base.base.id);
9429                         ret = -ENOMEM;
9430                         goto fail;
9431                 }
9432
9433                 /*
9434                  * TODO: Check VSDB bits to decide whether this should
9435                  * be enabled or not.
9436                  */
9437                 new_stream->triggered_crtc_reset.enabled =
9438                         dm->force_timing_sync;
9439
9440                 dm_new_crtc_state->abm_level = dm_new_conn_state->abm_level;
9441
9442                 ret = fill_hdr_info_packet(drm_new_conn_state,
9443                                            &new_stream->hdr_static_metadata);
9444                 if (ret)
9445                         goto fail;
9446
9447                 /*
9448                  * If we already removed the old stream from the context
9449                  * (and set the new stream to NULL) then we can't reuse
9450                  * the old stream even if the stream and scaling are unchanged.
9451                  * We'll hit the BUG_ON and black screen.
9452                  *
9453                  * TODO: Refactor this function to allow this check to work
9454                  * in all conditions.
9455                  */
9456                 if (amdgpu_freesync_vid_mode &&
9457                     dm_new_crtc_state->stream &&
9458                     is_timing_unchanged_for_freesync(new_crtc_state, old_crtc_state))
9459                         goto skip_modeset;
9460
9461                 if (dm_new_crtc_state->stream &&
9462                     dc_is_stream_unchanged(new_stream, dm_old_crtc_state->stream) &&
9463                     dc_is_stream_scaling_unchanged(new_stream, dm_old_crtc_state->stream)) {
9464                         new_crtc_state->mode_changed = false;
9465                         DRM_DEBUG_DRIVER("Mode change not required, setting mode_changed to %d",
9466                                          new_crtc_state->mode_changed);
9467                 }
9468         }
9469
9470         /* mode_changed flag may get updated above, need to check again */
9471         if (!drm_atomic_crtc_needs_modeset(new_crtc_state))
9472                 goto skip_modeset;
9473
9474         DRM_DEBUG_ATOMIC(
9475                 "amdgpu_crtc id:%d crtc_state_flags: enable:%d, active:%d, "
9476                 "planes_changed:%d, mode_changed:%d, active_changed:%d, "
9477                 "connectors_changed:%d\n",
9478                 acrtc->crtc_id,
9479                 new_crtc_state->enable,
9480                 new_crtc_state->active,
9481                 new_crtc_state->planes_changed,
9482                 new_crtc_state->mode_changed,
9483                 new_crtc_state->active_changed,
9484                 new_crtc_state->connectors_changed);
9485
9486         /* Remove stream for any changed/disabled CRTC */
9487         if (!enable) {
9488
9489                 if (!dm_old_crtc_state->stream)
9490                         goto skip_modeset;
9491
9492                 if (amdgpu_freesync_vid_mode && dm_new_crtc_state->stream &&
9493                     is_timing_unchanged_for_freesync(new_crtc_state,
9494                                                      old_crtc_state)) {
9495                         new_crtc_state->mode_changed = false;
9496                         DRM_DEBUG_DRIVER(
9497                                 "Mode change not required for front porch change, "
9498                                 "setting mode_changed to %d",
9499                                 new_crtc_state->mode_changed);
9500
9501                         set_freesync_fixed_config(dm_new_crtc_state);
9502
9503                         goto skip_modeset;
9504                 } else if (amdgpu_freesync_vid_mode && aconnector &&
9505                            is_freesync_video_mode(&new_crtc_state->mode,
9506                                                   aconnector)) {
9507                         set_freesync_fixed_config(dm_new_crtc_state);
9508                 }
9509
9510                 ret = dm_atomic_get_state(state, &dm_state);
9511                 if (ret)
9512                         goto fail;
9513
9514                 DRM_DEBUG_DRIVER("Disabling DRM crtc: %d\n",
9515                                 crtc->base.id);
9516
9517                 /* i.e. reset mode */
9518                 if (dc_remove_stream_from_ctx(
9519                                 dm->dc,
9520                                 dm_state->context,
9521                                 dm_old_crtc_state->stream) != DC_OK) {
9522                         ret = -EINVAL;
9523                         goto fail;
9524                 }
9525
9526                 dc_stream_release(dm_old_crtc_state->stream);
9527                 dm_new_crtc_state->stream = NULL;
9528
9529                 reset_freesync_config_for_crtc(dm_new_crtc_state);
9530
9531                 *lock_and_validation_needed = true;
9532
9533         } else { /* Add stream for any updated/enabled CRTC */
9534                 /*
9535                  * Quick fix to prevent a NULL pointer dereference on new_stream when
9536                  * added MST connectors are not found in the existing crtc_state in chained mode.
9537                  * TODO: need to dig out the root cause of that.
9538                  */
9539                 if (!aconnector || (!aconnector->dc_sink && aconnector->mst_port))
9540                         goto skip_modeset;
9541
9542                 if (modereset_required(new_crtc_state))
9543                         goto skip_modeset;
9544
9545                 if (modeset_required(new_crtc_state, new_stream,
9546                                      dm_old_crtc_state->stream)) {
9547
9548                         WARN_ON(dm_new_crtc_state->stream);
9549
9550                         ret = dm_atomic_get_state(state, &dm_state);
9551                         if (ret)
9552                                 goto fail;
9553
9554                         dm_new_crtc_state->stream = new_stream;
9555
9556                         dc_stream_retain(new_stream);
9557
9558                         DRM_DEBUG_ATOMIC("Enabling DRM crtc: %d\n",
9559                                          crtc->base.id);
9560
9561                         if (dc_add_stream_to_ctx(
9562                                         dm->dc,
9563                                         dm_state->context,
9564                                         dm_new_crtc_state->stream) != DC_OK) {
9565                                 ret = -EINVAL;
9566                                 goto fail;
9567                         }
9568
9569                         *lock_and_validation_needed = true;
9570                 }
9571         }
9572
9573 skip_modeset:
9574         /* Release extra reference */
9575         if (new_stream)
9576                 dc_stream_release(new_stream);
9577
9578         /*
9579          * We want to do dc stream updates that do not require a
9580          * full modeset below.
9581          */
9582         if (!(enable && aconnector && new_crtc_state->active))
9583                 return 0;
9584         /*
9585          * Given above conditions, the dc state cannot be NULL because:
9586          * 1. We're in the process of enabling CRTCs (the stream has just
9587          *    been added to the dc context, or is already in the context),
9588          * 2. it has a valid connector attached, and
9589          * 3. it is currently active and enabled.
9590          * => The dc stream state currently exists.
9591          */
9592         BUG_ON(dm_new_crtc_state->stream == NULL);
9593
9594         /* Scaling or underscan settings */
9595         if (is_scaling_state_different(dm_old_conn_state, dm_new_conn_state))
9596                 update_stream_scaling_settings(
9597                         &new_crtc_state->mode, dm_new_conn_state, dm_new_crtc_state->stream);
9598
9599         /* ABM settings */
9600         dm_new_crtc_state->abm_level = dm_new_conn_state->abm_level;
9601
9602         /*
9603          * Color management settings. We also update color properties
9604          * when a modeset is needed, to ensure it gets reprogrammed.
9605          */
9606         if (dm_new_crtc_state->base.color_mgmt_changed ||
9607             drm_atomic_crtc_needs_modeset(new_crtc_state)) {
9608                 ret = amdgpu_dm_update_crtc_color_mgmt(dm_new_crtc_state);
9609                 if (ret)
9610                         goto fail;
9611         }
9612
9613         /* Update Freesync settings. */
9614         get_freesync_config_for_crtc(dm_new_crtc_state,
9615                                      dm_new_conn_state);
9616
9617         return ret;
9618
9619 fail:
9620         if (new_stream)
9621                 dc_stream_release(new_stream);
9622         return ret;
9623 }
9624
9625 static bool should_reset_plane(struct drm_atomic_state *state,
9626                                struct drm_plane *plane,
9627                                struct drm_plane_state *old_plane_state,
9628                                struct drm_plane_state *new_plane_state)
9629 {
9630         struct drm_plane *other;
9631         struct drm_plane_state *old_other_state, *new_other_state;
9632         struct drm_crtc_state *new_crtc_state;
9633         int i;
9634
9635         /*
9636          * TODO: Remove this hack once the checks below are sufficient
9637          * to determine when we need to reset all the planes on
9638          * the stream.
9639          */
9640         if (state->allow_modeset)
9641                 return true;
9642
9643         /* Exit early if we know that we're adding or removing the plane. */
9644         if (old_plane_state->crtc != new_plane_state->crtc)
9645                 return true;
9646
9647         /* old crtc == new_crtc == NULL, plane not in context. */
9648         if (!new_plane_state->crtc)
9649                 return false;
9650
9651         new_crtc_state =
9652                 drm_atomic_get_new_crtc_state(state, new_plane_state->crtc);
9653
9654         if (!new_crtc_state)
9655                 return true;
9656
9657         /* CRTC Degamma changes currently require us to recreate planes. */
9658         if (new_crtc_state->color_mgmt_changed)
9659                 return true;
9660
9661         if (drm_atomic_crtc_needs_modeset(new_crtc_state))
9662                 return true;
9663
9664         /*
9665          * If there are any new primary or overlay planes being added or
9666          * removed then the z-order can potentially change. To ensure
9667          * correct z-order and pipe acquisition the current DC architecture
9668          * requires us to remove and recreate all existing planes.
9669          *
9670          * TODO: Come up with a more elegant solution for this.
9671          */
9672         for_each_oldnew_plane_in_state(state, other, old_other_state, new_other_state, i) {
9673                 struct amdgpu_framebuffer *old_afb, *new_afb;
9674                 if (other->type == DRM_PLANE_TYPE_CURSOR)
9675                         continue;
9676
9677                 if (old_other_state->crtc != new_plane_state->crtc &&
9678                     new_other_state->crtc != new_plane_state->crtc)
9679                         continue;
9680
9681                 if (old_other_state->crtc != new_other_state->crtc)
9682                         return true;
9683
9684                 /* Src/dst size and scaling updates. */
9685                 if (old_other_state->src_w != new_other_state->src_w ||
9686                     old_other_state->src_h != new_other_state->src_h ||
9687                     old_other_state->crtc_w != new_other_state->crtc_w ||
9688                     old_other_state->crtc_h != new_other_state->crtc_h)
9689                         return true;
9690
9691                 /* Rotation / mirroring updates. */
9692                 if (old_other_state->rotation != new_other_state->rotation)
9693                         return true;
9694
9695                 /* Blending updates. */
9696                 if (old_other_state->pixel_blend_mode !=
9697                     new_other_state->pixel_blend_mode)
9698                         return true;
9699
9700                 /* Alpha updates. */
9701                 if (old_other_state->alpha != new_other_state->alpha)
9702                         return true;
9703
9704                 /* Colorspace changes. */
9705                 if (old_other_state->color_range != new_other_state->color_range ||
9706                     old_other_state->color_encoding != new_other_state->color_encoding)
9707                         return true;
9708
9709                 /* Framebuffer checks fall at the end. */
9710                 if (!old_other_state->fb || !new_other_state->fb)
9711                         continue;
9712
9713                 /* Pixel format changes can require bandwidth updates. */
9714                 if (old_other_state->fb->format != new_other_state->fb->format)
9715                         return true;
9716
9717                 old_afb = (struct amdgpu_framebuffer *)old_other_state->fb;
9718                 new_afb = (struct amdgpu_framebuffer *)new_other_state->fb;
9719
9720                 /* Tiling and DCC changes also require bandwidth updates. */
9721                 if (old_afb->tiling_flags != new_afb->tiling_flags ||
9722                     old_afb->base.modifier != new_afb->base.modifier)
9723                         return true;
9724         }
9725
9726         return false;
9727 }
9728
9729 static int dm_check_cursor_fb(struct amdgpu_crtc *new_acrtc,
9730                               struct drm_plane_state *new_plane_state,
9731                               struct drm_framebuffer *fb)
9732 {
9733         struct amdgpu_device *adev = drm_to_adev(new_acrtc->base.dev);
9734         struct amdgpu_framebuffer *afb = to_amdgpu_framebuffer(fb);
9735         unsigned int pitch;
9736         bool linear;
9737
9738         if (fb->width > new_acrtc->max_cursor_width ||
9739             fb->height > new_acrtc->max_cursor_height) {
9740                 DRM_DEBUG_ATOMIC("Bad cursor FB size %dx%d\n",
9741                                  new_plane_state->fb->width,
9742                                  new_plane_state->fb->height);
9743                 return -EINVAL;
9744         }
9745         if (new_plane_state->src_w != fb->width << 16 ||
9746             new_plane_state->src_h != fb->height << 16) {
9747                 DRM_DEBUG_ATOMIC("Cropping not supported for cursor plane\n");
9748                 return -EINVAL;
9749         }
9750
9751         /* Pitch in pixels */
9752         pitch = fb->pitches[0] / fb->format->cpp[0];
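        /*
         * For example, a 64x64 ARGB8888 cursor has pitches[0] == 256 bytes
         * and cpp[0] == 4 bytes per pixel, giving a pitch of 64 pixels.
         */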
9753
9754         if (fb->width != pitch) {
9755                 DRM_DEBUG_ATOMIC("Cursor FB width %d doesn't match pitch %d",
9756                                  fb->width, pitch);
9757                 return -EINVAL;
9758         }
9759
9760         switch (pitch) {
9761         case 64:
9762         case 128:
9763         case 256:
9764                 /* FB pitch is supported by cursor plane */
9765                 break;
9766         default:
9767                 DRM_DEBUG_ATOMIC("Bad cursor FB pitch %d px\n", pitch);
9768                 return -EINVAL;
9769         }
9770
9771         /* Core DRM takes care of checking FB modifiers, so we only need to
9772          * check tiling flags when the FB doesn't have a modifier. */
9773         if (!(fb->flags & DRM_MODE_FB_MODIFIERS)) {
9774                 if (adev->family < AMDGPU_FAMILY_AI) {
9775                         linear = AMDGPU_TILING_GET(afb->tiling_flags, ARRAY_MODE) != DC_ARRAY_2D_TILED_THIN1 &&
9776                                  AMDGPU_TILING_GET(afb->tiling_flags, ARRAY_MODE) != DC_ARRAY_1D_TILED_THIN1 &&
9777                                  AMDGPU_TILING_GET(afb->tiling_flags, MICRO_TILE_MODE) == 0;
9778                 } else {
9779                         linear = AMDGPU_TILING_GET(afb->tiling_flags, SWIZZLE_MODE) == 0;
9780                 }
9781                 if (!linear) {
9782                         DRM_DEBUG_ATOMIC("Cursor FB not linear");
9783                         return -EINVAL;
9784                 }
9785         }
9786
9787         return 0;
9788 }
9789
9790 static int dm_update_plane_state(struct dc *dc,
9791                                  struct drm_atomic_state *state,
9792                                  struct drm_plane *plane,
9793                                  struct drm_plane_state *old_plane_state,
9794                                  struct drm_plane_state *new_plane_state,
9795                                  bool enable,
9796                                  bool *lock_and_validation_needed)
9797 {
9798
9799         struct dm_atomic_state *dm_state = NULL;
9800         struct drm_crtc *new_plane_crtc, *old_plane_crtc;
9801         struct drm_crtc_state *old_crtc_state, *new_crtc_state;
9802         struct dm_crtc_state *dm_new_crtc_state, *dm_old_crtc_state;
9803         struct dm_plane_state *dm_new_plane_state, *dm_old_plane_state;
9804         struct amdgpu_crtc *new_acrtc;
9805         bool needs_reset;
9806         int ret = 0;
9807
9809         new_plane_crtc = new_plane_state->crtc;
9810         old_plane_crtc = old_plane_state->crtc;
9811         dm_new_plane_state = to_dm_plane_state(new_plane_state);
9812         dm_old_plane_state = to_dm_plane_state(old_plane_state);
9813
9814         if (plane->type == DRM_PLANE_TYPE_CURSOR) {
9815                 if (!enable || !new_plane_crtc ||
9816                         drm_atomic_plane_disabling(plane->state, new_plane_state))
9817                         return 0;
9818
9819                 new_acrtc = to_amdgpu_crtc(new_plane_crtc);
9820
9821                 if (new_plane_state->src_x != 0 || new_plane_state->src_y != 0) {
9822                         DRM_DEBUG_ATOMIC("Cropping not supported for cursor plane\n");
9823                         return -EINVAL;
9824                 }
9825
9826                 if (new_plane_state->fb) {
9827                         ret = dm_check_cursor_fb(new_acrtc, new_plane_state,
9828                                                  new_plane_state->fb);
9829                         if (ret)
9830                                 return ret;
9831                 }
9832
9833                 return 0;
9834         }
9835
9836         needs_reset = should_reset_plane(state, plane, old_plane_state,
9837                                          new_plane_state);
9838
9839         /* Remove any changed/removed planes */
9840         if (!enable) {
9841                 if (!needs_reset)
9842                         return 0;
9843
9844                 if (!old_plane_crtc)
9845                         return 0;
9846
9847                 old_crtc_state = drm_atomic_get_old_crtc_state(
9848                                 state, old_plane_crtc);
9849                 dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
9850
9851                 if (!dm_old_crtc_state->stream)
9852                         return 0;
9853
9854                 DRM_DEBUG_ATOMIC("Disabling DRM plane: %d on DRM crtc %d\n",
9855                                 plane->base.id, old_plane_crtc->base.id);
9856
9857                 ret = dm_atomic_get_state(state, &dm_state);
9858                 if (ret)
9859                         return ret;
9860
9861                 if (!dc_remove_plane_from_context(
9862                                 dc,
9863                                 dm_old_crtc_state->stream,
9864                                 dm_old_plane_state->dc_state,
9865                                 dm_state->context)) {
9866
9867                         return -EINVAL;
9868                 }
9869
9871                 dc_plane_state_release(dm_old_plane_state->dc_state);
9872                 dm_new_plane_state->dc_state = NULL;
9873
9874                 *lock_and_validation_needed = true;
9875
9876         } else { /* Add new planes */
9877                 struct dc_plane_state *dc_new_plane_state;
9878
9879                 if (drm_atomic_plane_disabling(plane->state, new_plane_state))
9880                         return 0;
9881
9882                 if (!new_plane_crtc)
9883                         return 0;
9884
9885                 new_crtc_state = drm_atomic_get_new_crtc_state(state, new_plane_crtc);
9886                 dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
9887
9888                 if (!dm_new_crtc_state->stream)
9889                         return 0;
9890
9891                 if (!needs_reset)
9892                         return 0;
9893
9894                 ret = dm_plane_helper_check_state(new_plane_state, new_crtc_state);
9895                 if (ret)
9896                         return ret;
9897
9898                 WARN_ON(dm_new_plane_state->dc_state);
9899
9900                 dc_new_plane_state = dc_create_plane_state(dc);
9901                 if (!dc_new_plane_state)
9902                         return -ENOMEM;
9903
9904                 DRM_DEBUG_ATOMIC("Enabling DRM plane: %d on DRM crtc %d\n",
9905                                  plane->base.id, new_plane_crtc->base.id);
9906
9907                 ret = fill_dc_plane_attributes(
9908                         drm_to_adev(new_plane_crtc->dev),
9909                         dc_new_plane_state,
9910                         new_plane_state,
9911                         new_crtc_state);
9912                 if (ret) {
9913                         dc_plane_state_release(dc_new_plane_state);
9914                         return ret;
9915                 }
9916
9917                 ret = dm_atomic_get_state(state, &dm_state);
9918                 if (ret) {
9919                         dc_plane_state_release(dc_new_plane_state);
9920                         return ret;
9921                 }
9922
9923                 /*
9924                  * Any atomic check errors that occur after this will
9925                  * not need a release. The plane state will be attached
9926                  * to the stream, and therefore part of the atomic
9927                  * state. It'll be released when the atomic state is
9928                  * cleaned.
9929                  */
9930                 if (!dc_add_plane_to_context(
9931                                 dc,
9932                                 dm_new_crtc_state->stream,
9933                                 dc_new_plane_state,
9934                                 dm_state->context)) {
9935
9936                         dc_plane_state_release(dc_new_plane_state);
9937                         return -EINVAL;
9938                 }
9939
9940                 dm_new_plane_state->dc_state = dc_new_plane_state;
9941
9942                 /* Tell DC to do a full surface update every time there
9943                  * is a plane change. Inefficient, but works for now.
9944                  */
9945                 dm_new_plane_state->dc_state->update_flags.bits.full_update = 1;
9946
9947                 *lock_and_validation_needed = true;
9948         }
9949
9951         return ret;
9952 }
9953
9954 static int dm_check_crtc_cursor(struct drm_atomic_state *state,
9955                                 struct drm_crtc *crtc,
9956                                 struct drm_crtc_state *new_crtc_state)
9957 {
9958         struct drm_plane_state *new_cursor_state, *new_primary_state;
9959         int cursor_scale_w, cursor_scale_h, primary_scale_w, primary_scale_h;
9960
9961         /* On DCE and DCN there is no dedicated hardware cursor plane. We get a
9962          * cursor per pipe but it's going to inherit the scaling and
9963          * positioning from the underlying pipe. Check the cursor plane's
9964          * blending properties match the primary plane's. */
9965
9966         new_cursor_state = drm_atomic_get_new_plane_state(state, crtc->cursor);
9967         new_primary_state = drm_atomic_get_new_plane_state(state, crtc->primary);
9968         if (!new_cursor_state || !new_primary_state ||
9969             !new_cursor_state->fb || !new_primary_state->fb) {
9970                 return 0;
9971         }
9972
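        /*
         * src_w/src_h are 16.16 fixed point, so >> 16 yields the source
         * size in whole pixels, and the scales are expressed in units of
         * 1/1000 so they can be compared as integers; e.g. a 64 px wide
         * cursor source shown 128 px wide gives cursor_scale_w == 2000.
         */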
9973         cursor_scale_w = new_cursor_state->crtc_w * 1000 /
9974                          (new_cursor_state->src_w >> 16);
9975         cursor_scale_h = new_cursor_state->crtc_h * 1000 /
9976                          (new_cursor_state->src_h >> 16);
9977
9978         primary_scale_w = new_primary_state->crtc_w * 1000 /
9979                          (new_primary_state->src_w >> 16);
9980         primary_scale_h = new_primary_state->crtc_h * 1000 /
9981                          (new_primary_state->src_h >> 16);
9982
9983         if (cursor_scale_w != primary_scale_w ||
9984             cursor_scale_h != primary_scale_h) {
9985                 DRM_DEBUG_ATOMIC("Cursor plane scaling doesn't match primary plane\n");
9986                 return -EINVAL;
9987         }
9988
9989         return 0;
9990 }
9991
9992 #if defined(CONFIG_DRM_AMD_DC_DCN)
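/*
 * DSC configuration is computed per MST link, so changing one stream on a
 * link can alter the compression (and therefore the mode programming) of
 * every other stream sharing that link; all CRTCs driving such streams
 * must be pulled into the atomic state.
 */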
9993 static int add_affected_mst_dsc_crtcs(struct drm_atomic_state *state, struct drm_crtc *crtc)
9994 {
9995         struct drm_connector *connector;
9996         struct drm_connector_state *conn_state;
9997         struct amdgpu_dm_connector *aconnector = NULL;
9998         int i;
9999         for_each_new_connector_in_state(state, connector, conn_state, i) {
10000                 if (conn_state->crtc != crtc)
10001                         continue;
10002
10003                 aconnector = to_amdgpu_dm_connector(connector);
10004                 if (!aconnector->port || !aconnector->mst_port)
10005                         aconnector = NULL;
10006                 else
10007                         break;
10008         }
10009
10010         if (!aconnector)
10011                 return 0;
10012
10013         return drm_dp_mst_add_affected_dsc_crtcs(state, &aconnector->mst_port->mst_mgr);
10014 }
10015 #endif
10016
10017 static int validate_overlay(struct drm_atomic_state *state)
10018 {
10019         int i;
10020         struct drm_plane *plane;
10021         struct drm_plane_state *old_plane_state, *new_plane_state;
10022         struct drm_plane_state *primary_state, *overlay_state = NULL;
10023
10024         /* Check if primary plane is contained inside overlay */
10025         for_each_oldnew_plane_in_state_reverse(state, plane, old_plane_state, new_plane_state, i) {
10026                 if (plane->type == DRM_PLANE_TYPE_OVERLAY) {
10027                         if (drm_atomic_plane_disabling(plane->state, new_plane_state))
10028                                 return 0;
10029
10030                         overlay_state = new_plane_state;
10031                         continue;
10032                 }
10033         }
10034
10035         /* check if we're making changes to the overlay plane */
10036         if (!overlay_state)
10037                 return 0;
10038
10039         /* check if overlay plane is enabled */
10040         if (!overlay_state->crtc)
10041                 return 0;
10042
10043         /* find the primary plane for the CRTC that the overlay is enabled on */
10044         primary_state = drm_atomic_get_plane_state(state, overlay_state->crtc->primary);
10045         if (IS_ERR(primary_state))
10046                 return PTR_ERR(primary_state);
10047
10048         /* check if primary plane is enabled */
10049         if (!primary_state->crtc)
10050                 return 0;
10051
10052         /* Perform the bounds check to ensure the overlay plane covers the primary */
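        /*
         * For example, a 1920x1080 primary at (0, 0) passes only if the
         * overlay starts at or above/left of (0, 0) and extends to at
         * least (1920, 1080).
         */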
10053         if (primary_state->crtc_x < overlay_state->crtc_x ||
10054             primary_state->crtc_y < overlay_state->crtc_y ||
10055             primary_state->crtc_x + primary_state->crtc_w > overlay_state->crtc_x + overlay_state->crtc_w ||
10056             primary_state->crtc_y + primary_state->crtc_h > overlay_state->crtc_y + overlay_state->crtc_h) {
10057                 DRM_DEBUG_ATOMIC("Overlay plane is enabled with hardware cursor but does not fully cover primary plane\n");
10058                 return -EINVAL;
10059         }
10060
10061         return 0;
10062 }
10063
10064 /**
10065  * amdgpu_dm_atomic_check() - Atomic check implementation for AMDgpu DM.
10066  * @dev: The DRM device
10067  * @state: The atomic state to commit
10068  *
10069  * Validate that the given atomic state is programmable by DC into hardware.
10070  * This involves constructing a &struct dc_state reflecting the new hardware
10071  * state we wish to commit, then querying DC to see if it is programmable. It's
10072  * important not to modify the existing DC state. Otherwise, atomic_check
10073  * may unexpectedly commit hardware changes.
10074  *
10075  * When validating the DC state, it's important that the right locks are
10076  * acquired. For full updates case which removes/adds/updates streams on one
10077  * CRTC while flipping on another CRTC, acquiring global lock will guarantee
10078  * that any such full update commit will wait for completion of any outstanding
10079  * flip using DRMs synchronization events.
10080  *
10081  * Note that DM adds the affected connectors for all CRTCs in state, when that
10082  * might not seem necessary. This is because DC stream creation requires the
10083  * DC sink, which is tied to the DRM connector state. Cleaning this up should
10084  * be possible but non-trivial - a possible TODO item.
10085  *
10086  * Return: 0 on success, or a negative error code if validation failed.
10087  */
10088 static int amdgpu_dm_atomic_check(struct drm_device *dev,
10089                                   struct drm_atomic_state *state)
10090 {
10091         struct amdgpu_device *adev = drm_to_adev(dev);
10092         struct dm_atomic_state *dm_state = NULL;
10093         struct dc *dc = adev->dm.dc;
10094         struct drm_connector *connector;
10095         struct drm_connector_state *old_con_state, *new_con_state;
10096         struct drm_crtc *crtc;
10097         struct drm_crtc_state *old_crtc_state, *new_crtc_state;
10098         struct drm_plane *plane;
10099         struct drm_plane_state *old_plane_state, *new_plane_state;
10100         enum dc_status status;
10101         int ret, i;
10102         bool lock_and_validation_needed = false;
10103         struct dm_crtc_state *dm_old_crtc_state;
10104
10105         trace_amdgpu_dm_atomic_check_begin(state);
10106
10107         ret = drm_atomic_helper_check_modeset(dev, state);
10108         if (ret)
10109                 goto fail;
10110
10111         /* Check connector changes */
10112         for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) {
10113                 struct dm_connector_state *dm_old_con_state = to_dm_connector_state(old_con_state);
10114                 struct dm_connector_state *dm_new_con_state = to_dm_connector_state(new_con_state);
10115
10116                 /* Skip connectors that are disabled or part of modeset already. */
10117                 if (!old_con_state->crtc && !new_con_state->crtc)
10118                         continue;
10119
10120                 if (!new_con_state->crtc)
10121                         continue;
10122
10123                 new_crtc_state = drm_atomic_get_crtc_state(state, new_con_state->crtc);
10124                 if (IS_ERR(new_crtc_state)) {
10125                         ret = PTR_ERR(new_crtc_state);
10126                         goto fail;
10127                 }
10128
10129                 if (dm_old_con_state->abm_level !=
10130                     dm_new_con_state->abm_level)
10131                         new_crtc_state->connectors_changed = true;
10132         }
10133
10134 #if defined(CONFIG_DRM_AMD_DC_DCN)
10135         if (dc_resource_is_dsc_encoding_supported(dc)) {
10136                 for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
10137                         if (drm_atomic_crtc_needs_modeset(new_crtc_state)) {
10138                                 ret = add_affected_mst_dsc_crtcs(state, crtc);
10139                                 if (ret)
10140                                         goto fail;
10141                         }
10142                 }
10143         }
10144 #endif
10145         for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
10146                 dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
10147
10148                 if (!drm_atomic_crtc_needs_modeset(new_crtc_state) &&
10149                     !new_crtc_state->color_mgmt_changed &&
10150                     old_crtc_state->vrr_enabled == new_crtc_state->vrr_enabled &&
10151                     !dm_old_crtc_state->dsc_force_changed)
10152                         continue;
10153
10154                 if (!new_crtc_state->enable)
10155                         continue;
10156
10157                 ret = drm_atomic_add_affected_connectors(state, crtc);
10158                 if (ret)
10159                         goto fail;
10160
10161                 ret = drm_atomic_add_affected_planes(state, crtc);
10162                 if (ret)
10163                         goto fail;
10164
10165                 if (dm_old_crtc_state->dsc_force_changed)
10166                         new_crtc_state->mode_changed = true;
10167         }
10168
10169         /*
10170          * Add all primary and overlay planes on the CRTC to the state
10171          * whenever a plane is enabled to maintain correct z-ordering
10172          * and to enable fast surface updates.
10173          */
10174         drm_for_each_crtc(crtc, dev) {
10175                 bool modified = false;
10176
10177                 for_each_oldnew_plane_in_state(state, plane, old_plane_state, new_plane_state, i) {
10178                         if (plane->type == DRM_PLANE_TYPE_CURSOR)
10179                                 continue;
10180
10181                         if (new_plane_state->crtc == crtc ||
10182                             old_plane_state->crtc == crtc) {
10183                                 modified = true;
10184                                 break;
10185                         }
10186                 }
10187
10188                 if (!modified)
10189                         continue;
10190
10191                 drm_for_each_plane_mask(plane, state->dev, crtc->state->plane_mask) {
10192                         if (plane->type == DRM_PLANE_TYPE_CURSOR)
10193                                 continue;
10194
10195                         new_plane_state =
10196                                 drm_atomic_get_plane_state(state, plane);
10197
10198                         if (IS_ERR(new_plane_state)) {
10199                                 ret = PTR_ERR(new_plane_state);
10200                                 goto fail;
10201                         }
10202                 }
10203         }
10204
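        /*
         * The DC state is rebuilt in a fixed order: changed planes are
         * removed and CRTCs disabled first, releasing their pipes in the
         * DC context, before CRTCs are (re)enabled and new or modified
         * planes are added back on top. Hence each update helper below is
         * called twice, first with enable == false and then with
         * enable == true.
         */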
10205         /* Remove existing planes if they are modified */
10206         for_each_oldnew_plane_in_state_reverse(state, plane, old_plane_state, new_plane_state, i) {
10207                 ret = dm_update_plane_state(dc, state, plane,
10208                                             old_plane_state,
10209                                             new_plane_state,
10210                                             false,
10211                                             &lock_and_validation_needed);
10212                 if (ret)
10213                         goto fail;
10214         }
10215
10216         /* Disable all crtcs which require disable */
10217         for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
10218                 ret = dm_update_crtc_state(&adev->dm, state, crtc,
10219                                            old_crtc_state,
10220                                            new_crtc_state,
10221                                            false,
10222                                            &lock_and_validation_needed);
10223                 if (ret)
10224                         goto fail;
10225         }
10226
10227         /* Enable all crtcs which require enable */
10228         for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
10229                 ret = dm_update_crtc_state(&adev->dm, state, crtc,
10230                                            old_crtc_state,
10231                                            new_crtc_state,
10232                                            true,
10233                                            &lock_and_validation_needed);
10234                 if (ret)
10235                         goto fail;
10236         }
10237
10238         ret = validate_overlay(state);
10239         if (ret)
10240                 goto fail;
10241
10242         /* Add new/modified planes */
10243         for_each_oldnew_plane_in_state_reverse(state, plane, old_plane_state, new_plane_state, i) {
10244                 ret = dm_update_plane_state(dc, state, plane,
10245                                             old_plane_state,
10246                                             new_plane_state,
10247                                             true,
10248                                             &lock_and_validation_needed);
10249                 if (ret)
10250                         goto fail;
10251         }
10252
10253         /* Run this here since we want to validate the streams we created */
10254         ret = drm_atomic_helper_check_planes(dev, state);
10255         if (ret)
10256                 goto fail;
10257
10258         /* Check cursor planes scaling */
10259         for_each_new_crtc_in_state(state, crtc, new_crtc_state, i) {
10260                 ret = dm_check_crtc_cursor(state, crtc, new_crtc_state);
10261                 if (ret)
10262                         goto fail;
10263         }
10264
10265         if (state->legacy_cursor_update) {
10266                 /*
10267                  * This is a fast cursor update coming from the plane update
10268                  * helper, check if it can be done asynchronously for better
10269                  * performance.
10270                  */
10271                 state->async_update =
10272                         !drm_atomic_helper_async_check(dev, state);
10273
10274                 /*
10275                  * Skip the remaining global validation if this is an async
10276                  * update. Cursor updates can be done without affecting
10277                  * state or bandwidth calcs and this avoids the performance
10278                  * penalty of locking the private state object and
10279                  * allocating a new dc_state.
10280                  */
10281                 if (state->async_update)
10282                         return 0;
10283         }
10284
10285         /* Check scaling and underscan changes */
10286         /* TODO Removed scaling changes validation due to inability to commit
10287          * a new stream into context w/o causing a full reset. Need to
10288          * decide how to handle.
10289          */
10290         for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) {
10291                 struct dm_connector_state *dm_old_con_state = to_dm_connector_state(old_con_state);
10292                 struct dm_connector_state *dm_new_con_state = to_dm_connector_state(new_con_state);
10293                 struct amdgpu_crtc *acrtc = to_amdgpu_crtc(dm_new_con_state->base.crtc);
10294
10295                 /* Skip any modesets/resets */
10296                 if (!acrtc || drm_atomic_crtc_needs_modeset(
10297                                 drm_atomic_get_new_crtc_state(state, &acrtc->base)))
10298                         continue;
10299
10300                 /* Skip anything that is not a scaling or underscan change */
10301                 if (!is_scaling_state_different(dm_new_con_state, dm_old_con_state))
10302                         continue;
10303
10304                 lock_and_validation_needed = true;
10305         }
10306
10307         /*
10308          * Streams and planes are reset when there are changes that affect
10309          * bandwidth. Anything that affects bandwidth needs to go through
10310          * DC global validation to ensure that the configuration can be applied
10311          * to hardware.
10312          *
10313          * We have to currently stall out here in atomic_check for outstanding
10314          * commits to finish in this case because our IRQ handlers reference
10315          * DRM state directly - we can end up disabling interrupts too early
10316          * if we don't.
10317          *
10318          * TODO: Remove this stall and drop DM state private objects.
10319          */
10320         if (lock_and_validation_needed) {
10321                 ret = dm_atomic_get_state(state, &dm_state);
10322                 if (ret)
10323                         goto fail;
10324
10325                 ret = do_acquire_global_lock(dev, state);
10326                 if (ret)
10327                         goto fail;
10328
10329 #if defined(CONFIG_DRM_AMD_DC_DCN)
                if (!compute_mst_dsc_configs_for_state(state, dm_state->context)) {
                        ret = -EINVAL;
                        goto fail;
                }
10332
10333                 ret = dm_update_mst_vcpi_slots_for_dsc(state, dm_state->context);
10334                 if (ret)
10335                         goto fail;
10336 #endif
10337
10338                 /*
10339                  * Perform validation of MST topology in the state:
10340                  * We need to perform MST atomic check before calling
10341                  * dc_validate_global_state(), or there is a chance
10342                  * to get stuck in an infinite loop and hang eventually.
10343                  */
10344                 ret = drm_dp_mst_atomic_check(state);
10345                 if (ret)
10346                         goto fail;
10347                 status = dc_validate_global_state(dc, dm_state->context, false);
10348                 if (status != DC_OK) {
10349                         DC_LOG_WARNING("DC global validation failure: %s (%d)",
10350                                        dc_status_to_str(status), status);
10351                         ret = -EINVAL;
10352                         goto fail;
10353                 }
10354         } else {
10355                 /*
10356                  * The commit is a fast update. Fast updates shouldn't change
10357                  * the DC context or affect global validation, and they can have
10358                  * their commit work done in parallel with other commits not touching
10359                  * the same resource. If we have a new DC context as part of
10360                  * the DM atomic state from validation we need to free it and
10361                  * retain the existing one instead.
10362                  *
10363                  * Furthermore, since the DM atomic state only contains the DC
10364                  * context and can safely be annulled, we can free the state
10365                  * and clear the associated private object now to free
10366                  * some memory and avoid a possible use-after-free later.
10367                  */
10368
10369                 for (i = 0; i < state->num_private_objs; i++) {
10370                         struct drm_private_obj *obj = state->private_objs[i].ptr;
10371
10372                         if (obj->funcs == adev->dm.atomic_obj.funcs) {
10373                                 int j = state->num_private_objs-1;
10374
10375                                 dm_atomic_destroy_state(obj,
10376                                                 state->private_objs[i].state);
10377
10378                                 /* If i is not at the end of the array then the
10379                                  * last element needs to be moved to where i was
10380                                  * before the array can safely be truncated.
10381                                  */
10382                                 if (i != j)
10383                                         state->private_objs[i] =
10384                                                 state->private_objs[j];
10385
10386                                 state->private_objs[j].ptr = NULL;
10387                                 state->private_objs[j].state = NULL;
10388                                 state->private_objs[j].old_state = NULL;
10389                                 state->private_objs[j].new_state = NULL;
10390
10391                                 state->num_private_objs = j;
10392                                 break;
10393                         }
10394                 }
10395         }
10396
10397         /* Store the overall update type for use later in atomic check. */
10398         for_each_new_crtc_in_state(state, crtc, new_crtc_state, i) {
10399                 struct dm_crtc_state *dm_new_crtc_state =
10400                         to_dm_crtc_state(new_crtc_state);
10401
10402                 dm_new_crtc_state->update_type = lock_and_validation_needed ?
10403                                                          UPDATE_TYPE_FULL :
10404                                                          UPDATE_TYPE_FAST;
10405         }
10406
10407         /* Must be success (ret == 0) at this point */
10408         WARN_ON(ret);
10409
10410         trace_amdgpu_dm_atomic_check_finish(state, ret);
10411
10412         return ret;
10413
10414 fail:
10415         if (ret == -EDEADLK)
10416                 DRM_DEBUG_DRIVER("Atomic check stopped to avoid deadlock.\n");
10417         else if (ret == -EINTR || ret == -EAGAIN || ret == -ERESTARTSYS)
10418                 DRM_DEBUG_DRIVER("Atomic check stopped due to signal.\n");
10419         else
10420                 DRM_DEBUG_DRIVER("Atomic check failed with err: %d\n", ret);
10421
10422         trace_amdgpu_dm_atomic_check_finish(state, ret);
10423
10424         return ret;
10425 }
10426
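/*
 * DPCD register DP_DOWN_STREAM_PORT_COUNT (0x007) carries the
 * MSA_TIMING_PAR_IGNORED bit (bit 6): when set, the sink can ignore the
 * MSA video timing parameters, which is a prerequisite for driving the
 * display with a variable refresh rate.
 */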
10427 static bool is_dp_capable_without_timing_msa(struct dc *dc,
10428                                              struct amdgpu_dm_connector *amdgpu_dm_connector)
10429 {
10430         uint8_t dpcd_data;
10431         bool capable = false;
10432
10433         if (amdgpu_dm_connector->dc_link &&
10434                 dm_helpers_dp_read_dpcd(
10435                                 NULL,
10436                                 amdgpu_dm_connector->dc_link,
10437                                 DP_DOWN_STREAM_PORT_COUNT,
10438                                 &dpcd_data,
10439                                 sizeof(dpcd_data))) {
10440                 capable = (dpcd_data & DP_MSA_TIMING_PAR_IGNORED) ? true : false;
10441         }
10442
10443         return capable;
10444 }
10445
10446 static bool parse_edid_cea(struct amdgpu_dm_connector *aconnector,
10447                 uint8_t *edid_ext, int len,
10448                 struct amdgpu_hdmi_vsdb_info *vsdb_info)
10449 {
10450         int i;
10451         struct amdgpu_device *adev = drm_to_adev(aconnector->base.dev);
10452         struct dc *dc = adev->dm.dc;
10453
10454         /* send extension block to DMCU for parsing */
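        /*
         * The handshake used below: each dc_edid_parser_send_cea() call
         * hands the firmware parser 8 bytes at offset i of the block;
         * every chunk except the last is acknowledged through
         * dc_edid_parser_recv_cea_ack(), and after the final chunk the
         * AMD VSDB parse result is fetched with
         * dc_edid_parser_recv_amd_vsdb().
         */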
10455         for (i = 0; i < len; i += 8) {
10456                 bool res;
10457                 int offset;
10458
10459                 /* send 8 bytes at a time */
10460                 if (!dc_edid_parser_send_cea(dc, i, len, &edid_ext[i], 8))
10461                         return false;
10462
10463                 if (i+8 == len) {
10464                         /* EDID block send completed, expect the result */
10465                         int version, min_rate, max_rate;
10466
10467                         res = dc_edid_parser_recv_amd_vsdb(dc, &version, &min_rate, &max_rate);
10468                         if (res) {
10469                                 /* amd vsdb found */
10470                                 vsdb_info->freesync_supported = 1;
10471                                 vsdb_info->amd_vsdb_version = version;
10472                                 vsdb_info->min_refresh_rate_hz = min_rate;
10473                                 vsdb_info->max_refresh_rate_hz = max_rate;
10474                                 return true;
10475                         }
10476                         /* not amd vsdb */
10477                         return false;
10478                 }
10479
10480                 /* check for ack */
10481                 res = dc_edid_parser_recv_cea_ack(dc, &offset);
10482                 if (!res)
10483                         return false;
10484         }
10485
10486         return false;
10487 }
10488
10489 static int parse_hdmi_amd_vsdb(struct amdgpu_dm_connector *aconnector,
10490                 struct edid *edid, struct amdgpu_hdmi_vsdb_info *vsdb_info)
10491 {
10492         uint8_t *edid_ext = NULL;
10493         int i;
10494         bool valid_vsdb_found = false;
10495
10496         /*----- drm_find_cea_extension() -----*/
10497         /* No EDID or EDID extensions */
10498         if (edid == NULL || edid->extensions == 0)
10499                 return -ENODEV;
10500
10501         /* Find CEA extension */
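        /*
         * Extension blocks are EDID_LENGTH (128) bytes each and laid out
         * directly after the 128-byte base block, hence the (i + 1)
         * offset.
         */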
10502         for (i = 0; i < edid->extensions; i++) {
10503                 edid_ext = (uint8_t *)edid + EDID_LENGTH * (i + 1);
10504                 if (edid_ext[0] == CEA_EXT)
10505                         break;
10506         }
10507
10508         if (i == edid->extensions)
10509                 return -ENODEV;
10510
10511         /*----- cea_db_offsets() -----*/
10512         if (edid_ext[0] != CEA_EXT)
10513                 return -ENODEV;
10514
10515         valid_vsdb_found = parse_edid_cea(aconnector, edid_ext, EDID_LENGTH, vsdb_info);
10516
10517         return valid_vsdb_found ? i : -ENODEV;
10518 }
10519
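/*
 * amdgpu_dm_update_freesync_caps() - update connector FreeSync capability
 * @connector: connector to update
 * @edid: current EDID for the sink, or NULL on disconnect
 *
 * For DP/eDP sinks that can ignore MSA timing parameters, the VRR range
 * is taken from the EDID monitor range descriptor; for HDMI sinks it is
 * taken from the AMD vendor-specific data block. A refresh range wider
 * than 10 Hz marks the connector as FreeSync capable.
 */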
void amdgpu_dm_update_freesync_caps(struct drm_connector *connector,
                                        struct edid *edid)
{
        int i = 0;
        struct detailed_timing *timing;
        struct detailed_non_pixel *data;
        struct detailed_data_monitor_range *range;
        struct amdgpu_dm_connector *amdgpu_dm_connector =
                        to_amdgpu_dm_connector(connector);
        struct dm_connector_state *dm_con_state = NULL;

        struct drm_device *dev = connector->dev;
        struct amdgpu_device *adev = drm_to_adev(dev);
        bool freesync_capable = false;
        struct amdgpu_hdmi_vsdb_info vsdb_info = {0};

        if (!connector->state) {
                DRM_ERROR("%s - Connector has no state\n", __func__);
                goto update;
        }

        if (!edid) {
                dm_con_state = to_dm_connector_state(connector->state);

                amdgpu_dm_connector->min_vfreq = 0;
                amdgpu_dm_connector->max_vfreq = 0;
                amdgpu_dm_connector->pixel_clock_mhz = 0;

                goto update;
        }

        dm_con_state = to_dm_connector_state(connector->state);

        if (!amdgpu_dm_connector->dc_sink) {
                DRM_ERROR("dc_sink NULL, could not add freesync module.\n");
                goto update;
        }
        if (!adev->dm.freesync_module)
                goto update;

        if (amdgpu_dm_connector->dc_sink->sink_signal == SIGNAL_TYPE_DISPLAY_PORT
                || amdgpu_dm_connector->dc_sink->sink_signal == SIGNAL_TYPE_EDP) {
                bool edid_check_required = false;

                if (edid) {
                        edid_check_required = is_dp_capable_without_timing_msa(
                                                adev->dm.dc,
                                                amdgpu_dm_connector);
                }

                if (edid_check_required && (edid->version > 1 ||
                   (edid->version == 1 && edid->revision > 1))) {
                        for (i = 0; i < 4; i++) {
                                timing  = &edid->detailed_timings[i];
                                data    = &timing->data.other_data;
                                range   = &data->data.range;
                                /*
                                 * Check if monitor has continuous frequency mode
                                 */
                                if (data->type != EDID_DETAIL_MONITOR_RANGE)
                                        continue;
                                /*
                                 * Check for flag range limits only. If flag == 1 then
                                 * no additional timing information is provided.
                                 * Default GTF, GTF Secondary curve and CVT are not
                                 * supported.
                                 */
                                if (range->flags != 1)
                                        continue;

                                amdgpu_dm_connector->min_vfreq = range->min_vfreq;
                                amdgpu_dm_connector->max_vfreq = range->max_vfreq;
                                amdgpu_dm_connector->pixel_clock_mhz =
                                        range->pixel_clock_mhz * 10;

                                connector->display_info.monitor_range.min_vfreq = range->min_vfreq;
                                connector->display_info.monitor_range.max_vfreq = range->max_vfreq;

                                break;
                        }

                        if (amdgpu_dm_connector->max_vfreq -
                            amdgpu_dm_connector->min_vfreq > 10) {
                                freesync_capable = true;
                        }
                }
        } else if (edid && amdgpu_dm_connector->dc_sink->sink_signal == SIGNAL_TYPE_HDMI_TYPE_A) {
                i = parse_hdmi_amd_vsdb(amdgpu_dm_connector, edid, &vsdb_info);
                if (i >= 0 && vsdb_info.freesync_supported) {
                        amdgpu_dm_connector->min_vfreq = vsdb_info.min_refresh_rate_hz;
                        amdgpu_dm_connector->max_vfreq = vsdb_info.max_refresh_rate_hz;
                        if (amdgpu_dm_connector->max_vfreq - amdgpu_dm_connector->min_vfreq > 10)
                                freesync_capable = true;

                        connector->display_info.monitor_range.min_vfreq = vsdb_info.min_refresh_rate_hz;
                        connector->display_info.monitor_range.max_vfreq = vsdb_info.max_refresh_rate_hz;
                }
        }

update:
        if (dm_con_state)
                dm_con_state->freesync_capable = freesync_capable;

        if (connector->vrr_capable_property)
                drm_connector_set_vrr_capable_property(connector,
                                                       freesync_capable);
}

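/*
 * amdgpu_dm_set_psr_caps() - read PSR capabilities of the eDP sink
 * @link: link to probe
 *
 * Reads DP_PSR_SUPPORT from the DPCD and caches the sink's PSR version;
 * any non-zero version is currently handled as DC_PSR_VERSION_1.
 */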
static void amdgpu_dm_set_psr_caps(struct dc_link *link)
{
        uint8_t dpcd_data[EDP_PSR_RECEIVER_CAP_SIZE];

        if (!(link->connector_signal & SIGNAL_TYPE_EDP))
                return;
        if (link->type == dc_connection_none)
                return;
        if (dm_helpers_dp_read_dpcd(NULL, link, DP_PSR_SUPPORT,
                                        dpcd_data, sizeof(dpcd_data))) {
                link->dpcd_caps.psr_caps.psr_version = dpcd_data[0];

                if (dpcd_data[0] == 0) {
                        link->psr_settings.psr_version = DC_PSR_VERSION_UNSUPPORTED;
                        link->psr_settings.psr_feature_enabled = false;
                } else {
                        link->psr_settings.psr_version = DC_PSR_VERSION_1;
                        link->psr_settings.psr_feature_enabled = true;
                }

                DRM_INFO("PSR support:%d\n", link->psr_settings.psr_feature_enabled);
        }
}

/*
 * amdgpu_dm_link_setup_psr() - configure psr link
 * @stream: stream state
 *
 * Return: true if success
 */
static bool amdgpu_dm_link_setup_psr(struct dc_stream_state *stream)
{
        struct dc_link *link = NULL;
        struct psr_config psr_config = {0};
        struct psr_context psr_context = {0};
        bool ret = false;

        if (stream == NULL)
                return false;

        link = stream->link;

        psr_config.psr_version = link->dpcd_caps.psr_caps.psr_version;

        if (psr_config.psr_version > 0) {
                psr_config.psr_exit_link_training_required = 0x1;
                psr_config.psr_frame_capture_indication_req = 0;
                psr_config.psr_rfb_setup_time = 0x37;
                psr_config.psr_sdp_transmit_line_num_deadline = 0x20;
                psr_config.allow_smu_optimizations = 0x0;

                ret = dc_link_setup_psr(link, stream, &psr_config, &psr_context);
        }
        DRM_DEBUG_DRIVER("PSR link: %d\n", link->psr_settings.psr_feature_enabled);

        return ret;
}

/*
 * amdgpu_dm_psr_enable() - enable psr f/w
 * @stream: stream state
 *
 * Return: true if success
 */
bool amdgpu_dm_psr_enable(struct dc_stream_state *stream)
{
        struct dc_link *link = stream->link;
        unsigned int vsync_rate_hz = 0;
        struct dc_static_screen_params params = {0};
        /*
         * Number of static frames before generating an interrupt to
         * enter PSR; start from a fail-safe default of 2 frames.
         */
        unsigned int num_frames_static = 2;

        DRM_DEBUG_DRIVER("Enabling psr...\n");

        vsync_rate_hz = div64_u64(div64_u64((
                        stream->timing.pix_clk_100hz * 100),
                        stream->timing.v_total),
                        stream->timing.h_total);

        /*
         * Round up: calculate the number of frames such that at least
         * 30 ms of time has passed.
         */
        if (vsync_rate_hz != 0) {
                unsigned int frame_time_microsec = 1000000 / vsync_rate_hz;

                num_frames_static = (30000 / frame_time_microsec) + 1;
        }
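        /*
         * For example, a 60 Hz panel yields frame_time_microsec = 16666,
         * so num_frames_static = 30000 / 16666 + 1 = 2 static frames
         * before the PSR entry interrupt is raised.
         */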

        params.triggers.cursor_update = true;
        params.triggers.overlay_update = true;
        params.triggers.surface_update = true;
        params.num_frames = num_frames_static;

        dc_stream_set_static_screen_params(link->ctx->dc,
                                           &stream, 1,
                                           &params);

        return dc_link_set_psr_allow_active(link, true, false, false);
}

/*
 * amdgpu_dm_psr_disable() - disable psr f/w
 * @stream: stream state
 *
 * Return: true if success
 */
static bool amdgpu_dm_psr_disable(struct dc_stream_state *stream)
{
        DRM_DEBUG_DRIVER("Disabling psr...\n");

        return dc_link_set_psr_allow_active(stream->link, false, true, false);
}

/*
 * amdgpu_dm_psr_disable_all() - disable psr f/w
 * if psr is enabled on any stream
 *
 * Return: true if success
 */
static bool amdgpu_dm_psr_disable_all(struct amdgpu_display_manager *dm)
{
        DRM_DEBUG_DRIVER("Disabling psr if psr is enabled on any stream\n");
        return dc_set_psr_allow_active(dm->dc, false);
}

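/*
 * amdgpu_dm_trigger_timing_sync() - apply the force_timing_sync setting
 * @dev: DRM device
 *
 * Propagates adev->dm.force_timing_sync to every stream in the current
 * state and retriggers CRTC synchronization, all under the dc lock.
 */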
void amdgpu_dm_trigger_timing_sync(struct drm_device *dev)
{
        struct amdgpu_device *adev = drm_to_adev(dev);
        struct dc *dc = adev->dm.dc;
        int i;

        mutex_lock(&adev->dm.dc_lock);
        if (dc->current_state) {
                for (i = 0; i < dc->current_state->stream_count; ++i)
                        dc->current_state->streams[i]
                                ->triggered_crtc_reset.enabled =
                                adev->dm.force_timing_sync;

                dm_enable_per_frame_crtc_master_sync(dc->current_state);
                dc_trigger_sync(dc, dc->current_state);
        }
        mutex_unlock(&adev->dm.dc_lock);
}

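/*
 * Register read/write helpers handed to DC. Both trace every access;
 * dm_read_reg_func() additionally rejects reads while a DMUB register
 * offload gather is in progress, when no real read-back is possible.
 */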
void dm_write_reg_func(const struct dc_context *ctx, uint32_t address,
                       uint32_t value, const char *func_name)
{
#ifdef DM_CHECK_ADDR_0
        if (address == 0) {
                DC_ERR("invalid register write. address = 0\n");
                return;
        }
#endif
        cgs_write_register(ctx->cgs_device, address, value);
        trace_amdgpu_dc_wreg(&ctx->perf_trace->write_count, address, value);
}

uint32_t dm_read_reg_func(const struct dc_context *ctx, uint32_t address,
                          const char *func_name)
{
        uint32_t value;
#ifdef DM_CHECK_ADDR_0
        if (address == 0) {
                DC_ERR("invalid register read. address = 0\n");
                return 0;
        }
#endif

        if (ctx->dmub_srv &&
            ctx->dmub_srv->reg_helper_offload.gather_in_progress &&
            !ctx->dmub_srv->reg_helper_offload.should_burst_write) {
                ASSERT(false);
                return 0;
        }

        value = cgs_read_register(ctx->cgs_device, address);

        trace_amdgpu_dc_rreg(&ctx->perf_trace->read_count, address, value);

        return value;
}

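/*
 * amdgpu_dm_process_dmub_aux_transfer_sync() - synchronous AUX via DMUB
 *
 * Starts an asynchronous AUX transfer through the DMUB firmware and waits
 * (up to 10 s) for dmub_aux_transfer_done to be completed with the reply.
 * Returns the reply length, or -1 with *operation_result set to
 * AUX_RET_ERROR_TIMEOUT if no reply arrives in time.
 */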
int amdgpu_dm_process_dmub_aux_transfer_sync(struct dc_context *ctx, unsigned int linkIndex,
                                struct aux_payload *payload, enum aux_return_code_type *operation_result)
{
        struct amdgpu_device *adev = ctx->driver_context;
        int ret = 0;

        dc_process_dmub_aux_transfer_async(ctx->dc, linkIndex, payload);
        ret = wait_for_completion_interruptible_timeout(&adev->dm.dmub_aux_transfer_done, 10 * HZ);
        if (ret == 0) {
                *operation_result = AUX_RET_ERROR_TIMEOUT;
                return -1;
        }
        *operation_result = (enum aux_return_code_type)adev->dm.dmub_notify->result;

        if (adev->dm.dmub_notify->result == AUX_RET_SUCCESS) {
                (*payload->reply) = adev->dm.dmub_notify->aux_reply.command;

                /* for a read, copy the returned data into the payload */
                if (!payload->write && adev->dm.dmub_notify->aux_reply.length &&
                    (*payload->reply == AUX_TRANSACTION_REPLY_AUX_ACK))
                        memcpy(payload->data, adev->dm.dmub_notify->aux_reply.data,
                               adev->dm.dmub_notify->aux_reply.length);
        }

        return adev->dm.dmub_notify->aux_reply.length;
}