drm/amd/display: Add DM support for Beige Goby
drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c (platform/kernel/linux-starfive.git)
/*
 * Copyright 2015 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: AMD
 *
 */

/* The caprices of the preprocessor require that this be declared right here */
#define CREATE_TRACE_POINTS

#include "dm_services_types.h"
#include "dc.h"
#include "dc/inc/core_types.h"
#include "dal_asic_id.h"
#include "dmub/dmub_srv.h"
#include "dc/inc/hw/dmcu.h"
#include "dc/inc/hw/abm.h"
#include "dc/dc_dmub_srv.h"
#include "dc/dc_edid_parser.h"
#include "dc/dc_stat.h"
#include "amdgpu_dm_trace.h"

#include "vid.h"
#include "amdgpu.h"
#include "amdgpu_display.h"
#include "amdgpu_ucode.h"
#include "atom.h"
#include "amdgpu_dm.h"
#ifdef CONFIG_DRM_AMD_DC_HDCP
#include "amdgpu_dm_hdcp.h"
#include <drm/drm_hdcp.h>
#endif
#include "amdgpu_pm.h"

#include "amd_shared.h"
#include "amdgpu_dm_irq.h"
#include "dm_helpers.h"
#include "amdgpu_dm_mst_types.h"
#if defined(CONFIG_DEBUG_FS)
#include "amdgpu_dm_debugfs.h"
#endif

#include "ivsrcid/ivsrcid_vislands30.h"

#include "i2caux_interface.h"
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/types.h>
#include <linux/pm_runtime.h>
#include <linux/pci.h>
#include <linux/firmware.h>
#include <linux/component.h>

#include <drm/drm_atomic.h>
#include <drm/drm_atomic_uapi.h>
#include <drm/drm_atomic_helper.h>
#include <drm/drm_dp_mst_helper.h>
#include <drm/drm_fb_helper.h>
#include <drm/drm_fourcc.h>
#include <drm/drm_edid.h>
#include <drm/drm_vblank.h>
#include <drm/drm_audio_component.h>

#if defined(CONFIG_DRM_AMD_DC_DCN)
#include "ivsrcid/dcn/irqsrcs_dcn_1_0.h"

#include "dcn/dcn_1_0_offset.h"
#include "dcn/dcn_1_0_sh_mask.h"
#include "soc15_hw_ip.h"
#include "vega10_ip_offset.h"

#include "soc15_common.h"
#endif

#include "modules/inc/mod_freesync.h"
#include "modules/power/power_helpers.h"
#include "modules/inc/mod_info_packet.h"

#define FIRMWARE_RENOIR_DMUB "amdgpu/renoir_dmcub.bin"
MODULE_FIRMWARE(FIRMWARE_RENOIR_DMUB);
#define FIRMWARE_SIENNA_CICHLID_DMUB "amdgpu/sienna_cichlid_dmcub.bin"
MODULE_FIRMWARE(FIRMWARE_SIENNA_CICHLID_DMUB);
#define FIRMWARE_NAVY_FLOUNDER_DMUB "amdgpu/navy_flounder_dmcub.bin"
MODULE_FIRMWARE(FIRMWARE_NAVY_FLOUNDER_DMUB);
#define FIRMWARE_GREEN_SARDINE_DMUB "amdgpu/green_sardine_dmcub.bin"
MODULE_FIRMWARE(FIRMWARE_GREEN_SARDINE_DMUB);
#define FIRMWARE_VANGOGH_DMUB "amdgpu/vangogh_dmcub.bin"
MODULE_FIRMWARE(FIRMWARE_VANGOGH_DMUB);
#define FIRMWARE_DIMGREY_CAVEFISH_DMUB "amdgpu/dimgrey_cavefish_dmcub.bin"
MODULE_FIRMWARE(FIRMWARE_DIMGREY_CAVEFISH_DMUB);
#define FIRMWARE_BEIGE_GOBY_DMUB "amdgpu/beige_goby_dmcub.bin"
MODULE_FIRMWARE(FIRMWARE_BEIGE_GOBY_DMUB);

#define FIRMWARE_RAVEN_DMCU             "amdgpu/raven_dmcu.bin"
MODULE_FIRMWARE(FIRMWARE_RAVEN_DMCU);

#define FIRMWARE_NAVI12_DMCU            "amdgpu/navi12_dmcu.bin"
MODULE_FIRMWARE(FIRMWARE_NAVI12_DMCU);

/* Number of bytes in PSP header for firmware. */
#define PSP_HEADER_BYTES 0x100

/* Number of bytes in PSP footer for firmware. */
#define PSP_FOOTER_BYTES 0x100

/**
 * DOC: overview
 *
 * The AMDgpu display manager, **amdgpu_dm** (or even simpler,
 * **dm**) sits between DRM and DC. It acts as a liaison, converting DRM
 * requests into DC requests, and DC responses into DRM responses.
 *
 * The root control structure is &struct amdgpu_display_manager.
 */

/* basic init/fini API */
static int amdgpu_dm_init(struct amdgpu_device *adev);
static void amdgpu_dm_fini(struct amdgpu_device *adev);
static bool is_freesync_video_mode(const struct drm_display_mode *mode,
                                   struct amdgpu_dm_connector *aconnector);

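/*
 * Map the DC dongle type reported in the link's DPCD caps to the DRM
 * subconnector type exposed to userspace. Anything DC cannot classify
 * (including mismatched dongles) is reported as "Unknown".
 */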
static enum drm_mode_subconnector get_subconnector_type(struct dc_link *link)
{
        switch (link->dpcd_caps.dongle_type) {
        case DISPLAY_DONGLE_NONE:
                return DRM_MODE_SUBCONNECTOR_Native;
        case DISPLAY_DONGLE_DP_VGA_CONVERTER:
                return DRM_MODE_SUBCONNECTOR_VGA;
        case DISPLAY_DONGLE_DP_DVI_CONVERTER:
        case DISPLAY_DONGLE_DP_DVI_DONGLE:
                return DRM_MODE_SUBCONNECTOR_DVID;
        case DISPLAY_DONGLE_DP_HDMI_CONVERTER:
        case DISPLAY_DONGLE_DP_HDMI_DONGLE:
                return DRM_MODE_SUBCONNECTOR_HDMIA;
        case DISPLAY_DONGLE_DP_HDMI_MISMATCHED_DONGLE:
        default:
                return DRM_MODE_SUBCONNECTOR_Unknown;
        }
}

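/*
 * Refresh the DP subconnector property of a connector. Only applies to
 * DisplayPort connectors; the property falls back to "Unknown" while no
 * sink is attached.
 */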
static void update_subconnector_property(struct amdgpu_dm_connector *aconnector)
{
        struct dc_link *link = aconnector->dc_link;
        struct drm_connector *connector = &aconnector->base;
        enum drm_mode_subconnector subconnector = DRM_MODE_SUBCONNECTOR_Unknown;

        if (connector->connector_type != DRM_MODE_CONNECTOR_DisplayPort)
                return;

        if (aconnector->dc_sink)
                subconnector = get_subconnector_type(link);

        drm_object_property_set_value(&connector->base,
                        connector->dev->mode_config.dp_subconnector_property,
                        subconnector);
}

/*
 * initializes drm_device display related structures, based on the information
 * provided by DAL. The drm structures are: drm_crtc, drm_connector,
 * drm_encoder and drm_mode_config.
 *
 * Returns 0 on success
 */
static int amdgpu_dm_initialize_drm_device(struct amdgpu_device *adev);
/* removes and deallocates the drm structures, created by the above function */
static void amdgpu_dm_destroy_drm_device(struct amdgpu_display_manager *dm);

static int amdgpu_dm_plane_init(struct amdgpu_display_manager *dm,
                                struct drm_plane *plane,
                                unsigned long possible_crtcs,
                                const struct dc_plane_cap *plane_cap);
static int amdgpu_dm_crtc_init(struct amdgpu_display_manager *dm,
                               struct drm_plane *plane,
                               uint32_t link_index);
static int amdgpu_dm_connector_init(struct amdgpu_display_manager *dm,
                                    struct amdgpu_dm_connector *amdgpu_dm_connector,
                                    uint32_t link_index,
                                    struct amdgpu_encoder *amdgpu_encoder);
static int amdgpu_dm_encoder_init(struct drm_device *dev,
                                  struct amdgpu_encoder *aencoder,
                                  uint32_t link_index);

static int amdgpu_dm_connector_get_modes(struct drm_connector *connector);

static void amdgpu_dm_atomic_commit_tail(struct drm_atomic_state *state);

static int amdgpu_dm_atomic_check(struct drm_device *dev,
                                  struct drm_atomic_state *state);

static void handle_cursor_update(struct drm_plane *plane,
                                 struct drm_plane_state *old_plane_state);

static void amdgpu_dm_set_psr_caps(struct dc_link *link);
static bool amdgpu_dm_psr_enable(struct dc_stream_state *stream);
static bool amdgpu_dm_link_setup_psr(struct dc_stream_state *stream);
static bool amdgpu_dm_psr_disable(struct dc_stream_state *stream);
static bool amdgpu_dm_psr_disable_all(struct amdgpu_display_manager *dm);

static const struct drm_format_info *
amd_get_format_info(const struct drm_mode_fb_cmd2 *cmd);

static bool
is_timing_unchanged_for_freesync(struct drm_crtc_state *old_crtc_state,
                                 struct drm_crtc_state *new_crtc_state);
/*
 * dm_vblank_get_counter
 *
 * @adev: [in] desired amdgpu device
 * @crtc: [in] index of the CRTC to get the counter from
 *
 * Returns the counter of vertical blanks for the given CRTC, or 0 when the
 * index is out of range or the CRTC has no stream attached.
 */
static u32 dm_vblank_get_counter(struct amdgpu_device *adev, int crtc)
{
        if (crtc >= adev->mode_info.num_crtc)
                return 0;
        else {
                struct amdgpu_crtc *acrtc = adev->mode_info.crtcs[crtc];

                if (acrtc->dm_irq_params.stream == NULL) {
                        DRM_ERROR("dc_stream_state is NULL for crtc '%d'!\n",
                                  crtc);
                        return 0;
                }

                return dc_stream_get_vblank_counter(acrtc->dm_irq_params.stream);
        }
}

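/*
 * Read back the current scanout position of a CRTC from DC and pack it
 * into the legacy register-style format the base driver still expects:
 * the low 16 bits of *position carry the vertical component and the high
 * 16 bits the horizontal one, and likewise vblank start/end in *vbl.
 */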
static int dm_crtc_get_scanoutpos(struct amdgpu_device *adev, int crtc,
                                  u32 *vbl, u32 *position)
{
        uint32_t v_blank_start, v_blank_end, h_position, v_position;

        if ((crtc < 0) || (crtc >= adev->mode_info.num_crtc))
                return -EINVAL;
        else {
                struct amdgpu_crtc *acrtc = adev->mode_info.crtcs[crtc];

                if (acrtc->dm_irq_params.stream == NULL) {
                        DRM_ERROR("dc_stream_state is NULL for crtc '%d'!\n",
                                  crtc);
                        return 0;
                }

                /*
                 * TODO rework base driver to use values directly.
                 * for now parse it back into reg-format
                 */
                dc_stream_get_scanoutpos(acrtc->dm_irq_params.stream,
                                         &v_blank_start,
                                         &v_blank_end,
                                         &h_position,
                                         &v_position);

                *position = v_position | (h_position << 16);
                *vbl = v_blank_start | (v_blank_end << 16);
        }

        return 0;
}

static bool dm_is_idle(void *handle)
{
        /* XXX todo */
        return true;
}

static int dm_wait_for_idle(void *handle)
{
        /* XXX todo */
        return 0;
}

static bool dm_check_soft_reset(void *handle)
{
        return false;
}

static int dm_soft_reset(void *handle)
{
        /* XXX todo */
        return 0;
}

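/*
 * Walk the CRTC list and return the amdgpu_crtc whose OTG (output timing
 * generator) instance matches otg_inst. An instance of -1 is unexpected;
 * warn and fall back to the first CRTC in that case.
 */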
static struct amdgpu_crtc *
get_crtc_by_otg_inst(struct amdgpu_device *adev,
                     int otg_inst)
{
        struct drm_device *dev = adev_to_drm(adev);
        struct drm_crtc *crtc;
        struct amdgpu_crtc *amdgpu_crtc;

        if (otg_inst == -1) {
                WARN_ON(1);
                return adev->mode_info.crtcs[0];
        }

        list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
                amdgpu_crtc = to_amdgpu_crtc(crtc);

                if (amdgpu_crtc->otg_inst == otg_inst)
                        return amdgpu_crtc;
        }

        return NULL;
}

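/*
 * VRR state checks: the _irq variant reads the interrupt-time copy in
 * dm_irq_params (safe to use from IRQ handlers), while the plain variant
 * reads the CRTC's atomic state. Both treat "active fixed" and "active
 * variable" refresh as VRR being active.
 */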
static inline bool amdgpu_dm_vrr_active_irq(struct amdgpu_crtc *acrtc)
{
        return acrtc->dm_irq_params.freesync_config.state ==
                       VRR_STATE_ACTIVE_VARIABLE ||
               acrtc->dm_irq_params.freesync_config.state ==
                       VRR_STATE_ACTIVE_FIXED;
}

static inline bool amdgpu_dm_vrr_active(struct dm_crtc_state *dm_state)
{
        return dm_state->freesync_config.state == VRR_STATE_ACTIVE_VARIABLE ||
               dm_state->freesync_config.state == VRR_STATE_ACTIVE_FIXED;
}

static inline bool is_dc_timing_adjust_needed(struct dm_crtc_state *old_state,
                                              struct dm_crtc_state *new_state)
{
        if (new_state->freesync_config.state == VRR_STATE_ACTIVE_FIXED)
                return true;
        else if (amdgpu_dm_vrr_active(old_state) != amdgpu_dm_vrr_active(new_state))
                return true;
        else
                return false;
}

/**
 * dm_pflip_high_irq() - Handle pageflip interrupt
 * @interrupt_params: interrupt parameters, used to look up the CRTC
 *
 * Handles the pageflip interrupt by notifying all interested parties
 * that the pageflip has been completed.
 */
static void dm_pflip_high_irq(void *interrupt_params)
{
        struct amdgpu_crtc *amdgpu_crtc;
        struct common_irq_params *irq_params = interrupt_params;
        struct amdgpu_device *adev = irq_params->adev;
        unsigned long flags;
        struct drm_pending_vblank_event *e;
        uint32_t vpos, hpos, v_blank_start, v_blank_end;
        bool vrr_active;

        amdgpu_crtc = get_crtc_by_otg_inst(adev, irq_params->irq_src - IRQ_TYPE_PFLIP);

        /* IRQ could occur when in initial stage */
        /* TODO work and BO cleanup */
        if (amdgpu_crtc == NULL) {
                DC_LOG_PFLIP("CRTC is null, returning.\n");
                return;
        }

        spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);

        if (amdgpu_crtc->pflip_status != AMDGPU_FLIP_SUBMITTED) {
                DC_LOG_PFLIP("amdgpu_crtc->pflip_status = %d != AMDGPU_FLIP_SUBMITTED(%d) on crtc:%d[%p]\n",
                             amdgpu_crtc->pflip_status,
                             AMDGPU_FLIP_SUBMITTED,
                             amdgpu_crtc->crtc_id,
                             amdgpu_crtc);
                spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
                return;
        }

        /* page flip completed. */
        e = amdgpu_crtc->event;
        amdgpu_crtc->event = NULL;

        if (!e)
                WARN_ON(1);

        vrr_active = amdgpu_dm_vrr_active_irq(amdgpu_crtc);

        /* Fixed refresh rate, or VRR scanout position outside front-porch? */
        if (!vrr_active ||
            !dc_stream_get_scanoutpos(amdgpu_crtc->dm_irq_params.stream, &v_blank_start,
                                      &v_blank_end, &hpos, &vpos) ||
            (vpos < v_blank_start)) {
                /* Update to correct count and vblank timestamp if racing with
                 * vblank irq. This also updates to the correct vblank timestamp
                 * even in VRR mode, as scanout is past the front-porch atm.
                 */
                drm_crtc_accurate_vblank_count(&amdgpu_crtc->base);

                /* Wake up userspace by sending the pageflip event with proper
                 * count and timestamp of vblank of flip completion.
                 */
                if (e) {
                        drm_crtc_send_vblank_event(&amdgpu_crtc->base, e);

                        /* Event sent, so done with vblank for this flip */
                        drm_crtc_vblank_put(&amdgpu_crtc->base);
                }
        } else if (e) {
                /* VRR active and inside front-porch: vblank count and
                 * timestamp for pageflip event will only be up to date after
                 * drm_crtc_handle_vblank() has been executed from late vblank
                 * irq handler after start of back-porch (vline 0). We queue the
                 * pageflip event for send-out by drm_crtc_handle_vblank() with
                 * updated timestamp and count, once it runs after us.
                 *
                 * We need to open-code this instead of using the helper
                 * drm_crtc_arm_vblank_event(), as that helper would
                 * call drm_crtc_accurate_vblank_count(), which we must
                 * not call in VRR mode while we are in front-porch!
                 */

                /* sequence will be replaced by real count during send-out. */
                e->sequence = drm_crtc_vblank_count(&amdgpu_crtc->base);
                e->pipe = amdgpu_crtc->crtc_id;

                list_add_tail(&e->base.link, &adev_to_drm(adev)->vblank_event_list);
                e = NULL;
        }

        /* Keep track of vblank of this flip for flip throttling. We use the
         * cooked hw counter, as that one was incremented at the start of this
         * vblank of pageflip completion, so last_flip_vblank is the forbidden
         * count for queueing new pageflips if vsync + VRR is enabled.
         */
        amdgpu_crtc->dm_irq_params.last_flip_vblank =
                amdgpu_get_vblank_counter_kms(&amdgpu_crtc->base);

        amdgpu_crtc->pflip_status = AMDGPU_FLIP_NONE;
        spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);

        DC_LOG_PFLIP("crtc:%d[%p], pflip_stat:AMDGPU_FLIP_NONE, vrr[%d]-fp %d\n",
                     amdgpu_crtc->crtc_id, amdgpu_crtc,
                     vrr_active, (int) !e);
}

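/**
 * dm_vupdate_high_irq() - Handle VUPDATE interrupt
 * @interrupt_params: interrupt parameters, used to look up the CRTC
 *
 * Fires after the end of the front porch. Tracks the measured frame
 * duration for refresh-rate tracing and, in VRR mode, performs the core
 * vblank handling (plus BTR processing on pre-DCE12 ASICs) that cannot be
 * done safely at the start of the front porch.
 */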
static void dm_vupdate_high_irq(void *interrupt_params)
{
        struct common_irq_params *irq_params = interrupt_params;
        struct amdgpu_device *adev = irq_params->adev;
        struct amdgpu_crtc *acrtc;
        struct drm_device *drm_dev;
        struct drm_vblank_crtc *vblank;
        ktime_t frame_duration_ns, previous_timestamp;
        unsigned long flags;
        int vrr_active;

        acrtc = get_crtc_by_otg_inst(adev, irq_params->irq_src - IRQ_TYPE_VUPDATE);

        if (acrtc) {
                vrr_active = amdgpu_dm_vrr_active_irq(acrtc);
                drm_dev = acrtc->base.dev;
                vblank = &drm_dev->vblank[acrtc->base.index];
                previous_timestamp = atomic64_read(&irq_params->previous_timestamp);
                frame_duration_ns = vblank->time - previous_timestamp;

                if (frame_duration_ns > 0) {
                        trace_amdgpu_refresh_rate_track(acrtc->base.index,
                                                frame_duration_ns,
                                                ktime_divns(NSEC_PER_SEC, frame_duration_ns));
                        atomic64_set(&irq_params->previous_timestamp, vblank->time);
                }

                DC_LOG_VBLANK("crtc:%d, vupdate-vrr:%d\n",
                              acrtc->crtc_id,
                              vrr_active);

                /* Core vblank handling is done here after end of front-porch in
                 * vrr mode, as vblank timestamping only gives valid results
                 * once scanout is past the front-porch. This will also deliver
                 * page-flip completion events that have been queued to us
                 * if a pageflip happened inside front-porch.
                 */
                if (vrr_active) {
                        drm_crtc_handle_vblank(&acrtc->base);

                        /* BTR processing for pre-DCE12 ASICs */
                        if (acrtc->dm_irq_params.stream &&
                            adev->family < AMDGPU_FAMILY_AI) {
                                spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);
                                mod_freesync_handle_v_update(
                                    adev->dm.freesync_module,
                                    acrtc->dm_irq_params.stream,
                                    &acrtc->dm_irq_params.vrr_params);

                                dc_stream_adjust_vmin_vmax(
                                    adev->dm.dc,
                                    acrtc->dm_irq_params.stream,
                                    &acrtc->dm_irq_params.vrr_params.adjust);
                                spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
                        }
                }
        }
}

/**
 * dm_crtc_high_irq() - Handles CRTC interrupt
 * @interrupt_params: used for determining the CRTC instance
 *
 * Handles the CRTC/VSYNC interrupt by notifying DRM's VBLANK
 * event handler.
 */
static void dm_crtc_high_irq(void *interrupt_params)
{
        struct common_irq_params *irq_params = interrupt_params;
        struct amdgpu_device *adev = irq_params->adev;
        struct amdgpu_crtc *acrtc;
        unsigned long flags;
        int vrr_active;

        acrtc = get_crtc_by_otg_inst(adev, irq_params->irq_src - IRQ_TYPE_VBLANK);
        if (!acrtc)
                return;

        vrr_active = amdgpu_dm_vrr_active_irq(acrtc);

        DC_LOG_VBLANK("crtc:%d, vupdate-vrr:%d, planes:%d\n", acrtc->crtc_id,
                      vrr_active, acrtc->dm_irq_params.active_planes);

        /*
         * Core vblank handling at start of front-porch is only possible
         * in non-vrr mode, as only there vblank timestamping will give
         * valid results while done in front-porch. Otherwise defer it
         * to dm_vupdate_high_irq after end of front-porch.
         */
        if (!vrr_active)
                drm_crtc_handle_vblank(&acrtc->base);

        /*
         * The following must happen at start of vblank, for crc
         * computation and below-the-range btr support in vrr mode.
         */
        amdgpu_dm_crtc_handle_crc_irq(&acrtc->base);

        /* BTR updates need to happen before VUPDATE on Vega and above. */
        if (adev->family < AMDGPU_FAMILY_AI)
                return;

        spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);

        if (acrtc->dm_irq_params.stream &&
            acrtc->dm_irq_params.vrr_params.supported &&
            acrtc->dm_irq_params.freesync_config.state ==
                    VRR_STATE_ACTIVE_VARIABLE) {
                mod_freesync_handle_v_update(adev->dm.freesync_module,
                                             acrtc->dm_irq_params.stream,
                                             &acrtc->dm_irq_params.vrr_params);

                dc_stream_adjust_vmin_vmax(adev->dm.dc, acrtc->dm_irq_params.stream,
                                           &acrtc->dm_irq_params.vrr_params.adjust);
        }

        /*
         * If there aren't any active_planes then DCH HUBP may be clock-gated.
         * In that case, pageflip completion interrupts won't fire and pageflip
         * completion events won't get delivered. Prevent this by sending
         * pending pageflip events from here if a flip is still pending.
         *
         * If any planes are enabled, use dm_pflip_high_irq() instead, to
         * avoid race conditions between flip programming and completion,
         * which could cause too early flip completion events.
         */
        if (adev->family >= AMDGPU_FAMILY_RV &&
            acrtc->pflip_status == AMDGPU_FLIP_SUBMITTED &&
            acrtc->dm_irq_params.active_planes == 0) {
                if (acrtc->event) {
                        drm_crtc_send_vblank_event(&acrtc->base, acrtc->event);
                        acrtc->event = NULL;
                        drm_crtc_vblank_put(&acrtc->base);
                }
                acrtc->pflip_status = AMDGPU_FLIP_NONE;
        }

        spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
}

#if defined(CONFIG_DRM_AMD_DC_DCN)
/**
 * dm_dcn_vertical_interrupt0_high_irq() - Handles OTG Vertical interrupt0 for
 * DCN generation ASICs
 * @interrupt_params: interrupt parameters
 *
 * Used to set crc window/read out crc value at vertical line 0 position
 */
#if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
static void dm_dcn_vertical_interrupt0_high_irq(void *interrupt_params)
{
        struct common_irq_params *irq_params = interrupt_params;
        struct amdgpu_device *adev = irq_params->adev;
        struct amdgpu_crtc *acrtc;

        acrtc = get_crtc_by_otg_inst(adev, irq_params->irq_src - IRQ_TYPE_VLINE0);

        if (!acrtc)
                return;

        amdgpu_dm_crtc_handle_crc_window_irq(&acrtc->base);
}
#endif

/**
 * dm_dmub_outbox1_low_irq() - Handles Outbox interrupt
 * @interrupt_params: used for determining the Outbox instance
 *
 * Handles the Outbox interrupt by draining pending DMUB notifications
 * and trace buffer entries.
 */
#define DMUB_TRACE_MAX_READ 64
static void dm_dmub_outbox1_low_irq(void *interrupt_params)
{
        struct dmub_notification notify;
        struct common_irq_params *irq_params = interrupt_params;
        struct amdgpu_device *adev = irq_params->adev;
        struct amdgpu_display_manager *dm = &adev->dm;
        struct dmcub_trace_buf_entry entry = { 0 };
        uint32_t count = 0;

        if (dc_enable_dmub_notifications(adev->dm.dc)) {
                if (irq_params->irq_src == DC_IRQ_SOURCE_DMCUB_OUTBOX) {
                        do {
                                dc_stat_get_dmub_notification(adev->dm.dc, &notify);
                        } while (notify.pending_notification);

                        if (adev->dm.dmub_notify)
                                memcpy(adev->dm.dmub_notify, &notify, sizeof(struct dmub_notification));
                        if (notify.type == DMUB_NOTIFICATION_AUX_REPLY)
                                complete(&adev->dm.dmub_aux_transfer_done);
                        /* TODO: HPD implementation */

                } else {
                        DRM_ERROR("DM: Failed to receive correct outbox IRQ !");
                }
        }

        do {
                if (dc_dmub_srv_get_dmub_outbox0_msg(dm->dc, &entry)) {
                        trace_amdgpu_dmub_trace_high_irq(entry.trace_code, entry.tick_count,
                                                         entry.param0, entry.param1);

                        DRM_DEBUG_DRIVER("trace_code:%u, tick_count:%u, param0:%u, param1:%u\n",
                                         entry.trace_code, entry.tick_count, entry.param0, entry.param1);
                } else {
                        break;
                }

                count++;

        } while (count <= DMUB_TRACE_MAX_READ);

        ASSERT(count <= DMUB_TRACE_MAX_READ);
}
#endif

static int dm_set_clockgating_state(void *handle,
                  enum amd_clockgating_state state)
{
        return 0;
}

static int dm_set_powergating_state(void *handle,
                  enum amd_powergating_state state)
{
        return 0;
}

/* Prototypes of private functions */
static int dm_early_init(void *handle);

/* Allocate memory for FBC compressed data */
static void amdgpu_dm_fbc_init(struct drm_connector *connector)
{
        struct drm_device *dev = connector->dev;
        struct amdgpu_device *adev = drm_to_adev(dev);
        struct dm_compressor_info *compressor = &adev->dm.compressor;
        struct amdgpu_dm_connector *aconn = to_amdgpu_dm_connector(connector);
        struct drm_display_mode *mode;
        unsigned long max_size = 0;

        if (adev->dm.dc->fbc_compressor == NULL)
                return;

        if (aconn->dc_link->connector_signal != SIGNAL_TYPE_EDP)
                return;

        if (compressor->bo_ptr)
                return;

        list_for_each_entry(mode, &connector->modes, head) {
                if (max_size < mode->htotal * mode->vtotal)
                        max_size = mode->htotal * mode->vtotal;
        }

        if (max_size) {
                int r = amdgpu_bo_create_kernel(adev, max_size * 4, PAGE_SIZE,
                            AMDGPU_GEM_DOMAIN_GTT, &compressor->bo_ptr,
                            &compressor->gpu_addr, &compressor->cpu_addr);

                if (r) {
                        DRM_ERROR("DM: Failed to initialize FBC\n");
                } else {
                        adev->dm.dc->ctx->fbc_gpu_addr = compressor->gpu_addr;
                        DRM_INFO("DM: FBC alloc %lu\n", max_size*4);
                }
        }
}

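/*
 * Audio component callback: look up the connector driving the given audio
 * pin (port) and copy its cached ELD into buf. Returns the ELD size in
 * bytes, with *enabled reporting whether a matching connector was found.
 */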
static int amdgpu_dm_audio_component_get_eld(struct device *kdev, int port,
                                             int pipe, bool *enabled,
                                             unsigned char *buf, int max_bytes)
{
        struct drm_device *dev = dev_get_drvdata(kdev);
        struct amdgpu_device *adev = drm_to_adev(dev);
        struct drm_connector *connector;
        struct drm_connector_list_iter conn_iter;
        struct amdgpu_dm_connector *aconnector;
        int ret = 0;

        *enabled = false;

        mutex_lock(&adev->dm.audio_lock);

        drm_connector_list_iter_begin(dev, &conn_iter);
        drm_for_each_connector_iter(connector, &conn_iter) {
                aconnector = to_amdgpu_dm_connector(connector);
                if (aconnector->audio_inst != port)
                        continue;

                *enabled = true;
                ret = drm_eld_size(connector->eld);
                memcpy(buf, connector->eld, min(max_bytes, ret));

                break;
        }
        drm_connector_list_iter_end(&conn_iter);

        mutex_unlock(&adev->dm.audio_lock);

        DRM_DEBUG_KMS("Get ELD : idx=%d ret=%d en=%d\n", port, ret, *enabled);

        return ret;
}

static const struct drm_audio_component_ops amdgpu_dm_audio_component_ops = {
        .get_eld = amdgpu_dm_audio_component_get_eld,
};

static int amdgpu_dm_audio_component_bind(struct device *kdev,
                                          struct device *hda_kdev, void *data)
{
        struct drm_device *dev = dev_get_drvdata(kdev);
        struct amdgpu_device *adev = drm_to_adev(dev);
        struct drm_audio_component *acomp = data;

        acomp->ops = &amdgpu_dm_audio_component_ops;
        acomp->dev = kdev;
        adev->dm.audio_component = acomp;

        return 0;
}

static void amdgpu_dm_audio_component_unbind(struct device *kdev,
                                             struct device *hda_kdev, void *data)
{
        struct drm_device *dev = dev_get_drvdata(kdev);
        struct amdgpu_device *adev = drm_to_adev(dev);
        struct drm_audio_component *acomp = data;

        acomp->ops = NULL;
        acomp->dev = NULL;
        adev->dm.audio_component = NULL;
}

static const struct component_ops amdgpu_dm_audio_component_bind_ops = {
        .bind   = amdgpu_dm_audio_component_bind,
        .unbind = amdgpu_dm_audio_component_unbind,
};

static int amdgpu_dm_audio_init(struct amdgpu_device *adev)
{
        int i, ret;

        if (!amdgpu_audio)
                return 0;

        adev->mode_info.audio.enabled = true;

        adev->mode_info.audio.num_pins = adev->dm.dc->res_pool->audio_count;

        for (i = 0; i < adev->mode_info.audio.num_pins; i++) {
                adev->mode_info.audio.pin[i].channels = -1;
                adev->mode_info.audio.pin[i].rate = -1;
                adev->mode_info.audio.pin[i].bits_per_sample = -1;
                adev->mode_info.audio.pin[i].status_bits = 0;
                adev->mode_info.audio.pin[i].category_code = 0;
                adev->mode_info.audio.pin[i].connected = false;
                adev->mode_info.audio.pin[i].id =
                        adev->dm.dc->res_pool->audios[i]->inst;
                adev->mode_info.audio.pin[i].offset = 0;
        }

        ret = component_add(adev->dev, &amdgpu_dm_audio_component_bind_ops);
        if (ret < 0)
                return ret;

        adev->dm.audio_registered = true;

        return 0;
}

static void amdgpu_dm_audio_fini(struct amdgpu_device *adev)
{
        if (!amdgpu_audio)
                return;

        if (!adev->mode_info.audio.enabled)
                return;

        if (adev->dm.audio_registered) {
                component_del(adev->dev, &amdgpu_dm_audio_component_bind_ops);
                adev->dm.audio_registered = false;
        }

        /* TODO: Disable audio? */

        adev->mode_info.audio.enabled = false;
}

static void amdgpu_dm_audio_eld_notify(struct amdgpu_device *adev, int pin)
{
        struct drm_audio_component *acomp = adev->dm.audio_component;

        if (acomp && acomp->audio_ops && acomp->audio_ops->pin_eld_notify) {
                DRM_DEBUG_KMS("Notify ELD: %d\n", pin);

                acomp->audio_ops->pin_eld_notify(acomp->audio_ops->audio_ptr,
                                                 pin, -1);
        }
}

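/*
 * Bring up the DMUB service: copy the firmware's instruction constants,
 * data and VBIOS image into the reserved framebuffer windows, clear the
 * mailbox/tracebuffer/fw-state windows, and hand the resulting layout to
 * dmub_srv for hardware init. Returns 0 (not an error) when the ASIC has
 * no DMUB support, so callers need not special-case that.
 */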
static int dm_dmub_hw_init(struct amdgpu_device *adev)
{
        const struct dmcub_firmware_header_v1_0 *hdr;
        struct dmub_srv *dmub_srv = adev->dm.dmub_srv;
        struct dmub_srv_fb_info *fb_info = adev->dm.dmub_fb_info;
        const struct firmware *dmub_fw = adev->dm.dmub_fw;
        struct dmcu *dmcu = adev->dm.dc->res_pool->dmcu;
        struct abm *abm = adev->dm.dc->res_pool->abm;
        struct dmub_srv_hw_params hw_params;
        enum dmub_status status;
        const unsigned char *fw_inst_const, *fw_bss_data;
        uint32_t i, fw_inst_const_size, fw_bss_data_size;
        bool has_hw_support;

        if (!dmub_srv)
                /* DMUB isn't supported on the ASIC. */
                return 0;

        if (!fb_info) {
                DRM_ERROR("No framebuffer info for DMUB service.\n");
                return -EINVAL;
        }

        if (!dmub_fw) {
                /* Firmware required for DMUB support. */
                DRM_ERROR("No firmware provided for DMUB.\n");
                return -EINVAL;
        }

        status = dmub_srv_has_hw_support(dmub_srv, &has_hw_support);
        if (status != DMUB_STATUS_OK) {
                DRM_ERROR("Error checking HW support for DMUB: %d\n", status);
                return -EINVAL;
        }

        if (!has_hw_support) {
                DRM_INFO("DMUB unsupported on ASIC\n");
                return 0;
        }

        hdr = (const struct dmcub_firmware_header_v1_0 *)dmub_fw->data;

        fw_inst_const = dmub_fw->data +
                        le32_to_cpu(hdr->header.ucode_array_offset_bytes) +
                        PSP_HEADER_BYTES;

        fw_bss_data = dmub_fw->data +
                      le32_to_cpu(hdr->header.ucode_array_offset_bytes) +
                      le32_to_cpu(hdr->inst_const_bytes);

        /* Copy firmware and bios info into FB memory. */
        fw_inst_const_size = le32_to_cpu(hdr->inst_const_bytes) -
                             PSP_HEADER_BYTES - PSP_FOOTER_BYTES;

        fw_bss_data_size = le32_to_cpu(hdr->bss_data_bytes);

        /* if adev->firmware.load_type == AMDGPU_FW_LOAD_PSP,
         * amdgpu_ucode_init_single_fw will load dmub firmware
         * fw_inst_const part to cw0; otherwise, the firmware back door load
         * will be done by dm_dmub_hw_init
         */
        if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP) {
                memcpy(fb_info->fb[DMUB_WINDOW_0_INST_CONST].cpu_addr, fw_inst_const,
                       fw_inst_const_size);
        }

        if (fw_bss_data_size)
                memcpy(fb_info->fb[DMUB_WINDOW_2_BSS_DATA].cpu_addr,
                       fw_bss_data, fw_bss_data_size);

        /* Copy firmware bios info into FB memory. */
        memcpy(fb_info->fb[DMUB_WINDOW_3_VBIOS].cpu_addr, adev->bios,
               adev->bios_size);

        /* Reset regions that need to be reset. */
        memset(fb_info->fb[DMUB_WINDOW_4_MAILBOX].cpu_addr, 0,
               fb_info->fb[DMUB_WINDOW_4_MAILBOX].size);

        memset(fb_info->fb[DMUB_WINDOW_5_TRACEBUFF].cpu_addr, 0,
               fb_info->fb[DMUB_WINDOW_5_TRACEBUFF].size);

        memset(fb_info->fb[DMUB_WINDOW_6_FW_STATE].cpu_addr, 0,
               fb_info->fb[DMUB_WINDOW_6_FW_STATE].size);

        /* Initialize hardware. */
        memset(&hw_params, 0, sizeof(hw_params));
        hw_params.fb_base = adev->gmc.fb_start;
        hw_params.fb_offset = adev->gmc.aper_base;

        /* backdoor load firmware and trigger dmub running */
        if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP)
                hw_params.load_inst_const = true;

        if (dmcu)
                hw_params.psp_version = dmcu->psp_version;

        for (i = 0; i < fb_info->num_fb; ++i)
                hw_params.fb[i] = &fb_info->fb[i];

        status = dmub_srv_hw_init(dmub_srv, &hw_params);
        if (status != DMUB_STATUS_OK) {
                DRM_ERROR("Error initializing DMUB HW: %d\n", status);
                return -EINVAL;
        }

        /* Wait for firmware load to finish. */
        status = dmub_srv_wait_for_auto_load(dmub_srv, 100000);
        if (status != DMUB_STATUS_OK)
                DRM_WARN("Wait for DMUB auto-load failed: %d\n", status);

        /* Init DMCU and ABM if available. */
        if (dmcu && abm) {
                dmcu->funcs->dmcu_init(dmcu);
                abm->dmcu_is_running = dmcu->funcs->is_dmcu_initialized(dmcu);
        }

        adev->dm.dc->ctx->dmub_srv = dc_dmub_srv_create(adev->dm.dc, dmub_srv);
        if (!adev->dm.dc->ctx->dmub_srv) {
                DRM_ERROR("Couldn't allocate DC DMUB server!\n");
                return -ENOMEM;
        }

        DRM_INFO("DMUB hardware initialized: version=0x%08X\n",
                 adev->dm.dmcub_fw_version);

        return 0;
}

#if defined(CONFIG_DRM_AMD_DC_DCN)
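/*
 * Translate the GMC (graphics memory controller) view of the address space
 * into DC's physical address space config: system aperture bounds, AGP
 * aperture and GART page table location. The shifts mirror the granularity
 * the values are programmed with (apparently 256KB units for the system
 * aperture, 16MB for AGP, 4KB pages for the page table).
 */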
static void mmhub_read_system_context(struct amdgpu_device *adev, struct dc_phy_addr_space_config *pa_config)
{
        uint64_t pt_base;
        uint32_t logical_addr_low;
        uint32_t logical_addr_high;
        uint32_t agp_base, agp_bot, agp_top;
        PHYSICAL_ADDRESS_LOC page_table_start, page_table_end, page_table_base;

        logical_addr_low  = min(adev->gmc.fb_start, adev->gmc.agp_start) >> 18;
        pt_base = amdgpu_gmc_pd_addr(adev->gart.bo);

        if (adev->apu_flags & AMD_APU_IS_RAVEN2)
                /*
                 * Raven2 has a HW issue where it cannot use VRAM that lies
                 * beyond MC_VM_SYSTEM_APERTURE_HIGH_ADDR. As a workaround,
                 * increase the system aperture high address (add 1) to get
                 * rid of the VM fault and hardware hang.
                 */
                logical_addr_high = max((adev->gmc.fb_end >> 18) + 0x1, adev->gmc.agp_end >> 18);
        else
                logical_addr_high = max(adev->gmc.fb_end, adev->gmc.agp_end) >> 18;

        agp_base = 0;
        agp_bot = adev->gmc.agp_start >> 24;
        agp_top = adev->gmc.agp_end >> 24;

        page_table_start.high_part = (u32)(adev->gmc.gart_start >> 44) & 0xF;
        page_table_start.low_part = (u32)(adev->gmc.gart_start >> 12);
        page_table_end.high_part = (u32)(adev->gmc.gart_end >> 44) & 0xF;
        page_table_end.low_part = (u32)(adev->gmc.gart_end >> 12);
        page_table_base.high_part = upper_32_bits(pt_base) & 0xF;
        page_table_base.low_part = lower_32_bits(pt_base);

        pa_config->system_aperture.start_addr = (uint64_t)logical_addr_low << 18;
        pa_config->system_aperture.end_addr = (uint64_t)logical_addr_high << 18;

        pa_config->system_aperture.agp_base = (uint64_t)agp_base << 24;
        pa_config->system_aperture.agp_bot = (uint64_t)agp_bot << 24;
        pa_config->system_aperture.agp_top = (uint64_t)agp_top << 24;

        pa_config->system_aperture.fb_base = adev->gmc.fb_start;
        pa_config->system_aperture.fb_offset = adev->gmc.aper_base;
        pa_config->system_aperture.fb_top = adev->gmc.fb_end;

        pa_config->gart_config.page_table_start_addr = page_table_start.quad_part << 12;
        pa_config->gart_config.page_table_end_addr = page_table_end.quad_part << 12;
        pa_config->gart_config.page_table_base_addr = page_table_base.quad_part;

        pa_config->is_hvm_enabled = 0;
}
#endif
#if defined(CONFIG_DRM_AMD_DC_DCN)
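/*
 * Deferred work for MALL stutter: tracks how many CRTCs currently have
 * their vblank IRQ enabled and only allows DC's idle optimizations once
 * that count drops to zero.
 */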
static void event_mall_stutter(struct work_struct *work)
{
        struct vblank_workqueue *vblank_work = container_of(work, struct vblank_workqueue, mall_work);
        struct amdgpu_display_manager *dm = vblank_work->dm;

        mutex_lock(&dm->dc_lock);

        if (vblank_work->enable)
                dm->active_vblank_irq_count++;
        else if (dm->active_vblank_irq_count)
                dm->active_vblank_irq_count--;

        dc_allow_idle_optimizations(dm->dc, dm->active_vblank_irq_count == 0);

        DRM_DEBUG_KMS("Allow idle optimizations (MALL): %d\n", dm->active_vblank_irq_count == 0);

        mutex_unlock(&dm->dc_lock);
}

static struct vblank_workqueue *vblank_create_workqueue(struct amdgpu_device *adev, struct dc *dc)
{
        int max_caps = dc->caps.max_links;
        struct vblank_workqueue *vblank_work;
        int i = 0;

        vblank_work = kcalloc(max_caps, sizeof(*vblank_work), GFP_KERNEL);
        if (ZERO_OR_NULL_PTR(vblank_work)) {
                kfree(vblank_work);
                return NULL;
        }

        for (i = 0; i < max_caps; i++)
                INIT_WORK(&vblank_work[i].mall_work, event_mall_stutter);

        return vblank_work;
}
#endif

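/*
 * Main DM init: sets up IRQ handling, creates the DC instance from the
 * ASIC/vBIOS info, brings up DMUB, and registers the DRM display state
 * (CRTCs, connectors, encoders) plus optional freesync, HDCP and secure
 * display support. Anything that fails here unwinds through amdgpu_dm_fini().
 */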
static int amdgpu_dm_init(struct amdgpu_device *adev)
{
        struct dc_init_data init_data;
#ifdef CONFIG_DRM_AMD_DC_HDCP
        struct dc_callback_init init_params;
#endif
        int r;

        adev->dm.ddev = adev_to_drm(adev);
        adev->dm.adev = adev;

        /* Zero all the fields */
        memset(&init_data, 0, sizeof(init_data));
#ifdef CONFIG_DRM_AMD_DC_HDCP
        memset(&init_params, 0, sizeof(init_params));
#endif

        mutex_init(&adev->dm.dc_lock);
        mutex_init(&adev->dm.audio_lock);
#if defined(CONFIG_DRM_AMD_DC_DCN)
        spin_lock_init(&adev->dm.vblank_lock);
#endif

        if (amdgpu_dm_irq_init(adev)) {
                DRM_ERROR("amdgpu: failed to initialize DM IRQ support.\n");
                goto error;
        }

        init_data.asic_id.chip_family = adev->family;

        init_data.asic_id.pci_revision_id = adev->pdev->revision;
        init_data.asic_id.hw_internal_rev = adev->external_rev_id;

        init_data.asic_id.vram_width = adev->gmc.vram_width;
        /* TODO: initialize init_data.asic_id.vram_type here!!!! */
        init_data.asic_id.atombios_base_address =
                adev->mode_info.atom_context->bios;

        init_data.driver = adev;

        adev->dm.cgs_device = amdgpu_cgs_create_device(adev);

        if (!adev->dm.cgs_device) {
                DRM_ERROR("amdgpu: failed to create cgs device.\n");
                goto error;
        }

        init_data.cgs_device = adev->dm.cgs_device;

        init_data.dce_environment = DCE_ENV_PRODUCTION_DRV;

        switch (adev->asic_type) {
        case CHIP_CARRIZO:
        case CHIP_STONEY:
        case CHIP_RAVEN:
        case CHIP_RENOIR:
                init_data.flags.gpu_vm_support = true;
                if (ASICREV_IS_GREEN_SARDINE(adev->external_rev_id))
                        init_data.flags.disable_dmcu = true;
                break;
#if defined(CONFIG_DRM_AMD_DC_DCN)
        case CHIP_VANGOGH:
                init_data.flags.gpu_vm_support = true;
                break;
#endif
        default:
                break;
        }

        if (amdgpu_dc_feature_mask & DC_FBC_MASK)
                init_data.flags.fbc_support = true;

        if (amdgpu_dc_feature_mask & DC_MULTI_MON_PP_MCLK_SWITCH_MASK)
                init_data.flags.multi_mon_pp_mclk_switch = true;

        if (amdgpu_dc_feature_mask & DC_DISABLE_FRACTIONAL_PWM_MASK)
                init_data.flags.disable_fractional_pwm = true;

        init_data.flags.power_down_display_on_boot = true;

        INIT_LIST_HEAD(&adev->dm.da_list);
        /* Display Core create. */
        adev->dm.dc = dc_create(&init_data);

        if (adev->dm.dc) {
                DRM_INFO("Display Core initialized with v%s!\n", DC_VER);
        } else {
                DRM_INFO("Display Core failed to initialize with v%s!\n", DC_VER);
                goto error;
        }

        if (amdgpu_dc_debug_mask & DC_DISABLE_PIPE_SPLIT) {
                adev->dm.dc->debug.force_single_disp_pipe_split = false;
                adev->dm.dc->debug.pipe_split_policy = MPC_SPLIT_AVOID;
        }

        if (adev->asic_type != CHIP_CARRIZO && adev->asic_type != CHIP_STONEY)
                adev->dm.dc->debug.disable_stutter = amdgpu_pp_feature_mask & PP_STUTTER_MODE ? false : true;

        if (amdgpu_dc_debug_mask & DC_DISABLE_STUTTER)
                adev->dm.dc->debug.disable_stutter = true;

        if (amdgpu_dc_debug_mask & DC_DISABLE_DSC)
                adev->dm.dc->debug.disable_dsc = true;

        if (amdgpu_dc_debug_mask & DC_DISABLE_CLOCK_GATING)
                adev->dm.dc->debug.disable_clock_gate = true;

        r = dm_dmub_hw_init(adev);
        if (r) {
                DRM_ERROR("DMUB interface failed to initialize: status=%d\n", r);
                goto error;
        }

        dc_hardware_init(adev->dm.dc);

#if defined(CONFIG_DRM_AMD_DC_DCN)
        if (adev->apu_flags) {
                struct dc_phy_addr_space_config pa_config;

                mmhub_read_system_context(adev, &pa_config);

                /* Call the DC init_memory func */
                dc_setup_system_context(adev->dm.dc, &pa_config);
        }
#endif

        adev->dm.freesync_module = mod_freesync_create(adev->dm.dc);
        if (!adev->dm.freesync_module) {
                DRM_ERROR("amdgpu: failed to initialize freesync_module.\n");
        } else {
                DRM_DEBUG_DRIVER("amdgpu: freesync_module init done %p.\n",
                                 adev->dm.freesync_module);
        }

        amdgpu_dm_init_color_mod();

#if defined(CONFIG_DRM_AMD_DC_DCN)
        if (adev->dm.dc->caps.max_links > 0) {
                adev->dm.vblank_workqueue = vblank_create_workqueue(adev, adev->dm.dc);

                if (!adev->dm.vblank_workqueue)
                        DRM_ERROR("amdgpu: failed to initialize vblank_workqueue.\n");
                else
                        DRM_DEBUG_DRIVER("amdgpu: vblank_workqueue init done %p.\n", adev->dm.vblank_workqueue);
        }
#endif

#ifdef CONFIG_DRM_AMD_DC_HDCP
        if (adev->dm.dc->caps.max_links > 0 && adev->asic_type >= CHIP_RAVEN) {
                adev->dm.hdcp_workqueue = hdcp_create_workqueue(adev, &init_params.cp_psp, adev->dm.dc);

                if (!adev->dm.hdcp_workqueue)
                        DRM_ERROR("amdgpu: failed to initialize hdcp_workqueue.\n");
                else
                        DRM_DEBUG_DRIVER("amdgpu: hdcp_workqueue init done %p.\n", adev->dm.hdcp_workqueue);

                dc_init_callbacks(adev->dm.dc, &init_params);
        }
#endif
#if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
        adev->dm.crc_rd_wrk = amdgpu_dm_crtc_secure_display_create_work();
#endif
        if (dc_enable_dmub_notifications(adev->dm.dc)) {
                init_completion(&adev->dm.dmub_aux_transfer_done);
                adev->dm.dmub_notify = kzalloc(sizeof(struct dmub_notification), GFP_KERNEL);
                if (!adev->dm.dmub_notify) {
                        DRM_INFO("amdgpu: failed to allocate adev->dm.dmub_notify\n");
                        goto error;
                }
                amdgpu_dm_outbox_init(adev);
        }

        if (amdgpu_dm_initialize_drm_device(adev)) {
                DRM_ERROR("amdgpu: failed to initialize sw for display support.\n");
                goto error;
        }

        /* create fake encoders for MST */
        dm_dp_create_fake_mst_encoders(adev);

        /* TODO: Add_display_info? */

        /* TODO use dynamic cursor width */
        adev_to_drm(adev)->mode_config.cursor_width = adev->dm.dc->caps.max_cursor_size;
        adev_to_drm(adev)->mode_config.cursor_height = adev->dm.dc->caps.max_cursor_size;

        if (drm_vblank_init(adev_to_drm(adev), adev->dm.display_indexes_num)) {
                DRM_ERROR("amdgpu: failed to initialize vblank for display support.\n");
                goto error;
        }

        DRM_DEBUG_DRIVER("KMS initialized.\n");

        return 0;
error:
        amdgpu_dm_fini(adev);

        return -EINVAL;
}

static void amdgpu_dm_fini(struct amdgpu_device *adev)
{
        int i;

        for (i = 0; i < adev->dm.display_indexes_num; i++) {
                drm_encoder_cleanup(&adev->dm.mst_encoders[i].base);
        }

        amdgpu_dm_audio_fini(adev);

        amdgpu_dm_destroy_drm_device(&adev->dm);

#if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
        if (adev->dm.crc_rd_wrk) {
                flush_work(&adev->dm.crc_rd_wrk->notify_ta_work);
                kfree(adev->dm.crc_rd_wrk);
                adev->dm.crc_rd_wrk = NULL;
        }
#endif
#ifdef CONFIG_DRM_AMD_DC_HDCP
        if (adev->dm.hdcp_workqueue) {
                hdcp_destroy(&adev->dev->kobj, adev->dm.hdcp_workqueue);
                adev->dm.hdcp_workqueue = NULL;
        }

        if (adev->dm.dc)
                dc_deinit_callbacks(adev->dm.dc);
#endif

#if defined(CONFIG_DRM_AMD_DC_DCN)
        if (adev->dm.vblank_workqueue) {
                adev->dm.vblank_workqueue->dm = NULL;
                kfree(adev->dm.vblank_workqueue);
                adev->dm.vblank_workqueue = NULL;
        }
#endif

        if (adev->dm.dc->ctx->dmub_srv) {
                dc_dmub_srv_destroy(&adev->dm.dc->ctx->dmub_srv);
                adev->dm.dc->ctx->dmub_srv = NULL;
        }

        if (dc_enable_dmub_notifications(adev->dm.dc)) {
                kfree(adev->dm.dmub_notify);
                adev->dm.dmub_notify = NULL;
        }

        if (adev->dm.dmub_bo)
                amdgpu_bo_free_kernel(&adev->dm.dmub_bo,
                                      &adev->dm.dmub_bo_gpu_addr,
                                      &adev->dm.dmub_bo_cpu_addr);

        /* DC Destroy TODO: Replace destroy DAL */
        if (adev->dm.dc)
                dc_destroy(&adev->dm.dc);
        /*
         * TODO: pageflip, vblank interrupt
         *
         * amdgpu_dm_irq_fini(adev);
         */

        if (adev->dm.cgs_device) {
                amdgpu_cgs_destroy_device(adev->dm.cgs_device);
                adev->dm.cgs_device = NULL;
        }
        if (adev->dm.freesync_module) {
                mod_freesync_destroy(adev->dm.freesync_module);
                adev->dm.freesync_module = NULL;
        }

        mutex_destroy(&adev->dm.audio_lock);
        mutex_destroy(&adev->dm.dc_lock);
}

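/*
 * Request the DMCU firmware where one is needed: most ASICs either have no
 * DMCU (older chips) or use DMUB instead (Renoir and newer DCN parts) and
 * return early. The firmware is optional, so a missing file is not an
 * error; it is only registered with the PSP loader when present.
 */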
static int load_dmcu_fw(struct amdgpu_device *adev)
{
        const char *fw_name_dmcu = NULL;
        int r;
        const struct dmcu_firmware_header_v1_0 *hdr;

        switch (adev->asic_type) {
#if defined(CONFIG_DRM_AMD_DC_SI)
        case CHIP_TAHITI:
        case CHIP_PITCAIRN:
        case CHIP_VERDE:
        case CHIP_OLAND:
#endif
        case CHIP_BONAIRE:
        case CHIP_HAWAII:
        case CHIP_KAVERI:
        case CHIP_KABINI:
        case CHIP_MULLINS:
        case CHIP_TONGA:
        case CHIP_FIJI:
        case CHIP_CARRIZO:
        case CHIP_STONEY:
        case CHIP_POLARIS11:
        case CHIP_POLARIS10:
        case CHIP_POLARIS12:
        case CHIP_VEGAM:
        case CHIP_VEGA10:
        case CHIP_VEGA12:
        case CHIP_VEGA20:
        case CHIP_NAVI10:
        case CHIP_NAVI14:
        case CHIP_RENOIR:
        case CHIP_SIENNA_CICHLID:
        case CHIP_NAVY_FLOUNDER:
        case CHIP_DIMGREY_CAVEFISH:
        case CHIP_BEIGE_GOBY:
        case CHIP_VANGOGH:
                return 0;
        case CHIP_NAVI12:
                fw_name_dmcu = FIRMWARE_NAVI12_DMCU;
                break;
        case CHIP_RAVEN:
                if (ASICREV_IS_PICASSO(adev->external_rev_id))
                        fw_name_dmcu = FIRMWARE_RAVEN_DMCU;
                else if (ASICREV_IS_RAVEN2(adev->external_rev_id))
                        fw_name_dmcu = FIRMWARE_RAVEN_DMCU;
                else
                        return 0;
                break;
        default:
                DRM_ERROR("Unsupported ASIC type: 0x%X\n", adev->asic_type);
                return -EINVAL;
        }

        if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP) {
                DRM_DEBUG_KMS("dm: DMCU firmware not supported on direct or SMU loading\n");
                return 0;
        }

        r = request_firmware_direct(&adev->dm.fw_dmcu, fw_name_dmcu, adev->dev);
        if (r == -ENOENT) {
                /* DMCU firmware is not necessary, so don't raise a fuss if it's missing */
                DRM_DEBUG_KMS("dm: DMCU firmware not found\n");
                adev->dm.fw_dmcu = NULL;
                return 0;
        }
        if (r) {
                dev_err(adev->dev, "amdgpu_dm: Can't load firmware \"%s\"\n",
                        fw_name_dmcu);
                return r;
        }

        r = amdgpu_ucode_validate(adev->dm.fw_dmcu);
        if (r) {
                dev_err(adev->dev, "amdgpu_dm: Can't validate firmware \"%s\"\n",
                        fw_name_dmcu);
                release_firmware(adev->dm.fw_dmcu);
                adev->dm.fw_dmcu = NULL;
                return r;
        }

        hdr = (const struct dmcu_firmware_header_v1_0 *)adev->dm.fw_dmcu->data;
        adev->firmware.ucode[AMDGPU_UCODE_ID_DMCU_ERAM].ucode_id = AMDGPU_UCODE_ID_DMCU_ERAM;
        adev->firmware.ucode[AMDGPU_UCODE_ID_DMCU_ERAM].fw = adev->dm.fw_dmcu;
        adev->firmware.fw_size +=
                ALIGN(le32_to_cpu(hdr->header.ucode_size_bytes) - le32_to_cpu(hdr->intv_size_bytes), PAGE_SIZE);

        adev->firmware.ucode[AMDGPU_UCODE_ID_DMCU_INTV].ucode_id = AMDGPU_UCODE_ID_DMCU_INTV;
        adev->firmware.ucode[AMDGPU_UCODE_ID_DMCU_INTV].fw = adev->dm.fw_dmcu;
        adev->firmware.fw_size +=
                ALIGN(le32_to_cpu(hdr->intv_size_bytes), PAGE_SIZE);

        adev->dm.dmcu_fw_version = le32_to_cpu(hdr->header.ucode_version);

        DRM_DEBUG_KMS("PSP loading DMCU firmware\n");

        return 0;
}

1469 static uint32_t amdgpu_dm_dmub_reg_read(void *ctx, uint32_t address)
1470 {
1471         struct amdgpu_device *adev = ctx;
1472
1473         return dm_read_reg(adev->dm.dc->ctx, address);
1474 }
1475
1476 static void amdgpu_dm_dmub_reg_write(void *ctx, uint32_t address,
1477                                      uint32_t value)
1478 {
1479         struct amdgpu_device *adev = ctx;
1480
1481         return dm_write_reg(adev->dm.dc->ctx, address, value);
1482 }
1483
1484 static int dm_dmub_sw_init(struct amdgpu_device *adev)
1485 {
1486         struct dmub_srv_create_params create_params;
1487         struct dmub_srv_region_params region_params;
1488         struct dmub_srv_region_info region_info;
1489         struct dmub_srv_fb_params fb_params;
1490         struct dmub_srv_fb_info *fb_info;
1491         struct dmub_srv *dmub_srv;
1492         const struct dmcub_firmware_header_v1_0 *hdr;
1493         const char *fw_name_dmub;
1494         enum dmub_asic dmub_asic;
1495         enum dmub_status status;
1496         int r;
1497
1498         switch (adev->asic_type) {
1499         case CHIP_RENOIR:
1500                 dmub_asic = DMUB_ASIC_DCN21;
1501                 fw_name_dmub = FIRMWARE_RENOIR_DMUB;
1502                 if (ASICREV_IS_GREEN_SARDINE(adev->external_rev_id))
1503                         fw_name_dmub = FIRMWARE_GREEN_SARDINE_DMUB;
1504                 break;
1505         case CHIP_SIENNA_CICHLID:
1506                 dmub_asic = DMUB_ASIC_DCN30;
1507                 fw_name_dmub = FIRMWARE_SIENNA_CICHLID_DMUB;
1508                 break;
1509         case CHIP_NAVY_FLOUNDER:
1510                 dmub_asic = DMUB_ASIC_DCN30;
1511                 fw_name_dmub = FIRMWARE_NAVY_FLOUNDER_DMUB;
1512                 break;
1513         case CHIP_VANGOGH:
1514                 dmub_asic = DMUB_ASIC_DCN301;
1515                 fw_name_dmub = FIRMWARE_VANGOGH_DMUB;
1516                 break;
1517         case CHIP_DIMGREY_CAVEFISH:
1518                 dmub_asic = DMUB_ASIC_DCN302;
1519                 fw_name_dmub = FIRMWARE_DIMGREY_CAVEFISH_DMUB;
1520                 break;
1521         case CHIP_BEIGE_GOBY:
1522                 dmub_asic = DMUB_ASIC_DCN303;
1523                 fw_name_dmub = FIRMWARE_BEIGE_GOBY_DMUB;
1524                 break;
1526         default:
1527                 /* ASIC doesn't support DMUB. */
1528                 return 0;
1529         }
1530
1531         r = request_firmware_direct(&adev->dm.dmub_fw, fw_name_dmub, adev->dev);
1532         if (r) {
1533                 DRM_ERROR("DMUB firmware loading failed: %d\n", r);
1534                 return 0;
1535         }
1536
1537         r = amdgpu_ucode_validate(adev->dm.dmub_fw);
1538         if (r) {
1539                 DRM_ERROR("Couldn't validate DMUB firmware: %d\n", r);
1540                 return 0;
1541         }
1542
1543         hdr = (const struct dmcub_firmware_header_v1_0 *)adev->dm.dmub_fw->data;
1544
1545         if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) {
1546                 adev->firmware.ucode[AMDGPU_UCODE_ID_DMCUB].ucode_id =
1547                         AMDGPU_UCODE_ID_DMCUB;
1548                 adev->firmware.ucode[AMDGPU_UCODE_ID_DMCUB].fw =
1549                         adev->dm.dmub_fw;
1550                 adev->firmware.fw_size +=
1551                         ALIGN(le32_to_cpu(hdr->inst_const_bytes), PAGE_SIZE);
1552
1553                 DRM_INFO("Loading DMUB firmware via PSP: version=0x%08X\n",
1554                          le32_to_cpu(hdr->header.ucode_version));
1555         }
1556
1557         adev->dm.dmcub_fw_version = le32_to_cpu(hdr->header.ucode_version);
1558
1559         adev->dm.dmub_srv = kzalloc(sizeof(*adev->dm.dmub_srv), GFP_KERNEL);
1560         dmub_srv = adev->dm.dmub_srv;
1561
1562         if (!dmub_srv) {
1563                 DRM_ERROR("Failed to allocate DMUB service!\n");
1564                 return -ENOMEM;
1565         }
1566
1567         memset(&create_params, 0, sizeof(create_params));
1568         create_params.user_ctx = adev;
1569         create_params.funcs.reg_read = amdgpu_dm_dmub_reg_read;
1570         create_params.funcs.reg_write = amdgpu_dm_dmub_reg_write;
1571         create_params.asic = dmub_asic;
1572
1573         /* Create the DMUB service. */
1574         status = dmub_srv_create(dmub_srv, &create_params);
1575         if (status != DMUB_STATUS_OK) {
1576                 DRM_ERROR("Error creating DMUB service: %d\n", status);
1577                 return -EINVAL;
1578         }
1579
1580         /* Calculate the size of all the regions for the DMUB service. */
1581         memset(&region_params, 0, sizeof(region_params));
1582
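        /* The instruction-constant section of the firmware blob is wrapped by
         * a PSP header and footer: fw_inst_const points past the PSP header,
         * while fw_bss_data points past the whole wrapped instruction region.
         */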
1583         region_params.inst_const_size = le32_to_cpu(hdr->inst_const_bytes) -
1584                                         PSP_HEADER_BYTES - PSP_FOOTER_BYTES;
1585         region_params.bss_data_size = le32_to_cpu(hdr->bss_data_bytes);
1586         region_params.vbios_size = adev->bios_size;
1587         region_params.fw_bss_data = region_params.bss_data_size ?
1588                 adev->dm.dmub_fw->data +
1589                 le32_to_cpu(hdr->header.ucode_array_offset_bytes) +
1590                 le32_to_cpu(hdr->inst_const_bytes) : NULL;
1591         region_params.fw_inst_const =
1592                 adev->dm.dmub_fw->data +
1593                 le32_to_cpu(hdr->header.ucode_array_offset_bytes) +
1594                 PSP_HEADER_BYTES;
1595
1596         status = dmub_srv_calc_region_info(dmub_srv, &region_params,
1597                                            &region_info);
1598
1599         if (status != DMUB_STATUS_OK) {
1600                 DRM_ERROR("Error calculating DMUB region info: %d\n", status);
1601                 return -EINVAL;
1602         }
1603
1604         /*
1605          * Allocate a framebuffer based on the total size of all the regions.
1606          * TODO: Move this into GART.
1607          */
1608         r = amdgpu_bo_create_kernel(adev, region_info.fb_size, PAGE_SIZE,
1609                                     AMDGPU_GEM_DOMAIN_VRAM, &adev->dm.dmub_bo,
1610                                     &adev->dm.dmub_bo_gpu_addr,
1611                                     &adev->dm.dmub_bo_cpu_addr);
1612         if (r)
1613                 return r;
1614
1615         /* Rebase the regions on the framebuffer address. */
1616         memset(&fb_params, 0, sizeof(fb_params));
1617         fb_params.cpu_addr = adev->dm.dmub_bo_cpu_addr;
1618         fb_params.gpu_addr = adev->dm.dmub_bo_gpu_addr;
1619         fb_params.region_info = &region_info;
1620
1621         adev->dm.dmub_fb_info =
1622                 kzalloc(sizeof(*adev->dm.dmub_fb_info), GFP_KERNEL);
1623         fb_info = adev->dm.dmub_fb_info;
1624
1625         if (!fb_info) {
1626                 DRM_ERROR(
1627                         "Failed to allocate framebuffer info for DMUB service!\n");
1628                 return -ENOMEM;
1629         }
1630
1631         status = dmub_srv_calc_fb_info(dmub_srv, &fb_params, fb_info);
1632         if (status != DMUB_STATUS_OK) {
1633                 DRM_ERROR("Error calculating DMUB FB info: %d\n", status);
1634                 return -EINVAL;
1635         }
1636
1637         return 0;
1638 }
1639
1640 static int dm_sw_init(void *handle)
1641 {
1642         struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1643         int r;
1644
1645         r = dm_dmub_sw_init(adev);
1646         if (r)
1647                 return r;
1648
1649         return load_dmcu_fw(adev);
1650 }
1651
1652 static int dm_sw_fini(void *handle)
1653 {
1654         struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1655
1656         kfree(adev->dm.dmub_fb_info);
1657         adev->dm.dmub_fb_info = NULL;
1658
1659         if (adev->dm.dmub_srv) {
1660                 dmub_srv_destroy(adev->dm.dmub_srv);
1661                 adev->dm.dmub_srv = NULL;
1662         }
1663
1664         release_firmware(adev->dm.dmub_fw);
1665         adev->dm.dmub_fw = NULL;
1666
1667         release_firmware(adev->dm.fw_dmcu);
1668         adev->dm.fw_dmcu = NULL;
1669
1670         return 0;
1671 }
1672
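/* Walk the connector list and enable MST topology management on every link
 * that was detected as an MST branch device.
 */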
1673 static int detect_mst_link_for_all_connectors(struct drm_device *dev)
1674 {
1675         struct amdgpu_dm_connector *aconnector;
1676         struct drm_connector *connector;
1677         struct drm_connector_list_iter iter;
1678         int ret = 0;
1679
1680         drm_connector_list_iter_begin(dev, &iter);
1681         drm_for_each_connector_iter(connector, &iter) {
1682                 aconnector = to_amdgpu_dm_connector(connector);
1683                 if (aconnector->dc_link->type == dc_connection_mst_branch &&
1684                     aconnector->mst_mgr.aux) {
1685                         DRM_DEBUG_DRIVER("DM_MST: starting TM on aconnector: %p [id: %d]\n",
1686                                          aconnector,
1687                                          aconnector->base.base.id);
1688
1689                         ret = drm_dp_mst_topology_mgr_set_mst(&aconnector->mst_mgr, true);
1690                         if (ret < 0) {
1691                                 DRM_ERROR("DM_MST: Failed to start MST\n");
1692                                 aconnector->dc_link->type =
1693                                         dc_connection_single;
1694                                 break;
1695                         }
1696                 }
1697         }
1698         drm_connector_list_iter_end(&iter);
1699
1700         return ret;
1701 }
1702
1703 static int dm_late_init(void *handle)
1704 {
1705         struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1707         struct dmcu_iram_parameters params;
1708         unsigned int linear_lut[16];
1709         int i;
1710         struct dmcu *dmcu = NULL;
1711         bool ret = true;
1712
1713         dmcu = adev->dm.dc->res_pool->dmcu;
1714
1715         for (i = 0; i < 16; i++)
1716                 linear_lut[i] = 0xFFFF * i / 15;
1717
1718         params.set = 0;
1719         params.backlight_ramping_start = 0xCCCC;
1720         params.backlight_ramping_reduction = 0xCCCCCCCC;
1721         params.backlight_lut_array_size = 16;
1722         params.backlight_lut_array = linear_lut;
1723
1724         /* Min backlight level after ABM reduction. Don't allow below 1%:
1725          * 0xFFFF * 0.01 = 0x28F
1726          */
1727         params.min_abm_backlight = 0x28F;
1728
1729         /* When ABM is implemented on DMCUB, the dmcu object will be NULL;
1730          * ABM 2.4 and up are implemented on DMCUB.
1731          */
1733         if (dmcu)
1734                 ret = dmcu_load_iram(dmcu, params);
1735         else if (adev->dm.dc->ctx->dmub_srv)
1736                 ret = dmub_init_abm_config(adev->dm.dc->res_pool, params);
1737
1738         if (!ret)
1739                 return -EINVAL;
1740
1741         return detect_mst_link_for_all_connectors(adev_to_drm(adev));
1742 }
1743
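/* Suspend or resume the DP MST topology managers for all root MST
 * connectors. If a manager fails to resume, its topology is torn down and a
 * hotplug event is sent so userspace can reprobe the connector.
 */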
1744 static void s3_handle_mst(struct drm_device *dev, bool suspend)
1745 {
1746         struct amdgpu_dm_connector *aconnector;
1747         struct drm_connector *connector;
1748         struct drm_connector_list_iter iter;
1749         struct drm_dp_mst_topology_mgr *mgr;
1750         int ret;
1751         bool need_hotplug = false;
1752
1753         drm_connector_list_iter_begin(dev, &iter);
1754         drm_for_each_connector_iter(connector, &iter) {
1755                 aconnector = to_amdgpu_dm_connector(connector);
1756                 if (aconnector->dc_link->type != dc_connection_mst_branch ||
1757                     aconnector->mst_port)
1758                         continue;
1759
1760                 mgr = &aconnector->mst_mgr;
1761
1762                 if (suspend) {
1763                         drm_dp_mst_topology_mgr_suspend(mgr);
1764                 } else {
1765                         ret = drm_dp_mst_topology_mgr_resume(mgr, true);
1766                         if (ret < 0) {
1767                                 drm_dp_mst_topology_mgr_set_mst(mgr, false);
1768                                 need_hotplug = true;
1769                         }
1770                 }
1771         }
1772         drm_connector_list_iter_end(&iter);
1773
1774         if (need_hotplug)
1775                 drm_kms_helper_hotplug_event(dev);
1776 }
1777
1778 static int amdgpu_dm_smu_write_watermarks_table(struct amdgpu_device *adev)
1779 {
1780         struct smu_context *smu = &adev->smu;
1781         int ret = 0;
1782
1783         if (!is_support_sw_smu(adev))
1784                 return 0;
1785
1786         /* This interface is for dGPU Navi1x. The Linux dc-pplib interface
1787          * depends on the Windows driver's DC implementation.
1788          * For Navi1x, the clock settings of the DCN watermarks are fixed; the
1789          * settings should be passed to SMU during boot up and on resume from S3.
1790          * Boot up: DC calculates the DCN watermark clock settings within
1791          * dc_create / dcn20_resource_construct,
1792          * then calls the pplib functions below to pass the settings to SMU:
1793          * smu_set_watermarks_for_clock_ranges
1794          * smu_set_watermarks_table
1795          * navi10_set_watermarks_table
1796          * smu_write_watermarks_table
1797          *
1798          * For Renoir, the clock settings of the DCN watermarks are also fixed
1799          * values. DC has implemented a different flow for the Windows driver:
1800          * dc_hardware_init / dc_set_power_state
1801          * dcn10_init_hw
1802          * notify_wm_ranges
1803          * set_wm_ranges
1804          * -- Linux
1805          * smu_set_watermarks_for_clock_ranges
1806          * renoir_set_watermarks_table
1807          * smu_write_watermarks_table
1808          *
1809          * For Linux,
1810          * dc_hardware_init -> amdgpu_dm_init
1811          * dc_set_power_state --> dm_resume
1812          *
1813          * Therefore, this function applies to Navi10/12/14 but not to Renoir.
1814          */
1816         switch (adev->asic_type) {
1817         case CHIP_NAVI10:
1818         case CHIP_NAVI14:
1819         case CHIP_NAVI12:
1820                 break;
1821         default:
1822                 return 0;
1823         }
1824
1825         ret = smu_write_watermarks_table(smu);
1826         if (ret) {
1827                 DRM_ERROR("Failed to update WMTABLE!\n");
1828                 return ret;
1829         }
1830
1831         return 0;
1832 }
1833
1834 /**
1835  * dm_hw_init() - Initialize DC device
1836  * @handle: The base driver device containing the amdgpu_dm device.
1837  *
1838  * Initialize the &struct amdgpu_display_manager device. This involves calling
1839  * the initializers of each DM component, then populating the struct with them.
1840  *
1841  * Although the function implies hardware initialization, both hardware and
1842  * software are initialized here. Splitting them out to their relevant init
1843  * hooks is a future TODO item.
1844  *
1845  * Some notable things that are initialized here:
1846  *
1847  * - Display Core, both software and hardware
1848  * - DC modules that we need (freesync and color management)
1849  * - DRM software states
1850  * - Interrupt sources and handlers
1851  * - Vblank support
1852  * - Debug FS entries, if enabled
1853  */
1854 static int dm_hw_init(void *handle)
1855 {
1856         struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1857         /* Create DAL display manager */
1858         amdgpu_dm_init(adev);
1859         amdgpu_dm_hpd_init(adev);
1860
1861         return 0;
1862 }
1863
1864 /**
1865  * dm_hw_fini() - Teardown DC device
1866  * @handle: The base driver device containing the amdgpu_dm device.
1867  *
1868  * Teardown components within &struct amdgpu_display_manager that require
1869  * cleanup. This involves cleaning up the DRM device, DC, and any modules that
1870  * were loaded. Also flush IRQ workqueues and disable them.
1871  */
1872 static int dm_hw_fini(void *handle)
1873 {
1874         struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1875
1876         amdgpu_dm_hpd_fini(adev);
1877
1878         amdgpu_dm_irq_fini(adev);
1879         amdgpu_dm_fini(adev);
1880         return 0;
1881 }
1882
1884 static int dm_enable_vblank(struct drm_crtc *crtc);
1885 static void dm_disable_vblank(struct drm_crtc *crtc);
1886
1887 static void dm_gpureset_toggle_interrupts(struct amdgpu_device *adev,
1888                                  struct dc_state *state, bool enable)
1889 {
1890         enum dc_irq_source irq_source;
1891         struct amdgpu_crtc *acrtc;
1892         int rc = -EBUSY;
1893         int i = 0;
1894
1895         for (i = 0; i < state->stream_count; i++) {
1896                 acrtc = get_crtc_by_otg_inst(
1897                                 adev, state->stream_status[i].primary_otg_inst);
1898
1899                 if (acrtc && state->stream_status[i].plane_count != 0) {
1900                         irq_source = IRQ_TYPE_PFLIP + acrtc->otg_inst;
1901                         rc = dc_interrupt_set(adev->dm.dc, irq_source, enable) ? 0 : -EBUSY;
1902                         DRM_DEBUG_VBL("crtc %d - pflip irq %sabling: r=%d\n",
1903                                       acrtc->crtc_id, enable ? "en" : "dis", rc);
1904                         if (rc)
1905                                 DRM_WARN("Failed to %s pflip interrupts\n",
1906                                          enable ? "enable" : "disable");
1907
1908                         if (enable) {
1909                                 rc = dm_enable_vblank(&acrtc->base);
1910                                 if (rc)
1911                                         DRM_WARN("Failed to enable vblank interrupts\n");
1912                         } else {
1913                                 dm_disable_vblank(&acrtc->base);
1914                         }
1915
1916                 }
1917         }
1919 }
1920
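/* Build a copy of the current DC state with every stream (and its planes)
 * removed, then commit it, effectively blanking all pipes.
 */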
1921 static enum dc_status amdgpu_dm_commit_zero_streams(struct dc *dc)
1922 {
1923         struct dc_state *context = NULL;
1924         enum dc_status res = DC_ERROR_UNEXPECTED;
1925         int i;
1926         struct dc_stream_state *del_streams[MAX_PIPES];
1927         int del_streams_count = 0;
1928
1929         memset(del_streams, 0, sizeof(del_streams));
1930
1931         context = dc_create_state(dc);
1932         if (context == NULL)
1933                 goto context_alloc_fail;
1934
1935         dc_resource_state_copy_construct_current(dc, context);
1936
1937         /* First remove from context all streams */
1938         for (i = 0; i < context->stream_count; i++) {
1939                 struct dc_stream_state *stream = context->streams[i];
1940
1941                 del_streams[del_streams_count++] = stream;
1942         }
1943
1944         /* Remove all planes for removed streams and then remove the streams */
1945         for (i = 0; i < del_streams_count; i++) {
1946                 if (!dc_rem_all_planes_for_stream(dc, del_streams[i], context)) {
1947                         res = DC_FAIL_DETACH_SURFACES;
1948                         goto fail;
1949                 }
1950
1951                 res = dc_remove_stream_from_ctx(dc, context, del_streams[i]);
1952                 if (res != DC_OK)
1953                         goto fail;
1954         }
1955
1957         res = dc_validate_global_state(dc, context, false);
1958
1959         if (res != DC_OK) {
1960                 DRM_ERROR("%s:resource validation failed, dc_status:%d\n", __func__, res);
1961                 goto fail;
1962         }
1963
1964         res = dc_commit_state(dc, context);
1965
1966 fail:
1967         dc_release_state(context);
1968
1969 context_alloc_fail:
1970         return res;
1971 }
1972
1973 static int dm_suspend(void *handle)
1974 {
1975         struct amdgpu_device *adev = handle;
1976         struct amdgpu_display_manager *dm = &adev->dm;
1977         int ret = 0;
1978
1979         if (amdgpu_in_reset(adev)) {
1980                 mutex_lock(&dm->dc_lock);
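                /* dc_lock is intentionally held across the GPU reset here;
                 * it is released in dm_resume() after the cached DC state
                 * has been re-committed.
                 */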
1981
1982 #if defined(CONFIG_DRM_AMD_DC_DCN)
1983                 dc_allow_idle_optimizations(adev->dm.dc, false);
1984 #endif
1985
1986                 dm->cached_dc_state = dc_copy_state(dm->dc->current_state);
1987
1988                 dm_gpureset_toggle_interrupts(adev, dm->cached_dc_state, false);
1989
1990                 amdgpu_dm_commit_zero_streams(dm->dc);
1991
1992                 amdgpu_dm_irq_suspend(adev);
1993
1994                 return ret;
1995         }
1996
1997         WARN_ON(adev->dm.cached_state);
1998         adev->dm.cached_state = drm_atomic_helper_suspend(adev_to_drm(adev));
1999
2000         s3_handle_mst(adev_to_drm(adev), true);
2001
2002         amdgpu_dm_irq_suspend(adev);
2003
2005         dc_set_power_state(dm->dc, DC_ACPI_CM_POWER_STATE_D3);
2006
2007         return 0;
2008 }
2009
2010 static struct amdgpu_dm_connector *
2011 amdgpu_dm_find_first_crtc_matching_connector(struct drm_atomic_state *state,
2012                                              struct drm_crtc *crtc)
2013 {
2014         uint32_t i;
2015         struct drm_connector_state *new_con_state;
2016         struct drm_connector *connector;
2017         struct drm_crtc *crtc_from_state;
2018
2019         for_each_new_connector_in_state(state, connector, new_con_state, i) {
2020                 crtc_from_state = new_con_state->crtc;
2021
2022                 if (crtc_from_state == crtc)
2023                         return to_amdgpu_dm_connector(connector);
2024         }
2025
2026         return NULL;
2027 }
2028
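/* Emulate link detection for a connector that is forced on without a
 * physically detected sink: create a sink matching the connector signal
 * (DP sinks are emulated with a virtual signal) and try to read an EDID.
 */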
2029 static void emulated_link_detect(struct dc_link *link)
2030 {
2031         struct dc_sink_init_data sink_init_data = { 0 };
2032         struct display_sink_capability sink_caps = { 0 };
2033         enum dc_edid_status edid_status;
2034         struct dc_context *dc_ctx = link->ctx;
2035         struct dc_sink *sink = NULL;
2036         struct dc_sink *prev_sink = NULL;
2037
2038         link->type = dc_connection_none;
2039         prev_sink = link->local_sink;
2040
2041         if (prev_sink)
2042                 dc_sink_release(prev_sink);
2043
2044         switch (link->connector_signal) {
2045         case SIGNAL_TYPE_HDMI_TYPE_A: {
2046                 sink_caps.transaction_type = DDC_TRANSACTION_TYPE_I2C;
2047                 sink_caps.signal = SIGNAL_TYPE_HDMI_TYPE_A;
2048                 break;
2049         }
2050
2051         case SIGNAL_TYPE_DVI_SINGLE_LINK: {
2052                 sink_caps.transaction_type = DDC_TRANSACTION_TYPE_I2C;
2053                 sink_caps.signal = SIGNAL_TYPE_DVI_SINGLE_LINK;
2054                 break;
2055         }
2056
2057         case SIGNAL_TYPE_DVI_DUAL_LINK: {
2058                 sink_caps.transaction_type = DDC_TRANSACTION_TYPE_I2C;
2059                 sink_caps.signal = SIGNAL_TYPE_DVI_DUAL_LINK;
2060                 break;
2061         }
2062
2063         case SIGNAL_TYPE_LVDS: {
2064                 sink_caps.transaction_type = DDC_TRANSACTION_TYPE_I2C;
2065                 sink_caps.signal = SIGNAL_TYPE_LVDS;
2066                 break;
2067         }
2068
2069         case SIGNAL_TYPE_EDP: {
2070                 sink_caps.transaction_type =
2071                         DDC_TRANSACTION_TYPE_I2C_OVER_AUX;
2072                 sink_caps.signal = SIGNAL_TYPE_EDP;
2073                 break;
2074         }
2075
2076         case SIGNAL_TYPE_DISPLAY_PORT: {
2077                 sink_caps.transaction_type =
2078                         DDC_TRANSACTION_TYPE_I2C_OVER_AUX;
2079                 sink_caps.signal = SIGNAL_TYPE_VIRTUAL;
2080                 break;
2081         }
2082
2083         default:
2084                 DC_ERROR("Invalid connector type! signal:%d\n",
2085                         link->connector_signal);
2086                 return;
2087         }
2088
2089         sink_init_data.link = link;
2090         sink_init_data.sink_signal = sink_caps.signal;
2091
2092         sink = dc_sink_create(&sink_init_data);
2093         if (!sink) {
2094                 DC_ERROR("Failed to create sink!\n");
2095                 return;
2096         }
2097
2098         /* dc_sink_create returns a new reference */
2099         link->local_sink = sink;
2100
2101         edid_status = dm_helpers_read_local_edid(
2102                         link->ctx,
2103                         link,
2104                         sink);
2105
2106         if (edid_status != EDID_OK)
2107                 DC_ERROR("Failed to read EDID\n");
2109 }
2110
2111 static void dm_gpureset_commit_state(struct dc_state *dc_state,
2112                                      struct amdgpu_display_manager *dm)
2113 {
2114         struct {
2115                 struct dc_surface_update surface_updates[MAX_SURFACES];
2116                 struct dc_plane_info plane_infos[MAX_SURFACES];
2117                 struct dc_scaling_info scaling_infos[MAX_SURFACES];
2118                 struct dc_flip_addrs flip_addrs[MAX_SURFACES];
2119                 struct dc_stream_update stream_update;
2120         } *bundle;
2121         int k, m;
2122
2123         bundle = kzalloc(sizeof(*bundle), GFP_KERNEL);
2124
2125         if (!bundle) {
2126                 dm_error("Failed to allocate update bundle\n");
2127                 goto cleanup;
2128         }
2129
2130         for (k = 0; k < dc_state->stream_count; k++) {
2131                 bundle->stream_update.stream = dc_state->streams[k];
2132
2133                 for (m = 0; m < dc_state->stream_status[k].plane_count; m++) {
2134                         bundle->surface_updates[m].surface =
2135                                 dc_state->stream_status[k].plane_states[m];
2136                         bundle->surface_updates[m].surface->force_full_update =
2137                                 true;
2138                 }
2139                 dc_commit_updates_for_stream(
2140                         dm->dc, bundle->surface_updates,
2141                         dc_state->stream_status[k].plane_count,
2142                         dc_state->streams[k], &bundle->stream_update, dc_state);
2143         }
2144
2145 cleanup:
2146         kfree(bundle);
2149 }
2150
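/* Blank the display by committing a stream update that sets dpms_off on the
 * stream currently driven by @link, without doing a full modeset.
 */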
2151 static void dm_set_dpms_off(struct dc_link *link)
2152 {
2153         struct dc_stream_state *stream_state;
2154         struct amdgpu_dm_connector *aconnector = link->priv;
2155         struct amdgpu_device *adev = drm_to_adev(aconnector->base.dev);
2156         struct dc_stream_update stream_update;
2157         bool dpms_off = true;
2158
2159         memset(&stream_update, 0, sizeof(stream_update));
2160         stream_update.dpms_off = &dpms_off;
2161
2162         mutex_lock(&adev->dm.dc_lock);
2163         stream_state = dc_stream_find_from_link(link);
2164
2165         if (stream_state == NULL) {
2166                 DRM_DEBUG_DRIVER("Error finding stream state associated with link!\n");
2167                 mutex_unlock(&adev->dm.dc_lock);
2168                 return;
2169         }
2170
2171         stream_update.stream = stream_state;
2172         dc_commit_updates_for_stream(stream_state->ctx->dc, NULL, 0,
2173                                      stream_state, &stream_update,
2174                                      stream_state->ctx->dc->current_state);
2175         mutex_unlock(&adev->dm.dc_lock);
2176 }
2177
2178 static int dm_resume(void *handle)
2179 {
2180         struct amdgpu_device *adev = handle;
2181         struct drm_device *ddev = adev_to_drm(adev);
2182         struct amdgpu_display_manager *dm = &adev->dm;
2183         struct amdgpu_dm_connector *aconnector;
2184         struct drm_connector *connector;
2185         struct drm_connector_list_iter iter;
2186         struct drm_crtc *crtc;
2187         struct drm_crtc_state *new_crtc_state;
2188         struct dm_crtc_state *dm_new_crtc_state;
2189         struct drm_plane *plane;
2190         struct drm_plane_state *new_plane_state;
2191         struct dm_plane_state *dm_new_plane_state;
2192         struct dm_atomic_state *dm_state = to_dm_atomic_state(dm->atomic_obj.state);
2193         enum dc_connection_type new_connection_type = dc_connection_none;
2194         struct dc_state *dc_state;
2195         int i, r, j;
2196
2197         if (amdgpu_in_reset(adev)) {
2198                 dc_state = dm->cached_dc_state;
2199
2200                 r = dm_dmub_hw_init(adev);
2201                 if (r)
2202                         DRM_ERROR("DMUB interface failed to initialize: status=%d\n", r);
2203
2204                 dc_set_power_state(dm->dc, DC_ACPI_CM_POWER_STATE_D0);
2205                 dc_resume(dm->dc);
2206
2207                 amdgpu_dm_irq_resume_early(adev);
2208
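                /* Mark every stream and plane in the cached state as changed
                 * so the commit below reprograms the full hardware state.
                 */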
2209                 for (i = 0; i < dc_state->stream_count; i++) {
2210                         dc_state->streams[i]->mode_changed = true;
2211                         for (j = 0; j < dc_state->stream_status[i].plane_count; j++) {
2212                                 dc_state->stream_status[i].plane_states[j]->update_flags.raw
2213                                         = 0xffffffff;
2214                         }
2215                 }
2216
2217                 WARN_ON(!dc_commit_state(dm->dc, dc_state));
2218
2219                 dm_gpureset_commit_state(dm->cached_dc_state, dm);
2220
2221                 dm_gpureset_toggle_interrupts(adev, dm->cached_dc_state, true);
2222
2223                 dc_release_state(dm->cached_dc_state);
2224                 dm->cached_dc_state = NULL;
2225
2226                 amdgpu_dm_irq_resume_late(adev);
2227
2228                 mutex_unlock(&dm->dc_lock);
2229
2230                 return 0;
2231         }
2232         /* Recreate dc_state - DC invalidates it when setting power state to S3. */
2233         dc_release_state(dm_state->context);
2234         dm_state->context = dc_create_state(dm->dc);
2235         /* TODO: Remove dc_state->dccg, use dc->dccg directly. */
2236         dc_resource_state_construct(dm->dc, dm_state->context);
2237
2238         /* Before powering on DC we need to re-initialize DMUB. */
2239         r = dm_dmub_hw_init(adev);
2240         if (r)
2241                 DRM_ERROR("DMUB interface failed to initialize: status=%d\n", r);
2242
2243         /* power on hardware */
2244         dc_set_power_state(dm->dc, DC_ACPI_CM_POWER_STATE_D0);
2245
2246         /* program HPD filter */
2247         dc_resume(dm->dc);
2248
2249         /*
2250          * Enable HPD RX IRQ early; this must happen before the mode is set,
2251          * since short-pulse interrupts are used for MST.
2252          */
2253         amdgpu_dm_irq_resume_early(adev);
2254
2255         /* On resume we need to rewrite the MSTM control bits to enable MST */
2256         s3_handle_mst(ddev, false);
2257
2258         /* Do detection */
2259         drm_connector_list_iter_begin(ddev, &iter);
2260         drm_for_each_connector_iter(connector, &iter) {
2261                 aconnector = to_amdgpu_dm_connector(connector);
2262
2263                 /*
2264                  * Skip connectors encountered while traversing that belong
2265                  * to an already created MST topology.
2266                  */
2267                 if (aconnector->mst_port)
2268                         continue;
2269
2270                 mutex_lock(&aconnector->hpd_lock);
2271                 if (!dc_link_detect_sink(aconnector->dc_link, &new_connection_type))
2272                         DRM_ERROR("KMS: Failed to detect connector\n");
2273
2274                 if (aconnector->base.force && new_connection_type == dc_connection_none)
2275                         emulated_link_detect(aconnector->dc_link);
2276                 else
2277                         dc_link_detect(aconnector->dc_link, DETECT_REASON_HPD);
2278
2279                 if (aconnector->fake_enable && aconnector->dc_link->local_sink)
2280                         aconnector->fake_enable = false;
2281
2282                 if (aconnector->dc_sink)
2283                         dc_sink_release(aconnector->dc_sink);
2284                 aconnector->dc_sink = NULL;
2285                 amdgpu_dm_update_connector_after_detect(aconnector);
2286                 mutex_unlock(&aconnector->hpd_lock);
2287         }
2288         drm_connector_list_iter_end(&iter);
2289
2290         /* Force mode set in atomic commit */
2291         for_each_new_crtc_in_state(dm->cached_state, crtc, new_crtc_state, i)
2292                 new_crtc_state->active_changed = true;
2293
2294         /*
2295          * atomic_check is expected to create the dc states. We need to release
2296          * them here, since they were duplicated as part of the suspend
2297          * procedure.
2298          */
2299         for_each_new_crtc_in_state(dm->cached_state, crtc, new_crtc_state, i) {
2300                 dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
2301                 if (dm_new_crtc_state->stream) {
2302                         WARN_ON(kref_read(&dm_new_crtc_state->stream->refcount) > 1);
2303                         dc_stream_release(dm_new_crtc_state->stream);
2304                         dm_new_crtc_state->stream = NULL;
2305                 }
2306         }
2307
2308         for_each_new_plane_in_state(dm->cached_state, plane, new_plane_state, i) {
2309                 dm_new_plane_state = to_dm_plane_state(new_plane_state);
2310                 if (dm_new_plane_state->dc_state) {
2311                         WARN_ON(kref_read(&dm_new_plane_state->dc_state->refcount) > 1);
2312                         dc_plane_state_release(dm_new_plane_state->dc_state);
2313                         dm_new_plane_state->dc_state = NULL;
2314                 }
2315         }
2316
2317         drm_atomic_helper_resume(ddev, dm->cached_state);
2318
2319         dm->cached_state = NULL;
2320
2321         amdgpu_dm_irq_resume_late(adev);
2322
2323         amdgpu_dm_smu_write_watermarks_table(adev);
2324
2325         return 0;
2326 }
2327
2328 /**
2329  * DOC: DM Lifecycle
2330  *
2331  * DM (and consequently DC) is registered in the amdgpu base driver as a IP
2332  * block. When CONFIG_DRM_AMD_DC is enabled, the DM device IP block is added to
2333  * the base driver's device list to be initialized and torn down accordingly.
2334  *
2335  * The functions to do so are provided as hooks in &struct amd_ip_funcs.
2336  */
2337
2338 static const struct amd_ip_funcs amdgpu_dm_funcs = {
2339         .name = "dm",
2340         .early_init = dm_early_init,
2341         .late_init = dm_late_init,
2342         .sw_init = dm_sw_init,
2343         .sw_fini = dm_sw_fini,
2344         .hw_init = dm_hw_init,
2345         .hw_fini = dm_hw_fini,
2346         .suspend = dm_suspend,
2347         .resume = dm_resume,
2348         .is_idle = dm_is_idle,
2349         .wait_for_idle = dm_wait_for_idle,
2350         .check_soft_reset = dm_check_soft_reset,
2351         .soft_reset = dm_soft_reset,
2352         .set_clockgating_state = dm_set_clockgating_state,
2353         .set_powergating_state = dm_set_powergating_state,
2354 };
2355
2356 const struct amdgpu_ip_block_version dm_ip_block =
2357 {
2358         .type = AMD_IP_BLOCK_TYPE_DCE,
2359         .major = 1,
2360         .minor = 0,
2361         .rev = 0,
2362         .funcs = &amdgpu_dm_funcs,
2363 };
2364
2366 /**
2367  * DOC: atomic
2368  *
2369  * *WIP*
2370  */
2371
2372 static const struct drm_mode_config_funcs amdgpu_dm_mode_funcs = {
2373         .fb_create = amdgpu_display_user_framebuffer_create,
2374         .get_format_info = amd_get_format_info,
2375         .output_poll_changed = drm_fb_helper_output_poll_changed,
2376         .atomic_check = amdgpu_dm_atomic_check,
2377         .atomic_commit = drm_atomic_helper_commit,
2378 };
2379
2380 static struct drm_mode_config_helper_funcs amdgpu_dm_mode_config_helperfuncs = {
2381         .atomic_commit_tail = amdgpu_dm_atomic_commit_tail
2382 };
2383
2384 static void update_connector_ext_caps(struct amdgpu_dm_connector *aconnector)
2385 {
2386         u32 max_cll, min_cll, max, min, q, r;
2387         struct amdgpu_dm_backlight_caps *caps;
2388         struct amdgpu_display_manager *dm;
2389         struct drm_connector *conn_base;
2390         struct amdgpu_device *adev;
2391         struct dc_link *link = NULL;
2392         static const u8 pre_computed_values[] = {
2393                 50, 51, 52, 53, 55, 56, 57, 58, 59, 61, 62, 63, 65, 66, 68, 69,
2394                 71, 72, 74, 75, 77, 79, 81, 82, 84, 86, 88, 90, 92, 94, 96, 98};
2395
2396         if (!aconnector || !aconnector->dc_link)
2397                 return;
2398
2399         link = aconnector->dc_link;
2400         if (link->connector_signal != SIGNAL_TYPE_EDP)
2401                 return;
2402
2403         conn_base = &aconnector->base;
2404         adev = drm_to_adev(conn_base->dev);
2405         dm = &adev->dm;
2406         caps = &dm->backlight_caps;
2407         caps->ext_caps = &aconnector->dc_link->dpcd_sink_ext_caps;
2408         caps->aux_support = false;
2409         max_cll = conn_base->hdr_sink_metadata.hdmi_type1.max_cll;
2410         min_cll = conn_base->hdr_sink_metadata.hdmi_type1.min_cll;
2411
2412         if (caps->ext_caps->bits.oled == 1 ||
2413             caps->ext_caps->bits.sdr_aux_backlight_control == 1 ||
2414             caps->ext_caps->bits.hdr_aux_backlight_control == 1)
2415                 caps->aux_support = true;
2416
2417         if (amdgpu_backlight == 0)
2418                 caps->aux_support = false;
2419         else if (amdgpu_backlight == 1)
2420                 caps->aux_support = true;
2421
2422         /* From the specification (CTA-861-G), the maximum luminance is
2423          * calculated as:
2424          *      Luminance = 50*2**(CV/32)
2425          * where CV is a one-byte value.
2426          * Evaluating this expression would require floating-point precision;
2427          * to avoid that complexity, we exploit the fact that CV is divided by
2428          * a constant. By Euclid's division algorithm, CV can be written as
2429          * CV = 32*q + r. Substituting CV in the luminance expression gives
2430          * 50*(2**q)*(2**(r/32)), so we only need to pre-compute 50*2**(r/32)
2431          * for r in 0..31. The values were pre-computed with this Ruby line:
2432          *      (0...32).each {|cv| puts (50*2**(cv/32.0)).round}
2433          * and the results can be verified against pre_computed_values.
2434          */
2437         q = max_cll >> 5;
2438         r = max_cll % 32;
2439         max = (1 << q) * pre_computed_values[r];
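        /* Example: max_cll = 65 gives q = 2, r = 1, so
         * max = (1 << 2) * 51 = 204, matching 50*2**(65/32) ~= 204.4.
         */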
2440
2441         // min luminance: maxLum * (CV/255)^2 / 100
2442         q = DIV_ROUND_CLOSEST(min_cll, 255);
2443         min = max * DIV_ROUND_CLOSEST((q * q), 100);
2444
2445         caps->aux_max_input_signal = max;
2446         caps->aux_min_input_signal = min;
2447 }
2448
2449 void amdgpu_dm_update_connector_after_detect(
2450                 struct amdgpu_dm_connector *aconnector)
2451 {
2452         struct drm_connector *connector = &aconnector->base;
2453         struct drm_device *dev = connector->dev;
2454         struct dc_sink *sink;
2455
2456         /* MST handled by drm_mst framework */
2457         if (aconnector->mst_mgr.mst_state)
2458                 return;
2459
2460         sink = aconnector->dc_link->local_sink;
2461         if (sink)
2462                 dc_sink_retain(sink);
2463
2464         /*
2465          * An EDID-managed connector gets its first update only in the mode_valid
2466          * hook; the connector sink is then set to either the fake or the physical
2467          * sink, depending on link status. Skip if already done during boot.
2468          */
2469         if (aconnector->base.force != DRM_FORCE_UNSPECIFIED
2470                         && aconnector->dc_em_sink) {
2471
2472                 /*
2473                  * For headless S3 resume, use the emulated sink to fake the
2474                  * stream, because connector->sink is set to NULL on resume.
2475                  */
2476                 mutex_lock(&dev->mode_config.mutex);
2477
2478                 if (sink) {
2479                         if (aconnector->dc_sink) {
2480                                 amdgpu_dm_update_freesync_caps(connector, NULL);
2481                                 /*
2482                                  * The retain and release below bump up the sink's refcount
2483                                  * because the link no longer points to it after disconnect;
2484                                  * otherwise the next CRTC-to-connector reshuffle by the UMD
2485                                  * would trigger an unwanted dc_sink release.
2486                                  */
2487                                 dc_sink_release(aconnector->dc_sink);
2488                         }
2489                         aconnector->dc_sink = sink;
2490                         dc_sink_retain(aconnector->dc_sink);
2491                         amdgpu_dm_update_freesync_caps(connector,
2492                                         aconnector->edid);
2493                 } else {
2494                         amdgpu_dm_update_freesync_caps(connector, NULL);
2495                         if (!aconnector->dc_sink) {
2496                                 aconnector->dc_sink = aconnector->dc_em_sink;
2497                                 dc_sink_retain(aconnector->dc_sink);
2498                         }
2499                 }
2500
2501                 mutex_unlock(&dev->mode_config.mutex);
2502
2503                 if (sink)
2504                         dc_sink_release(sink);
2505                 return;
2506         }
2507
2508         /*
2509          * TODO: temporary guard to look for proper fix
2510          * if this sink is MST sink, we should not do anything
2511          */
2512         if (sink && sink->sink_signal == SIGNAL_TYPE_DISPLAY_PORT_MST) {
2513                 dc_sink_release(sink);
2514                 return;
2515         }
2516
2517         if (aconnector->dc_sink == sink) {
2518                 /*
2519                  * We got a DP short pulse (Link Loss, DP CTS, etc...).
2520                  * Do nothing!!
2521                  */
2522                 DRM_DEBUG_DRIVER("DCHPD: connector_id=%d: dc_sink didn't change.\n",
2523                                 aconnector->connector_id);
2524                 if (sink)
2525                         dc_sink_release(sink);
2526                 return;
2527         }
2528
2529         DRM_DEBUG_DRIVER("DCHPD: connector_id=%d: Old sink=%p New sink=%p\n",
2530                 aconnector->connector_id, aconnector->dc_sink, sink);
2531
2532         mutex_lock(&dev->mode_config.mutex);
2533
2534         /*
2535          * 1. Update status of the drm connector
2536          * 2. Send an event and let userspace tell us what to do
2537          */
2538         if (sink) {
2539                 /*
2540                  * TODO: check if we still need the S3 mode update workaround.
2541                  * If yes, put it here.
2542                  */
2543                 if (aconnector->dc_sink) {
2544                         amdgpu_dm_update_freesync_caps(connector, NULL);
2545                         dc_sink_release(aconnector->dc_sink);
2546                 }
2547
2548                 aconnector->dc_sink = sink;
2549                 dc_sink_retain(aconnector->dc_sink);
2550                 if (sink->dc_edid.length == 0) {
2551                         aconnector->edid = NULL;
2552                         if (aconnector->dc_link->aux_mode) {
2553                                 drm_dp_cec_unset_edid(
2554                                         &aconnector->dm_dp_aux.aux);
2555                         }
2556                 } else {
2557                         aconnector->edid =
2558                                 (struct edid *)sink->dc_edid.raw_edid;
2559
2560                         drm_connector_update_edid_property(connector,
2561                                                            aconnector->edid);
2562                         if (aconnector->dc_link->aux_mode)
2563                                 drm_dp_cec_set_edid(&aconnector->dm_dp_aux.aux,
2564                                                     aconnector->edid);
2565                 }
2566
2567                 amdgpu_dm_update_freesync_caps(connector, aconnector->edid);
2568                 update_connector_ext_caps(aconnector);
2569         } else {
2570                 drm_dp_cec_unset_edid(&aconnector->dm_dp_aux.aux);
2571                 amdgpu_dm_update_freesync_caps(connector, NULL);
2572                 drm_connector_update_edid_property(connector, NULL);
2573                 aconnector->num_modes = 0;
2574                 dc_sink_release(aconnector->dc_sink);
2575                 aconnector->dc_sink = NULL;
2576                 aconnector->edid = NULL;
2577 #ifdef CONFIG_DRM_AMD_DC_HDCP
2578                 /* Set CP to DESIRED if it was ENABLED, so we can re-enable it again on hotplug */
2579                 if (connector->state->content_protection == DRM_MODE_CONTENT_PROTECTION_ENABLED)
2580                         connector->state->content_protection = DRM_MODE_CONTENT_PROTECTION_DESIRED;
2581 #endif
2582         }
2583
2584         mutex_unlock(&dev->mode_config.mutex);
2585
2586         update_subconnector_property(aconnector);
2587
2588         if (sink)
2589                 dc_sink_release(sink);
2590 }
2591
2592 static void handle_hpd_irq(void *param)
2593 {
2594         struct amdgpu_dm_connector *aconnector = (struct amdgpu_dm_connector *)param;
2595         struct drm_connector *connector = &aconnector->base;
2596         struct drm_device *dev = connector->dev;
2597         enum dc_connection_type new_connection_type = dc_connection_none;
2598         struct amdgpu_device *adev = drm_to_adev(dev);
2599 #ifdef CONFIG_DRM_AMD_DC_HDCP
2600         struct dm_connector_state *dm_con_state = to_dm_connector_state(connector->state);
2601 #endif
2602
2603         if (adev->dm.disable_hpd_irq)
2604                 return;
2605
2606         /*
2607          * In case of failure, or for MST, there is no need to update the connector
2608          * status or notify the OS, since (in the MST case) MST does this in its own context.
2609          */
2610         mutex_lock(&aconnector->hpd_lock);
2611
2612 #ifdef CONFIG_DRM_AMD_DC_HDCP
2613         if (adev->dm.hdcp_workqueue) {
2614                 hdcp_reset_display(adev->dm.hdcp_workqueue, aconnector->dc_link->link_index);
2615                 dm_con_state->update_hdcp = true;
2616         }
2617 #endif
2618         if (aconnector->fake_enable)
2619                 aconnector->fake_enable = false;
2620
2621         if (!dc_link_detect_sink(aconnector->dc_link, &new_connection_type))
2622                 DRM_ERROR("KMS: Failed to detect connector\n");
2623
2624         if (aconnector->base.force && new_connection_type == dc_connection_none) {
2625                 emulated_link_detect(aconnector->dc_link);
2626
2628                 drm_modeset_lock_all(dev);
2629                 dm_restore_drm_connector_state(dev, connector);
2630                 drm_modeset_unlock_all(dev);
2631
2632                 if (aconnector->base.force == DRM_FORCE_UNSPECIFIED)
2633                         drm_kms_helper_hotplug_event(dev);
2634
2635         } else if (dc_link_detect(aconnector->dc_link, DETECT_REASON_HPD)) {
2636                 if (new_connection_type == dc_connection_none &&
2637                     aconnector->dc_link->type == dc_connection_none)
2638                         dm_set_dpms_off(aconnector->dc_link);
2639
2640                 amdgpu_dm_update_connector_after_detect(aconnector);
2641
2642                 drm_modeset_lock_all(dev);
2643                 dm_restore_drm_connector_state(dev, connector);
2644                 drm_modeset_unlock_all(dev);
2645
2646                 if (aconnector->base.force == DRM_FORCE_UNSPECIFIED)
2647                         drm_kms_helper_hotplug_event(dev);
2648         }
2649         mutex_unlock(&aconnector->hpd_lock);
2651 }
2652
2653 static void dm_handle_hpd_rx_irq(struct amdgpu_dm_connector *aconnector)
2654 {
2655         uint8_t esi[DP_PSR_ERROR_STATUS - DP_SINK_COUNT_ESI] = { 0 };
2656         uint8_t dret;
2657         bool new_irq_handled = false;
2658         int dpcd_addr;
2659         int dpcd_bytes_to_read;
2660
2661         const int max_process_count = 30;
2662         int process_count = 0;
2663
2664         const struct dc_link_status *link_status = dc_link_get_status(aconnector->dc_link);
2665
2666         if (link_status->dpcd_caps->dpcd_rev.raw < 0x12) {
2667                 dpcd_bytes_to_read = DP_LANE0_1_STATUS - DP_SINK_COUNT;
2668                 /* DPCD 0x200 - 0x201 for downstream IRQ */
2669                 dpcd_addr = DP_SINK_COUNT;
2670         } else {
2671                 dpcd_bytes_to_read = DP_PSR_ERROR_STATUS - DP_SINK_COUNT_ESI;
2672                 /* DPCD 0x2002 - 0x2005 for downstream IRQ */
2673                 dpcd_addr = DP_SINK_COUNT_ESI;
2674         }
2675
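        /* Read the ESI (Event Status Indicator) bytes and keep servicing MST
         * short-pulse IRQs until the sink stops raising new ones or the
         * iteration cap is reached.
         */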
2676         dret = drm_dp_dpcd_read(
2677                 &aconnector->dm_dp_aux.aux,
2678                 dpcd_addr,
2679                 esi,
2680                 dpcd_bytes_to_read);
2681
2682         while (dret == dpcd_bytes_to_read &&
2683                 process_count < max_process_count) {
2684                 uint8_t retry;
2685                 dret = 0;
2686
2687                 process_count++;
2688
2689                 DRM_DEBUG_DRIVER("ESI %02x %02x %02x\n", esi[0], esi[1], esi[2]);
2690                 /* handle HPD short pulse irq */
2691                 if (aconnector->mst_mgr.mst_state)
2692                         drm_dp_mst_hpd_irq(
2693                                 &aconnector->mst_mgr,
2694                                 esi,
2695                                 &new_irq_handled);
2696
2697                 if (new_irq_handled) {
2698                         /* ACK at DPCD to notify downstream */
2699                         const int ack_dpcd_bytes_to_write =
2700                                 dpcd_bytes_to_read - 1;
2701
2702                         for (retry = 0; retry < 3; retry++) {
2703                                 uint8_t wret;
2704
2705                                 wret = drm_dp_dpcd_write(
2706                                         &aconnector->dm_dp_aux.aux,
2707                                         dpcd_addr + 1,
2708                                         &esi[1],
2709                                         ack_dpcd_bytes_to_write);
2710                                 if (wret == ack_dpcd_bytes_to_write)
2711                                         break;
2712                         }
2713
2714                         /* check if there is new irq to be handled */
2715                         dret = drm_dp_dpcd_read(
2716                                 &aconnector->dm_dp_aux.aux,
2717                                 dpcd_addr,
2718                                 esi,
2719                                 dpcd_bytes_to_read);
2720
2721                         new_irq_handled = false;
2722                 } else {
2723                         break;
2724                 }
2725         }
2726
2727         if (process_count == max_process_count)
2728                 DRM_DEBUG_DRIVER("Loop exceeded max iterations\n");
2729 }
2730
2731 static void handle_hpd_rx_irq(void *param)
2732 {
2733         struct amdgpu_dm_connector *aconnector = (struct amdgpu_dm_connector *)param;
2734         struct drm_connector *connector = &aconnector->base;
2735         struct drm_device *dev = connector->dev;
2736         struct dc_link *dc_link = aconnector->dc_link;
2737         bool is_mst_root_connector = aconnector->mst_mgr.mst_state;
2738         bool result = false;
2739         enum dc_connection_type new_connection_type = dc_connection_none;
2740         struct amdgpu_device *adev = drm_to_adev(dev);
2741         union hpd_irq_data hpd_irq_data;
2742
2743         memset(&hpd_irq_data, 0, sizeof(hpd_irq_data));
2744
2745         if (adev->dm.disable_hpd_irq)
2746                 return;
2747
2749         /*
2750          * TODO: This mutex temporarily protects the HPD interrupt from GPIO
2751          * conflicts; once an i2c helper is implemented it should be retired.
2752          */
2754         mutex_lock(&aconnector->hpd_lock);
2755
2756         read_hpd_rx_irq_data(dc_link, &hpd_irq_data);
2757
2758         if ((dc_link->cur_link_settings.lane_count != LANE_COUNT_UNKNOWN) ||
2759                 (dc_link->type == dc_connection_mst_branch)) {
2760                 if (hpd_irq_data.bytes.device_service_irq.bits.UP_REQ_MSG_RDY) {
2761                         result = true;
2762                         dm_handle_hpd_rx_irq(aconnector);
2763                         goto out;
2764                 } else if (hpd_irq_data.bytes.device_service_irq.bits.DOWN_REP_MSG_RDY) {
2765                         result = false;
2766                         dm_handle_hpd_rx_irq(aconnector);
2767                         goto out;
2768                 }
2769         }
2770
2771         if (!amdgpu_in_reset(adev)) {
2772                 mutex_lock(&adev->dm.dc_lock);
2773 #ifdef CONFIG_DRM_AMD_DC_HDCP
2774                 result = dc_link_handle_hpd_rx_irq(dc_link, &hpd_irq_data, NULL);
2775 #else
2776                 result = dc_link_handle_hpd_rx_irq(dc_link, NULL, NULL);
2777 #endif
2778                 mutex_unlock(&adev->dm.dc_lock);
2779         }
2780
2781 out:
2782         if (result && !is_mst_root_connector) {
2783                 /* Downstream Port status changed. */
2784                 if (!dc_link_detect_sink(dc_link, &new_connection_type))
2785                         DRM_ERROR("KMS: Failed to detect connector\n");
2786
2787                 if (aconnector->base.force && new_connection_type == dc_connection_none) {
2788                         emulated_link_detect(dc_link);
2789
2790                         if (aconnector->fake_enable)
2791                                 aconnector->fake_enable = false;
2792
2793                         amdgpu_dm_update_connector_after_detect(aconnector);
2794
2796                         drm_modeset_lock_all(dev);
2797                         dm_restore_drm_connector_state(dev, connector);
2798                         drm_modeset_unlock_all(dev);
2799
2800                         drm_kms_helper_hotplug_event(dev);
2801                 } else if (dc_link_detect(dc_link, DETECT_REASON_HPDRX)) {
2802
2803                         if (aconnector->fake_enable)
2804                                 aconnector->fake_enable = false;
2805
2806                         amdgpu_dm_update_connector_after_detect(aconnector);
2807
2809                         drm_modeset_lock_all(dev);
2810                         dm_restore_drm_connector_state(dev, connector);
2811                         drm_modeset_unlock_all(dev);
2812
2813                         drm_kms_helper_hotplug_event(dev);
2814                 }
2815         }
2816 #ifdef CONFIG_DRM_AMD_DC_HDCP
2817         if (hpd_irq_data.bytes.device_service_irq.bits.CP_IRQ) {
2818                 if (adev->dm.hdcp_workqueue)
2819                         hdcp_handle_cpirq(adev->dm.hdcp_workqueue, aconnector->base.index);
2820         }
2821 #endif
2822
2823         if (dc_link->type != dc_connection_mst_branch)
2824                 drm_dp_cec_irq(&aconnector->dm_dp_aux.aux);
2825
2826         mutex_unlock(&aconnector->hpd_lock);
2827 }
2828
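/*
 * Walk the connector list and hook up handle_hpd_irq()/handle_hpd_rx_irq()
 * (both in low IRQ context) for every link that exposes a valid HPD or
 * HPD-RX interrupt source.
 */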
2829 static void register_hpd_handlers(struct amdgpu_device *adev)
2830 {
2831         struct drm_device *dev = adev_to_drm(adev);
2832         struct drm_connector *connector;
2833         struct amdgpu_dm_connector *aconnector;
2834         const struct dc_link *dc_link;
2835         struct dc_interrupt_params int_params = {0};
2836
2837         int_params.requested_polarity = INTERRUPT_POLARITY_DEFAULT;
2838         int_params.current_polarity = INTERRUPT_POLARITY_DEFAULT;
2839
2840         list_for_each_entry(connector,
2841                         &dev->mode_config.connector_list, head) {
2842
2843                 aconnector = to_amdgpu_dm_connector(connector);
2844                 dc_link = aconnector->dc_link;
2845
2846                 if (dc_link->irq_source_hpd != DC_IRQ_SOURCE_INVALID) {
2847                         int_params.int_context = INTERRUPT_LOW_IRQ_CONTEXT;
2848                         int_params.irq_source = dc_link->irq_source_hpd;
2849
2850                         amdgpu_dm_irq_register_interrupt(adev, &int_params,
2851                                         handle_hpd_irq,
2852                                         (void *) aconnector);
2853                 }
2854
2855                 if (dc_link->irq_source_hpd_rx != DC_IRQ_SOURCE_INVALID) {
2856
2857                         /* Also register for DP short pulse (hpd_rx). */
2858                         int_params.int_context = INTERRUPT_LOW_IRQ_CONTEXT;
2859                         int_params.irq_source = dc_link->irq_source_hpd_rx;
2860
2861                         amdgpu_dm_irq_register_interrupt(adev, &int_params,
2862                                         handle_hpd_rx_irq,
2863                                         (void *) aconnector);
2864                 }
2865         }
2866 }
2867
2868 #if defined(CONFIG_DRM_AMD_DC_SI)
2869 /* Register IRQ sources and initialize IRQ callbacks */
2870 static int dce60_register_irq_handlers(struct amdgpu_device *adev)
2871 {
2872         struct dc *dc = adev->dm.dc;
2873         struct common_irq_params *c_irq_params;
2874         struct dc_interrupt_params int_params = {0};
2875         int r;
2876         int i;
2877         unsigned int client_id = AMDGPU_IRQ_CLIENTID_LEGACY;
2878
2879         int_params.requested_polarity = INTERRUPT_POLARITY_DEFAULT;
2880         int_params.current_polarity = INTERRUPT_POLARITY_DEFAULT;
2881
2882         /*
2883          * Actions of amdgpu_irq_add_id():
2884          * 1. Register a set() function with base driver.
2885          *    Base driver will call set() function to enable/disable an
2886          *    interrupt in DC hardware.
2887          * 2. Register amdgpu_dm_irq_handler().
2888          *    Base driver will call amdgpu_dm_irq_handler() for ALL interrupts
2889          *    coming from DC hardware.
2890          *    amdgpu_dm_irq_handler() will re-direct the interrupt to DC
2891          *    for acknowledging and handling. */
2892
2893         /* Use VBLANK interrupt */
2894         for (i = 0; i < adev->mode_info.num_crtc; i++) {
2895                 r = amdgpu_irq_add_id(adev, client_id, i + 1, &adev->crtc_irq);
2896                 if (r) {
2897                         DRM_ERROR("Failed to add crtc irq id!\n");
2898                         return r;
2899                 }
2900
2901                 int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
2902                 int_params.irq_source =
2903                         dc_interrupt_to_irq_source(dc, i + 1, 0);
2904
2905                 c_irq_params = &adev->dm.vblank_params[int_params.irq_source - DC_IRQ_SOURCE_VBLANK1];
2906
2907                 c_irq_params->adev = adev;
2908                 c_irq_params->irq_src = int_params.irq_source;
2909
2910                 amdgpu_dm_irq_register_interrupt(adev, &int_params,
2911                                 dm_crtc_high_irq, c_irq_params);
2912         }
2913
2914         /* Use GRPH_PFLIP interrupt */
2915         for (i = VISLANDS30_IV_SRCID_D1_GRPH_PFLIP;
2916                         i <= VISLANDS30_IV_SRCID_D6_GRPH_PFLIP; i += 2) {
2917                 r = amdgpu_irq_add_id(adev, client_id, i, &adev->pageflip_irq);
2918                 if (r) {
2919                         DRM_ERROR("Failed to add page flip irq id!\n");
2920                         return r;
2921                 }
2922
2923                 int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
2924                 int_params.irq_source =
2925                         dc_interrupt_to_irq_source(dc, i, 0);
2926
2927                 c_irq_params = &adev->dm.pflip_params[int_params.irq_source - DC_IRQ_SOURCE_PFLIP_FIRST];
2928
2929                 c_irq_params->adev = adev;
2930                 c_irq_params->irq_src = int_params.irq_source;
2931
2932                 amdgpu_dm_irq_register_interrupt(adev, &int_params,
2933                                 dm_pflip_high_irq, c_irq_params);
2935         }
2936
2937         /* HPD */
2938         r = amdgpu_irq_add_id(adev, client_id,
2939                         VISLANDS30_IV_SRCID_HOTPLUG_DETECT_A, &adev->hpd_irq);
2940         if (r) {
2941                 DRM_ERROR("Failed to add hpd irq id!\n");
2942                 return r;
2943         }
2944
2945         register_hpd_handlers(adev);
2946
2947         return 0;
2948 }
2949 #endif
2950
2951 /* Register IRQ sources and initialize IRQ callbacks */
2952 static int dce110_register_irq_handlers(struct amdgpu_device *adev)
2953 {
2954         struct dc *dc = adev->dm.dc;
2955         struct common_irq_params *c_irq_params;
2956         struct dc_interrupt_params int_params = {0};
2957         int r;
2958         int i;
2959         unsigned int client_id = AMDGPU_IRQ_CLIENTID_LEGACY;
2960
2961         if (adev->asic_type >= CHIP_VEGA10)
2962                 client_id = SOC15_IH_CLIENTID_DCE;
2963
2964         int_params.requested_polarity = INTERRUPT_POLARITY_DEFAULT;
2965         int_params.current_polarity = INTERRUPT_POLARITY_DEFAULT;
2966
2967         /*
2968          * Actions of amdgpu_irq_add_id():
2969          * 1. Register a set() function with base driver.
2970          *    Base driver will call set() function to enable/disable an
2971          *    interrupt in DC hardware.
2972          * 2. Register amdgpu_dm_irq_handler().
2973          *    Base driver will call amdgpu_dm_irq_handler() for ALL interrupts
2974          *    coming from DC hardware.
2975          *    amdgpu_dm_irq_handler() will re-direct the interrupt to DC
2976          *    for acknowledging and handling. */
2977
2978         /* Use VBLANK interrupt */
2979         for (i = VISLANDS30_IV_SRCID_D1_VERTICAL_INTERRUPT0; i <= VISLANDS30_IV_SRCID_D6_VERTICAL_INTERRUPT0; i++) {
2980                 r = amdgpu_irq_add_id(adev, client_id, i, &adev->crtc_irq);
2981                 if (r) {
2982                         DRM_ERROR("Failed to add crtc irq id!\n");
2983                         return r;
2984                 }
2985
2986                 int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
2987                 int_params.irq_source =
2988                         dc_interrupt_to_irq_source(dc, i, 0);
2989
2990                 c_irq_params = &adev->dm.vblank_params[int_params.irq_source - DC_IRQ_SOURCE_VBLANK1];
2991
2992                 c_irq_params->adev = adev;
2993                 c_irq_params->irq_src = int_params.irq_source;
2994
2995                 amdgpu_dm_irq_register_interrupt(adev, &int_params,
2996                                 dm_crtc_high_irq, c_irq_params);
2997         }
2998
2999         /* Use VUPDATE interrupt */
3000         for (i = VISLANDS30_IV_SRCID_D1_V_UPDATE_INT; i <= VISLANDS30_IV_SRCID_D6_V_UPDATE_INT; i += 2) {
3001                 r = amdgpu_irq_add_id(adev, client_id, i, &adev->vupdate_irq);
3002                 if (r) {
3003                         DRM_ERROR("Failed to add vupdate irq id!\n");
3004                         return r;
3005                 }
3006
3007                 int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
3008                 int_params.irq_source =
3009                         dc_interrupt_to_irq_source(dc, i, 0);
3010
3011                 c_irq_params = &adev->dm.vupdate_params[int_params.irq_source - DC_IRQ_SOURCE_VUPDATE1];
3012
3013                 c_irq_params->adev = adev;
3014                 c_irq_params->irq_src = int_params.irq_source;
3015
3016                 amdgpu_dm_irq_register_interrupt(adev, &int_params,
3017                                 dm_vupdate_high_irq, c_irq_params);
3018         }
3019
3020         /* Use GRPH_PFLIP interrupt */
3021         for (i = VISLANDS30_IV_SRCID_D1_GRPH_PFLIP;
3022                         i <= VISLANDS30_IV_SRCID_D6_GRPH_PFLIP; i += 2) {
3023                 r = amdgpu_irq_add_id(adev, client_id, i, &adev->pageflip_irq);
3024                 if (r) {
3025                         DRM_ERROR("Failed to add page flip irq id!\n");
3026                         return r;
3027                 }
3028
3029                 int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
3030                 int_params.irq_source =
3031                         dc_interrupt_to_irq_source(dc, i, 0);
3032
3033                 c_irq_params = &adev->dm.pflip_params[int_params.irq_source - DC_IRQ_SOURCE_PFLIP_FIRST];
3034
3035                 c_irq_params->adev = adev;
3036                 c_irq_params->irq_src = int_params.irq_source;
3037
3038                 amdgpu_dm_irq_register_interrupt(adev, &int_params,
3039                                 dm_pflip_high_irq, c_irq_params);
3041         }
3042
3043         /* HPD */
3044         r = amdgpu_irq_add_id(adev, client_id,
3045                         VISLANDS30_IV_SRCID_HOTPLUG_DETECT_A, &adev->hpd_irq);
3046         if (r) {
3047                 DRM_ERROR("Failed to add hpd irq id!\n");
3048                 return r;
3049         }
3050
3051         register_hpd_handlers(adev);
3052
3053         return 0;
3054 }
3055
3056 #if defined(CONFIG_DRM_AMD_DC_DCN)
3057 /* Register IRQ sources and initialize IRQ callbacks */
3058 static int dcn10_register_irq_handlers(struct amdgpu_device *adev)
3059 {
3060         struct dc *dc = adev->dm.dc;
3061         struct common_irq_params *c_irq_params;
3062         struct dc_interrupt_params int_params = {0};
3063         int r;
3064         int i;
3065 #if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
3066         static const unsigned int vrtl_int_srcid[] = {
3067                 DCN_1_0__SRCID__OTG1_VERTICAL_INTERRUPT0_CONTROL,
3068                 DCN_1_0__SRCID__OTG2_VERTICAL_INTERRUPT0_CONTROL,
3069                 DCN_1_0__SRCID__OTG3_VERTICAL_INTERRUPT0_CONTROL,
3070                 DCN_1_0__SRCID__OTG4_VERTICAL_INTERRUPT0_CONTROL,
3071                 DCN_1_0__SRCID__OTG5_VERTICAL_INTERRUPT0_CONTROL,
3072                 DCN_1_0__SRCID__OTG6_VERTICAL_INTERRUPT0_CONTROL
3073         };
3074 #endif
3075
3076         int_params.requested_polarity = INTERRUPT_POLARITY_DEFAULT;
3077         int_params.current_polarity = INTERRUPT_POLARITY_DEFAULT;
3078
3079         /*
3080          * Actions of amdgpu_irq_add_id():
3081          * 1. Register a set() function with base driver.
3082          *    Base driver will call set() function to enable/disable an
3083          *    interrupt in DC hardware.
3084          * 2. Register amdgpu_dm_irq_handler().
3085          *    Base driver will call amdgpu_dm_irq_handler() for ALL interrupts
3086          *    coming from DC hardware.
3087          *    amdgpu_dm_irq_handler() will re-direct the interrupt to DC
3088          *    for acknowledging and handling.
3089          */
3090
3091         /* Use VSTARTUP interrupt */
3092         for (i = DCN_1_0__SRCID__DC_D1_OTG_VSTARTUP;
3093                         i <= DCN_1_0__SRCID__DC_D1_OTG_VSTARTUP + adev->mode_info.num_crtc - 1;
3094                         i++) {
3095                 r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_DCE, i, &adev->crtc_irq);
3096
3097                 if (r) {
3098                         DRM_ERROR("Failed to add crtc irq id!\n");
3099                         return r;
3100                 }
3101
3102                 int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
3103                 int_params.irq_source =
3104                         dc_interrupt_to_irq_source(dc, i, 0);
3105
3106                 c_irq_params = &adev->dm.vblank_params[int_params.irq_source - DC_IRQ_SOURCE_VBLANK1];
3107
3108                 c_irq_params->adev = adev;
3109                 c_irq_params->irq_src = int_params.irq_source;
3110
3111                 amdgpu_dm_irq_register_interrupt(
3112                         adev, &int_params, dm_crtc_high_irq, c_irq_params);
3113         }
3114
3115         /* Use otg vertical line interrupt */
3116 #if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
3117         for (i = 0; i < adev->mode_info.num_crtc; i++) {
3118                 r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_DCE,
3119                                 vrtl_int_srcid[i], &adev->vline0_irq);
3120
3121                 if (r) {
3122                         DRM_ERROR("Failed to add vline0 irq id!\n");
3123                         return r;
3124                 }
3125
3126                 int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
3127                 int_params.irq_source =
3128                         dc_interrupt_to_irq_source(dc, vrtl_int_srcid[i], 0);
3129
3130                 if (int_params.irq_source == DC_IRQ_SOURCE_INVALID) {
3131                         DRM_ERROR("Failed to register vline0 irq %d!\n", vrtl_int_srcid[i]);
3132                         break;
3133                 }
3134
3135                 c_irq_params = &adev->dm.vline0_params[int_params.irq_source
3136                                         - DC_IRQ_SOURCE_DC1_VLINE0];
3137
3138                 c_irq_params->adev = adev;
3139                 c_irq_params->irq_src = int_params.irq_source;
3140
3141                 amdgpu_dm_irq_register_interrupt(adev, &int_params,
3142                                 dm_dcn_vertical_interrupt0_high_irq, c_irq_params);
3143         }
3144 #endif
3145
3146         /* Use VUPDATE_NO_LOCK interrupt on DCN, which seems to correspond to
3147          * the regular VUPDATE interrupt on DCE. We want DC_IRQ_SOURCE_VUPDATEx
3148          * to trigger at end of each vblank, regardless of state of the lock,
3149          * matching DCE behaviour.
3150          */
3151         for (i = DCN_1_0__SRCID__OTG0_IHC_V_UPDATE_NO_LOCK_INTERRUPT;
3152              i <= DCN_1_0__SRCID__OTG0_IHC_V_UPDATE_NO_LOCK_INTERRUPT + adev->mode_info.num_crtc - 1;
3153              i++) {
3154                 r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_DCE, i, &adev->vupdate_irq);
3155
3156                 if (r) {
3157                         DRM_ERROR("Failed to add vupdate irq id!\n");
3158                         return r;
3159                 }
3160
3161                 int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
3162                 int_params.irq_source =
3163                         dc_interrupt_to_irq_source(dc, i, 0);
3164
3165                 c_irq_params = &adev->dm.vupdate_params[int_params.irq_source - DC_IRQ_SOURCE_VUPDATE1];
3166
3167                 c_irq_params->adev = adev;
3168                 c_irq_params->irq_src = int_params.irq_source;
3169
3170                 amdgpu_dm_irq_register_interrupt(adev, &int_params,
3171                                 dm_vupdate_high_irq, c_irq_params);
3172         }
3173
3174         /* Use GRPH_PFLIP interrupt */
3175         for (i = DCN_1_0__SRCID__HUBP0_FLIP_INTERRUPT;
3176                         i <= DCN_1_0__SRCID__HUBP0_FLIP_INTERRUPT + adev->mode_info.num_crtc - 1;
3177                         i++) {
3178                 r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_DCE, i, &adev->pageflip_irq);
3179                 if (r) {
3180                         DRM_ERROR("Failed to add page flip irq id!\n");
3181                         return r;
3182                 }
3183
3184                 int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
3185                 int_params.irq_source =
3186                         dc_interrupt_to_irq_source(dc, i, 0);
3187
3188                 c_irq_params = &adev->dm.pflip_params[int_params.irq_source - DC_IRQ_SOURCE_PFLIP_FIRST];
3189
3190                 c_irq_params->adev = adev;
3191                 c_irq_params->irq_src = int_params.irq_source;
3192
3193                 amdgpu_dm_irq_register_interrupt(adev, &int_params,
3194                                 dm_pflip_high_irq, c_irq_params);
3196         }
3197
3198         /* HPD */
3199         r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_DCE, DCN_1_0__SRCID__DC_HPD1_INT,
3200                         &adev->hpd_irq);
3201         if (r) {
3202                 DRM_ERROR("Failed to add hpd irq id!\n");
3203                 return r;
3204         }
3205
3206         register_hpd_handlers(adev);
3207
3208         return 0;
3209 }
3210 /* Register Outbox IRQ sources and initialize IRQ callbacks */
3211 static int register_outbox_irq_handlers(struct amdgpu_device *adev)
3212 {
3213         struct dc *dc = adev->dm.dc;
3214         struct common_irq_params *c_irq_params;
3215         struct dc_interrupt_params int_params = {0};
3216         int r, i;
3217
3218         int_params.requested_polarity = INTERRUPT_POLARITY_DEFAULT;
3219         int_params.current_polarity = INTERRUPT_POLARITY_DEFAULT;
3220
3221         r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_DCE, DCN_1_0__SRCID__DMCUB_OUTBOX_LOW_PRIORITY_READY_INT,
3222                         &adev->dmub_outbox_irq);
3223         if (r) {
3224                 DRM_ERROR("Failed to add outbox irq id!\n");
3225                 return r;
3226         }
3227
3228         if (dc->ctx->dmub_srv) {
3229                 i = DCN_1_0__SRCID__DMCUB_OUTBOX_LOW_PRIORITY_READY_INT;
3230                 int_params.int_context = INTERRUPT_LOW_IRQ_CONTEXT;
3231                 int_params.irq_source =
3232                 dc_interrupt_to_irq_source(dc, i, 0);
3233
3234                 c_irq_params = &adev->dm.dmub_outbox_params[0];
3235
3236                 c_irq_params->adev = adev;
3237                 c_irq_params->irq_src = int_params.irq_source;
3238
3239                 amdgpu_dm_irq_register_interrupt(adev, &int_params,
3240                                 dm_dmub_outbox1_low_irq, c_irq_params);
3241         }
3242
3243         return 0;
3244 }
3245 #endif
3246
3247 /*
3248  * Acquires the lock for the atomic state object and returns
3249  * the new atomic state.
3250  *
3251  * This should only be called during atomic check.
3252  */
3253 static int dm_atomic_get_state(struct drm_atomic_state *state,
3254                                struct dm_atomic_state **dm_state)
3255 {
3256         struct drm_device *dev = state->dev;
3257         struct amdgpu_device *adev = drm_to_adev(dev);
3258         struct amdgpu_display_manager *dm = &adev->dm;
3259         struct drm_private_state *priv_state;
3260
3261         if (*dm_state)
3262                 return 0;
3263
3264         priv_state = drm_atomic_get_private_obj_state(state, &dm->atomic_obj);
3265         if (IS_ERR(priv_state))
3266                 return PTR_ERR(priv_state);
3267
3268         *dm_state = to_dm_atomic_state(priv_state);
3269
3270         return 0;
3271 }
3272
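/*
 * Return the new DM private state from @state if the DM atomic object is
 * part of this commit, or NULL when it was not touched. Unlike
 * dm_atomic_get_state(), this never acquires the private object lock.
 */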
3273 static struct dm_atomic_state *
3274 dm_atomic_get_new_state(struct drm_atomic_state *state)
3275 {
3276         struct drm_device *dev = state->dev;
3277         struct amdgpu_device *adev = drm_to_adev(dev);
3278         struct amdgpu_display_manager *dm = &adev->dm;
3279         struct drm_private_obj *obj;
3280         struct drm_private_state *new_obj_state;
3281         int i;
3282
3283         for_each_new_private_obj_in_state(state, obj, new_obj_state, i) {
3284                 if (obj->funcs == dm->atomic_obj.funcs)
3285                         return to_dm_atomic_state(new_obj_state);
3286         }
3287
3288         return NULL;
3289 }
3290
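/*
 * Duplicate the DM private object state for a new atomic commit. The DC
 * state context is deep-copied with dc_copy_state() so the commit can be
 * validated and programmed without disturbing the current context.
 */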
3291 static struct drm_private_state *
3292 dm_atomic_duplicate_state(struct drm_private_obj *obj)
3293 {
3294         struct dm_atomic_state *old_state, *new_state;
3295
3296         new_state = kzalloc(sizeof(*new_state), GFP_KERNEL);
3297         if (!new_state)
3298                 return NULL;
3299
3300         __drm_atomic_helper_private_obj_duplicate_state(obj, &new_state->base);
3301
3302         old_state = to_dm_atomic_state(obj->state);
3303
3304         if (old_state && old_state->context)
3305                 new_state->context = dc_copy_state(old_state->context);
3306
3307         if (!new_state->context) {
3308                 kfree(new_state);
3309                 return NULL;
3310         }
3311
3312         return &new_state->base;
3313 }
3314
3315 static void dm_atomic_destroy_state(struct drm_private_obj *obj,
3316                                     struct drm_private_state *state)
3317 {
3318         struct dm_atomic_state *dm_state = to_dm_atomic_state(state);
3319
3320         if (dm_state && dm_state->context)
3321                 dc_release_state(dm_state->context);
3322
3323         kfree(dm_state);
3324 }
3325
3326 static struct drm_private_state_funcs dm_atomic_state_funcs = {
3327         .atomic_duplicate_state = dm_atomic_duplicate_state,
3328         .atomic_destroy_state = dm_atomic_destroy_state,
3329 };
3330
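/*
 * Set up the DRM mode_config limits, build the initial DC state from the
 * current resource state, register the DM atomic private object, and
 * create the modeset properties and audio component.
 */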
3331 static int amdgpu_dm_mode_config_init(struct amdgpu_device *adev)
3332 {
3333         struct dm_atomic_state *state;
3334         int r;
3335
3336         adev->mode_info.mode_config_initialized = true;
3337
3338         adev_to_drm(adev)->mode_config.funcs = (void *)&amdgpu_dm_mode_funcs;
3339         adev_to_drm(adev)->mode_config.helper_private = &amdgpu_dm_mode_config_helperfuncs;
3340
3341         adev_to_drm(adev)->mode_config.max_width = 16384;
3342         adev_to_drm(adev)->mode_config.max_height = 16384;
3343
3344         adev_to_drm(adev)->mode_config.preferred_depth = 24;
3345         adev_to_drm(adev)->mode_config.prefer_shadow = 1;
3346         /* indicates support for immediate flip */
3347         adev_to_drm(adev)->mode_config.async_page_flip = true;
3348
3349         adev_to_drm(adev)->mode_config.fb_base = adev->gmc.aper_base;
3350
3351         state = kzalloc(sizeof(*state), GFP_KERNEL);
3352         if (!state)
3353                 return -ENOMEM;
3354
3355         state->context = dc_create_state(adev->dm.dc);
3356         if (!state->context) {
3357                 kfree(state);
3358                 return -ENOMEM;
3359         }
3360
3361         dc_resource_state_copy_construct_current(adev->dm.dc, state->context);
3362
3363         drm_atomic_private_obj_init(adev_to_drm(adev),
3364                                     &adev->dm.atomic_obj,
3365                                     &state->base,
3366                                     &dm_atomic_state_funcs);
3367
3368         r = amdgpu_display_modeset_create_props(adev);
3369         if (r) {
3370                 dc_release_state(state->context);
3371                 kfree(state);
3372                 return r;
3373         }
3374
3375         r = amdgpu_dm_audio_init(adev);
3376         if (r) {
3377                 dc_release_state(state->context);
3378                 kfree(state);
3379                 return r;
3380         }
3381
3382         return 0;
3383 }
3384
3385 #define AMDGPU_DM_DEFAULT_MIN_BACKLIGHT 12
3386 #define AMDGPU_DM_DEFAULT_MAX_BACKLIGHT 255
3387 #define AUX_BL_DEFAULT_TRANSITION_TIME_MS 50
3388
3389 #if defined(CONFIG_BACKLIGHT_CLASS_DEVICE) ||\
3390         defined(CONFIG_BACKLIGHT_CLASS_DEVICE_MODULE)
3391
3392 static void amdgpu_dm_update_backlight_caps(struct amdgpu_display_manager *dm)
3393 {
3394 #if defined(CONFIG_ACPI)
3395         struct amdgpu_dm_backlight_caps caps;
3396
3397         memset(&caps, 0, sizeof(caps));
3398
3399         if (dm->backlight_caps.caps_valid)
3400                 return;
3401
3402         amdgpu_acpi_get_backlight_caps(dm->adev, &caps);
3403         if (caps.caps_valid) {
3404                 dm->backlight_caps.caps_valid = true;
3405                 if (caps.aux_support)
3406                         return;
3407                 dm->backlight_caps.min_input_signal = caps.min_input_signal;
3408                 dm->backlight_caps.max_input_signal = caps.max_input_signal;
3409         } else {
3410                 dm->backlight_caps.min_input_signal =
3411                                 AMDGPU_DM_DEFAULT_MIN_BACKLIGHT;
3412                 dm->backlight_caps.max_input_signal =
3413                                 AMDGPU_DM_DEFAULT_MAX_BACKLIGHT;
3414         }
3415 #else
3416         if (dm->backlight_caps.aux_support)
3417                 return;
3418
3419         dm->backlight_caps.min_input_signal = AMDGPU_DM_DEFAULT_MIN_BACKLIGHT;
3420         dm->backlight_caps.max_input_signal = AMDGPU_DM_DEFAULT_MAX_BACKLIGHT;
3421 #endif
3422 }
3423
3424 static int get_brightness_range(const struct amdgpu_dm_backlight_caps *caps,
3425                                 unsigned int *min, unsigned int *max)
3426 {
3427         if (!caps)
3428                 return 0;
3429
3430         if (caps->aux_support) {
3431                 // Firmware limits are in nits, DC API wants millinits.
3432                 *max = 1000 * caps->aux_max_input_signal;
3433                 *min = 1000 * caps->aux_min_input_signal;
3434         } else {
3435                 // Firmware limits are 8-bit, PWM control is 16-bit.
3436                 *max = 0x101 * caps->max_input_signal;
3437                 *min = 0x101 * caps->min_input_signal;
3438         }
3439         return 1;
3440 }
3441
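/*
 * Illustrative mapping, assuming the default PWM caps above: with
 * min_input_signal = 12 and max_input_signal = 255, the 16-bit range is
 * min = 0x101 * 12 = 3084 and max = 0x101 * 255 = 65535, so a user
 * brightness of 128 maps to 3084 + (65535 - 3084) * 128 / 255 ~= 34432.
 */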
3442 static u32 convert_brightness_from_user(const struct amdgpu_dm_backlight_caps *caps,
3443                                         uint32_t brightness)
3444 {
3445         unsigned int min, max;
3446
3447         if (!get_brightness_range(caps, &min, &max))
3448                 return brightness;
3449
3450         // Rescale 0..255 to min..max
3451         return min + DIV_ROUND_CLOSEST((max - min) * brightness,
3452                                        AMDGPU_MAX_BL_LEVEL);
3453 }
3454
3455 static u32 convert_brightness_to_user(const struct amdgpu_dm_backlight_caps *caps,
3456                                       uint32_t brightness)
3457 {
3458         unsigned int min, max;
3459
3460         if (!get_brightness_range(caps, &min, &max))
3461                 return brightness;
3462
3463         if (brightness < min)
3464                 return 0;
3465         // Rescale min..max to 0..255
3466         return DIV_ROUND_CLOSEST(AMDGPU_MAX_BL_LEVEL * (brightness - min),
3467                                  max - min);
3468 }
3469
3470 static int amdgpu_dm_backlight_update_status(struct backlight_device *bd)
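/*
 * Push the user-requested brightness out to every registered eDP link:
 * in nits over AUX when the panel supports it, otherwise as a 16-bit
 * PWM level via dc_link_set_backlight_level().
 */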
3471 {
3472         struct amdgpu_display_manager *dm = bl_get_data(bd);
3473         struct amdgpu_dm_backlight_caps caps;
3474         struct dc_link *link[AMDGPU_DM_MAX_NUM_EDP];
3475         u32 brightness;
3476         bool rc;
3477         int i;
3478
3479         amdgpu_dm_update_backlight_caps(dm);
3480         caps = dm->backlight_caps;
3481
3482         for (i = 0; i < dm->num_of_edps; i++)
3483                 link[i] = (struct dc_link *)dm->backlight_link[i];
3484
3485         brightness = convert_brightness_from_user(&caps, bd->props.brightness);
3486         // Change brightness based on AUX property
3487         if (caps.aux_support) {
3488                 for (i = 0; i < dm->num_of_edps; i++) {
3489                         rc = dc_link_set_backlight_level_nits(link[i], true, brightness,
3490                                 AUX_BL_DEFAULT_TRANSITION_TIME_MS);
3491                         if (!rc) {
3492                                 DRM_ERROR("DM: Failed to update backlight via AUX on eDP[%d]\n", i);
3493                                 break;
3494                         }
3495                 }
3496         } else {
3497                 for (i = 0; i < dm->num_of_edps; i++) {
3498                         rc = dc_link_set_backlight_level(dm->backlight_link[i], brightness, 0);
3499                         if (!rc) {
3500                                 DRM_ERROR("DM: Failed to update backlight on eDP[%d]\n", i);
3501                                 break;
3502                         }
3503                 }
3504         }
3505
3506         return rc ? 0 : 1;
3507 }
3508
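/*
 * Read the current level back from the first eDP link (average nits over
 * AUX, or the PWM level otherwise) and convert it to the 0..255 user
 * range; fall back to the cached property value on read failure.
 */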
3509 static int amdgpu_dm_backlight_get_brightness(struct backlight_device *bd)
3510 {
3511         struct amdgpu_display_manager *dm = bl_get_data(bd);
3512         struct amdgpu_dm_backlight_caps caps;
3513
3514         amdgpu_dm_update_backlight_caps(dm);
3515         caps = dm->backlight_caps;
3516
3517         if (caps.aux_support) {
3518                 struct dc_link *link = (struct dc_link *)dm->backlight_link[0];
3519                 u32 avg, peak;
3520                 bool rc;
3521
3522                 rc = dc_link_get_backlight_level_nits(link, &avg, &peak);
3523                 if (!rc)
3524                         return bd->props.brightness;
3525                 return convert_brightness_to_user(&caps, avg);
3526         } else {
3527                 int ret = dc_link_get_backlight_level(dm->backlight_link[0]);
3528
3529                 if (ret == DC_ERROR_UNEXPECTED)
3530                         return bd->props.brightness;
3531                 return convert_brightness_to_user(&caps, ret);
3532         }
3533 }
3534
3535 static const struct backlight_ops amdgpu_dm_backlight_ops = {
3536         .options = BL_CORE_SUSPENDRESUME,
3537         .get_brightness = amdgpu_dm_backlight_get_brightness,
3538         .update_status  = amdgpu_dm_backlight_update_status,
3539 };
3540
3541 static void
3542 amdgpu_dm_register_backlight_device(struct amdgpu_display_manager *dm)
3543 {
3544         char bl_name[16];
3545         struct backlight_properties props = { 0 };
3546
3547         amdgpu_dm_update_backlight_caps(dm);
3548
3549         props.max_brightness = AMDGPU_MAX_BL_LEVEL;
3550         props.brightness = AMDGPU_MAX_BL_LEVEL;
3551         props.type = BACKLIGHT_RAW;
3552
3553         snprintf(bl_name, sizeof(bl_name), "amdgpu_bl%d",
3554                  adev_to_drm(dm->adev)->primary->index);
3555
3556         dm->backlight_dev = backlight_device_register(bl_name,
3557                                                       adev_to_drm(dm->adev)->dev,
3558                                                       dm,
3559                                                       &amdgpu_dm_backlight_ops,
3560                                                       &props);
3561
3562         if (IS_ERR(dm->backlight_dev))
3563                 DRM_ERROR("DM: Backlight registration failed!\n");
3564         else
3565                 DRM_DEBUG_DRIVER("DM: Registered Backlight device: %s\n", bl_name);
3566 }
3567
3568 #endif
3569
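/*
 * Allocate and initialize one DRM plane. Primary planes are tied 1:1 to
 * the CRTC with the same index; overlay/underlay planes advertise all
 * CRTCs as possible.
 */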
3570 static int initialize_plane(struct amdgpu_display_manager *dm,
3571                             struct amdgpu_mode_info *mode_info, int plane_id,
3572                             enum drm_plane_type plane_type,
3573                             const struct dc_plane_cap *plane_cap)
3574 {
3575         struct drm_plane *plane;
3576         unsigned long possible_crtcs;
3577         int ret = 0;
3578
3579         plane = kzalloc(sizeof(struct drm_plane), GFP_KERNEL);
3580         if (!plane) {
3581                 DRM_ERROR("KMS: Failed to allocate plane\n");
3582                 return -ENOMEM;
3583         }
3584         plane->type = plane_type;
3585
3586         /*
3587          * HACK: IGT tests expect that the primary plane for a CRTC
3588          * can only have one possible CRTC. Only expose support for
3589          * any CRTC on planes that will not be used as a primary plane
3590          * for a CRTC - like overlay or underlay planes.
3591          */
3592         possible_crtcs = 1 << plane_id;
3593         if (plane_id >= dm->dc->caps.max_streams)
3594                 possible_crtcs = 0xff;
3595
3596         ret = amdgpu_dm_plane_init(dm, plane, possible_crtcs, plane_cap);
3597
3598         if (ret) {
3599                 DRM_ERROR("KMS: Failed to initialize plane\n");
3600                 kfree(plane);
3601                 return ret;
3602         }
3603
3604         if (mode_info)
3605                 mode_info->planes[plane_id] = plane;
3606
3607         return ret;
3608 }
3609
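/*
 * For eDP/LVDS links with a connected panel, lazily register the single
 * backlight device and record the link in backlight_link[] so brightness
 * updates can target every eDP.
 */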
3611 static void register_backlight_device(struct amdgpu_display_manager *dm,
3612                                       struct dc_link *link)
3613 {
3614 #if defined(CONFIG_BACKLIGHT_CLASS_DEVICE) ||\
3615         defined(CONFIG_BACKLIGHT_CLASS_DEVICE_MODULE)
3616
3617         if ((link->connector_signal & (SIGNAL_TYPE_EDP | SIGNAL_TYPE_LVDS)) &&
3618             link->type != dc_connection_none) {
3619                 /*
3620                  * Even if registration failed, we should continue with
3621                  * DM initialization because not having a backlight control
3622                  * is better than a black screen.
3623                  */
3624                 if (!dm->backlight_dev)
3625                         amdgpu_dm_register_backlight_device(dm);
3626
3627                 if (dm->backlight_dev) {
3628                         dm->backlight_link[dm->num_of_edps] = link;
3629                         dm->num_of_edps++;
3630                 }
3631         }
3632 #endif
3633 }
3634
3636 /*
3637  * In this architecture, the association
3638  * connector -> encoder -> crtc
3639  * is not really required. The crtc and connector will hold the
3640  * display_index as an abstraction to use with the DAL component.
3641  *
3642  * Returns 0 on success
3643  */
3644 static int amdgpu_dm_initialize_drm_device(struct amdgpu_device *adev)
3645 {
3646         struct amdgpu_display_manager *dm = &adev->dm;
3647         int32_t i;
3648         struct amdgpu_dm_connector *aconnector = NULL;
3649         struct amdgpu_encoder *aencoder = NULL;
3650         struct amdgpu_mode_info *mode_info = &adev->mode_info;
3651         uint32_t link_cnt;
3652         int32_t primary_planes;
3653         enum dc_connection_type new_connection_type = dc_connection_none;
3654         const struct dc_plane_cap *plane;
3655
3656         dm->display_indexes_num = dm->dc->caps.max_streams;
3657         /* Update the actual number of CRTCs in use */
3658         adev->mode_info.num_crtc = adev->dm.display_indexes_num;
3659
3660         link_cnt = dm->dc->caps.max_links;
3661         if (amdgpu_dm_mode_config_init(dm->adev)) {
3662                 DRM_ERROR("DM: Failed to initialize mode config\n");
3663                 return -EINVAL;
3664         }
3665
3666         /* There is one primary plane per CRTC */
3667         primary_planes = dm->dc->caps.max_streams;
3668         ASSERT(primary_planes <= AMDGPU_MAX_PLANES);
3669
3670         /*
3671          * Initialize primary planes, implicit planes for legacy IOCTLS.
3672          * Order is reversed to match iteration order in atomic check.
3673          */
3674         for (i = (primary_planes - 1); i >= 0; i--) {
3675                 plane = &dm->dc->caps.planes[i];
3676
3677                 if (initialize_plane(dm, mode_info, i,
3678                                      DRM_PLANE_TYPE_PRIMARY, plane)) {
3679                         DRM_ERROR("KMS: Failed to initialize primary plane\n");
3680                         goto fail;
3681                 }
3682         }
3683
3684         /*
3685          * Initialize overlay planes, index starting after primary planes.
3686          * These planes have a higher DRM index than the primary planes since
3687          * they should be considered as having a higher z-order.
3688          * Order is reversed to match iteration order in atomic check.
3689          *
3690          * Only support DCN for now, and only expose one so we don't encourage
3691          * userspace to use up all the pipes.
3692          */
3693         for (i = 0; i < dm->dc->caps.max_planes; ++i) {
3694                 struct dc_plane_cap *plane = &dm->dc->caps.planes[i];
3695
3696                 if (plane->type != DC_PLANE_TYPE_DCN_UNIVERSAL)
3697                         continue;
3698
3699                 if (!plane->blends_with_above || !plane->blends_with_below)
3700                         continue;
3701
3702                 if (!plane->pixel_format_support.argb8888)
3703                         continue;
3704
3705                 if (initialize_plane(dm, NULL, primary_planes + i,
3706                                      DRM_PLANE_TYPE_OVERLAY, plane)) {
3707                         DRM_ERROR("KMS: Failed to initialize overlay plane\n");
3708                         goto fail;
3709                 }
3710
3711                 /* Only create one overlay plane. */
3712                 break;
3713         }
3714
3715         for (i = 0; i < dm->dc->caps.max_streams; i++)
3716                 if (amdgpu_dm_crtc_init(dm, mode_info->planes[i], i)) {
3717                         DRM_ERROR("KMS: Failed to initialize crtc\n");
3718                         goto fail;
3719                 }
3720
3721 #if defined(CONFIG_DRM_AMD_DC_DCN)
3722         /* Use Outbox interrupt */
3723         switch (adev->asic_type) {
3724         case CHIP_SIENNA_CICHLID:
3725         case CHIP_NAVY_FLOUNDER:
3726         case CHIP_RENOIR:
3727                 if (register_outbox_irq_handlers(dm->adev)) {
3728                         DRM_ERROR("DM: Failed to initialize IRQ\n");
3729                         goto fail;
3730                 }
3731                 break;
3732         default:
3733                 DRM_DEBUG_KMS("Unsupported ASIC type for outbox: 0x%X\n", adev->asic_type);
3734         }
3735 #endif
3736
3737         /* loops over all connectors on the board */
3738         for (i = 0; i < link_cnt; i++) {
3739                 struct dc_link *link = NULL;
3740
3741                 if (i > AMDGPU_DM_MAX_DISPLAY_INDEX) {
3742                         DRM_ERROR(
3743                                 "KMS: Cannot support more than %d display indexes\n",
3744                                         AMDGPU_DM_MAX_DISPLAY_INDEX);
3745                         continue;
3746                 }
3747
3748                 aconnector = kzalloc(sizeof(*aconnector), GFP_KERNEL);
3749                 if (!aconnector)
3750                         goto fail;
3751
3752                 aencoder = kzalloc(sizeof(*aencoder), GFP_KERNEL);
3753                 if (!aencoder)
3754                         goto fail;
3755
3756                 if (amdgpu_dm_encoder_init(dm->ddev, aencoder, i)) {
3757                         DRM_ERROR("KMS: Failed to initialize encoder\n");
3758                         goto fail;
3759                 }
3760
3761                 if (amdgpu_dm_connector_init(dm, aconnector, i, aencoder)) {
3762                         DRM_ERROR("KMS: Failed to initialize connector\n");
3763                         goto fail;
3764                 }
3765
3766                 link = dc_get_link_at_index(dm->dc, i);
3767
3768                 if (!dc_link_detect_sink(link, &new_connection_type))
3769                         DRM_ERROR("KMS: Failed to detect connector\n");
3770
3771                 if (aconnector->base.force && new_connection_type == dc_connection_none) {
3772                         emulated_link_detect(link);
3773                         amdgpu_dm_update_connector_after_detect(aconnector);
3774
3775                 } else if (dc_link_detect(link, DETECT_REASON_BOOT)) {
3776                         amdgpu_dm_update_connector_after_detect(aconnector);
3777                         register_backlight_device(dm, link);
3778                         if (amdgpu_dc_feature_mask & DC_PSR_MASK)
3779                                 amdgpu_dm_set_psr_caps(link);
3780                 }
3781
3783         }
3784
3785         /* Software is initialized. Now we can register interrupt handlers. */
3786         switch (adev->asic_type) {
3787 #if defined(CONFIG_DRM_AMD_DC_SI)
3788         case CHIP_TAHITI:
3789         case CHIP_PITCAIRN:
3790         case CHIP_VERDE:
3791         case CHIP_OLAND:
3792                 if (dce60_register_irq_handlers(dm->adev)) {
3793                         DRM_ERROR("DM: Failed to initialize IRQ\n");
3794                         goto fail;
3795                 }
3796                 break;
3797 #endif
3798         case CHIP_BONAIRE:
3799         case CHIP_HAWAII:
3800         case CHIP_KAVERI:
3801         case CHIP_KABINI:
3802         case CHIP_MULLINS:
3803         case CHIP_TONGA:
3804         case CHIP_FIJI:
3805         case CHIP_CARRIZO:
3806         case CHIP_STONEY:
3807         case CHIP_POLARIS11:
3808         case CHIP_POLARIS10:
3809         case CHIP_POLARIS12:
3810         case CHIP_VEGAM:
3811         case CHIP_VEGA10:
3812         case CHIP_VEGA12:
3813         case CHIP_VEGA20:
3814                 if (dce110_register_irq_handlers(dm->adev)) {
3815                         DRM_ERROR("DM: Failed to initialize IRQ\n");
3816                         goto fail;
3817                 }
3818                 break;
3819 #if defined(CONFIG_DRM_AMD_DC_DCN)
3820         case CHIP_RAVEN:
3821         case CHIP_NAVI12:
3822         case CHIP_NAVI10:
3823         case CHIP_NAVI14:
3824         case CHIP_RENOIR:
3825         case CHIP_SIENNA_CICHLID:
3826         case CHIP_NAVY_FLOUNDER:
3827         case CHIP_DIMGREY_CAVEFISH:
3828         case CHIP_BEIGE_GOBY:
3829         case CHIP_VANGOGH:
3830                 if (dcn10_register_irq_handlers(dm->adev)) {
3831                         DRM_ERROR("DM: Failed to initialize IRQ\n");
3832                         goto fail;
3833                 }
3834                 break;
3835 #endif
3836         default:
3837                 DRM_ERROR("Unsupported ASIC type: 0x%X\n", adev->asic_type);
3838                 goto fail;
3839         }
3840
3841         return 0;
3842 fail:
3843         kfree(aencoder);
3844         kfree(aconnector);
3845
3846         return -EINVAL;
3847 }
3848
3849 static void amdgpu_dm_destroy_drm_device(struct amdgpu_display_manager *dm)
3850 {
3851         drm_mode_config_cleanup(dm->ddev);
3852         drm_atomic_private_obj_fini(&dm->atomic_obj);
3854 }
3855
3856 /******************************************************************************
3857  * amdgpu_display_funcs functions
3858  *****************************************************************************/
3859
3860 /*
3861  * dm_bandwidth_update - program display watermarks
3862  *
3863  * @adev: amdgpu_device pointer
3864  *
3865  * Calculate and program the display watermarks and line buffer allocation.
3866  */
3867 static void dm_bandwidth_update(struct amdgpu_device *adev)
3868 {
3869         /* TODO: implement later */
3870 }
3871
3872 static const struct amdgpu_display_funcs dm_display_funcs = {
3873         .bandwidth_update = dm_bandwidth_update, /* called unconditionally */
3874         .vblank_get_counter = dm_vblank_get_counter,/* called unconditionally */
3875         .backlight_set_level = NULL, /* never called for DC */
3876         .backlight_get_level = NULL, /* never called for DC */
3877         .hpd_sense = NULL,/* called unconditionally */
3878         .hpd_set_polarity = NULL, /* called unconditionally */
3879         .hpd_get_gpio_reg = NULL, /* VBIOS parsing. DAL does it. */
3880         .page_flip_get_scanoutpos =
3881                 dm_crtc_get_scanoutpos,/* called unconditionally */
3882         .add_encoder = NULL, /* VBIOS parsing. DAL does it. */
3883         .add_connector = NULL, /* VBIOS parsing. DAL does it. */
3884 };
3885
3886 #if defined(CONFIG_DEBUG_KERNEL_DC)
3887
3888 static ssize_t s3_debug_store(struct device *device,
3889                               struct device_attribute *attr,
3890                               const char *buf,
3891                               size_t count)
3892 {
3893         int ret;
3894         int s3_state;
3895         struct drm_device *drm_dev = dev_get_drvdata(device);
3896         struct amdgpu_device *adev = drm_to_adev(drm_dev);
3897
3898         ret = kstrtoint(buf, 0, &s3_state);
3899
3900         if (ret == 0) {
3901                 if (s3_state) {
3902                         dm_resume(adev);
3903                         drm_kms_helper_hotplug_event(adev_to_drm(adev));
3904                 } else
3905                         dm_suspend(adev);
3906         }
3907
3908         return ret == 0 ? count : 0;
3909 }
3910
3911 DEVICE_ATTR_WO(s3_debug);
3912
3913 #endif
3914
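/*
 * Early init: set the per-ASIC number of CRTCs, HPD lines and digital
 * encoders, and install the DM display/IRQ function tables.
 */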
3915 static int dm_early_init(void *handle)
3916 {
3917         struct amdgpu_device *adev = (struct amdgpu_device *)handle;
3918
3919         switch (adev->asic_type) {
3920 #if defined(CONFIG_DRM_AMD_DC_SI)
3921         case CHIP_TAHITI:
3922         case CHIP_PITCAIRN:
3923         case CHIP_VERDE:
3924                 adev->mode_info.num_crtc = 6;
3925                 adev->mode_info.num_hpd = 6;
3926                 adev->mode_info.num_dig = 6;
3927                 break;
3928         case CHIP_OLAND:
3929                 adev->mode_info.num_crtc = 2;
3930                 adev->mode_info.num_hpd = 2;
3931                 adev->mode_info.num_dig = 2;
3932                 break;
3933 #endif
3934         case CHIP_BONAIRE:
3935         case CHIP_HAWAII:
3936                 adev->mode_info.num_crtc = 6;
3937                 adev->mode_info.num_hpd = 6;
3938                 adev->mode_info.num_dig = 6;
3939                 break;
3940         case CHIP_KAVERI:
3941                 adev->mode_info.num_crtc = 4;
3942                 adev->mode_info.num_hpd = 6;
3943                 adev->mode_info.num_dig = 7;
3944                 break;
3945         case CHIP_KABINI:
3946         case CHIP_MULLINS:
3947                 adev->mode_info.num_crtc = 2;
3948                 adev->mode_info.num_hpd = 6;
3949                 adev->mode_info.num_dig = 6;
3950                 break;
3951         case CHIP_FIJI:
3952         case CHIP_TONGA:
3953                 adev->mode_info.num_crtc = 6;
3954                 adev->mode_info.num_hpd = 6;
3955                 adev->mode_info.num_dig = 7;
3956                 break;
3957         case CHIP_CARRIZO:
3958                 adev->mode_info.num_crtc = 3;
3959                 adev->mode_info.num_hpd = 6;
3960                 adev->mode_info.num_dig = 9;
3961                 break;
3962         case CHIP_STONEY:
3963                 adev->mode_info.num_crtc = 2;
3964                 adev->mode_info.num_hpd = 6;
3965                 adev->mode_info.num_dig = 9;
3966                 break;
3967         case CHIP_POLARIS11:
3968         case CHIP_POLARIS12:
3969                 adev->mode_info.num_crtc = 5;
3970                 adev->mode_info.num_hpd = 5;
3971                 adev->mode_info.num_dig = 5;
3972                 break;
3973         case CHIP_POLARIS10:
3974         case CHIP_VEGAM:
3975                 adev->mode_info.num_crtc = 6;
3976                 adev->mode_info.num_hpd = 6;
3977                 adev->mode_info.num_dig = 6;
3978                 break;
3979         case CHIP_VEGA10:
3980         case CHIP_VEGA12:
3981         case CHIP_VEGA20:
3982                 adev->mode_info.num_crtc = 6;
3983                 adev->mode_info.num_hpd = 6;
3984                 adev->mode_info.num_dig = 6;
3985                 break;
3986 #if defined(CONFIG_DRM_AMD_DC_DCN)
3987         case CHIP_RAVEN:
3988         case CHIP_RENOIR:
3989         case CHIP_VANGOGH:
3990                 adev->mode_info.num_crtc = 4;
3991                 adev->mode_info.num_hpd = 4;
3992                 adev->mode_info.num_dig = 4;
3993                 break;
3994         case CHIP_NAVI10:
3995         case CHIP_NAVI12:
3996         case CHIP_SIENNA_CICHLID:
3997         case CHIP_NAVY_FLOUNDER:
3998                 adev->mode_info.num_crtc = 6;
3999                 adev->mode_info.num_hpd = 6;
4000                 adev->mode_info.num_dig = 6;
4001                 break;
4002         case CHIP_NAVI14:
4003         case CHIP_DIMGREY_CAVEFISH:
4004                 adev->mode_info.num_crtc = 5;
4005                 adev->mode_info.num_hpd = 5;
4006                 adev->mode_info.num_dig = 5;
4007                 break;
4008         case CHIP_BEIGE_GOBY:
4009                 adev->mode_info.num_crtc = 2;
4010                 adev->mode_info.num_hpd = 2;
4011                 adev->mode_info.num_dig = 2;
4012                 break;
4013 #endif
4014         default:
4015                 DRM_ERROR("Unsupported ASIC type: 0x%X\n", adev->asic_type);
4016                 return -EINVAL;
4017         }
4018
4019         amdgpu_dm_set_irq_funcs(adev);
4020
4021         if (!adev->mode_info.funcs)
4022                 adev->mode_info.funcs = &dm_display_funcs;
4023
4024         /*
4025          * Note: Do NOT change adev->audio_endpt_rreg and
4026          * adev->audio_endpt_wreg because they are initialised in
4027          * amdgpu_device_init()
4028          */
4029 #if defined(CONFIG_DEBUG_KERNEL_DC)
4030         device_create_file(
4031                 adev_to_drm(adev)->dev,
4032                 &dev_attr_s3_debug);
4033 #endif
4034
4035         return 0;
4036 }
4037
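/*
 * A full modeset is required only when DRM flagged one and the CRTC ends
 * up active; a "modereset" is the disabling counterpart.
 */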
4038 static bool modeset_required(struct drm_crtc_state *crtc_state,
4039                              struct dc_stream_state *new_stream,
4040                              struct dc_stream_state *old_stream)
4041 {
4042         return crtc_state->active && drm_atomic_crtc_needs_modeset(crtc_state);
4043 }
4044
4045 static bool modereset_required(struct drm_crtc_state *crtc_state)
4046 {
4047         return !crtc_state->active && drm_atomic_crtc_needs_modeset(crtc_state);
4048 }
4049
4050 static void amdgpu_dm_encoder_destroy(struct drm_encoder *encoder)
4051 {
4052         drm_encoder_cleanup(encoder);
4053         kfree(encoder);
4054 }
4055
4056 static const struct drm_encoder_funcs amdgpu_dm_encoder_funcs = {
4057         .destroy = amdgpu_dm_encoder_destroy,
4058 };
4059
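/*
 * Look up the per-format scaling limits (NV12/P010, FP16, or the default
 * ARGB8888 class) from the DC plane caps, in units where 1000 == a 1.0
 * scaling factor.
 */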
4061 static void get_min_max_dc_plane_scaling(struct drm_device *dev,
4062                                          struct drm_framebuffer *fb,
4063                                          int *min_downscale, int *max_upscale)
4064 {
4065         struct amdgpu_device *adev = drm_to_adev(dev);
4066         struct dc *dc = adev->dm.dc;
4067         /* Caps for all supported planes are the same on DCE and DCN 1 - 3 */
4068         struct dc_plane_cap *plane_cap = &dc->caps.planes[0];
4069
4070         switch (fb->format->format) {
4071         case DRM_FORMAT_P010:
4072         case DRM_FORMAT_NV12:
4073         case DRM_FORMAT_NV21:
4074                 *max_upscale = plane_cap->max_upscale_factor.nv12;
4075                 *min_downscale = plane_cap->max_downscale_factor.nv12;
4076                 break;
4077
4078         case DRM_FORMAT_XRGB16161616F:
4079         case DRM_FORMAT_ARGB16161616F:
4080         case DRM_FORMAT_XBGR16161616F:
4081         case DRM_FORMAT_ABGR16161616F:
4082                 *max_upscale = plane_cap->max_upscale_factor.fp16;
4083                 *min_downscale = plane_cap->max_downscale_factor.fp16;
4084                 break;
4085
4086         default:
4087                 *max_upscale = plane_cap->max_upscale_factor.argb8888;
4088                 *min_downscale = plane_cap->max_downscale_factor.argb8888;
4089                 break;
4090         }
4091
4092         /*
4093          * A factor of 1 in the plane_cap means scaling is not allowed, i.e. use a
4094          * scaling factor of 1.0 == 1000 units.
4095          */
4096         if (*max_upscale == 1)
4097                 *max_upscale = 1000;
4098
4099         if (*min_downscale == 1)
4100                 *min_downscale = 1000;
4101 }
4102
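/*
 * Translate the DRM plane state (16.16 fixed-point source rect, integer
 * CRTC destination rect) into a dc_scaling_info and validate the scale
 * factors against the per-format caps. Factors are in 1/1000 units, e.g.
 * a 960-wide source stretched to a 1920-wide destination gives
 * scale_w = 1920 * 1000 / 960 = 2000 (a 2.0x upscale).
 */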
4104 static int fill_dc_scaling_info(const struct drm_plane_state *state,
4105                                 struct dc_scaling_info *scaling_info)
4106 {
4107         int scale_w, scale_h, min_downscale, max_upscale;
4108
4109         memset(scaling_info, 0, sizeof(*scaling_info));
4110
4111         /* Source is fixed-point 16.16; we ignore the fractional part for now... */
4112         scaling_info->src_rect.x = state->src_x >> 16;
4113         scaling_info->src_rect.y = state->src_y >> 16;
4114
4115         /*
4116          * For reasons we don't (yet) fully understand a non-zero
4117          * src_y coordinate into an NV12 buffer can cause a
4118          * system hang. To avoid hangs (and maybe be overly cautious)
4119          * let's reject both non-zero src_x and src_y.
4120          *
4121          * We currently know of only one use-case to reproduce a
4122          * scenario with non-zero src_x and src_y for NV12, which
4123          * is to gesture the YouTube Android app into full screen
4124          * on ChromeOS.
4125          */
4126         if (state->fb &&
4127             state->fb->format->format == DRM_FORMAT_NV12 &&
4128             (scaling_info->src_rect.x != 0 ||
4129              scaling_info->src_rect.y != 0))
4130                 return -EINVAL;
4131
4132         scaling_info->src_rect.width = state->src_w >> 16;
4133         if (scaling_info->src_rect.width == 0)
4134                 return -EINVAL;
4135
4136         scaling_info->src_rect.height = state->src_h >> 16;
4137         if (scaling_info->src_rect.height == 0)
4138                 return -EINVAL;
4139
4140         scaling_info->dst_rect.x = state->crtc_x;
4141         scaling_info->dst_rect.y = state->crtc_y;
4142
4143         if (state->crtc_w == 0)
4144                 return -EINVAL;
4145
4146         scaling_info->dst_rect.width = state->crtc_w;
4147
4148         if (state->crtc_h == 0)
4149                 return -EINVAL;
4150
4151         scaling_info->dst_rect.height = state->crtc_h;
4152
4153         /* DRM doesn't specify clipping on destination output. */
4154         scaling_info->clip_rect = scaling_info->dst_rect;
4155
4156         /* Validate scaling per-format with DC plane caps */
4157         if (state->plane && state->plane->dev && state->fb) {
4158                 get_min_max_dc_plane_scaling(state->plane->dev, state->fb,
4159                                              &min_downscale, &max_upscale);
4160         } else {
4161                 min_downscale = 250;
4162                 max_upscale = 16000;
4163         }
4164
4165         scale_w = scaling_info->dst_rect.width * 1000 /
4166                   scaling_info->src_rect.width;
4167
4168         if (scale_w < min_downscale || scale_w > max_upscale)
4169                 return -EINVAL;
4170
4171         scale_h = scaling_info->dst_rect.height * 1000 /
4172                   scaling_info->src_rect.height;
4173
4174         if (scale_h < min_downscale || scale_h > max_upscale)
4175                 return -EINVAL;
4176
4177         /*
4178          * The "scaling_quality" can be ignored for now, quality = 0 has DC
4179          * assume reasonable defaults based on the format.
4180          */
4181
4182         return 0;
4183 }
4184
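/*
 * Decode the AMDGPU_TILING_* flag fields of the BO metadata word into the
 * GFX8 portion of the DC tiling description.
 */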
4185 static void
4186 fill_gfx8_tiling_info_from_flags(union dc_tiling_info *tiling_info,
4187                                  uint64_t tiling_flags)
4188 {
4189         /* Fill GFX8 params */
4190         if (AMDGPU_TILING_GET(tiling_flags, ARRAY_MODE) == DC_ARRAY_2D_TILED_THIN1) {
4191                 unsigned int bankw, bankh, mtaspect, tile_split, num_banks;
4192
4193                 bankw = AMDGPU_TILING_GET(tiling_flags, BANK_WIDTH);
4194                 bankh = AMDGPU_TILING_GET(tiling_flags, BANK_HEIGHT);
4195                 mtaspect = AMDGPU_TILING_GET(tiling_flags, MACRO_TILE_ASPECT);
4196                 tile_split = AMDGPU_TILING_GET(tiling_flags, TILE_SPLIT);
4197                 num_banks = AMDGPU_TILING_GET(tiling_flags, NUM_BANKS);
4198
4199                 /* XXX fix me for VI */
4200                 tiling_info->gfx8.num_banks = num_banks;
4201                 tiling_info->gfx8.array_mode =
4202                                 DC_ARRAY_2D_TILED_THIN1;
4203                 tiling_info->gfx8.tile_split = tile_split;
4204                 tiling_info->gfx8.bank_width = bankw;
4205                 tiling_info->gfx8.bank_height = bankh;
4206                 tiling_info->gfx8.tile_aspect = mtaspect;
4207                 tiling_info->gfx8.tile_mode =
4208                                 DC_ADDR_SURF_MICRO_TILING_DISPLAY;
4209         } else if (AMDGPU_TILING_GET(tiling_flags, ARRAY_MODE)
4210                         == DC_ARRAY_1D_TILED_THIN1) {
4211                 tiling_info->gfx8.array_mode = DC_ARRAY_1D_TILED_THIN1;
4212         }
4213
4214         tiling_info->gfx8.pipe_config =
4215                         AMDGPU_TILING_GET(tiling_flags, PIPE_CONFIG);
4216 }
4217
4218 static void
4219 fill_gfx9_tiling_info_from_device(const struct amdgpu_device *adev,
4220                                   union dc_tiling_info *tiling_info)
4221 {
4222         tiling_info->gfx9.num_pipes =
4223                 adev->gfx.config.gb_addr_config_fields.num_pipes;
4224         tiling_info->gfx9.num_banks =
4225                 adev->gfx.config.gb_addr_config_fields.num_banks;
4226         tiling_info->gfx9.pipe_interleave =
4227                 adev->gfx.config.gb_addr_config_fields.pipe_interleave_size;
4228         tiling_info->gfx9.num_shader_engines =
4229                 adev->gfx.config.gb_addr_config_fields.num_se;
4230         tiling_info->gfx9.max_compressed_frags =
4231                 adev->gfx.config.gb_addr_config_fields.max_compress_frags;
4232         tiling_info->gfx9.num_rb_per_se =
4233                 adev->gfx.config.gb_addr_config_fields.num_rb_per_se;
4234         tiling_info->gfx9.shaderEnable = 1;
4235         if (adev->asic_type == CHIP_SIENNA_CICHLID ||
4236             adev->asic_type == CHIP_NAVY_FLOUNDER ||
4237             adev->asic_type == CHIP_DIMGREY_CAVEFISH ||
4238             adev->asic_type == CHIP_BEIGE_GOBY ||
4239             adev->asic_type == CHIP_VANGOGH)
4240                 tiling_info->gfx9.num_pkrs = adev->gfx.config.gb_addr_config_fields.num_pkrs;
4241 }
4242
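/*
 * Ask DC whether the requested DCC parameters are achievable for this
 * surface. Returns 0 when DCC is disabled or the capability check
 * passes; video formats with DCC enabled are rejected outright.
 */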
4243 static int
4244 validate_dcc(struct amdgpu_device *adev,
4245              const enum surface_pixel_format format,
4246              const enum dc_rotation_angle rotation,
4247              const union dc_tiling_info *tiling_info,
4248              const struct dc_plane_dcc_param *dcc,
4249              const struct dc_plane_address *address,
4250              const struct plane_size *plane_size)
4251 {
4252         struct dc *dc = adev->dm.dc;
4253         struct dc_dcc_surface_param input;
4254         struct dc_surface_dcc_cap output;
4255
4256         memset(&input, 0, sizeof(input));
4257         memset(&output, 0, sizeof(output));
4258
4259         if (!dcc->enable)
4260                 return 0;
4261
4262         if (format >= SURFACE_PIXEL_FORMAT_VIDEO_BEGIN ||
4263             !dc->cap_funcs.get_dcc_compression_cap)
4264                 return -EINVAL;
4265
4266         input.format = format;
4267         input.surface_size.width = plane_size->surface_size.width;
4268         input.surface_size.height = plane_size->surface_size.height;
4269         input.swizzle_mode = tiling_info->gfx9.swizzle;
4270
4271         if (rotation == ROTATION_ANGLE_0 || rotation == ROTATION_ANGLE_180)
4272                 input.scan = SCAN_DIRECTION_HORIZONTAL;
4273         else if (rotation == ROTATION_ANGLE_90 || rotation == ROTATION_ANGLE_270)
4274                 input.scan = SCAN_DIRECTION_VERTICAL;
4275
4276         if (!dc->cap_funcs.get_dcc_compression_cap(dc, &input, &output))
4277                 return -EINVAL;
4278
4279         if (!output.capable)
4280                 return -EINVAL;
4281
4282         if (dcc->independent_64b_blks == 0 &&
4283             output.grph.rgb.independent_64b_blks != 0)
4284                 return -EINVAL;
4285
4286         return 0;
4287 }
4288
4289 static bool
4290 modifier_has_dcc(uint64_t modifier)
4291 {
4292         return IS_AMD_FMT_MOD(modifier) && AMD_FMT_MOD_GET(DCC, modifier);
4293 }
4294
4295 static unsigned
4296 modifier_gfx9_swizzle_mode(uint64_t modifier)
4297 {
4298         if (modifier == DRM_FORMAT_MOD_LINEAR)
4299                 return 0;
4300
4301         return AMD_FMT_MOD_GET(TILE, modifier);
4302 }
4303
4304 static const struct drm_format_info *
4305 amd_get_format_info(const struct drm_mode_fb_cmd2 *cmd)
4306 {
4307         return amdgpu_lookup_format_info(cmd->pixel_format, cmd->modifier[0]);
4308 }
4309
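/*
 * Start from the device defaults, then let the modifier override the
 * layout it encodes: num_pipes is capped at 16 (2^4), with any remaining
 * PIPE_XOR_BITS attributed to shader engines; AMDGPU_FAMILY_NV and newer
 * carry packers (PACKERS) instead of bank XOR bits.
 */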
4310 static void
4311 fill_gfx9_tiling_info_from_modifier(const struct amdgpu_device *adev,
4312                                     union dc_tiling_info *tiling_info,
4313                                     uint64_t modifier)
4314 {
4315         unsigned int mod_bank_xor_bits = AMD_FMT_MOD_GET(BANK_XOR_BITS, modifier);
4316         unsigned int mod_pipe_xor_bits = AMD_FMT_MOD_GET(PIPE_XOR_BITS, modifier);
4317         unsigned int pkrs_log2 = AMD_FMT_MOD_GET(PACKERS, modifier);
4318         unsigned int pipes_log2 = min(4u, mod_pipe_xor_bits);
4319
4320         fill_gfx9_tiling_info_from_device(adev, tiling_info);
4321
4322         if (!IS_AMD_FMT_MOD(modifier))
4323                 return;
4324
4325         tiling_info->gfx9.num_pipes = 1u << pipes_log2;
4326         tiling_info->gfx9.num_shader_engines = 1u << (mod_pipe_xor_bits - pipes_log2);
4327
4328         if (adev->family >= AMDGPU_FAMILY_NV) {
4329                 tiling_info->gfx9.num_pkrs = 1u << pkrs_log2;
4330         } else {
4331                 tiling_info->gfx9.num_banks = 1u << mod_bank_xor_bits;
4332
4333                 /* for DCC we know it isn't rb aligned, so rb_per_se doesn't matter. */
4334         }
4335 }
4336
4337 enum dm_micro_swizzle {
4338         MICRO_SWIZZLE_Z = 0,
4339         MICRO_SWIZZLE_S = 1,
4340         MICRO_SWIZZLE_D = 2,
4341         MICRO_SWIZZLE_R = 3
4342 };
4343
4344 static bool dm_plane_format_mod_supported(struct drm_plane *plane,
4345                                           uint32_t format,
4346                                           uint64_t modifier)
4347 {
4348         struct amdgpu_device *adev = drm_to_adev(plane->dev);
4349         const struct drm_format_info *info = drm_format_info(format);
4350         int i;
4351
4352         enum dm_micro_swizzle microtile = modifier_gfx9_swizzle_mode(modifier) & 3;
4353
4354         if (!info)
4355                 return false;
4356
4357         /*
4358          * We always have to allow these modifiers:
4359          * 1. Core DRM checks for LINEAR support if userspace does not provide modifiers.
4360          * 2. Not passing any modifiers is the same as explicitly passing INVALID.
4361          */
4362         if (modifier == DRM_FORMAT_MOD_LINEAR ||
4363             modifier == DRM_FORMAT_MOD_INVALID) {
4364                 return true;
4365         }
4366
4367         /* Check that the modifier is on the list of the plane's supported modifiers. */
4368         for (i = 0; i < plane->modifier_count; i++) {
4369                 if (modifier == plane->modifiers[i])
4370                         break;
4371         }
4372         if (i == plane->modifier_count)
4373                 return false;
4374
4375         /*
4376          * For D swizzle the canonical modifier depends on the bpp, so check
4377          * it here.
4378          */
4379         if (AMD_FMT_MOD_GET(TILE_VERSION, modifier) == AMD_FMT_MOD_TILE_VER_GFX9 &&
4380             adev->family >= AMDGPU_FAMILY_NV) {
4381                 if (microtile == MICRO_SWIZZLE_D && info->cpp[0] == 4)
4382                         return false;
4383         }
4384
4385         if (adev->family >= AMDGPU_FAMILY_RV && microtile == MICRO_SWIZZLE_D &&
4386             info->cpp[0] < 8)
4387                 return false;
4388
4389         if (modifier_has_dcc(modifier)) {
4390                 /* Per radeonsi comments, 16/64 bpp are more complicated. */
4391                 if (info->cpp[0] != 4)
4392                         return false;
4393                 /* We support multi-planar formats, but not when combined with
4394                  * additional DCC metadata planes. */
4395                 if (info->num_planes > 1)
4396                         return false;
4397         }
4398
4399         return true;
4400 }
4401
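/*
 * Append one modifier to a kmalloc'd list, doubling the capacity when it
 * runs out. On allocation failure the list is freed and *mods set to
 * NULL, turning later calls into no-ops; callers report that as -ENOMEM
 * once list construction finishes.
 */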
4402 static void
4403 add_modifier(uint64_t **mods, uint64_t *size, uint64_t *cap, uint64_t mod)
4404 {
4405         if (!*mods)
4406                 return;
4407
4408         if (*cap - *size < 1) {
4409                 uint64_t new_cap = *cap * 2;
4410                 uint64_t *new_mods = kmalloc(new_cap * sizeof(uint64_t), GFP_KERNEL);
4411
4412                 if (!new_mods) {
4413                         kfree(*mods);
4414                         *mods = NULL;
4415                         return;
4416                 }
4417
4418                 memcpy(new_mods, *mods, sizeof(uint64_t) * *size);
4419                 kfree(*mods);
4420                 *mods = new_mods;
4421                 *cap = new_cap;
4422         }
4423
4424         (*mods)[*size] = mod;
4425         *size += 1;
4426 }
4427
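/*
 * Build the GFX9 (Vega/Raven) modifier list, most capable layouts first.
 * The XOR bit counts mirror the addressing hardware; e.g. a hypothetical
 * config with 16 pipes and 4 SEs yields pipe_xor_bits = min(8, 4 + 2) = 6
 * and leaves min(8 - 6, ilog2(num_banks)) bits for bank swizzling.
 */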
4428 static void
4429 add_gfx9_modifiers(const struct amdgpu_device *adev,
4430                    uint64_t **mods, uint64_t *size, uint64_t *capacity)
4431 {
4432         int pipes = ilog2(adev->gfx.config.gb_addr_config_fields.num_pipes);
4433         int pipe_xor_bits = min(8, pipes +
4434                                 ilog2(adev->gfx.config.gb_addr_config_fields.num_se));
4435         int bank_xor_bits = min(8 - pipe_xor_bits,
4436                                 ilog2(adev->gfx.config.gb_addr_config_fields.num_banks));
4437         int rb = ilog2(adev->gfx.config.gb_addr_config_fields.num_se) +
4438                  ilog2(adev->gfx.config.gb_addr_config_fields.num_rb_per_se);
4439
4441         if (adev->family == AMDGPU_FAMILY_RV) {
4442                 /* Raven2 and later */
4443                 bool has_constant_encode = adev->asic_type > CHIP_RAVEN || adev->external_rev_id >= 0x81;
4444
4445                 /*
4446                  * No _D DCC swizzles yet because we only allow 32bpp, which
4447                  * doesn't support _D on DCN
4448                  */
4449
4450                 if (has_constant_encode) {
4451                         add_modifier(mods, size, capacity, AMD_FMT_MOD |
4452                                     AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S_X) |
4453                                     AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9) |
4454                                     AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
4455                                     AMD_FMT_MOD_SET(BANK_XOR_BITS, bank_xor_bits) |
4456                                     AMD_FMT_MOD_SET(DCC, 1) |
4457                                     AMD_FMT_MOD_SET(DCC_INDEPENDENT_64B, 1) |
4458                                     AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_64B) |
4459                                     AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 1));
4460                 }
4461
4462                 add_modifier(mods, size, capacity, AMD_FMT_MOD |
4463                             AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S_X) |
4464                             AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9) |
4465                             AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
4466                             AMD_FMT_MOD_SET(BANK_XOR_BITS, bank_xor_bits) |
4467                             AMD_FMT_MOD_SET(DCC, 1) |
4468                             AMD_FMT_MOD_SET(DCC_INDEPENDENT_64B, 1) |
4469                             AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_64B) |
4470                             AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 0));
4471
4472                 if (has_constant_encode) {
4473                         add_modifier(mods, size, capacity, AMD_FMT_MOD |
4474                                     AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S_X) |
4475                                     AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9) |
4476                                     AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
4477                                     AMD_FMT_MOD_SET(BANK_XOR_BITS, bank_xor_bits) |
4478                                     AMD_FMT_MOD_SET(DCC, 1) |
4479                                     AMD_FMT_MOD_SET(DCC_RETILE, 1) |
4480                                     AMD_FMT_MOD_SET(DCC_INDEPENDENT_64B, 1) |
4481                                     AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_64B) |
4483                                     AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 1) |
4484                                     AMD_FMT_MOD_SET(RB, rb) |
4485                                     AMD_FMT_MOD_SET(PIPE, pipes));
4486                 }
4487
4488                 add_modifier(mods, size, capacity, AMD_FMT_MOD |
4489                             AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S_X) |
4490                             AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9) |
4491                             AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
4492                             AMD_FMT_MOD_SET(BANK_XOR_BITS, bank_xor_bits) |
4493                             AMD_FMT_MOD_SET(DCC, 1) |
4494                             AMD_FMT_MOD_SET(DCC_RETILE, 1) |
4495                             AMD_FMT_MOD_SET(DCC_INDEPENDENT_64B, 1) |
4496                             AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_64B) |
4497                             AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 0) |
4498                             AMD_FMT_MOD_SET(RB, rb) |
4499                             AMD_FMT_MOD_SET(PIPE, pipes));
4500         }
4501
4502         /*
4503          * Only supported for 64bpp on Raven, will be filtered on format in
4504          * dm_plane_format_mod_supported.
4505          */
4506         add_modifier(mods, size, capacity, AMD_FMT_MOD |
4507                     AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_D_X) |
4508                     AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9) |
4509                     AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
4510                     AMD_FMT_MOD_SET(BANK_XOR_BITS, bank_xor_bits));
4511
4512         if (adev->family == AMDGPU_FAMILY_RV) {
4513                 add_modifier(mods, size, capacity, AMD_FMT_MOD |
4514                             AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S_X) |
4515                             AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9) |
4516                             AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
4517                             AMD_FMT_MOD_SET(BANK_XOR_BITS, bank_xor_bits));
4518         }
4519
4520         /*
4521          * Only supported for 64bpp on Raven, will be filtered on format in
4522          * dm_plane_format_mod_supported.
4523          */
4524         add_modifier(mods, size, capacity, AMD_FMT_MOD |
4525                     AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_D) |
4526                     AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9));
4527
4528         if (adev->family == AMDGPU_FAMILY_RV) {
4529                 add_modifier(mods, size, capacity, AMD_FMT_MOD |
4530                             AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S) |
4531                             AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9));
4532         }
4533 }
4534
4535 static void
4536 add_gfx10_1_modifiers(const struct amdgpu_device *adev,
4537                       uint64_t **mods, uint64_t *size, uint64_t *capacity)
4538 {
4539         int pipe_xor_bits = ilog2(adev->gfx.config.gb_addr_config_fields.num_pipes);
4540
4541         add_modifier(mods, size, capacity, AMD_FMT_MOD |
4542                     AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_R_X) |
4543                     AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10) |
4544                     AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
4545                     AMD_FMT_MOD_SET(DCC, 1) |
4546                     AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 1) |
4547                     AMD_FMT_MOD_SET(DCC_INDEPENDENT_64B, 1) |
4548                     AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_64B));
4549
4550         add_modifier(mods, size, capacity, AMD_FMT_MOD |
4551                     AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_R_X) |
4552                     AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10) |
4553                     AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
4554                     AMD_FMT_MOD_SET(DCC, 1) |
4555                     AMD_FMT_MOD_SET(DCC_RETILE, 1) |
4556                     AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 1) |
4557                     AMD_FMT_MOD_SET(DCC_INDEPENDENT_64B, 1) |
4558                     AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_64B));
4559
4560         add_modifier(mods, size, capacity, AMD_FMT_MOD |
4561                     AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_R_X) |
4562                     AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10) |
4563                     AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits));
4564
4565         add_modifier(mods, size, capacity, AMD_FMT_MOD |
4566                     AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S_X) |
4567                     AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10) |
4568                     AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits));
4569
4571         /* Only supported for 64bpp, will be filtered in dm_plane_format_mod_supported */
4572         add_modifier(mods, size, capacity, AMD_FMT_MOD |
4573                     AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_D) |
4574                     AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9));
4575
4576         add_modifier(mods, size, capacity, AMD_FMT_MOD |
4577                     AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S) |
4578                     AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9));
4579 }
4580
4581 static void
4582 add_gfx10_3_modifiers(const struct amdgpu_device *adev,
4583                       uint64_t **mods, uint64_t *size, uint64_t *capacity)
4584 {
4585         int pipe_xor_bits = ilog2(adev->gfx.config.gb_addr_config_fields.num_pipes);
4586         int pkrs = ilog2(adev->gfx.config.gb_addr_config_fields.num_pkrs);
4587
4588         add_modifier(mods, size, capacity, AMD_FMT_MOD |
4589                     AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_R_X) |
4590                     AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10_RBPLUS) |
4591                     AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
4592                     AMD_FMT_MOD_SET(PACKERS, pkrs) |
4593                     AMD_FMT_MOD_SET(DCC, 1) |
4594                     AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 1) |
4595                     AMD_FMT_MOD_SET(DCC_INDEPENDENT_64B, 1) |
4596                     AMD_FMT_MOD_SET(DCC_INDEPENDENT_128B, 1) |
4597                     AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_64B));
4598
4599         add_modifier(mods, size, capacity, AMD_FMT_MOD |
4600                     AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_R_X) |
4601                     AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10_RBPLUS) |
4602                     AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
4603                     AMD_FMT_MOD_SET(PACKERS, pkrs) |
4604                     AMD_FMT_MOD_SET(DCC, 1) |
4605                     AMD_FMT_MOD_SET(DCC_RETILE, 1) |
4606                     AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 1) |
4607                     AMD_FMT_MOD_SET(DCC_INDEPENDENT_64B, 1) |
4608                     AMD_FMT_MOD_SET(DCC_INDEPENDENT_128B, 1) |
4609                     AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_64B));
4610
4611         add_modifier(mods, size, capacity, AMD_FMT_MOD |
4612                     AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_R_X) |
4613                     AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10_RBPLUS) |
4614                     AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
4615                     AMD_FMT_MOD_SET(PACKERS, pkrs));
4616
4617         add_modifier(mods, size, capacity, AMD_FMT_MOD |
4618                     AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S_X) |
4619                     AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10_RBPLUS) |
4620                     AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
4621                     AMD_FMT_MOD_SET(PACKERS, pkrs));
4622
4623         /* Only supported for 64bpp, will be filtered in dm_plane_format_mod_supported */
4624         add_modifier(mods, size, capacity, AMD_FMT_MOD |
4625                     AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_D) |
4626                     AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9));
4627
4628         add_modifier(mods, size, capacity, AMD_FMT_MOD |
4629                     AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S) |
4630                     AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9));
4631 }
4632
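/*
 * Assemble the modifier list a plane advertises to userspace: the
 * family-specific tiled/DCC layouts first, then LINEAR, then the INVALID
 * terminator. Cursor planes only get LINEAR (plus the terminator).
 */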
4633 static int
4634 get_plane_modifiers(const struct amdgpu_device *adev, unsigned int plane_type, uint64_t **mods)
4635 {
4636         uint64_t size = 0, capacity = 128;
4637         *mods = NULL;
4638
4639         /* We have not hooked up any pre-GFX9 modifiers. */
4640         if (adev->family < AMDGPU_FAMILY_AI)
4641                 return 0;
4642
4643         *mods = kmalloc(capacity * sizeof(uint64_t), GFP_KERNEL);
4644
4645         if (plane_type == DRM_PLANE_TYPE_CURSOR) {
4646                 add_modifier(mods, &size, &capacity, DRM_FORMAT_MOD_LINEAR);
4647                 add_modifier(mods, &size, &capacity, DRM_FORMAT_MOD_INVALID);
4648                 return *mods ? 0 : -ENOMEM;
4649         }
4650
4651         switch (adev->family) {
4652         case AMDGPU_FAMILY_AI:
4653         case AMDGPU_FAMILY_RV:
4654                 add_gfx9_modifiers(adev, mods, &size, &capacity);
4655                 break;
4656         case AMDGPU_FAMILY_NV:
4657         case AMDGPU_FAMILY_VGH:
4658                 if (adev->asic_type >= CHIP_SIENNA_CICHLID)
4659                         add_gfx10_3_modifiers(adev, mods, &size, &capacity);
4660                 else
4661                         add_gfx10_1_modifiers(adev, mods, &size, &capacity);
4662                 break;
4663         }
4664
4665         add_modifier(mods, &size, &capacity, DRM_FORMAT_MOD_LINEAR);
4666
4667         /* INVALID marks the end of the list. */
4668         add_modifier(mods, &size, &capacity, DRM_FORMAT_MOD_INVALID);
4669
4670         if (!*mods)
4671                 return -ENOMEM;
4672
4673         return 0;
4674 }
4675
4676 static int
4677 fill_gfx9_plane_attributes_from_modifiers(struct amdgpu_device *adev,
4678                                           const struct amdgpu_framebuffer *afb,
4679                                           const enum surface_pixel_format format,
4680                                           const enum dc_rotation_angle rotation,
4681                                           const struct plane_size *plane_size,
4682                                           union dc_tiling_info *tiling_info,
4683                                           struct dc_plane_dcc_param *dcc,
4684                                           struct dc_plane_address *address,
4685                                           const bool force_disable_dcc)
4686 {
4687         const uint64_t modifier = afb->base.modifier;
4688         int ret;
4689
4690         fill_gfx9_tiling_info_from_modifier(adev, tiling_info, modifier);
4691         tiling_info->gfx9.swizzle = modifier_gfx9_swizzle_mode(modifier);
4692
4693         if (modifier_has_dcc(modifier) && !force_disable_dcc) {
4694                 uint64_t dcc_address = afb->address + afb->base.offsets[1];
4695
4696                 dcc->enable = 1;
4697                 dcc->meta_pitch = afb->base.pitches[1];
4698                 dcc->independent_64b_blks = AMD_FMT_MOD_GET(DCC_INDEPENDENT_64B, modifier);
4699
4700                 address->grph.meta_addr.low_part = lower_32_bits(dcc_address);
4701                 address->grph.meta_addr.high_part = upper_32_bits(dcc_address);
4702         }
4703
4704         ret = validate_dcc(adev, format, rotation, tiling_info, dcc, address, plane_size);
4705         if (ret)
4706                 return ret;
4707
4708         return 0;
4709 }
4710
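/*
 * Translate framebuffer geometry into DC's plane description: RGB
 * surfaces program a single GRAPHICS address, while semi-planar YUV
 * surfaces also program a chroma plane at half the luma dimensions (see
 * the TODO below) via the VIDEO_PROGRESSIVE address type.
 */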
4711 static int
4712 fill_plane_buffer_attributes(struct amdgpu_device *adev,
4713                              const struct amdgpu_framebuffer *afb,
4714                              const enum surface_pixel_format format,
4715                              const enum dc_rotation_angle rotation,
4716                              const uint64_t tiling_flags,
4717                              union dc_tiling_info *tiling_info,
4718                              struct plane_size *plane_size,
4719                              struct dc_plane_dcc_param *dcc,
4720                              struct dc_plane_address *address,
4721                              bool tmz_surface,
4722                              bool force_disable_dcc)
4723 {
4724         const struct drm_framebuffer *fb = &afb->base;
4725         int ret;
4726
4727         memset(tiling_info, 0, sizeof(*tiling_info));
4728         memset(plane_size, 0, sizeof(*plane_size));
4729         memset(dcc, 0, sizeof(*dcc));
4730         memset(address, 0, sizeof(*address));
4731
4732         address->tmz_surface = tmz_surface;
4733
4734         if (format < SURFACE_PIXEL_FORMAT_VIDEO_BEGIN) {
4735                 uint64_t addr = afb->address + fb->offsets[0];
4736
4737                 plane_size->surface_size.x = 0;
4738                 plane_size->surface_size.y = 0;
4739                 plane_size->surface_size.width = fb->width;
4740                 plane_size->surface_size.height = fb->height;
4741                 plane_size->surface_pitch =
4742                         fb->pitches[0] / fb->format->cpp[0];
4743
4744                 address->type = PLN_ADDR_TYPE_GRAPHICS;
4745                 address->grph.addr.low_part = lower_32_bits(addr);
4746                 address->grph.addr.high_part = upper_32_bits(addr);
4747         } else if (format < SURFACE_PIXEL_FORMAT_INVALID) {
4748                 uint64_t luma_addr = afb->address + fb->offsets[0];
4749                 uint64_t chroma_addr = afb->address + fb->offsets[1];
4750
4751                 plane_size->surface_size.x = 0;
4752                 plane_size->surface_size.y = 0;
4753                 plane_size->surface_size.width = fb->width;
4754                 plane_size->surface_size.height = fb->height;
4755                 plane_size->surface_pitch =
4756                         fb->pitches[0] / fb->format->cpp[0];
4757
4758                 plane_size->chroma_size.x = 0;
4759                 plane_size->chroma_size.y = 0;
4760                 /* TODO: set these based on surface format */
4761                 plane_size->chroma_size.width = fb->width / 2;
4762                 plane_size->chroma_size.height = fb->height / 2;
4763
4764                 plane_size->chroma_pitch =
4765                         fb->pitches[1] / fb->format->cpp[1];
4766
4767                 address->type = PLN_ADDR_TYPE_VIDEO_PROGRESSIVE;
4768                 address->video_progressive.luma_addr.low_part =
4769                         lower_32_bits(luma_addr);
4770                 address->video_progressive.luma_addr.high_part =
4771                         upper_32_bits(luma_addr);
4772                 address->video_progressive.chroma_addr.low_part =
4773                         lower_32_bits(chroma_addr);
4774                 address->video_progressive.chroma_addr.high_part =
4775                         upper_32_bits(chroma_addr);
4776         }
4777
4778         if (adev->family >= AMDGPU_FAMILY_AI) {
4779                 ret = fill_gfx9_plane_attributes_from_modifiers(adev, afb, format,
4780                                                                 rotation, plane_size,
4781                                                                 tiling_info, dcc,
4782                                                                 address,
4783                                                                 force_disable_dcc);
4784                 if (ret)
4785                         return ret;
4786         } else {
4787                 fill_gfx8_tiling_info_from_flags(tiling_info, tiling_flags);
4788         }
4789
4790         return 0;
4791 }
4792
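/*
 * Derive DC blending state from DRM plane properties. Only overlay
 * planes participate: per-pixel alpha requires DRM_MODE_BLEND_PREMULTI
 * plus an alpha-capable format, and a plane alpha below 0xffff maps the
 * 16-bit DRM value to DC's 8-bit global alpha by dropping the low byte.
 */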
4793 static void
4794 fill_blending_from_plane_state(const struct drm_plane_state *plane_state,
4795                                bool *per_pixel_alpha, bool *global_alpha,
4796                                int *global_alpha_value)
4797 {
4798         *per_pixel_alpha = false;
4799         *global_alpha = false;
4800         *global_alpha_value = 0xff;
4801
4802         if (plane_state->plane->type != DRM_PLANE_TYPE_OVERLAY)
4803                 return;
4804
4805         if (plane_state->pixel_blend_mode == DRM_MODE_BLEND_PREMULTI) {
4806                 static const uint32_t alpha_formats[] = {
4807                         DRM_FORMAT_ARGB8888,
4808                         DRM_FORMAT_RGBA8888,
4809                         DRM_FORMAT_ABGR8888,
4810                 };
4811                 uint32_t format = plane_state->fb->format->format;
4812                 unsigned int i;
4813
4814                 for (i = 0; i < ARRAY_SIZE(alpha_formats); ++i) {
4815                         if (format == alpha_formats[i]) {
4816                                 *per_pixel_alpha = true;
4817                                 break;
4818                         }
4819                 }
4820         }
4821
4822         if (plane_state->alpha < 0xffff) {
4823                 *global_alpha = true;
4824                 *global_alpha_value = plane_state->alpha >> 8;
4825         }
4826 }
4827
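/*
 * Map the DRM color encoding/range properties to a DC color space for
 * YUV surfaces; RGB always stays sRGB. BT.601 and BT.709 support both
 * full and limited range, while BT.2020 is accepted full-range only.
 */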
4828 static int
4829 fill_plane_color_attributes(const struct drm_plane_state *plane_state,
4830                             const enum surface_pixel_format format,
4831                             enum dc_color_space *color_space)
4832 {
4833         bool full_range;
4834
4835         *color_space = COLOR_SPACE_SRGB;
4836
4837         /* DRM color properties only affect non-RGB formats. */
4838         if (format < SURFACE_PIXEL_FORMAT_VIDEO_BEGIN)
4839                 return 0;
4840
4841         full_range = (plane_state->color_range == DRM_COLOR_YCBCR_FULL_RANGE);
4842
4843         switch (plane_state->color_encoding) {
4844         case DRM_COLOR_YCBCR_BT601:
4845                 if (full_range)
4846                         *color_space = COLOR_SPACE_YCBCR601;
4847                 else
4848                         *color_space = COLOR_SPACE_YCBCR601_LIMITED;
4849                 break;
4850
4851         case DRM_COLOR_YCBCR_BT709:
4852                 if (full_range)
4853                         *color_space = COLOR_SPACE_YCBCR709;
4854                 else
4855                         *color_space = COLOR_SPACE_YCBCR709_LIMITED;
4856                 break;
4857
4858         case DRM_COLOR_YCBCR_BT2020:
4859                 if (full_range)
4860                         *color_space = COLOR_SPACE_2020_YCBCR;
4861                 else
4862                         return -EINVAL;
4863                 break;
4864
4865         default:
4866                 return -EINVAL;
4867         }
4868
4869         return 0;
4870 }
4871
4872 static int
4873 fill_dc_plane_info_and_addr(struct amdgpu_device *adev,
4874                             const struct drm_plane_state *plane_state,
4875                             const uint64_t tiling_flags,
4876                             struct dc_plane_info *plane_info,
4877                             struct dc_plane_address *address,
4878                             bool tmz_surface,
4879                             bool force_disable_dcc)
4880 {
4881         const struct drm_framebuffer *fb = plane_state->fb;
4882         const struct amdgpu_framebuffer *afb =
4883                 to_amdgpu_framebuffer(plane_state->fb);
4884         int ret;
4885
4886         memset(plane_info, 0, sizeof(*plane_info));
4887
4888         switch (fb->format->format) {
4889         case DRM_FORMAT_C8:
4890                 plane_info->format =
4891                         SURFACE_PIXEL_FORMAT_GRPH_PALETA_256_COLORS;
4892                 break;
4893         case DRM_FORMAT_RGB565:
4894                 plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_RGB565;
4895                 break;
4896         case DRM_FORMAT_XRGB8888:
4897         case DRM_FORMAT_ARGB8888:
4898                 plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ARGB8888;
4899                 break;
4900         case DRM_FORMAT_XRGB2101010:
4901         case DRM_FORMAT_ARGB2101010:
4902                 plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ARGB2101010;
4903                 break;
4904         case DRM_FORMAT_XBGR2101010:
4905         case DRM_FORMAT_ABGR2101010:
4906                 plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ABGR2101010;
4907                 break;
4908         case DRM_FORMAT_XBGR8888:
4909         case DRM_FORMAT_ABGR8888:
4910                 plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ABGR8888;
4911                 break;
4912         case DRM_FORMAT_NV21:
4913                 plane_info->format = SURFACE_PIXEL_FORMAT_VIDEO_420_YCbCr;
4914                 break;
4915         case DRM_FORMAT_NV12:
4916                 plane_info->format = SURFACE_PIXEL_FORMAT_VIDEO_420_YCrCb;
4917                 break;
4918         case DRM_FORMAT_P010:
4919                 plane_info->format = SURFACE_PIXEL_FORMAT_VIDEO_420_10bpc_YCrCb;
4920                 break;
4921         case DRM_FORMAT_XRGB16161616F:
4922         case DRM_FORMAT_ARGB16161616F:
4923                 plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ARGB16161616F;
4924                 break;
4925         case DRM_FORMAT_XBGR16161616F:
4926         case DRM_FORMAT_ABGR16161616F:
4927                 plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ABGR16161616F;
4928                 break;
4929         default:
4930                 DRM_ERROR(
4931                         "Unsupported screen format %p4cc\n",
4932                         &fb->format->format);
4933                 return -EINVAL;
4934         }
4935
4936         switch (plane_state->rotation & DRM_MODE_ROTATE_MASK) {
4937         case DRM_MODE_ROTATE_0:
4938                 plane_info->rotation = ROTATION_ANGLE_0;
4939                 break;
4940         case DRM_MODE_ROTATE_90:
4941                 plane_info->rotation = ROTATION_ANGLE_90;
4942                 break;
4943         case DRM_MODE_ROTATE_180:
4944                 plane_info->rotation = ROTATION_ANGLE_180;
4945                 break;
4946         case DRM_MODE_ROTATE_270:
4947                 plane_info->rotation = ROTATION_ANGLE_270;
4948                 break;
4949         default:
4950                 plane_info->rotation = ROTATION_ANGLE_0;
4951                 break;
4952         }
4953
4954         plane_info->visible = true;
4955         plane_info->stereo_format = PLANE_STEREO_FORMAT_NONE;
4956
4957         plane_info->layer_index = 0;
4958
4959         ret = fill_plane_color_attributes(plane_state, plane_info->format,
4960                                           &plane_info->color_space);
4961         if (ret)
4962                 return ret;
4963
4964         ret = fill_plane_buffer_attributes(adev, afb, plane_info->format,
4965                                            plane_info->rotation, tiling_flags,
4966                                            &plane_info->tiling_info,
4967                                            &plane_info->plane_size,
4968                                            &plane_info->dcc, address, tmz_surface,
4969                                            force_disable_dcc);
4970         if (ret)
4971                 return ret;
4972
4973         fill_blending_from_plane_state(
4974                 plane_state, &plane_info->per_pixel_alpha,
4975                 &plane_info->global_alpha, &plane_info->global_alpha_value);
4976
4977         return 0;
4978 }
4979
4980 static int fill_dc_plane_attributes(struct amdgpu_device *adev,
4981                                     struct dc_plane_state *dc_plane_state,
4982                                     struct drm_plane_state *plane_state,
4983                                     struct drm_crtc_state *crtc_state)
4984 {
4985         struct dm_crtc_state *dm_crtc_state = to_dm_crtc_state(crtc_state);
4986         struct amdgpu_framebuffer *afb = (struct amdgpu_framebuffer *)plane_state->fb;
4987         struct dc_scaling_info scaling_info;
4988         struct dc_plane_info plane_info;
4989         int ret;
4990         bool force_disable_dcc = false;
4991
4992         ret = fill_dc_scaling_info(plane_state, &scaling_info);
4993         if (ret)
4994                 return ret;
4995
4996         dc_plane_state->src_rect = scaling_info.src_rect;
4997         dc_plane_state->dst_rect = scaling_info.dst_rect;
4998         dc_plane_state->clip_rect = scaling_info.clip_rect;
4999         dc_plane_state->scaling_quality = scaling_info.scaling_quality;
5000
5001         force_disable_dcc = adev->asic_type == CHIP_RAVEN && adev->in_suspend;
5002         ret = fill_dc_plane_info_and_addr(adev, plane_state,
5003                                           afb->tiling_flags,
5004                                           &plane_info,
5005                                           &dc_plane_state->address,
5006                                           afb->tmz_surface,
5007                                           force_disable_dcc);
5008         if (ret)
5009                 return ret;
5010
5011         dc_plane_state->format = plane_info.format;
5012         dc_plane_state->color_space = plane_info.color_space;
5014         dc_plane_state->plane_size = plane_info.plane_size;
5015         dc_plane_state->rotation = plane_info.rotation;
5016         dc_plane_state->horizontal_mirror = plane_info.horizontal_mirror;
5017         dc_plane_state->stereo_format = plane_info.stereo_format;
5018         dc_plane_state->tiling_info = plane_info.tiling_info;
5019         dc_plane_state->visible = plane_info.visible;
5020         dc_plane_state->per_pixel_alpha = plane_info.per_pixel_alpha;
5021         dc_plane_state->global_alpha = plane_info.global_alpha;
5022         dc_plane_state->global_alpha_value = plane_info.global_alpha_value;
5023         dc_plane_state->dcc = plane_info.dcc;
5024         dc_plane_state->layer_index = plane_info.layer_index; /* always 0 for now */
5025         dc_plane_state->flip_int_enabled = true;
5026
5027         /*
5028          * Always set input transfer function, since plane state is refreshed
5029          * every time.
5030          */
5031         ret = amdgpu_dm_update_plane_color_mgmt(dm_crtc_state, dc_plane_state);
5032         if (ret)
5033                 return ret;
5034
5035         return 0;
5036 }
5037
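/*
 * Map the connector's RMX scaling state onto the stream's src/dst rects.
 * The aspect-preserving path letter/pillar-boxes the mode: e.g. a
 * 1920x1080 mode on a 1280x1024 timing becomes a 1280x720 dst centered
 * at (0, 152), while RMX_CENTER shows the mode 1:1 without scaling.
 */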
5038 static void update_stream_scaling_settings(const struct drm_display_mode *mode,
5039                                            const struct dm_connector_state *dm_state,
5040                                            struct dc_stream_state *stream)
5041 {
5042         enum amdgpu_rmx_type rmx_type;
5043
5044         struct rect src = { 0 }; /* viewport in composition space */
5045         struct rect dst = { 0 }; /* stream addressable area */
5046
5047         /* no mode, nothing to be done */
5048         if (!mode)
5049                 return;
5050
5051         /* Full screen scaling by default */
5052         src.width = mode->hdisplay;
5053         src.height = mode->vdisplay;
5054         dst.width = stream->timing.h_addressable;
5055         dst.height = stream->timing.v_addressable;
5056
5057         if (dm_state) {
5058                 rmx_type = dm_state->scaling;
5059                 if (rmx_type == RMX_ASPECT || rmx_type == RMX_OFF) {
5060                         if (src.width * dst.height <
5061                                         src.height * dst.width) {
5062                                 /* height needs less upscaling/more downscaling */
5063                                 dst.width = src.width *
5064                                                 dst.height / src.height;
5065                         } else {
5066                                 /* width needs less upscaling/more downscaling */
5067                                 dst.height = src.height *
5068                                                 dst.width / src.width;
5069                         }
5070                 } else if (rmx_type == RMX_CENTER) {
5071                         dst = src;
5072                 }
5073
5074                 dst.x = (stream->timing.h_addressable - dst.width) / 2;
5075                 dst.y = (stream->timing.v_addressable - dst.height) / 2;
5076
5077                 if (dm_state->underscan_enable) {
5078                         dst.x += dm_state->underscan_hborder / 2;
5079                         dst.y += dm_state->underscan_vborder / 2;
5080                         dst.width -= dm_state->underscan_hborder;
5081                         dst.height -= dm_state->underscan_vborder;
5082                 }
5083         }
5084
5085         stream->src = src;
5086         stream->dst = dst;
5087
5088         DRM_DEBUG_KMS("Destination Rectangle x:%d  y:%d  width:%d  height:%d\n",
5089                       dst.x, dst.y, dst.width, dst.height);
5091 }
5092
5093 static enum dc_color_depth
5094 convert_color_depth_from_display_info(const struct drm_connector *connector,
5095                                       bool is_y420, int requested_bpc)
5096 {
5097         uint8_t bpc;
5098
5099         if (is_y420) {
5100                 bpc = 8;
5101
5102                 /* Cap display bpc based on HDMI 2.0 HF-VSDB */
5103                 if (connector->display_info.hdmi.y420_dc_modes & DRM_EDID_YCBCR420_DC_48)
5104                         bpc = 16;
5105                 else if (connector->display_info.hdmi.y420_dc_modes & DRM_EDID_YCBCR420_DC_36)
5106                         bpc = 12;
5107                 else if (connector->display_info.hdmi.y420_dc_modes & DRM_EDID_YCBCR420_DC_30)
5108                         bpc = 10;
5109         } else {
5110                 bpc = (uint8_t)connector->display_info.bpc;
5111                 /* Assume 8 bpc by default if no bpc is specified. */
5112                 bpc = bpc ? bpc : 8;
5113         }
5114
5115         if (requested_bpc > 0) {
5116                 /*
5117                  * Cap display bpc based on the user-requested value.
5118                  *
5119                  * The value of state->max_bpc may not be correctly updated
5120                  * depending on when the connector gets added to the state
5121                  * or if this was called outside of atomic check, so it
5122                  * can't be used directly.
5123                  */
5124                 bpc = min_t(u8, bpc, requested_bpc);
5125
5126                 /* Round down to the nearest even number. */
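                /* e.g. requested_bpc = 11 on a 12 bpc panel yields bpc = 10 */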
5127                 bpc = bpc - (bpc & 1);
5128         }
5129
5130         switch (bpc) {
5131         case 0:
5132                 /*
5133                  * Temporary workaround: DRM doesn't parse color depth for
5134                  * EDID revisions before 1.4.
5135                  * TODO: Fix EDID parsing
5136                  */
5137                 return COLOR_DEPTH_888;
5138         case 6:
5139                 return COLOR_DEPTH_666;
5140         case 8:
5141                 return COLOR_DEPTH_888;
5142         case 10:
5143                 return COLOR_DEPTH_101010;
5144         case 12:
5145                 return COLOR_DEPTH_121212;
5146         case 14:
5147                 return COLOR_DEPTH_141414;
5148         case 16:
5149                 return COLOR_DEPTH_161616;
5150         default:
5151                 return COLOR_DEPTH_UNDEFINED;
5152         }
5153 }
5154
5155 static enum dc_aspect_ratio
5156 get_aspect_ratio(const struct drm_display_mode *mode_in)
5157 {
5158         /* 1-1 mapping, since both enums follow the HDMI spec. */
5159         return (enum dc_aspect_ratio) mode_in->picture_aspect_ratio;
5160 }
5161
5162 static enum dc_color_space
5163 get_output_color_space(const struct dc_crtc_timing *dc_crtc_timing)
5164 {
5165         enum dc_color_space color_space = COLOR_SPACE_SRGB;
5166
5167         switch (dc_crtc_timing->pixel_encoding) {
5168         case PIXEL_ENCODING_YCBCR422:
5169         case PIXEL_ENCODING_YCBCR444:
5170         case PIXEL_ENCODING_YCBCR420:
5171         {
5172                 /*
5173                  * 27030 kHz is the separation point between HDTV and SDTV;
5174                  * per the HDMI spec we use YCbCr709 above it and YCbCr601
5175                  * below it.
5176                  */
5177                 if (dc_crtc_timing->pix_clk_100hz > 270300) {
5178                         if (dc_crtc_timing->flags.Y_ONLY)
5179                                 color_space =
5180                                         COLOR_SPACE_YCBCR709_LIMITED;
5181                         else
5182                                 color_space = COLOR_SPACE_YCBCR709;
5183                 } else {
5184                         if (dc_crtc_timing->flags.Y_ONLY)
5185                                 color_space =
5186                                         COLOR_SPACE_YCBCR601_LIMITED;
5187                         else
5188                                 color_space = COLOR_SPACE_YCBCR601;
5189                 }
5190
5191         }
5192         break;
5193         case PIXEL_ENCODING_RGB:
5194                 color_space = COLOR_SPACE_SRGB;
5195                 break;
5196
5197         default:
5198                 WARN_ON(1);
5199                 break;
5200         }
5201
5202         return color_space;
5203 }
5204
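/*
 * Step down from the requested colour depth until the adjusted pixel
 * clock fits the sink's max_tmds_clock (both in kHz). Deep colour scales
 * the clock proportionally, e.g. 10-bit output costs 30/24 = 1.25x, so a
 * 300000 kHz mode needs a 375000 kHz TMDS budget at 10 bpc.
 */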
5205 static bool adjust_colour_depth_from_display_info(
5206         struct dc_crtc_timing *timing_out,
5207         const struct drm_display_info *info)
5208 {
5209         enum dc_color_depth depth = timing_out->display_color_depth;
5210         int normalized_clk;
5211         do {
5212                 normalized_clk = timing_out->pix_clk_100hz / 10;
5213                 /* YCbCr 4:2:0 requires additional adjustment of 1/2 */
5214                 if (timing_out->pixel_encoding == PIXEL_ENCODING_YCBCR420)
5215                         normalized_clk /= 2;
5216                 /* Adjust the pixel clock per the HDMI spec, based on colour depth. */
5217                 switch (depth) {
5218                 case COLOR_DEPTH_888:
5219                         break;
5220                 case COLOR_DEPTH_101010:
5221                         normalized_clk = (normalized_clk * 30) / 24;
5222                         break;
5223                 case COLOR_DEPTH_121212:
5224                         normalized_clk = (normalized_clk * 36) / 24;
5225                         break;
5226                 case COLOR_DEPTH_161616:
5227                         normalized_clk = (normalized_clk * 48) / 24;
5228                         break;
5229                 default:
5230                         /* The above depths are the only ones valid for HDMI. */
5231                         return false;
5232                 }
5233                 if (normalized_clk <= info->max_tmds_clock) {
5234                         timing_out->display_color_depth = depth;
5235                         return true;
5236                 }
5237         } while (--depth > COLOR_DEPTH_666);
5238         return false;
5239 }
5240
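/*
 * Fill the DC stream timing, pixel encoding and colour depth from the
 * DRM mode and connector state. Freesync video modes read the base
 * timings; everything else uses the hardware crtc_* timings.
 */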
5241 static void fill_stream_properties_from_drm_display_mode(
5242         struct dc_stream_state *stream,
5243         const struct drm_display_mode *mode_in,
5244         const struct drm_connector *connector,
5245         const struct drm_connector_state *connector_state,
5246         const struct dc_stream_state *old_stream,
5247         int requested_bpc)
5248 {
5249         struct dc_crtc_timing *timing_out = &stream->timing;
5250         const struct drm_display_info *info = &connector->display_info;
5251         struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
5252         struct hdmi_vendor_infoframe hv_frame;
5253         struct hdmi_avi_infoframe avi_frame;
5254
5255         memset(&hv_frame, 0, sizeof(hv_frame));
5256         memset(&avi_frame, 0, sizeof(avi_frame));
5257
5258         timing_out->h_border_left = 0;
5259         timing_out->h_border_right = 0;
5260         timing_out->v_border_top = 0;
5261         timing_out->v_border_bottom = 0;
5262         /* TODO: un-hardcode */
5263         if (drm_mode_is_420_only(info, mode_in)
5264                         && stream->signal == SIGNAL_TYPE_HDMI_TYPE_A)
5265                 timing_out->pixel_encoding = PIXEL_ENCODING_YCBCR420;
5266         else if (drm_mode_is_420_also(info, mode_in)
5267                         && aconnector->force_yuv420_output)
5268                 timing_out->pixel_encoding = PIXEL_ENCODING_YCBCR420;
5269         else if ((connector->display_info.color_formats & DRM_COLOR_FORMAT_YCRCB444)
5270                         && stream->signal == SIGNAL_TYPE_HDMI_TYPE_A)
5271                 timing_out->pixel_encoding = PIXEL_ENCODING_YCBCR444;
5272         else
5273                 timing_out->pixel_encoding = PIXEL_ENCODING_RGB;
5274
5275         timing_out->timing_3d_format = TIMING_3D_FORMAT_NONE;
5276         timing_out->display_color_depth = convert_color_depth_from_display_info(
5277                 connector,
5278                 (timing_out->pixel_encoding == PIXEL_ENCODING_YCBCR420),
5279                 requested_bpc);
5280         timing_out->scan_type = SCANNING_TYPE_NODATA;
5281         timing_out->hdmi_vic = 0;
5282
5283         if (old_stream) {
5284                 timing_out->vic = old_stream->timing.vic;
5285                 timing_out->flags.HSYNC_POSITIVE_POLARITY = old_stream->timing.flags.HSYNC_POSITIVE_POLARITY;
5286                 timing_out->flags.VSYNC_POSITIVE_POLARITY = old_stream->timing.flags.VSYNC_POSITIVE_POLARITY;
5287         } else {
5288                 timing_out->vic = drm_match_cea_mode(mode_in);
5289                 if (mode_in->flags & DRM_MODE_FLAG_PHSYNC)
5290                         timing_out->flags.HSYNC_POSITIVE_POLARITY = 1;
5291                 if (mode_in->flags & DRM_MODE_FLAG_PVSYNC)
5292                         timing_out->flags.VSYNC_POSITIVE_POLARITY = 1;
5293         }
5294
5295         if (stream->signal == SIGNAL_TYPE_HDMI_TYPE_A) {
5296                 drm_hdmi_avi_infoframe_from_display_mode(&avi_frame, (struct drm_connector *)connector, mode_in);
5297                 timing_out->vic = avi_frame.video_code;
5298                 drm_hdmi_vendor_infoframe_from_display_mode(&hv_frame, (struct drm_connector *)connector, mode_in);
5299                 timing_out->hdmi_vic = hv_frame.vic;
5300         }
5301
5302         if (is_freesync_video_mode(mode_in, aconnector)) {
5303                 timing_out->h_addressable = mode_in->hdisplay;
5304                 timing_out->h_total = mode_in->htotal;
5305                 timing_out->h_sync_width = mode_in->hsync_end - mode_in->hsync_start;
5306                 timing_out->h_front_porch = mode_in->hsync_start - mode_in->hdisplay;
5307                 timing_out->v_total = mode_in->vtotal;
5308                 timing_out->v_addressable = mode_in->vdisplay;
5309                 timing_out->v_front_porch = mode_in->vsync_start - mode_in->vdisplay;
5310                 timing_out->v_sync_width = mode_in->vsync_end - mode_in->vsync_start;
5311                 timing_out->pix_clk_100hz = mode_in->clock * 10;
5312         } else {
5313                 timing_out->h_addressable = mode_in->crtc_hdisplay;
5314                 timing_out->h_total = mode_in->crtc_htotal;
5315                 timing_out->h_sync_width = mode_in->crtc_hsync_end - mode_in->crtc_hsync_start;
5316                 timing_out->h_front_porch = mode_in->crtc_hsync_start - mode_in->crtc_hdisplay;
5317                 timing_out->v_total = mode_in->crtc_vtotal;
5318                 timing_out->v_addressable = mode_in->crtc_vdisplay;
5319                 timing_out->v_front_porch = mode_in->crtc_vsync_start - mode_in->crtc_vdisplay;
5320                 timing_out->v_sync_width = mode_in->crtc_vsync_end - mode_in->crtc_vsync_start;
5321                 timing_out->pix_clk_100hz = mode_in->crtc_clock * 10;
5322         }
5323
5324         timing_out->aspect_ratio = get_aspect_ratio(mode_in);
5325
5326         stream->output_color_space = get_output_color_space(timing_out);
5327
5328         stream->out_transfer_func->type = TF_TYPE_PREDEFINED;
5329         stream->out_transfer_func->tf = TRANSFER_FUNCTION_SRGB;
5330         if (stream->signal == SIGNAL_TYPE_HDMI_TYPE_A) {
5331                 if (!adjust_colour_depth_from_display_info(timing_out, info) &&
5332                     drm_mode_is_420_also(info, mode_in) &&
5333                     timing_out->pixel_encoding != PIXEL_ENCODING_YCBCR420) {
5334                         timing_out->pixel_encoding = PIXEL_ENCODING_YCBCR420;
5335                         adjust_colour_depth_from_display_info(timing_out, info);
5336                 }
5337         }
5338 }
5339
5340 static void fill_audio_info(struct audio_info *audio_info,
5341                             const struct drm_connector *drm_connector,
5342                             const struct dc_sink *dc_sink)
5343 {
5344         int i = 0;
5345         int cea_revision = 0;
5346         const struct dc_edid_caps *edid_caps = &dc_sink->edid_caps;
5347
5348         audio_info->manufacture_id = edid_caps->manufacturer_id;
5349         audio_info->product_id = edid_caps->product_id;
5350
5351         cea_revision = drm_connector->display_info.cea_rev;
5352
5353         strscpy(audio_info->display_name,
5354                 edid_caps->display_name,
5355                 AUDIO_INFO_DISPLAY_NAME_SIZE_IN_CHARS);
5356
5357         if (cea_revision >= 3) {
5358                 audio_info->mode_count = edid_caps->audio_mode_count;
5359
5360                 for (i = 0; i < audio_info->mode_count; ++i) {
5361                         audio_info->modes[i].format_code =
5362                                         (enum audio_format_code)
5363                                         (edid_caps->audio_modes[i].format_code);
5364                         audio_info->modes[i].channel_count =
5365                                         edid_caps->audio_modes[i].channel_count;
5366                         audio_info->modes[i].sample_rates.all =
5367                                         edid_caps->audio_modes[i].sample_rate;
5368                         audio_info->modes[i].sample_size =
5369                                         edid_caps->audio_modes[i].sample_size;
5370                 }
5371         }
5372
5373         audio_info->flags.all = edid_caps->speaker_flags;
5374
5375         /* TODO: We only check for progressive mode; check for interlace mode too. */
5376         if (drm_connector->latency_present[0]) {
5377                 audio_info->video_latency = drm_connector->video_latency[0];
5378                 audio_info->audio_latency = drm_connector->audio_latency[0];
5379         }
5380
5381         /* TODO: For DP, video and audio latency should be calculated from DPCD caps */
5382
5383 }
5384
5385 static void
5386 copy_crtc_timing_for_drm_display_mode(const struct drm_display_mode *src_mode,
5387                                       struct drm_display_mode *dst_mode)
5388 {
5389         dst_mode->crtc_hdisplay = src_mode->crtc_hdisplay;
5390         dst_mode->crtc_vdisplay = src_mode->crtc_vdisplay;
5391         dst_mode->crtc_clock = src_mode->crtc_clock;
5392         dst_mode->crtc_hblank_start = src_mode->crtc_hblank_start;
5393         dst_mode->crtc_hblank_end = src_mode->crtc_hblank_end;
5394         dst_mode->crtc_hsync_start =  src_mode->crtc_hsync_start;
5395         dst_mode->crtc_hsync_end = src_mode->crtc_hsync_end;
5396         dst_mode->crtc_htotal = src_mode->crtc_htotal;
5397         dst_mode->crtc_hskew = src_mode->crtc_hskew;
5398         dst_mode->crtc_vblank_start = src_mode->crtc_vblank_start;
5399         dst_mode->crtc_vblank_end = src_mode->crtc_vblank_end;
5400         dst_mode->crtc_vsync_start = src_mode->crtc_vsync_start;
5401         dst_mode->crtc_vsync_end = src_mode->crtc_vsync_end;
5402         dst_mode->crtc_vtotal = src_mode->crtc_vtotal;
5403 }
5404
5405 static void
5406 decide_crtc_timing_for_drm_display_mode(struct drm_display_mode *drm_mode,
5407                                         const struct drm_display_mode *native_mode,
5408                                         bool scale_enabled)
5409 {
5410         if (scale_enabled) {
5411                 copy_crtc_timing_for_drm_display_mode(native_mode, drm_mode);
5412         } else if (native_mode->clock == drm_mode->clock &&
5413                         native_mode->htotal == drm_mode->htotal &&
5414                         native_mode->vtotal == drm_mode->vtotal) {
5415                 copy_crtc_timing_for_drm_display_mode(native_mode, drm_mode);
5416         } else {
5417                 /* no scaling and no amdgpu-inserted mode, so nothing to patch */
5418         }
5419 }
5420
5421 static struct dc_sink *
5422 create_fake_sink(struct amdgpu_dm_connector *aconnector)
5423 {
5424         struct dc_sink_init_data sink_init_data = { 0 };
5425         struct dc_sink *sink = NULL;
5426         sink_init_data.link = aconnector->dc_link;
5427         sink_init_data.sink_signal = aconnector->dc_link->connector_signal;
5428
5429         sink = dc_sink_create(&sink_init_data);
5430         if (!sink) {
5431                 DRM_ERROR("Failed to create sink!\n");
5432                 return NULL;
5433         }
5434         sink->sink_signal = SIGNAL_TYPE_VIRTUAL;
5435
5436         return sink;
5437 }
5438
5439 static void set_multisync_trigger_params(
5440                 struct dc_stream_state *stream)
5441 {
5442         struct dc_stream_state *master = NULL;
5443
5444         if (stream->triggered_crtc_reset.enabled) {
5445                 master = stream->triggered_crtc_reset.event_source;
5446                 stream->triggered_crtc_reset.event =
5447                         master->timing.flags.VSYNC_POSITIVE_POLARITY ?
5448                         CRTC_EVENT_VSYNC_RISING : CRTC_EVENT_VSYNC_FALLING;
5449                 stream->triggered_crtc_reset.delay = TRIGGER_DELAY_NEXT_PIXEL;
5450         }
5451 }
5452
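/*
 * Pick the stream with the highest refresh rate as the multisync master
 * and point every synchronized stream's reset event source at it.
 *
 * pix_clk_100hz is in 100 Hz units, so the refresh rate in Hz below is
 * (pix_clk_100hz * 100) / (h_total * v_total); e.g. for 1080p60 CEA
 * timing: 1485000 * 100 / (2200 * 1125) = 60.
 */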
5453 static void set_master_stream(struct dc_stream_state *stream_set[],
5454                               int stream_count)
5455 {
5456         int j, highest_rfr = 0, master_stream = 0;
5457
5458         for (j = 0; j < stream_count; j++) {
5459                 if (stream_set[j] && stream_set[j]->triggered_crtc_reset.enabled) {
5460                         int refresh_rate = 0;
5461
5462                         refresh_rate = (stream_set[j]->timing.pix_clk_100hz * 100) /
5463                                 (stream_set[j]->timing.h_total * stream_set[j]->timing.v_total);
5464                         if (refresh_rate > highest_rfr) {
5465                                 highest_rfr = refresh_rate;
5466                                 master_stream = j;
5467                         }
5468                 }
5469         }
5470         for (j = 0; j < stream_count; j++) {
5471                 if (stream_set[j])
5472                         stream_set[j]->triggered_crtc_reset.event_source = stream_set[master_stream];
5473         }
5474 }
5475
5476 static void dm_enable_per_frame_crtc_master_sync(struct dc_state *context)
5477 {
5478         int i = 0;
5479         struct dc_stream_state *stream;
5480
5481         if (context->stream_count < 2)
5482                 return;
5483         for (i = 0; i < context->stream_count; i++) {
5484                 if (!context->streams[i])
5485                         continue;
5486                 /*
5487                  * TODO: add a function to read AMD VSDB bits and set
5488                  * crtc_sync_master.multi_sync_enabled flag
5489                  * For now it's set to false
5490                  */
5491         }
5492
5493         set_master_stream(context->streams, context->stream_count);
5494
5495         for (i = 0; i < context->stream_count; i++) {
5496                 stream = context->streams[i];
5497
5498                 if (!stream)
5499                         continue;
5500
5501                 set_multisync_trigger_params(stream);
5502         }
5503 }
5504
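/*
 * Return the mode to use as the freesync video base: the cached
 * freesync_vid_base if one was already chosen, otherwise the mode with the
 * highest refresh rate at the preferred mode's resolution. The result is
 * cached in the connector for subsequent calls.
 */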
5505 static struct drm_display_mode *
5506 get_highest_refresh_rate_mode(struct amdgpu_dm_connector *aconnector,
5507                           bool use_probed_modes)
5508 {
5509         struct drm_display_mode *m, *m_pref = NULL;
5510         u16 current_refresh, highest_refresh;
5511         struct list_head *list_head = use_probed_modes ?
5512                                                     &aconnector->base.probed_modes :
5513                                                     &aconnector->base.modes;
5514
5515         if (aconnector->freesync_vid_base.clock != 0)
5516                 return &aconnector->freesync_vid_base;
5517
5518         /* Find the preferred mode */
5519         list_for_each_entry(m, list_head, head) {
5520                 if (m->type & DRM_MODE_TYPE_PREFERRED) {
5521                         m_pref = m;
5522                         break;
5523                 }
5524         }
5525
5526         if (!m_pref) {
5527                 /* Probably an EDID with no preferred mode. Fall back to the first entry. */
5528                 m_pref = list_first_entry_or_null(
5529                         &aconnector->base.modes, struct drm_display_mode, head);
5530                 if (!m_pref) {
5531                         DRM_DEBUG_DRIVER("No preferred mode found in EDID\n");
5532                         return NULL;
5533                 }
5534         }
5535
5536         highest_refresh = drm_mode_vrefresh(m_pref);
5537
5538         /*
5539          * Find the mode with highest refresh rate with same resolution.
5540          * For some monitors, preferred mode is not the mode with highest
5541          * supported refresh rate.
5542          */
5543         list_for_each_entry(m, list_head, head) {
5544                 current_refresh = drm_mode_vrefresh(m);
5545
5546                 if (m->hdisplay == m_pref->hdisplay &&
5547                     m->vdisplay == m_pref->vdisplay &&
5548                     highest_refresh < current_refresh) {
5549                         highest_refresh = current_refresh;
5550                         m_pref = m;
5551                 }
5552         }
5553
5554         aconnector->freesync_vid_base = *m_pref;
5555         return m_pref;
5556 }
5557
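/*
 * A mode qualifies as a freesync video mode when it matches the base
 * (highest refresh rate) mode in everything except vertical blanking: the
 * vsync start/end offsets must equal the vtotal delta, i.e. only the
 * vertical front porch may differ.
 */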
5558 static bool is_freesync_video_mode(const struct drm_display_mode *mode,
5559                                    struct amdgpu_dm_connector *aconnector)
5560 {
5561         struct drm_display_mode *high_mode;
5562         int timing_diff;
5563
5564         high_mode = get_highest_refresh_rate_mode(aconnector, false);
5565         if (!high_mode || !mode)
5566                 return false;
5567
5568         timing_diff = high_mode->vtotal - mode->vtotal;
5569
5570         if (high_mode->clock == 0 || high_mode->clock != mode->clock ||
5571             high_mode->hdisplay != mode->hdisplay ||
5572             high_mode->vdisplay != mode->vdisplay ||
5573             high_mode->hsync_start != mode->hsync_start ||
5574             high_mode->hsync_end != mode->hsync_end ||
5575             high_mode->htotal != mode->htotal ||
5576             high_mode->hskew != mode->hskew ||
5577             high_mode->vscan != mode->vscan ||
5578             high_mode->vsync_start - mode->vsync_start != timing_diff ||
5579             high_mode->vsync_end - mode->vsync_end != timing_diff)
5580                 return false;
5581         else
5582                 return true;
5583 }
5584
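/*
 * Build a dc_stream_state for the given connector and mode: create (or
 * retain) the sink, derive the CRTC timing from either the freesync base
 * mode or the preferred/native mode, then fill in the stream properties,
 * the DSC configuration for capable DP sinks, the audio info and the
 * PSR/VSC infopackets.
 */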
5585 static struct dc_stream_state *
5586 create_stream_for_sink(struct amdgpu_dm_connector *aconnector,
5587                        const struct drm_display_mode *drm_mode,
5588                        const struct dm_connector_state *dm_state,
5589                        const struct dc_stream_state *old_stream,
5590                        int requested_bpc)
5591 {
5592         struct drm_display_mode *preferred_mode = NULL;
5593         struct drm_connector *drm_connector;
5594         const struct drm_connector_state *con_state =
5595                 dm_state ? &dm_state->base : NULL;
5596         struct dc_stream_state *stream = NULL;
5597         struct drm_display_mode mode = *drm_mode;
5598         struct drm_display_mode saved_mode;
5599         struct drm_display_mode *freesync_mode = NULL;
5600         bool native_mode_found = false;
5601         bool recalculate_timing = dm_state ? (dm_state->scaling != RMX_OFF) : false;
5602         int mode_refresh;
5603         int preferred_refresh = 0;
5604 #if defined(CONFIG_DRM_AMD_DC_DCN)
5605         struct dsc_dec_dpcd_caps dsc_caps;
5606         uint32_t link_bandwidth_kbps;
5607 #endif
5608         struct dc_sink *sink = NULL;
5609
5610         memset(&saved_mode, 0, sizeof(saved_mode));
5611
5612         if (aconnector == NULL) {
5613                 DRM_ERROR("aconnector is NULL!\n");
5614                 return stream;
5615         }
5616
5617         drm_connector = &aconnector->base;
5618
5619         if (!aconnector->dc_sink) {
5620                 sink = create_fake_sink(aconnector);
5621                 if (!sink)
5622                         return stream;
5623         } else {
5624                 sink = aconnector->dc_sink;
5625                 dc_sink_retain(sink);
5626         }
5627
5628         stream = dc_create_stream_for_sink(sink);
5629
5630         if (stream == NULL) {
5631                 DRM_ERROR("Failed to create stream for sink!\n");
5632                 goto finish;
5633         }
5634
5635         stream->dm_stream_context = aconnector;
5636
5637         stream->timing.flags.LTE_340MCSC_SCRAMBLE =
5638                 drm_connector->display_info.hdmi.scdc.scrambling.low_rates;
5639
5640         list_for_each_entry(preferred_mode, &aconnector->base.modes, head) {
5641                 /* Search for preferred mode */
5642                 if (preferred_mode->type & DRM_MODE_TYPE_PREFERRED) {
5643                         native_mode_found = true;
5644                         break;
5645                 }
5646         }
5647         if (!native_mode_found)
5648                 preferred_mode = list_first_entry_or_null(
5649                                 &aconnector->base.modes,
5650                                 struct drm_display_mode,
5651                                 head);
5652
5653         mode_refresh = drm_mode_vrefresh(&mode);
5654
5655         if (preferred_mode == NULL) {
5656                 /*
5657                  * This may not be an error, the use case is when we have no
5658                  * usermode calls to reset and set mode upon hotplug. In this
5659                  * case, we call set mode ourselves to restore the previous mode
5660                  * and the mode list may not have been filled in yet.
5661                  */
5662                 DRM_DEBUG_DRIVER("No preferred mode found\n");
5663         } else {
5664                 recalculate_timing |= amdgpu_freesync_vid_mode &&
5665                                  is_freesync_video_mode(&mode, aconnector);
5666                 if (recalculate_timing) {
5667                         freesync_mode = get_highest_refresh_rate_mode(aconnector, false);
5668                         saved_mode = mode;
5669                         mode = *freesync_mode;
5670                 } else {
5671                         decide_crtc_timing_for_drm_display_mode(
5672                                 &mode, preferred_mode,
5673                                 dm_state ? (dm_state->scaling != RMX_OFF) : false);
5674                 }
5675
5676                 preferred_refresh = drm_mode_vrefresh(preferred_mode);
5677         }
5678
5679         if (recalculate_timing)
5680                 drm_mode_set_crtcinfo(&saved_mode, 0);
5681         else if (!dm_state)
5682                 drm_mode_set_crtcinfo(&mode, 0);
5683
5684         /*
5685          * If scaling is enabled and the refresh rate didn't change,
5686          * we copy the VIC and polarities from the old timings.
5687          */
5688         if (!recalculate_timing || mode_refresh != preferred_refresh)
5689                 fill_stream_properties_from_drm_display_mode(
5690                         stream, &mode, &aconnector->base, con_state, NULL,
5691                         requested_bpc);
5692         else
5693                 fill_stream_properties_from_drm_display_mode(
5694                         stream, &mode, &aconnector->base, con_state, old_stream,
5695                         requested_bpc);
5696
5697         stream->timing.flags.DSC = 0;
5698
5699         if (aconnector->dc_link && sink->sink_signal == SIGNAL_TYPE_DISPLAY_PORT) {
5700 #if defined(CONFIG_DRM_AMD_DC_DCN)
5701                 dc_dsc_parse_dsc_dpcd(aconnector->dc_link->ctx->dc,
5702                                       aconnector->dc_link->dpcd_caps.dsc_caps.dsc_basic_caps.raw,
5703                                       aconnector->dc_link->dpcd_caps.dsc_caps.dsc_branch_decoder_caps.raw,
5704                                       &dsc_caps);
5705                 link_bandwidth_kbps = dc_link_bandwidth_kbps(aconnector->dc_link,
5706                                                              dc_link_get_link_cap(aconnector->dc_link));
5707
5708                 if (aconnector->dsc_settings.dsc_force_enable != DSC_CLK_FORCE_DISABLE && dsc_caps.is_dsc_supported) {
5709                         /* Set DSC policy according to dsc_clock_en */
5710                         dc_dsc_policy_set_enable_dsc_when_not_needed(
5711                                 aconnector->dsc_settings.dsc_force_enable == DSC_CLK_FORCE_ENABLE);
5712
5713                         if (dc_dsc_compute_config(aconnector->dc_link->ctx->dc->res_pool->dscs[0],
5714                                                   &dsc_caps,
5715                                                   aconnector->dc_link->ctx->dc->debug.dsc_min_slice_height_override,
5716                                                   0,
5717                                                   link_bandwidth_kbps,
5718                                                   &stream->timing,
5719                                                   &stream->timing.dsc_cfg))
5720                                 stream->timing.flags.DSC = 1;
5721                         /* Overwrite the stream flag if DSC is enabled through debugfs */
5722                         if (aconnector->dsc_settings.dsc_force_enable == DSC_CLK_FORCE_ENABLE)
5723                                 stream->timing.flags.DSC = 1;
5724
5725                         if (stream->timing.flags.DSC && aconnector->dsc_settings.dsc_num_slices_h)
5726                                 stream->timing.dsc_cfg.num_slices_h = aconnector->dsc_settings.dsc_num_slices_h;
5727
5728                         if (stream->timing.flags.DSC && aconnector->dsc_settings.dsc_num_slices_v)
5729                                 stream->timing.dsc_cfg.num_slices_v = aconnector->dsc_settings.dsc_num_slices_v;
5730
5731                         if (stream->timing.flags.DSC && aconnector->dsc_settings.dsc_bits_per_pixel)
5732                                 stream->timing.dsc_cfg.bits_per_pixel = aconnector->dsc_settings.dsc_bits_per_pixel;
5733                 }
5734 #endif
5735         }
5736
5737         update_stream_scaling_settings(&mode, dm_state, stream);
5738
5739         fill_audio_info(
5740                 &stream->audio_info,
5741                 drm_connector,
5742                 sink);
5743
5744         update_stream_signal(stream, sink);
5745
5746         if (stream->signal == SIGNAL_TYPE_HDMI_TYPE_A)
5747                 mod_build_hf_vsif_infopacket(stream, &stream->vsp_infopacket);
5748
5749         if (stream->link->psr_settings.psr_feature_enabled) {
5750                 /*
5751                  * Decide whether the stream supports the VSC SDP colorimetry
5752                  * capability before building the VSC info packet.
5753                  */
5754                 stream->use_vsc_sdp_for_colorimetry = false;
5755                 if (aconnector->dc_sink->sink_signal == SIGNAL_TYPE_DISPLAY_PORT_MST) {
5756                         stream->use_vsc_sdp_for_colorimetry =
5757                                 aconnector->dc_sink->is_vsc_sdp_colorimetry_supported;
5758                 } else {
5759                         if (stream->link->dpcd_caps.dprx_feature.bits.VSC_SDP_COLORIMETRY_SUPPORTED)
5760                                 stream->use_vsc_sdp_for_colorimetry = true;
5761                 }
5762                 mod_build_vsc_infopacket(stream, &stream->vsc_infopacket);
5763         }
5764 finish:
5765         dc_sink_release(sink);
5766
5767         return stream;
5768 }
5769
5770 static void amdgpu_dm_crtc_destroy(struct drm_crtc *crtc)
5771 {
5772         drm_crtc_cleanup(crtc);
5773         kfree(crtc);
5774 }
5775
5776 static void dm_crtc_destroy_state(struct drm_crtc *crtc,
5777                                   struct drm_crtc_state *state)
5778 {
5779         struct dm_crtc_state *cur = to_dm_crtc_state(state);
5780
5781         /* TODO: Destroy dc_stream objects once the stream object is flattened */
5782         if (cur->stream)
5783                 dc_stream_release(cur->stream);
5784
5786         __drm_atomic_helper_crtc_destroy_state(state);
5787
5789         kfree(state);
5790 }
5791
5792 static void dm_crtc_reset_state(struct drm_crtc *crtc)
5793 {
5794         struct dm_crtc_state *state;
5795
5796         if (crtc->state)
5797                 dm_crtc_destroy_state(crtc, crtc->state);
5798
5799         state = kzalloc(sizeof(*state), GFP_KERNEL);
5800         if (WARN_ON(!state))
5801                 return;
5802
5803         __drm_atomic_helper_crtc_reset(crtc, &state->base);
5804 }
5805
5806 static struct drm_crtc_state *
5807 dm_crtc_duplicate_state(struct drm_crtc *crtc)
5808 {
5809         struct dm_crtc_state *state, *cur;
5810
5811         if (WARN_ON(!crtc->state))
5812                 return NULL;
5813
5814         cur = to_dm_crtc_state(crtc->state);
5815
5816         state = kzalloc(sizeof(*state), GFP_KERNEL);
5817         if (!state)
5818                 return NULL;
5819
5820         __drm_atomic_helper_crtc_duplicate_state(crtc, &state->base);
5821
5822         if (cur->stream) {
5823                 state->stream = cur->stream;
5824                 dc_stream_retain(state->stream);
5825         }
5826
5827         state->active_planes = cur->active_planes;
5828         state->vrr_infopacket = cur->vrr_infopacket;
5829         state->abm_level = cur->abm_level;
5830         state->vrr_supported = cur->vrr_supported;
5831         state->freesync_config = cur->freesync_config;
5832         state->cm_has_degamma = cur->cm_has_degamma;
5833         state->cm_is_degamma_srgb = cur->cm_is_degamma_srgb;
5834         /* TODO: Duplicate dc_stream once the stream object is flattened */
5835
5836         return &state->base;
5837 }
5838
5839 #ifdef CONFIG_DRM_AMD_SECURE_DISPLAY
5840 static int amdgpu_dm_crtc_late_register(struct drm_crtc *crtc)
5841 {
5842         crtc_debugfs_init(crtc);
5843
5844         return 0;
5845 }
5846 #endif
5847
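/*
 * Enable or disable the VUPDATE interrupt for the CRTC's OTG instance;
 * per dm_set_vblank() below, this interrupt is only needed while VRR is
 * active. Returns -EBUSY when DC rejects the change.
 */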
5848 static inline int dm_set_vupdate_irq(struct drm_crtc *crtc, bool enable)
5849 {
5850         enum dc_irq_source irq_source;
5851         struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
5852         struct amdgpu_device *adev = drm_to_adev(crtc->dev);
5853         int rc;
5854
5855         irq_source = IRQ_TYPE_VUPDATE + acrtc->otg_inst;
5856
5857         rc = dc_interrupt_set(adev->dm.dc, irq_source, enable) ? 0 : -EBUSY;
5858
5859         DRM_DEBUG_VBL("crtc %d - vupdate irq %sabling: r=%d\n",
5860                       acrtc->crtc_id, enable ? "en" : "dis", rc);
5861         return rc;
5862 }
5863
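/*
 * Enable or disable the VBLANK interrupt, keeping the VUPDATE interrupt
 * on only while VRR is active. On DCN the new state is also queued to the
 * vblank workqueue's MALL work item, which applies it asynchronously.
 */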
5864 static inline int dm_set_vblank(struct drm_crtc *crtc, bool enable)
5865 {
5866         enum dc_irq_source irq_source;
5867         struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
5868         struct amdgpu_device *adev = drm_to_adev(crtc->dev);
5869         struct dm_crtc_state *acrtc_state = to_dm_crtc_state(crtc->state);
5870 #if defined(CONFIG_DRM_AMD_DC_DCN)
5871         struct amdgpu_display_manager *dm = &adev->dm;
5872         unsigned long flags;
5873 #endif
5874         int rc = 0;
5875
5876         if (enable) {
5877                 /* vblank irq on -> Only need vupdate irq in vrr mode */
5878                 if (amdgpu_dm_vrr_active(acrtc_state))
5879                         rc = dm_set_vupdate_irq(crtc, true);
5880         } else {
5881                 /* vblank irq off -> vupdate irq off */
5882                 rc = dm_set_vupdate_irq(crtc, false);
5883         }
5884
5885         if (rc)
5886                 return rc;
5887
5888         irq_source = IRQ_TYPE_VBLANK + acrtc->otg_inst;
5889
5890         if (!dc_interrupt_set(adev->dm.dc, irq_source, enable))
5891                 return -EBUSY;
5892
5893         if (amdgpu_in_reset(adev))
5894                 return 0;
5895
5896 #if defined(CONFIG_DRM_AMD_DC_DCN)
5897         spin_lock_irqsave(&dm->vblank_lock, flags);
5898         dm->vblank_workqueue->dm = dm;
5899         dm->vblank_workqueue->otg_inst = acrtc->otg_inst;
5900         dm->vblank_workqueue->enable = enable;
5901         spin_unlock_irqrestore(&dm->vblank_lock, flags);
5902         schedule_work(&dm->vblank_workqueue->mall_work);
5903 #endif
5904
5905         return 0;
5906 }
5907
5908 static int dm_enable_vblank(struct drm_crtc *crtc)
5909 {
5910         return dm_set_vblank(crtc, true);
5911 }
5912
5913 static void dm_disable_vblank(struct drm_crtc *crtc)
5914 {
5915         dm_set_vblank(crtc, false);
5916 }
5917
5918 /* Implemented only the options currently available for the driver */
5919 static const struct drm_crtc_funcs amdgpu_dm_crtc_funcs = {
5920         .reset = dm_crtc_reset_state,
5921         .destroy = amdgpu_dm_crtc_destroy,
5922         .set_config = drm_atomic_helper_set_config,
5923         .page_flip = drm_atomic_helper_page_flip,
5924         .atomic_duplicate_state = dm_crtc_duplicate_state,
5925         .atomic_destroy_state = dm_crtc_destroy_state,
5926         .set_crc_source = amdgpu_dm_crtc_set_crc_source,
5927         .verify_crc_source = amdgpu_dm_crtc_verify_crc_source,
5928         .get_crc_sources = amdgpu_dm_crtc_get_crc_sources,
5929         .get_vblank_counter = amdgpu_get_vblank_counter_kms,
5930         .enable_vblank = dm_enable_vblank,
5931         .disable_vblank = dm_disable_vblank,
5932         .get_vblank_timestamp = drm_crtc_vblank_helper_get_vblank_timestamp,
5933 #if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
5934         .late_register = amdgpu_dm_crtc_late_register,
5935 #endif
5936 };
5937
5938 static enum drm_connector_status
5939 amdgpu_dm_connector_detect(struct drm_connector *connector, bool force)
5940 {
5941         bool connected;
5942         struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
5943
5944         /*
5945          * Notes:
5946          * 1. This interface is NOT called in context of HPD irq.
5947          * 2. This interface *is called* in context of a user-mode ioctl, which
5948          * makes it a bad place for *any* MST-related activity.
5949          */
5950
5951         if (aconnector->base.force == DRM_FORCE_UNSPECIFIED &&
5952             !aconnector->fake_enable)
5953                 connected = (aconnector->dc_sink != NULL);
5954         else
5955                 connected = (aconnector->base.force == DRM_FORCE_ON);
5956
5957         update_subconnector_property(aconnector);
5958
5959         return (connected ? connector_status_connected :
5960                         connector_status_disconnected);
5961 }
5962
5963 int amdgpu_dm_connector_atomic_set_property(struct drm_connector *connector,
5964                                             struct drm_connector_state *connector_state,
5965                                             struct drm_property *property,
5966                                             uint64_t val)
5967 {
5968         struct drm_device *dev = connector->dev;
5969         struct amdgpu_device *adev = drm_to_adev(dev);
5970         struct dm_connector_state *dm_old_state =
5971                 to_dm_connector_state(connector->state);
5972         struct dm_connector_state *dm_new_state =
5973                 to_dm_connector_state(connector_state);
5974
5975         int ret = -EINVAL;
5976
5977         if (property == dev->mode_config.scaling_mode_property) {
5978                 enum amdgpu_rmx_type rmx_type;
5979
5980                 switch (val) {
5981                 case DRM_MODE_SCALE_CENTER:
5982                         rmx_type = RMX_CENTER;
5983                         break;
5984                 case DRM_MODE_SCALE_ASPECT:
5985                         rmx_type = RMX_ASPECT;
5986                         break;
5987                 case DRM_MODE_SCALE_FULLSCREEN:
5988                         rmx_type = RMX_FULL;
5989                         break;
5990                 case DRM_MODE_SCALE_NONE:
5991                 default:
5992                         rmx_type = RMX_OFF;
5993                         break;
5994                 }
5995
5996                 if (dm_old_state->scaling == rmx_type)
5997                         return 0;
5998
5999                 dm_new_state->scaling = rmx_type;
6000                 ret = 0;
6001         } else if (property == adev->mode_info.underscan_hborder_property) {
6002                 dm_new_state->underscan_hborder = val;
6003                 ret = 0;
6004         } else if (property == adev->mode_info.underscan_vborder_property) {
6005                 dm_new_state->underscan_vborder = val;
6006                 ret = 0;
6007         } else if (property == adev->mode_info.underscan_property) {
6008                 dm_new_state->underscan_enable = val;
6009                 ret = 0;
6010         } else if (property == adev->mode_info.abm_level_property) {
6011                 dm_new_state->abm_level = val;
6012                 ret = 0;
6013         }
6014
6015         return ret;
6016 }
6017
6018 int amdgpu_dm_connector_atomic_get_property(struct drm_connector *connector,
6019                                             const struct drm_connector_state *state,
6020                                             struct drm_property *property,
6021                                             uint64_t *val)
6022 {
6023         struct drm_device *dev = connector->dev;
6024         struct amdgpu_device *adev = drm_to_adev(dev);
6025         struct dm_connector_state *dm_state =
6026                 to_dm_connector_state(state);
6027         int ret = -EINVAL;
6028
6029         if (property == dev->mode_config.scaling_mode_property) {
6030                 switch (dm_state->scaling) {
6031                 case RMX_CENTER:
6032                         *val = DRM_MODE_SCALE_CENTER;
6033                         break;
6034                 case RMX_ASPECT:
6035                         *val = DRM_MODE_SCALE_ASPECT;
6036                         break;
6037                 case RMX_FULL:
6038                         *val = DRM_MODE_SCALE_FULLSCREEN;
6039                         break;
6040                 case RMX_OFF:
6041                 default:
6042                         *val = DRM_MODE_SCALE_NONE;
6043                         break;
6044                 }
6045                 ret = 0;
6046         } else if (property == adev->mode_info.underscan_hborder_property) {
6047                 *val = dm_state->underscan_hborder;
6048                 ret = 0;
6049         } else if (property == adev->mode_info.underscan_vborder_property) {
6050                 *val = dm_state->underscan_vborder;
6051                 ret = 0;
6052         } else if (property == adev->mode_info.underscan_property) {
6053                 *val = dm_state->underscan_enable;
6054                 ret = 0;
6055         } else if (property == adev->mode_info.abm_level_property) {
6056                 *val = dm_state->abm_level;
6057                 ret = 0;
6058         }
6059
6060         return ret;
6061 }
6062
6063 static void amdgpu_dm_connector_unregister(struct drm_connector *connector)
6064 {
6065         struct amdgpu_dm_connector *amdgpu_dm_connector = to_amdgpu_dm_connector(connector);
6066
6067         drm_dp_aux_unregister(&amdgpu_dm_connector->dm_dp_aux.aux);
6068 }
6069
6070 static void amdgpu_dm_connector_destroy(struct drm_connector *connector)
6071 {
6072         struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
6073         const struct dc_link *link = aconnector->dc_link;
6074         struct amdgpu_device *adev = drm_to_adev(connector->dev);
6075         struct amdgpu_display_manager *dm = &adev->dm;
6076
6077         /*
6078          * Call this only if mst_mgr was initialized before, since it's not done
6079          * for all connector types.
6080          */
6081         if (aconnector->mst_mgr.dev)
6082                 drm_dp_mst_topology_mgr_destroy(&aconnector->mst_mgr);
6083
6084 #if defined(CONFIG_BACKLIGHT_CLASS_DEVICE) ||\
6085         defined(CONFIG_BACKLIGHT_CLASS_DEVICE_MODULE)
6086
6087         if ((link->connector_signal & (SIGNAL_TYPE_EDP | SIGNAL_TYPE_LVDS)) &&
6088             link->type != dc_connection_none &&
6089             dm->backlight_dev) {
6090                 backlight_device_unregister(dm->backlight_dev);
6091                 dm->backlight_dev = NULL;
6092         }
6093 #endif
6094
6095         if (aconnector->dc_em_sink)
6096                 dc_sink_release(aconnector->dc_em_sink);
6097         aconnector->dc_em_sink = NULL;
6098         if (aconnector->dc_sink)
6099                 dc_sink_release(aconnector->dc_sink);
6100         aconnector->dc_sink = NULL;
6101
6102         drm_dp_cec_unregister_connector(&aconnector->dm_dp_aux.aux);
6103         drm_connector_unregister(connector);
6104         drm_connector_cleanup(connector);
6105         if (aconnector->i2c) {
6106                 i2c_del_adapter(&aconnector->i2c->base);
6107                 kfree(aconnector->i2c);
6108         }
6109         kfree(aconnector->dm_dp_aux.aux.name);
6110
6111         kfree(connector);
6112 }
6113
6114 void amdgpu_dm_connector_funcs_reset(struct drm_connector *connector)
6115 {
6116         struct dm_connector_state *state =
6117                 to_dm_connector_state(connector->state);
6118
6119         if (connector->state)
6120                 __drm_atomic_helper_connector_destroy_state(connector->state);
6121
6122         kfree(state);
6123
6124         state = kzalloc(sizeof(*state), GFP_KERNEL);
6125
6126         if (state) {
6127                 state->scaling = RMX_OFF;
6128                 state->underscan_enable = false;
6129                 state->underscan_hborder = 0;
6130                 state->underscan_vborder = 0;
6131                 state->base.max_requested_bpc = 8;
6132                 state->vcpi_slots = 0;
6133                 state->pbn = 0;
6134                 if (connector->connector_type == DRM_MODE_CONNECTOR_eDP)
6135                         state->abm_level = amdgpu_dm_abm_level;
6136
6137                 __drm_atomic_helper_connector_reset(connector, &state->base);
6138         }
6139 }
6140
6141 struct drm_connector_state *
6142 amdgpu_dm_connector_atomic_duplicate_state(struct drm_connector *connector)
6143 {
6144         struct dm_connector_state *state =
6145                 to_dm_connector_state(connector->state);
6146
6147         struct dm_connector_state *new_state =
6148                         kmemdup(state, sizeof(*state), GFP_KERNEL);
6149
6150         if (!new_state)
6151                 return NULL;
6152
6153         __drm_atomic_helper_connector_duplicate_state(connector, &new_state->base);
6154
6155         new_state->freesync_capable = state->freesync_capable;
6156         new_state->abm_level = state->abm_level;
6157         new_state->scaling = state->scaling;
6158         new_state->underscan_enable = state->underscan_enable;
6159         new_state->underscan_hborder = state->underscan_hborder;
6160         new_state->underscan_vborder = state->underscan_vborder;
6161         new_state->vcpi_slots = state->vcpi_slots;
6162         new_state->pbn = state->pbn;
6163         return &new_state->base;
6164 }
6165
6166 static int
6167 amdgpu_dm_connector_late_register(struct drm_connector *connector)
6168 {
6169         struct amdgpu_dm_connector *amdgpu_dm_connector =
6170                 to_amdgpu_dm_connector(connector);
6171         int r;
6172
6173         if ((connector->connector_type == DRM_MODE_CONNECTOR_DisplayPort) ||
6174             (connector->connector_type == DRM_MODE_CONNECTOR_eDP)) {
6175                 amdgpu_dm_connector->dm_dp_aux.aux.dev = connector->kdev;
6176                 r = drm_dp_aux_register(&amdgpu_dm_connector->dm_dp_aux.aux);
6177                 if (r)
6178                         return r;
6179         }
6180
6181 #if defined(CONFIG_DEBUG_FS)
6182         connector_debugfs_init(amdgpu_dm_connector);
6183 #endif
6184
6185         return 0;
6186 }
6187
6188 static const struct drm_connector_funcs amdgpu_dm_connector_funcs = {
6189         .reset = amdgpu_dm_connector_funcs_reset,
6190         .detect = amdgpu_dm_connector_detect,
6191         .fill_modes = drm_helper_probe_single_connector_modes,
6192         .destroy = amdgpu_dm_connector_destroy,
6193         .atomic_duplicate_state = amdgpu_dm_connector_atomic_duplicate_state,
6194         .atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
6195         .atomic_set_property = amdgpu_dm_connector_atomic_set_property,
6196         .atomic_get_property = amdgpu_dm_connector_atomic_get_property,
6197         .late_register = amdgpu_dm_connector_late_register,
6198         .early_unregister = amdgpu_dm_connector_unregister
6199 };
6200
6201 static int get_modes(struct drm_connector *connector)
6202 {
6203         return amdgpu_dm_connector_get_modes(connector);
6204 }
6205
6206 static void create_eml_sink(struct amdgpu_dm_connector *aconnector)
6207 {
6208         struct dc_sink_init_data init_params = {
6209                         .link = aconnector->dc_link,
6210                         .sink_signal = SIGNAL_TYPE_VIRTUAL
6211         };
6212         struct edid *edid;
6213
6214         if (!aconnector->base.edid_blob_ptr) {
6215                 DRM_ERROR("No EDID firmware found on connector: %s, forcing to OFF!\n",
6216                                 aconnector->base.name);
6217
6218                 aconnector->base.force = DRM_FORCE_OFF;
6219                 aconnector->base.override_edid = false;
6220                 return;
6221         }
6222
6223         edid = (struct edid *) aconnector->base.edid_blob_ptr->data;
6224
6225         aconnector->edid = edid;
6226
6227         aconnector->dc_em_sink = dc_link_add_remote_sink(
6228                 aconnector->dc_link,
6229                 (uint8_t *)edid,
6230                 (edid->extensions + 1) * EDID_LENGTH,
6231                 &init_params);
6232
6233         if (aconnector->base.force == DRM_FORCE_ON) {
6234                 aconnector->dc_sink = aconnector->dc_link->local_sink ?
6235                 aconnector->dc_link->local_sink :
6236                 aconnector->dc_em_sink;
6237                 dc_sink_retain(aconnector->dc_sink);
6238         }
6239 }
6240
6241 static void handle_edid_mgmt(struct amdgpu_dm_connector *aconnector)
6242 {
6243         struct dc_link *link = (struct dc_link *)aconnector->dc_link;
6244
6245         /*
6246          * In case of a headless boot with force-on for a DP managed connector,
6247          * these settings have to be != 0 to get an initial modeset.
6248          */
6249         if (link->connector_signal == SIGNAL_TYPE_DISPLAY_PORT) {
6250                 link->verified_link_cap.lane_count = LANE_COUNT_FOUR;
6251                 link->verified_link_cap.link_rate = LINK_RATE_HIGH2;
6252         }
6253
6255         aconnector->base.override_edid = true;
6256         create_eml_sink(aconnector);
6257 }
6258
6259 static struct dc_stream_state *
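/*
 * Create a stream and validate it against DC, retrying with the requested
 * bpc lowered in steps of 2 (down to 6) while validation fails. If the
 * encoder itself rejects the stream, retry once more forcing YCbCr420
 * output.
 */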
6260 create_validate_stream_for_sink(struct amdgpu_dm_connector *aconnector,
6261                                 const struct drm_display_mode *drm_mode,
6262                                 const struct dm_connector_state *dm_state,
6263                                 const struct dc_stream_state *old_stream)
6264 {
6265         struct drm_connector *connector = &aconnector->base;
6266         struct amdgpu_device *adev = drm_to_adev(connector->dev);
6267         struct dc_stream_state *stream;
6268         const struct drm_connector_state *drm_state = dm_state ? &dm_state->base : NULL;
6269         int requested_bpc = drm_state ? drm_state->max_requested_bpc : 8;
6270         enum dc_status dc_result = DC_OK;
6271
6272         do {
6273                 stream = create_stream_for_sink(aconnector, drm_mode,
6274                                                 dm_state, old_stream,
6275                                                 requested_bpc);
6276                 if (stream == NULL) {
6277                         DRM_ERROR("Failed to create stream for sink!\n");
6278                         break;
6279                 }
6280
6281                 dc_result = dc_validate_stream(adev->dm.dc, stream);
6282
6283                 if (dc_result != DC_OK) {
6284                         DRM_DEBUG_KMS("Mode %dx%d (clk %d) failed DC validation with error %d (%s)\n",
6285                                       drm_mode->hdisplay,
6286                                       drm_mode->vdisplay,
6287                                       drm_mode->clock,
6288                                       dc_result,
6289                                       dc_status_to_str(dc_result));
6290
6291                         dc_stream_release(stream);
6292                         stream = NULL;
6293                         requested_bpc -= 2; /* lower bpc to retry validation */
6294                 }
6295
6296         } while (stream == NULL && requested_bpc >= 6);
6297
6298         if (dc_result == DC_FAIL_ENC_VALIDATE && !aconnector->force_yuv420_output) {
6299                 DRM_DEBUG_KMS("Retry forcing YCbCr420 encoding\n");
6300
6301                 aconnector->force_yuv420_output = true;
6302                 stream = create_validate_stream_for_sink(aconnector, drm_mode,
6303                                                 dm_state, old_stream);
6304                 aconnector->force_yuv420_output = false;
6305         }
6306
6307         return stream;
6308 }
6309
6310 enum drm_mode_status amdgpu_dm_connector_mode_valid(struct drm_connector *connector,
6311                                    struct drm_display_mode *mode)
6312 {
6313         int result = MODE_ERROR;
6314         struct dc_sink *dc_sink;
6315         /* TODO: Unhardcode stream count */
6316         struct dc_stream_state *stream;
6317         struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
6318
6319         if ((mode->flags & DRM_MODE_FLAG_INTERLACE) ||
6320                         (mode->flags & DRM_MODE_FLAG_DBLSCAN))
6321                 return result;
6322
6323         /*
6324          * Only run this the first time mode_valid is called to initialize
6325          * EDID mgmt
6326          */
6327         if (aconnector->base.force != DRM_FORCE_UNSPECIFIED &&
6328                 !aconnector->dc_em_sink)
6329                 handle_edid_mgmt(aconnector);
6330
6331         dc_sink = to_amdgpu_dm_connector(connector)->dc_sink;
6332
6333         if (dc_sink == NULL && aconnector->base.force != DRM_FORCE_ON_DIGITAL &&
6334                                 aconnector->base.force != DRM_FORCE_ON) {
6335                 DRM_ERROR("dc_sink is NULL!\n");
6336                 goto fail;
6337         }
6338
6339         stream = create_validate_stream_for_sink(aconnector, mode, NULL, NULL);
6340         if (stream) {
6341                 dc_stream_release(stream);
6342                 result = MODE_OK;
6343         }
6344
6345 fail:
6346         /* TODO: error handling */
6347         return result;
6348 }
6349
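/*
 * Pack the connector's HDR output metadata into a CTA-861 Dynamic Range
 * and Mastering (DRM) infoframe (a 4 byte header plus a fixed 26 byte
 * payload) and wrap it as an HDMI infopacket or a DP/eDP SDP depending on
 * the connector type.
 */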
6350 static int fill_hdr_info_packet(const struct drm_connector_state *state,
6351                                 struct dc_info_packet *out)
6352 {
6353         struct hdmi_drm_infoframe frame;
6354         unsigned char buf[30]; /* 26 + 4 */
6355         ssize_t len;
6356         int ret, i;
6357
6358         memset(out, 0, sizeof(*out));
6359
6360         if (!state->hdr_output_metadata)
6361                 return 0;
6362
6363         ret = drm_hdmi_infoframe_set_hdr_metadata(&frame, state);
6364         if (ret)
6365                 return ret;
6366
6367         len = hdmi_drm_infoframe_pack_only(&frame, buf, sizeof(buf));
6368         if (len < 0)
6369                 return (int)len;
6370
6371         /* Static metadata is a fixed 26 bytes + 4 byte header. */
6372         if (len != 30)
6373                 return -EINVAL;
6374
6375         /* Prepare the infopacket for DC. */
6376         switch (state->connector->connector_type) {
6377         case DRM_MODE_CONNECTOR_HDMIA:
6378                 out->hb0 = 0x87; /* type */
6379                 out->hb1 = 0x01; /* version */
6380                 out->hb2 = 0x1A; /* length */
6381                 out->sb[0] = buf[3]; /* checksum */
6382                 i = 1;
6383                 break;
6384
6385         case DRM_MODE_CONNECTOR_DisplayPort:
6386         case DRM_MODE_CONNECTOR_eDP:
6387                 out->hb0 = 0x00; /* sdp id, zero */
6388                 out->hb1 = 0x87; /* type */
6389                 out->hb2 = 0x1D; /* payload len - 1 */
6390                 out->hb3 = (0x13 << 2); /* sdp version */
6391                 out->sb[0] = 0x01; /* version */
6392                 out->sb[1] = 0x1A; /* length */
6393                 i = 2;
6394                 break;
6395
6396         default:
6397                 return -EINVAL;
6398         }
6399
6400         memcpy(&out->sb[i], &buf[4], 26);
6401         out->valid = true;
6402
6403         print_hex_dump(KERN_DEBUG, "HDR SB:", DUMP_PREFIX_NONE, 16, 1, out->sb,
6404                        sizeof(out->sb), false);
6405
6406         return 0;
6407 }
6408
6409 static bool
6410 is_hdr_metadata_different(const struct drm_connector_state *old_state,
6411                           const struct drm_connector_state *new_state)
6412 {
6413         struct drm_property_blob *old_blob = old_state->hdr_output_metadata;
6414         struct drm_property_blob *new_blob = new_state->hdr_output_metadata;
6415
6416         if (old_blob != new_blob) {
6417                 if (old_blob && new_blob &&
6418                     old_blob->length == new_blob->length)
6419                         return memcmp(old_blob->data, new_blob->data,
6420                                       old_blob->length);
6421
6422                 return true;
6423         }
6424
6425         return false;
6426 }
6427
6428 static int
6429 amdgpu_dm_connector_atomic_check(struct drm_connector *conn,
6430                                  struct drm_atomic_state *state)
6431 {
6432         struct drm_connector_state *new_con_state =
6433                 drm_atomic_get_new_connector_state(state, conn);
6434         struct drm_connector_state *old_con_state =
6435                 drm_atomic_get_old_connector_state(state, conn);
6436         struct drm_crtc *crtc = new_con_state->crtc;
6437         struct drm_crtc_state *new_crtc_state;
6438         int ret;
6439
6440         trace_amdgpu_dm_connector_atomic_check(new_con_state);
6441
6442         if (!crtc)
6443                 return 0;
6444
6445         if (is_hdr_metadata_different(old_con_state, new_con_state)) {
6446                 struct dc_info_packet hdr_infopacket;
6447
6448                 ret = fill_hdr_info_packet(new_con_state, &hdr_infopacket);
6449                 if (ret)
6450                         return ret;
6451
6452                 new_crtc_state = drm_atomic_get_crtc_state(state, crtc);
6453                 if (IS_ERR(new_crtc_state))
6454                         return PTR_ERR(new_crtc_state);
6455
6456                 /*
6457                  * DC considers the stream backends changed if the
6458                  * static metadata changes. Forcing the modeset also
6459                  * gives a simple way for userspace to switch from
6460                  * 8bpc to 10bpc when setting the metadata to enter
6461                  * or exit HDR.
6462                  *
6463                  * Changing the static metadata after it's been
6464                  * set is permissible, however. So only force a
6465                  * modeset if we're entering or exiting HDR.
6466                  */
6467                 new_crtc_state->mode_changed =
6468                         !old_con_state->hdr_output_metadata ||
6469                         !new_con_state->hdr_output_metadata;
6470         }
6471
6472         return 0;
6473 }
6474
6475 static const struct drm_connector_helper_funcs
6476 amdgpu_dm_connector_helper_funcs = {
6477         /*
6478          * If hotplugging a second, bigger display in fbcon mode, bigger resolution
6479          * modes will be filtered by drm_mode_validate_size(), and those modes
6480          * are missing after the user starts lightdm. So we need to renew the modes
6481          * list in the get_modes callback, not just return the modes count.
6482          */
6483         .get_modes = get_modes,
6484         .mode_valid = amdgpu_dm_connector_mode_valid,
6485         .atomic_check = amdgpu_dm_connector_atomic_check,
6486 };
6487
6488 static void dm_crtc_helper_disable(struct drm_crtc *crtc)
6489 {
6490 }
6491
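/*
 * Count the non-cursor planes that will be enabled on the CRTC. Planes
 * missing from the atomic state previously passed validation and
 * therefore still count as enabled.
 */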
6492 static int count_crtc_active_planes(struct drm_crtc_state *new_crtc_state)
6493 {
6494         struct drm_atomic_state *state = new_crtc_state->state;
6495         struct drm_plane *plane;
6496         int num_active = 0;
6497
6498         drm_for_each_plane_mask(plane, state->dev, new_crtc_state->plane_mask) {
6499                 struct drm_plane_state *new_plane_state;
6500
6501                 /* Cursor planes are "fake". */
6502                 if (plane->type == DRM_PLANE_TYPE_CURSOR)
6503                         continue;
6504
6505                 new_plane_state = drm_atomic_get_new_plane_state(state, plane);
6506
6507                 if (!new_plane_state) {
6508                         /*
6509                          * The plane is enabled on the CRTC and hasn't changed
6510                          * state. This means that it previously passed
6511                          * validation and is therefore enabled.
6512                          */
6513                         num_active += 1;
6514                         continue;
6515                 }
6516
6517                 /* We need a framebuffer to be considered enabled. */
6518                 num_active += (new_plane_state->fb != NULL);
6519         }
6520
6521         return num_active;
6522 }
6523
6524 static void dm_update_crtc_active_planes(struct drm_crtc *crtc,
6525                                          struct drm_crtc_state *new_crtc_state)
6526 {
6527         struct dm_crtc_state *dm_new_crtc_state =
6528                 to_dm_crtc_state(new_crtc_state);
6529
6530         dm_new_crtc_state->active_planes = 0;
6531
6532         if (!dm_new_crtc_state->stream)
6533                 return;
6534
6535         dm_new_crtc_state->active_planes =
6536                 count_crtc_active_planes(new_crtc_state);
6537 }
6538
6539 static int dm_crtc_helper_atomic_check(struct drm_crtc *crtc,
6540                                        struct drm_atomic_state *state)
6541 {
6542         struct drm_crtc_state *crtc_state = drm_atomic_get_new_crtc_state(state,
6543                                                                           crtc);
6544         struct amdgpu_device *adev = drm_to_adev(crtc->dev);
6545         struct dc *dc = adev->dm.dc;
6546         struct dm_crtc_state *dm_crtc_state = to_dm_crtc_state(crtc_state);
6547         int ret = -EINVAL;
6548
6549         trace_amdgpu_dm_crtc_atomic_check(crtc_state);
6550
6551         dm_update_crtc_active_planes(crtc, crtc_state);
6552
6553         if (unlikely(!dm_crtc_state->stream &&
6554                      modeset_required(crtc_state, NULL, dm_crtc_state->stream))) {
6555                 WARN_ON(1);
6556                 return ret;
6557         }
6558
6559         /*
6560          * We require the primary plane to be enabled whenever the CRTC is, otherwise
6561          * drm_mode_cursor_universal may end up trying to enable the cursor plane while all other
6562          * planes are disabled, which is not supported by the hardware. And there is legacy
6563          * userspace which stops using the HW cursor altogether in response to the resulting EINVAL.
6564          */
6565         if (crtc_state->enable &&
6566             !(crtc_state->plane_mask & drm_plane_mask(crtc->primary))) {
6567                 DRM_DEBUG_ATOMIC("Can't enable a CRTC without enabling the primary plane\n");
6568                 return -EINVAL;
6569         }
6570
6571         /* In some use cases, like reset, no stream is attached */
6572         if (!dm_crtc_state->stream)
6573                 return 0;
6574
6575         if (dc_validate_stream(dc, dm_crtc_state->stream) == DC_OK)
6576                 return 0;
6577
6578         DRM_DEBUG_ATOMIC("Failed DC stream validation\n");
6579         return ret;
6580 }
6581
6582 static bool dm_crtc_helper_mode_fixup(struct drm_crtc *crtc,
6583                                       const struct drm_display_mode *mode,
6584                                       struct drm_display_mode *adjusted_mode)
6585 {
6586         return true;
6587 }
6588
6589 static const struct drm_crtc_helper_funcs amdgpu_dm_crtc_helper_funcs = {
6590         .disable = dm_crtc_helper_disable,
6591         .atomic_check = dm_crtc_helper_atomic_check,
6592         .mode_fixup = dm_crtc_helper_mode_fixup,
6593         .get_scanout_position = amdgpu_crtc_get_scanout_position,
6594 };
6595
6596 static void dm_encoder_helper_disable(struct drm_encoder *encoder)
6597 {
6599 }
6600
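/*
 * Map a DC color depth to bits per component; callers multiply the result
 * by 3 to get bits per pixel (e.g. COLOR_DEPTH_101010 -> 10 bpc -> 30 bpp).
 */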
6601 static int convert_dc_color_depth_into_bpc(enum dc_color_depth display_color_depth)
6602 {
6603         switch (display_color_depth) {
6604         case COLOR_DEPTH_666:
6605                 return 6;
6606         case COLOR_DEPTH_888:
6607                 return 8;
6608         case COLOR_DEPTH_101010:
6609                 return 10;
6610         case COLOR_DEPTH_121212:
6611                 return 12;
6612         case COLOR_DEPTH_141414:
6613                 return 14;
6614         case COLOR_DEPTH_161616:
6615                 return 16;
6616         default:
6617                 break;
6618         }
6619         return 0;
6620 }
6621
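/*
 * For MST connectors, compute the stream's payload bandwidth number (PBN)
 * from the pixel clock and bpp and reserve the matching number of VCPI
 * slots. As a rough worked example (see drm_dp_calc_pbn_mode() for the
 * exact math): 1080p60 at 24 bpp gives 148500 kHz * 24 * 64 * 1006 /
 * (8 * 54 * 1000 * 1000), which rounds up to 532 PBN.
 */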
6622 static int dm_encoder_helper_atomic_check(struct drm_encoder *encoder,
6623                                           struct drm_crtc_state *crtc_state,
6624                                           struct drm_connector_state *conn_state)
6625 {
6626         struct drm_atomic_state *state = crtc_state->state;
6627         struct drm_connector *connector = conn_state->connector;
6628         struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
6629         struct dm_connector_state *dm_new_connector_state = to_dm_connector_state(conn_state);
6630         const struct drm_display_mode *adjusted_mode = &crtc_state->adjusted_mode;
6631         struct drm_dp_mst_topology_mgr *mst_mgr;
6632         struct drm_dp_mst_port *mst_port;
6633         enum dc_color_depth color_depth;
6634         int clock, bpp = 0;
6635         bool is_y420 = false;
6636
6637         if (!aconnector->port || !aconnector->dc_sink)
6638                 return 0;
6639
6640         mst_port = aconnector->port;
6641         mst_mgr = &aconnector->mst_port->mst_mgr;
6642
6643         if (!crtc_state->connectors_changed && !crtc_state->mode_changed)
6644                 return 0;
6645
6646         if (!state->duplicated) {
6647                 int max_bpc = conn_state->max_requested_bpc;
6648                 is_y420 = drm_mode_is_420_also(&connector->display_info, adjusted_mode) &&
6649                                 aconnector->force_yuv420_output;
6650                 color_depth = convert_color_depth_from_display_info(connector,
6651                                                                     is_y420,
6652                                                                     max_bpc);
6653                 bpp = convert_dc_color_depth_into_bpc(color_depth) * 3;
6654                 clock = adjusted_mode->clock;
6655                 dm_new_connector_state->pbn = drm_dp_calc_pbn_mode(clock, bpp, false);
6656         }
6657         dm_new_connector_state->vcpi_slots = drm_dp_atomic_find_vcpi_slots(state,
6658                                                                            mst_mgr,
6659                                                                            mst_port,
6660                                                                            dm_new_connector_state->pbn,
6661                                                                            dm_mst_get_pbn_divider(aconnector->dc_link));
6662         if (dm_new_connector_state->vcpi_slots < 0) {
6663                 DRM_DEBUG_ATOMIC("failed finding vcpi slots: %d\n", (int)dm_new_connector_state->vcpi_slots);
6664                 return dm_new_connector_state->vcpi_slots;
6665         }
6666         return 0;
6667 }
6668
6669 const struct drm_encoder_helper_funcs amdgpu_dm_encoder_helper_funcs = {
6670         .disable = dm_encoder_helper_disable,
6671         .atomic_check = dm_encoder_helper_atomic_check
6672 };
6673
6674 #if defined(CONFIG_DRM_AMD_DC_DCN)
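/*
 * Re-run the MST payload allocation with DSC taken into account: for each
 * stream with DSC enabled, recompute the PBN from the compressed
 * bits_per_pixel (stored in 1/16 bpp units) and enable DSC on the port;
 * for every other stream make sure DSC is disabled in the allocation.
 */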
6675 static int dm_update_mst_vcpi_slots_for_dsc(struct drm_atomic_state *state,
6676                                             struct dc_state *dc_state)
6677 {
6678         struct dc_stream_state *stream = NULL;
6679         struct drm_connector *connector;
6680         struct drm_connector_state *new_con_state;
6681         struct amdgpu_dm_connector *aconnector;
6682         struct dm_connector_state *dm_conn_state;
6683         int i, j, clock, bpp;
6684         int vcpi, pbn_div, pbn = 0;
6685
6686         for_each_new_connector_in_state(state, connector, new_con_state, i) {
6687
6688                 aconnector = to_amdgpu_dm_connector(connector);
6689
6690                 if (!aconnector->port)
6691                         continue;
6692
6693                 if (!new_con_state || !new_con_state->crtc)
6694                         continue;
6695
6696                 dm_conn_state = to_dm_connector_state(new_con_state);
6697
6698                 for (j = 0; j < dc_state->stream_count; j++) {
6699                         stream = dc_state->streams[j];
6700                         if (!stream)
6701                                 continue;
6702
6703                         if ((struct amdgpu_dm_connector *)stream->dm_stream_context == aconnector)
6704                                 break;
6705
6706                         stream = NULL;
6707                 }
6708
6709                 if (!stream)
6710                         continue;
6711
6712                 if (stream->timing.flags.DSC != 1) {
6713                         drm_dp_mst_atomic_enable_dsc(state,
6714                                                      aconnector->port,
6715                                                      dm_conn_state->pbn,
6716                                                      0,
6717                                                      false);
6718                         continue;
6719                 }
6720
6721                 pbn_div = dm_mst_get_pbn_divider(stream->link);
6722                 bpp = stream->timing.dsc_cfg.bits_per_pixel;
6723                 clock = stream->timing.pix_clk_100hz / 10;
6724                 pbn = drm_dp_calc_pbn_mode(clock, bpp, true);
6725                 vcpi = drm_dp_mst_atomic_enable_dsc(state,
6726                                                     aconnector->port,
6727                                                     pbn, pbn_div,
6728                                                     true);
6729                 if (vcpi < 0)
6730                         return vcpi;
6731
6732                 dm_conn_state->pbn = pbn;
6733                 dm_conn_state->vcpi_slots = vcpi;
6734         }
6735         return 0;
6736 }
6737 #endif
6738
6739 static void dm_drm_plane_reset(struct drm_plane *plane)
6740 {
6741         struct dm_plane_state *amdgpu_state = NULL;
6742
6743         if (plane->state)
6744                 plane->funcs->atomic_destroy_state(plane, plane->state);
6745
6746         amdgpu_state = kzalloc(sizeof(*amdgpu_state), GFP_KERNEL);
6747         WARN_ON(amdgpu_state == NULL);
6748
6749         if (amdgpu_state)
6750                 __drm_atomic_helper_plane_reset(plane, &amdgpu_state->base);
6751 }
6752
6753 static struct drm_plane_state *
6754 dm_drm_plane_duplicate_state(struct drm_plane *plane)
6755 {
6756         struct dm_plane_state *dm_plane_state, *old_dm_plane_state;
6757
6758         old_dm_plane_state = to_dm_plane_state(plane->state);
6759         dm_plane_state = kzalloc(sizeof(*dm_plane_state), GFP_KERNEL);
6760         if (!dm_plane_state)
6761                 return NULL;
6762
6763         __drm_atomic_helper_plane_duplicate_state(plane, &dm_plane_state->base);
6764
6765         if (old_dm_plane_state->dc_state) {
6766                 dm_plane_state->dc_state = old_dm_plane_state->dc_state;
6767                 dc_plane_state_retain(dm_plane_state->dc_state);
6768         }
6769
6770         return &dm_plane_state->base;
6771 }
6772
6773 static void dm_drm_plane_destroy_state(struct drm_plane *plane,
6774                                 struct drm_plane_state *state)
6775 {
6776         struct dm_plane_state *dm_plane_state = to_dm_plane_state(state);
6777
6778         if (dm_plane_state->dc_state)
6779                 dc_plane_state_release(dm_plane_state->dc_state);
6780
6781         drm_atomic_helper_plane_destroy_state(plane, state);
6782 }
6783
6784 static const struct drm_plane_funcs dm_plane_funcs = {
6785         .update_plane   = drm_atomic_helper_update_plane,
6786         .disable_plane  = drm_atomic_helper_disable_plane,
6787         .destroy        = drm_primary_helper_destroy,
6788         .reset = dm_drm_plane_reset,
6789         .atomic_duplicate_state = dm_drm_plane_duplicate_state,
6790         .atomic_destroy_state = dm_drm_plane_destroy_state,
6791         .format_mod_supported = dm_plane_format_mod_supported,
6792 };
6793
6794 static int dm_plane_helper_prepare_fb(struct drm_plane *plane,
6795                                       struct drm_plane_state *new_state)
6796 {
6797         struct amdgpu_framebuffer *afb;
6798         struct drm_gem_object *obj;
6799         struct amdgpu_device *adev;
6800         struct amdgpu_bo *rbo;
6801         struct dm_plane_state *dm_plane_state_new, *dm_plane_state_old;
6802         struct list_head list;
6803         struct ttm_validate_buffer tv;
6804         struct ww_acquire_ctx ticket;
6805         uint32_t domain;
6806         int r;
6807
6808         if (!new_state->fb) {
6809                 DRM_DEBUG_KMS("No FB bound\n");
6810                 return 0;
6811         }
6812
6813         afb = to_amdgpu_framebuffer(new_state->fb);
6814         obj = new_state->fb->obj[0];
6815         rbo = gem_to_amdgpu_bo(obj);
6816         adev = amdgpu_ttm_adev(rbo->tbo.bdev);
6817         INIT_LIST_HEAD(&list);
6818
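        /*
         * Reserve the BO through the TTM execbuf utils before pinning it for
         * scanout; num_shared == 1 keeps one shared-fence slot available.
         */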
6819         tv.bo = &rbo->tbo;
6820         tv.num_shared = 1;
6821         list_add(&tv.head, &list);
6822
6823         r = ttm_eu_reserve_buffers(&ticket, &list, false, NULL);
6824         if (r) {
6825                 dev_err(adev->dev, "fail to reserve bo (%d)\n", r);
6826                 return r;
6827         }
6828
6829         if (plane->type != DRM_PLANE_TYPE_CURSOR)
6830                 domain = amdgpu_display_supported_domains(adev, rbo->flags);
6831         else
6832                 domain = AMDGPU_GEM_DOMAIN_VRAM;
6833
6834         r = amdgpu_bo_pin(rbo, domain);
6835         if (unlikely(r != 0)) {
6836                 if (r != -ERESTARTSYS)
6837                         DRM_ERROR("Failed to pin framebuffer with error %d\n", r);
6838                 ttm_eu_backoff_reservation(&ticket, &list);
6839                 return r;
6840         }
6841
6842         r = amdgpu_ttm_alloc_gart(&rbo->tbo);
6843         if (unlikely(r != 0)) {
6844                 amdgpu_bo_unpin(rbo);
6845                 ttm_eu_backoff_reservation(&ticket, &list);
6846                 DRM_ERROR("%p bind failed\n", rbo);
6847                 return r;
6848         }
6849
6850         ttm_eu_backoff_reservation(&ticket, &list);
6851
6852         afb->address = amdgpu_bo_gpu_offset(rbo);
6853
6854         amdgpu_bo_ref(rbo);
6855
6856          /*
6857          * We don't do surface updates on planes that have been newly created,
6858          * but we also don't have the afb->address during atomic check.
6859          *
6860          * Fill in buffer attributes depending on the address here, but only on
6861          * newly created planes since they're not being used by DC yet and this
6862          * won't modify global state.
6863          */
6864         dm_plane_state_old = to_dm_plane_state(plane->state);
6865         dm_plane_state_new = to_dm_plane_state(new_state);
6866
6867         if (dm_plane_state_new->dc_state &&
6868             dm_plane_state_old->dc_state != dm_plane_state_new->dc_state) {
6869                 struct dc_plane_state *plane_state =
6870                         dm_plane_state_new->dc_state;
6871                 bool force_disable_dcc = !plane_state->dcc.enable;
6872
6873                 fill_plane_buffer_attributes(
6874                         adev, afb, plane_state->format, plane_state->rotation,
6875                         afb->tiling_flags,
6876                         &plane_state->tiling_info, &plane_state->plane_size,
6877                         &plane_state->dcc, &plane_state->address,
6878                         afb->tmz_surface, force_disable_dcc);
6879         }
6880
6881         return 0;
6882 }
6883
6884 static void dm_plane_helper_cleanup_fb(struct drm_plane *plane,
6885                                        struct drm_plane_state *old_state)
6886 {
6887         struct amdgpu_bo *rbo;
6888         int r;
6889
6890         if (!old_state->fb)
6891                 return;
6892
6893         rbo = gem_to_amdgpu_bo(old_state->fb->obj[0]);
6894         r = amdgpu_bo_reserve(rbo, false);
6895         if (unlikely(r)) {
6896                 DRM_ERROR("failed to reserve rbo before unpin\n");
6897                 return;
6898         }
6899
6900         amdgpu_bo_unpin(rbo);
6901         amdgpu_bo_unreserve(rbo);
6902         amdgpu_bo_unref(&rbo);
6903 }
6904
6905 static int dm_plane_helper_check_state(struct drm_plane_state *state,
6906                                        struct drm_crtc_state *new_crtc_state)
6907 {
6908         struct drm_framebuffer *fb = state->fb;
6909         int min_downscale, max_upscale;
6910         int min_scale = 0;
6911         int max_scale = INT_MAX;
6912
6913         /* Plane enabled? Validate viewport and get scaling factors from plane caps. */
6914         if (fb && state->crtc) {
6915                 /* Validate viewport to cover the case when only the position changes */
6916                 if (state->plane->type != DRM_PLANE_TYPE_CURSOR) {
6917                         int viewport_width = state->crtc_w;
6918                         int viewport_height = state->crtc_h;
6919
6920                         if (state->crtc_x < 0)
6921                                 viewport_width += state->crtc_x;
6922                         else if (state->crtc_x + state->crtc_w > new_crtc_state->mode.crtc_hdisplay)
6923                                 viewport_width = new_crtc_state->mode.crtc_hdisplay - state->crtc_x;
6924
6925                         if (state->crtc_y < 0)
6926                                 viewport_height += state->crtc_y;
6927                         else if (state->crtc_y + state->crtc_h > new_crtc_state->mode.crtc_vdisplay)
6928                                 viewport_height = new_crtc_state->mode.crtc_vdisplay - state->crtc_y;
6929
6930                         if (viewport_width < 0 || viewport_height < 0) {
6931                                 DRM_DEBUG_ATOMIC("Plane completely outside of screen\n");
6932                                 return -EINVAL;
6933                         } else if (viewport_width < MIN_VIEWPORT_SIZE*2) { /* x2 for width is because of pipe-split. */
6934                                 DRM_DEBUG_ATOMIC("Viewport width %d smaller than %d\n", viewport_width, MIN_VIEWPORT_SIZE*2);
6935                                 return -EINVAL;
6936                         } else if (viewport_height < MIN_VIEWPORT_SIZE) {
6937                                 DRM_DEBUG_ATOMIC("Viewport height %d smaller than %d\n", viewport_height, MIN_VIEWPORT_SIZE);
6938                                 return -EINVAL;
6939                         }
6940
6941                 }
6942
6943                 /* Get min/max allowed scaling factors from plane caps. */
6944                 get_min_max_dc_plane_scaling(state->crtc->dev, fb,
6945                                              &min_downscale, &max_upscale);
6946                 /*
6947                  * Convert to drm convention: 16.16 fixed point, instead of dc's
6948                  * 1.0 == 1000. Also drm scaling is src/dst instead of dc's
6949                  * dst/src, so min_scale = 1.0 / max_upscale, etc.
6950                  */
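                /*
                 * Example: max_upscale == 4000 (4.0x in dc units) yields
                 * min_scale == (1000 << 16) / 4000 == 0x4000, i.e. 0.25 in
                 * 16.16 fixed point.
                 */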
6951                 min_scale = (1000 << 16) / max_upscale;
6952                 max_scale = (1000 << 16) / min_downscale;
6953         }
6954
6955         return drm_atomic_helper_check_plane_state(
6956                 state, new_crtc_state, min_scale, max_scale, true, true);
6957 }
6958
6959 static int dm_plane_atomic_check(struct drm_plane *plane,
6960                                  struct drm_atomic_state *state)
6961 {
6962         struct drm_plane_state *new_plane_state = drm_atomic_get_new_plane_state(state,
6963                                                                                  plane);
6964         struct amdgpu_device *adev = drm_to_adev(plane->dev);
6965         struct dc *dc = adev->dm.dc;
6966         struct dm_plane_state *dm_plane_state;
6967         struct dc_scaling_info scaling_info;
6968         struct drm_crtc_state *new_crtc_state;
6969         int ret;
6970
6971         trace_amdgpu_dm_plane_atomic_check(new_plane_state);
6972
6973         dm_plane_state = to_dm_plane_state(new_plane_state);
6974
6975         if (!dm_plane_state->dc_state)
6976                 return 0;
6977
6978         new_crtc_state =
6979                 drm_atomic_get_new_crtc_state(state,
6980                                               new_plane_state->crtc);
6981         if (!new_crtc_state)
6982                 return -EINVAL;
6983
6984         ret = dm_plane_helper_check_state(new_plane_state, new_crtc_state);
6985         if (ret)
6986                 return ret;
6987
6988         ret = fill_dc_scaling_info(new_plane_state, &scaling_info);
6989         if (ret)
6990                 return ret;
6991
6992         if (dc_validate_plane(dc, dm_plane_state->dc_state) == DC_OK)
6993                 return 0;
6994
6995         return -EINVAL;
6996 }
6997
6998 static int dm_plane_atomic_async_check(struct drm_plane *plane,
6999                                        struct drm_atomic_state *state)
7000 {
7001         /* Only support async updates on cursor planes. */
7002         if (plane->type != DRM_PLANE_TYPE_CURSOR)
7003                 return -EINVAL;
7004
7005         return 0;
7006 }
7007
7008 static void dm_plane_atomic_async_update(struct drm_plane *plane,
7009                                          struct drm_atomic_state *state)
7010 {
7011         struct drm_plane_state *new_state = drm_atomic_get_new_plane_state(state,
7012                                                                            plane);
7013         struct drm_plane_state *old_state =
7014                 drm_atomic_get_old_plane_state(state, plane);
7015
7016         trace_amdgpu_dm_atomic_update_cursor(new_state);
7017
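        /*
         * Async updates bypass the normal atomic commit: copy the new fb and
         * position into the current plane state, then program the cursor
         * directly.
         */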
7018         swap(plane->state->fb, new_state->fb);
7019
7020         plane->state->src_x = new_state->src_x;
7021         plane->state->src_y = new_state->src_y;
7022         plane->state->src_w = new_state->src_w;
7023         plane->state->src_h = new_state->src_h;
7024         plane->state->crtc_x = new_state->crtc_x;
7025         plane->state->crtc_y = new_state->crtc_y;
7026         plane->state->crtc_w = new_state->crtc_w;
7027         plane->state->crtc_h = new_state->crtc_h;
7028
7029         handle_cursor_update(plane, old_state);
7030 }
7031
7032 static const struct drm_plane_helper_funcs dm_plane_helper_funcs = {
7033         .prepare_fb = dm_plane_helper_prepare_fb,
7034         .cleanup_fb = dm_plane_helper_cleanup_fb,
7035         .atomic_check = dm_plane_atomic_check,
7036         .atomic_async_check = dm_plane_atomic_async_check,
7037         .atomic_async_update = dm_plane_atomic_async_update
7038 };
7039
7040 /*
7041  * TODO: these are currently initialized to RGB formats only.
7042  * For future use cases we should either initialize them dynamically based on
7043  * plane capabilities, or initialize this array to all formats, so the internal
7044  * drm check will succeed, and let DC implement the proper check.
7045  */
7046 static const uint32_t rgb_formats[] = {
7047         DRM_FORMAT_XRGB8888,
7048         DRM_FORMAT_ARGB8888,
7049         DRM_FORMAT_RGBA8888,
7050         DRM_FORMAT_XRGB2101010,
7051         DRM_FORMAT_XBGR2101010,
7052         DRM_FORMAT_ARGB2101010,
7053         DRM_FORMAT_ABGR2101010,
7054         DRM_FORMAT_XBGR8888,
7055         DRM_FORMAT_ABGR8888,
7056         DRM_FORMAT_RGB565,
7057 };
7058
7059 static const uint32_t overlay_formats[] = {
7060         DRM_FORMAT_XRGB8888,
7061         DRM_FORMAT_ARGB8888,
7062         DRM_FORMAT_RGBA8888,
7063         DRM_FORMAT_XBGR8888,
7064         DRM_FORMAT_ABGR8888,
7065         DRM_FORMAT_RGB565
7066 };
7067
7068 static const u32 cursor_formats[] = {
7069         DRM_FORMAT_ARGB8888
7070 };
7071
7072 static int get_plane_formats(const struct drm_plane *plane,
7073                              const struct dc_plane_cap *plane_cap,
7074                              uint32_t *formats, int max_formats)
7075 {
7076         int i, num_formats = 0;
7077
7078         /*
7079          * TODO: Query support for each group of formats directly from
7080          * DC plane caps. This will require adding more formats to the
7081          * caps list.
7082          */
7083
7084         switch (plane->type) {
7085         case DRM_PLANE_TYPE_PRIMARY:
7086                 for (i = 0; i < ARRAY_SIZE(rgb_formats); ++i) {
7087                         if (num_formats >= max_formats)
7088                                 break;
7089
7090                         formats[num_formats++] = rgb_formats[i];
7091                 }
7092
7093                 if (plane_cap && plane_cap->pixel_format_support.nv12)
7094                         formats[num_formats++] = DRM_FORMAT_NV12;
7095                 if (plane_cap && plane_cap->pixel_format_support.p010)
7096                         formats[num_formats++] = DRM_FORMAT_P010;
7097                 if (plane_cap && plane_cap->pixel_format_support.fp16) {
7098                         formats[num_formats++] = DRM_FORMAT_XRGB16161616F;
7099                         formats[num_formats++] = DRM_FORMAT_ARGB16161616F;
7100                         formats[num_formats++] = DRM_FORMAT_XBGR16161616F;
7101                         formats[num_formats++] = DRM_FORMAT_ABGR16161616F;
7102                 }
7103                 break;
7104
7105         case DRM_PLANE_TYPE_OVERLAY:
7106                 for (i = 0; i < ARRAY_SIZE(overlay_formats); ++i) {
7107                         if (num_formats >= max_formats)
7108                                 break;
7109
7110                         formats[num_formats++] = overlay_formats[i];
7111                 }
7112                 break;
7113
7114         case DRM_PLANE_TYPE_CURSOR:
7115                 for (i = 0; i < ARRAY_SIZE(cursor_formats); ++i) {
7116                         if (num_formats >= max_formats)
7117                                 break;
7118
7119                         formats[num_formats++] = cursor_formats[i];
7120                 }
7121                 break;
7122         }
7123
7124         return num_formats;
7125 }
7126
7127 static int amdgpu_dm_plane_init(struct amdgpu_display_manager *dm,
7128                                 struct drm_plane *plane,
7129                                 unsigned long possible_crtcs,
7130                                 const struct dc_plane_cap *plane_cap)
7131 {
7132         uint32_t formats[32];
7133         int num_formats;
7134         int res = -EPERM;
7135         unsigned int supported_rotations;
7136         uint64_t *modifiers = NULL;
7137
7138         num_formats = get_plane_formats(plane, plane_cap, formats,
7139                                         ARRAY_SIZE(formats));
7140
7141         res = get_plane_modifiers(dm->adev, plane->type, &modifiers);
7142         if (res)
7143                 return res;
7144
7145         res = drm_universal_plane_init(adev_to_drm(dm->adev), plane, possible_crtcs,
7146                                        &dm_plane_funcs, formats, num_formats,
7147                                        modifiers, plane->type, NULL);
7148         kfree(modifiers);
7149         if (res)
7150                 return res;
7151
7152         if (plane->type == DRM_PLANE_TYPE_OVERLAY &&
7153             plane_cap && plane_cap->per_pixel_alpha) {
7154                 unsigned int blend_caps = BIT(DRM_MODE_BLEND_PIXEL_NONE) |
7155                                           BIT(DRM_MODE_BLEND_PREMULTI);
7156
7157                 drm_plane_create_alpha_property(plane);
7158                 drm_plane_create_blend_mode_property(plane, blend_caps);
7159         }
7160
7161         if (plane->type == DRM_PLANE_TYPE_PRIMARY &&
7162             plane_cap &&
7163             (plane_cap->pixel_format_support.nv12 ||
7164              plane_cap->pixel_format_support.p010)) {
7165                 /* This only affects YUV formats. */
7166                 drm_plane_create_color_properties(
7167                         plane,
7168                         BIT(DRM_COLOR_YCBCR_BT601) |
7169                         BIT(DRM_COLOR_YCBCR_BT709) |
7170                         BIT(DRM_COLOR_YCBCR_BT2020),
7171                         BIT(DRM_COLOR_YCBCR_LIMITED_RANGE) |
7172                         BIT(DRM_COLOR_YCBCR_FULL_RANGE),
7173                         DRM_COLOR_YCBCR_BT709, DRM_COLOR_YCBCR_LIMITED_RANGE);
7174         }
7175
7176         supported_rotations =
7177                 DRM_MODE_ROTATE_0 | DRM_MODE_ROTATE_90 |
7178                 DRM_MODE_ROTATE_180 | DRM_MODE_ROTATE_270;
7179
7180         if (dm->adev->asic_type >= CHIP_BONAIRE &&
7181             plane->type != DRM_PLANE_TYPE_CURSOR)
7182                 drm_plane_create_rotation_property(plane, DRM_MODE_ROTATE_0,
7183                                                    supported_rotations);
7184
7185         drm_plane_helper_add(plane, &dm_plane_helper_funcs);
7186
7187         /* Create (reset) the plane state */
7188         if (plane->funcs->reset)
7189                 plane->funcs->reset(plane);
7190
7191         return 0;
7192 }
7193
7194 static int amdgpu_dm_crtc_init(struct amdgpu_display_manager *dm,
7195                                struct drm_plane *plane,
7196                                uint32_t crtc_index)
7197 {
7198         struct amdgpu_crtc *acrtc = NULL;
7199         struct drm_plane *cursor_plane;
7200
7201         int res = -ENOMEM;
7202
7203         cursor_plane = kzalloc(sizeof(*cursor_plane), GFP_KERNEL);
7204         if (!cursor_plane)
7205                 goto fail;
7206
7207         cursor_plane->type = DRM_PLANE_TYPE_CURSOR;
7208         res = amdgpu_dm_plane_init(dm, cursor_plane, 0, NULL);
             if (res)
                     goto fail;
7209
7210         acrtc = kzalloc(sizeof(struct amdgpu_crtc), GFP_KERNEL);
7211         if (!acrtc)
7212                 goto fail;
7213
7214         res = drm_crtc_init_with_planes(
7215                         dm->ddev,
7216                         &acrtc->base,
7217                         plane,
7218                         cursor_plane,
7219                         &amdgpu_dm_crtc_funcs, NULL);
7220
7221         if (res)
7222                 goto fail;
7223
7224         drm_crtc_helper_add(&acrtc->base, &amdgpu_dm_crtc_helper_funcs);
7225
7226         /* Create (reset) the crtc state */
7227         if (acrtc->base.funcs->reset)
7228                 acrtc->base.funcs->reset(&acrtc->base);
7229
7230         acrtc->max_cursor_width = dm->adev->dm.dc->caps.max_cursor_size;
7231         acrtc->max_cursor_height = dm->adev->dm.dc->caps.max_cursor_size;
7232
7233         acrtc->crtc_id = crtc_index;
7234         acrtc->base.enabled = false;
7235         acrtc->otg_inst = -1;
7236
7237         dm->adev->mode_info.crtcs[crtc_index] = acrtc;
7238         drm_crtc_enable_color_mgmt(&acrtc->base, MAX_COLOR_LUT_ENTRIES,
7239                                    true, MAX_COLOR_LUT_ENTRIES);
7240         drm_mode_crtc_set_gamma_size(&acrtc->base, MAX_COLOR_LEGACY_LUT_ENTRIES);
7241
7242         return 0;
7243
7244 fail:
7245         kfree(acrtc);
7246         kfree(cursor_plane);
7247         return res;
7248 }
7249
7250
7251 static int to_drm_connector_type(enum signal_type st)
7252 {
7253         switch (st) {
7254         case SIGNAL_TYPE_HDMI_TYPE_A:
7255                 return DRM_MODE_CONNECTOR_HDMIA;
7256         case SIGNAL_TYPE_EDP:
7257                 return DRM_MODE_CONNECTOR_eDP;
7258         case SIGNAL_TYPE_LVDS:
7259                 return DRM_MODE_CONNECTOR_LVDS;
7260         case SIGNAL_TYPE_RGB:
7261                 return DRM_MODE_CONNECTOR_VGA;
7262         case SIGNAL_TYPE_DISPLAY_PORT:
7263         case SIGNAL_TYPE_DISPLAY_PORT_MST:
7264                 return DRM_MODE_CONNECTOR_DisplayPort;
7265         case SIGNAL_TYPE_DVI_DUAL_LINK:
7266         case SIGNAL_TYPE_DVI_SINGLE_LINK:
7267                 return DRM_MODE_CONNECTOR_DVID;
7268         case SIGNAL_TYPE_VIRTUAL:
7269                 return DRM_MODE_CONNECTOR_VIRTUAL;
7270
7271         default:
7272                 return DRM_MODE_CONNECTOR_Unknown;
7273         }
7274 }
7275
7276 static struct drm_encoder *amdgpu_dm_connector_to_encoder(struct drm_connector *connector)
7277 {
7278         struct drm_encoder *encoder;
7279
7280         /* There is only one encoder per connector */
7281         drm_connector_for_each_possible_encoder(connector, encoder)
7282                 return encoder;
7283
7284         return NULL;
7285 }
7286
7287 static void amdgpu_dm_get_native_mode(struct drm_connector *connector)
7288 {
7289         struct drm_encoder *encoder;
7290         struct amdgpu_encoder *amdgpu_encoder;
7291
7292         encoder = amdgpu_dm_connector_to_encoder(connector);
7293
7294         if (encoder == NULL)
7295                 return;
7296
7297         amdgpu_encoder = to_amdgpu_encoder(encoder);
7298
7299         amdgpu_encoder->native_mode.clock = 0;
7300
7301         if (!list_empty(&connector->probed_modes)) {
7302                 struct drm_display_mode *preferred_mode = NULL;
7303
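                /*
                 * Callers sort probed_modes first (see
                 * amdgpu_dm_connector_ddc_get_modes()), so the preferred,
                 * highest-resolution mode is the first entry; only it needs
                 * to be examined here.
                 */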
7304                 list_for_each_entry(preferred_mode,
7305                                     &connector->probed_modes,
7306                                     head) {
7307                         if (preferred_mode->type & DRM_MODE_TYPE_PREFERRED)
7308                                 amdgpu_encoder->native_mode = *preferred_mode;
7309
7310                         break;
7311                 }
7312
7313         }
7314 }
7315
7316 static struct drm_display_mode *
7317 amdgpu_dm_create_common_mode(struct drm_encoder *encoder,
7318                              char *name,
7319                              int hdisplay, int vdisplay)
7320 {
7321         struct drm_device *dev = encoder->dev;
7322         struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
7323         struct drm_display_mode *mode = NULL;
7324         struct drm_display_mode *native_mode = &amdgpu_encoder->native_mode;
7325
7326         mode = drm_mode_duplicate(dev, native_mode);
7327
7328         if (mode == NULL)
7329                 return NULL;
7330
7331         mode->hdisplay = hdisplay;
7332         mode->vdisplay = vdisplay;
7333         mode->type &= ~DRM_MODE_TYPE_PREFERRED;
7334         strscpy(mode->name, name, DRM_DISPLAY_MODE_LEN);
7335
7336         return mode;
7337
7338 }
7339
7340 static void amdgpu_dm_connector_add_common_modes(struct drm_encoder *encoder,
7341                                                  struct drm_connector *connector)
7342 {
7343         struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
7344         struct drm_display_mode *mode = NULL;
7345         struct drm_display_mode *native_mode = &amdgpu_encoder->native_mode;
7346         struct amdgpu_dm_connector *amdgpu_dm_connector =
7347                                 to_amdgpu_dm_connector(connector);
7348         int i;
7349         int n;
7350         struct mode_size {
7351                 char name[DRM_DISPLAY_MODE_LEN];
7352                 int w;
7353                 int h;
7354         } common_modes[] = {
7355                 {  "640x480",  640,  480},
7356                 {  "800x600",  800,  600},
7357                 { "1024x768", 1024,  768},
7358                 { "1280x720", 1280,  720},
7359                 { "1280x800", 1280,  800},
7360                 {"1280x1024", 1280, 1024},
7361                 { "1440x900", 1440,  900},
7362                 {"1680x1050", 1680, 1050},
7363                 {"1600x1200", 1600, 1200},
7364                 {"1920x1080", 1920, 1080},
7365                 {"1920x1200", 1920, 1200}
7366         };
7367
7368         n = ARRAY_SIZE(common_modes);
7369
7370         for (i = 0; i < n; i++) {
7371                 struct drm_display_mode *curmode = NULL;
7372                 bool mode_existed = false;
7373
7374                 if (common_modes[i].w > native_mode->hdisplay ||
7375                     common_modes[i].h > native_mode->vdisplay ||
7376                    (common_modes[i].w == native_mode->hdisplay &&
7377                     common_modes[i].h == native_mode->vdisplay))
7378                         continue;
7379
7380                 list_for_each_entry(curmode, &connector->probed_modes, head) {
7381                         if (common_modes[i].w == curmode->hdisplay &&
7382                             common_modes[i].h == curmode->vdisplay) {
7383                                 mode_existed = true;
7384                                 break;
7385                         }
7386                 }
7387
7388                 if (mode_existed)
7389                         continue;
7390
7391                 mode = amdgpu_dm_create_common_mode(encoder,
7392                                 common_modes[i].name, common_modes[i].w,
7393                                 common_modes[i].h);
7394                 drm_mode_probed_add(connector, mode);
7395                 amdgpu_dm_connector->num_modes++;
7396         }
7397 }
7398
7399 static void amdgpu_dm_connector_ddc_get_modes(struct drm_connector *connector,
7400                                               struct edid *edid)
7401 {
7402         struct amdgpu_dm_connector *amdgpu_dm_connector =
7403                         to_amdgpu_dm_connector(connector);
7404
7405         if (edid) {
7406                 /* empty probed_modes */
7407                 INIT_LIST_HEAD(&connector->probed_modes);
7408                 amdgpu_dm_connector->num_modes =
7409                                 drm_add_edid_modes(connector, edid);
7410
7411                 /* Sort the probed modes before calling
7412                  * amdgpu_dm_get_native_mode(), since an EDID can have
7413                  * more than one preferred mode. Modes later in the
7414                  * probed mode list can be of a higher, preferred
7415                  * resolution: for example, 3840x2160 in the base EDID
7416                  * preferred timing and 4096x2160 in a DisplayID
7417                  * extension block later on.
7418                  */
7419                 drm_mode_sort(&connector->probed_modes);
7420                 amdgpu_dm_get_native_mode(connector);
7421
7422                 /* Freesync capabilities are reset by calling
7423                  * drm_add_edid_modes() and need to be
7424                  * restored here.
7425                  */
7426                 amdgpu_dm_update_freesync_caps(connector, edid);
7427         } else {
7428                 amdgpu_dm_connector->num_modes = 0;
7429         }
7430 }
7431
7432 static bool is_duplicate_mode(struct amdgpu_dm_connector *aconnector,
7433                               struct drm_display_mode *mode)
7434 {
7435         struct drm_display_mode *m;
7436
7437         list_for_each_entry (m, &aconnector->base.probed_modes, head) {
7438                 if (drm_mode_equal(m, mode))
7439                         return true;
7440         }
7441
7442         return false;
7443 }
7444
7445 static uint add_fs_modes(struct amdgpu_dm_connector *aconnector)
7446 {
7447         const struct drm_display_mode *m;
7448         struct drm_display_mode *new_mode;
7449         uint i;
7450         uint32_t new_modes_count = 0;
7451
7452         /* Standard FPS values
7453          *
7454          * 23.976   - TV/NTSC
7455          * 24       - Cinema
7456          * 25       - TV/PAL
7457          * 29.97    - TV/NTSC
7458          * 30       - TV/NTSC
7459          * 48       - Cinema HFR
7460          * 50       - TV/PAL
7461          * 60       - Commonly used
7462          * 48,72,96 - Multiples of 24
7463          */
7464         const uint32_t common_rates[] = { 23976, 24000, 25000, 29970, 30000,
7465                                          48000, 50000, 60000, 72000, 96000 };
7466
7467         /*
7468          * Find mode with highest refresh rate with the same resolution
7469          * as the preferred mode. Some monitors report a preferred mode
7470          * with lower resolution than the highest refresh rate supported.
7471          */
7472
7473         m = get_highest_refresh_rate_mode(aconnector, true);
7474         if (!m)
7475                 return 0;
7476
7477         for (i = 0; i < ARRAY_SIZE(common_rates); i++) {
7478                 uint64_t target_vtotal, target_vtotal_diff;
7479                 uint64_t num, den;
7480
7481                 if (drm_mode_vrefresh(m) * 1000 < common_rates[i])
7482                         continue;
7483
7484                 if (common_rates[i] < aconnector->min_vfreq * 1000 ||
7485                     common_rates[i] > aconnector->max_vfreq * 1000)
7486                         continue;
7487
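                /*
                 * vrefresh = clock * 1000 / (htotal * vtotal), with clock in
                 * kHz and the rate in mHz, so the vtotal that produces
                 * common_rates[i] is clock * 10^6 / (rate * htotal).
                 */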
7488                 num = (unsigned long long)m->clock * 1000 * 1000;
7489                 den = common_rates[i] * (unsigned long long)m->htotal;
7490                 target_vtotal = div_u64(num, den);
7491                 target_vtotal_diff = target_vtotal - m->vtotal;
7492
7493                 /* Check for illegal modes */
7494                 if (m->vsync_start + target_vtotal_diff < m->vdisplay ||
7495                     m->vsync_end + target_vtotal_diff < m->vsync_start ||
7496                     m->vtotal + target_vtotal_diff < m->vsync_end)
7497                         continue;
7498
7499                 new_mode = drm_mode_duplicate(aconnector->base.dev, m);
7500                 if (!new_mode)
7501                         goto out;
7502
7503                 new_mode->vtotal += (u16)target_vtotal_diff;
7504                 new_mode->vsync_start += (u16)target_vtotal_diff;
7505                 new_mode->vsync_end += (u16)target_vtotal_diff;
7506                 new_mode->type &= ~DRM_MODE_TYPE_PREFERRED;
7507                 new_mode->type |= DRM_MODE_TYPE_DRIVER;
7508
7509                 if (!is_duplicate_mode(aconnector, new_mode)) {
7510                         drm_mode_probed_add(&aconnector->base, new_mode);
7511                         new_modes_count += 1;
7512                 } else
7513                         drm_mode_destroy(aconnector->base.dev, new_mode);
7514         }
7515 out:
7516         return new_modes_count;
7517 }
7518
7519 static void amdgpu_dm_connector_add_freesync_modes(struct drm_connector *connector,
7520                                                    struct edid *edid)
7521 {
7522         struct amdgpu_dm_connector *amdgpu_dm_connector =
7523                 to_amdgpu_dm_connector(connector);
7524
7525         if (!(amdgpu_freesync_vid_mode && edid))
7526                 return;
7527
7528         if (amdgpu_dm_connector->max_vfreq - amdgpu_dm_connector->min_vfreq > 10)
7529                 amdgpu_dm_connector->num_modes +=
7530                         add_fs_modes(amdgpu_dm_connector);
7531 }
7532
7533 static int amdgpu_dm_connector_get_modes(struct drm_connector *connector)
7534 {
7535         struct amdgpu_dm_connector *amdgpu_dm_connector =
7536                         to_amdgpu_dm_connector(connector);
7537         struct drm_encoder *encoder;
7538         struct edid *edid = amdgpu_dm_connector->edid;
7539
7540         encoder = amdgpu_dm_connector_to_encoder(connector);
7541
7542         if (!drm_edid_is_valid(edid)) {
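                /*
                 * No (valid) EDID available: fall back to the standard modes
                 * that need no EDID, up to 640x480.
                 */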
7543                 amdgpu_dm_connector->num_modes =
7544                                 drm_add_modes_noedid(connector, 640, 480);
7545         } else {
7546                 amdgpu_dm_connector_ddc_get_modes(connector, edid);
7547                 amdgpu_dm_connector_add_common_modes(encoder, connector);
7548                 amdgpu_dm_connector_add_freesync_modes(connector, edid);
7549         }
7550         amdgpu_dm_fbc_init(connector);
7551
7552         return amdgpu_dm_connector->num_modes;
7553 }
7554
7555 void amdgpu_dm_connector_init_helper(struct amdgpu_display_manager *dm,
7556                                      struct amdgpu_dm_connector *aconnector,
7557                                      int connector_type,
7558                                      struct dc_link *link,
7559                                      int link_index)
7560 {
7561         struct amdgpu_device *adev = drm_to_adev(dm->ddev);
7562
7563         /*
7564          * Some of the properties below require access to state, like bpc.
7565          * Allocate some default initial connector state with our reset helper.
7566          */
7567         if (aconnector->base.funcs->reset)
7568                 aconnector->base.funcs->reset(&aconnector->base);
7569
7570         aconnector->connector_id = link_index;
7571         aconnector->dc_link = link;
7572         aconnector->base.interlace_allowed = false;
7573         aconnector->base.doublescan_allowed = false;
7574         aconnector->base.stereo_allowed = false;
7575         aconnector->base.dpms = DRM_MODE_DPMS_OFF;
7576         aconnector->hpd.hpd = AMDGPU_HPD_NONE; /* not used */
7577         aconnector->audio_inst = -1;
7578         mutex_init(&aconnector->hpd_lock);
7579
7580         /*
7581          * Configure support for HPD hot plug. connector->polled defaults to 0,
7582          * which means HPD hot plug is not supported.
7583          */
7584         switch (connector_type) {
7585         case DRM_MODE_CONNECTOR_HDMIA:
7586                 aconnector->base.polled = DRM_CONNECTOR_POLL_HPD;
7587                 aconnector->base.ycbcr_420_allowed =
7588                         link->link_enc->features.hdmi_ycbcr420_supported ? true : false;
7589                 break;
7590         case DRM_MODE_CONNECTOR_DisplayPort:
7591                 aconnector->base.polled = DRM_CONNECTOR_POLL_HPD;
7592                 aconnector->base.ycbcr_420_allowed =
7593                         link->link_enc->features.dp_ycbcr420_supported ? true : false;
7594                 break;
7595         case DRM_MODE_CONNECTOR_DVID:
7596                 aconnector->base.polled = DRM_CONNECTOR_POLL_HPD;
7597                 break;
7598         default:
7599                 break;
7600         }
7601
7602         drm_object_attach_property(&aconnector->base.base,
7603                                 dm->ddev->mode_config.scaling_mode_property,
7604                                 DRM_MODE_SCALE_NONE);
7605
7606         drm_object_attach_property(&aconnector->base.base,
7607                                 adev->mode_info.underscan_property,
7608                                 UNDERSCAN_OFF);
7609         drm_object_attach_property(&aconnector->base.base,
7610                                 adev->mode_info.underscan_hborder_property,
7611                                 0);
7612         drm_object_attach_property(&aconnector->base.base,
7613                                 adev->mode_info.underscan_vborder_property,
7614                                 0);
7615
7616         if (!aconnector->mst_port)
7617                 drm_connector_attach_max_bpc_property(&aconnector->base, 8, 16);
7618
7619         /* This defaults to the max in the range, but we want 8bpc for non-edp. */
7620         aconnector->base.state->max_bpc = (connector_type == DRM_MODE_CONNECTOR_eDP) ? 16 : 8;
7621         aconnector->base.state->max_requested_bpc = aconnector->base.state->max_bpc;
7622
7623         if (connector_type == DRM_MODE_CONNECTOR_eDP &&
7624             (dc_is_dmcu_initialized(adev->dm.dc) || adev->dm.dc->ctx->dmub_srv)) {
7625                 drm_object_attach_property(&aconnector->base.base,
7626                                 adev->mode_info.abm_level_property, 0);
7627         }
7628
7629         if (connector_type == DRM_MODE_CONNECTOR_HDMIA ||
7630             connector_type == DRM_MODE_CONNECTOR_DisplayPort ||
7631             connector_type == DRM_MODE_CONNECTOR_eDP) {
7632                 drm_object_attach_property(
7633                         &aconnector->base.base,
7634                         dm->ddev->mode_config.hdr_output_metadata_property, 0);
7635
7636                 if (!aconnector->mst_port)
7637                         drm_connector_attach_vrr_capable_property(&aconnector->base);
7638
7639 #ifdef CONFIG_DRM_AMD_DC_HDCP
7640                 if (adev->dm.hdcp_workqueue)
7641                         drm_connector_attach_content_protection_property(&aconnector->base, true);
7642 #endif
7643         }
7644 }
7645
7646 static int amdgpu_dm_i2c_xfer(struct i2c_adapter *i2c_adap,
7647                               struct i2c_msg *msgs, int num)
7648 {
7649         struct amdgpu_i2c_adapter *i2c = i2c_get_adapdata(i2c_adap);
7650         struct ddc_service *ddc_service = i2c->ddc_service;
7651         struct i2c_command cmd;
7652         int i;
7653         int result = -EIO;
7654
7655         cmd.payloads = kcalloc(num, sizeof(struct i2c_payload), GFP_KERNEL);
7656
7657         if (!cmd.payloads)
7658                 return result;
7659
7660         cmd.number_of_payloads = num;
7661         cmd.engine = I2C_COMMAND_ENGINE_DEFAULT;
7662         cmd.speed = 100;
7663
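        /*
         * Translate each struct i2c_msg into a DC i2c_payload; I2C_M_RD
         * marks a read transaction, everything else is a write.
         */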
7664         for (i = 0; i < num; i++) {
7665                 cmd.payloads[i].write = !(msgs[i].flags & I2C_M_RD);
7666                 cmd.payloads[i].address = msgs[i].addr;
7667                 cmd.payloads[i].length = msgs[i].len;
7668                 cmd.payloads[i].data = msgs[i].buf;
7669         }
7670
7671         if (dc_submit_i2c(
7672                         ddc_service->ctx->dc,
7673                         ddc_service->ddc_pin->hw_info.ddc_channel,
7674                         &cmd))
7675                 result = num;
7676
7677         kfree(cmd.payloads);
7678         return result;
7679 }
7680
7681 static u32 amdgpu_dm_i2c_func(struct i2c_adapter *adap)
7682 {
7683         return I2C_FUNC_I2C | I2C_FUNC_SMBUS_EMUL;
7684 }
7685
7686 static const struct i2c_algorithm amdgpu_dm_i2c_algo = {
7687         .master_xfer = amdgpu_dm_i2c_xfer,
7688         .functionality = amdgpu_dm_i2c_func,
7689 };
7690
7691 static struct amdgpu_i2c_adapter *
7692 create_i2c(struct ddc_service *ddc_service,
7693            int link_index,
7694            int *res)
7695 {
7696         struct amdgpu_device *adev = ddc_service->ctx->driver_context;
7697         struct amdgpu_i2c_adapter *i2c;
7698
7699         i2c = kzalloc(sizeof(struct amdgpu_i2c_adapter), GFP_KERNEL);
7700         if (!i2c)
7701                 return NULL;
7702         i2c->base.owner = THIS_MODULE;
7703         i2c->base.class = I2C_CLASS_DDC;
7704         i2c->base.dev.parent = &adev->pdev->dev;
7705         i2c->base.algo = &amdgpu_dm_i2c_algo;
7706         snprintf(i2c->base.name, sizeof(i2c->base.name), "AMDGPU DM i2c hw bus %d", link_index);
7707         i2c_set_adapdata(&i2c->base, i2c);
7708         i2c->ddc_service = ddc_service;
7709         i2c->ddc_service->ddc_pin->hw_info.ddc_channel = link_index;
7710
7711         return i2c;
7712 }
7713
7714
7715 /*
7716  * Note: this function assumes that dc_link_detect() was called for the
7717  * dc_link which will be represented by this aconnector.
7718  */
7719 static int amdgpu_dm_connector_init(struct amdgpu_display_manager *dm,
7720                                     struct amdgpu_dm_connector *aconnector,
7721                                     uint32_t link_index,
7722                                     struct amdgpu_encoder *aencoder)
7723 {
7724         int res = 0;
7725         int connector_type;
7726         struct dc *dc = dm->dc;
7727         struct dc_link *link = dc_get_link_at_index(dc, link_index);
7728         struct amdgpu_i2c_adapter *i2c;
7729
7730         link->priv = aconnector;
7731
7732         DRM_DEBUG_DRIVER("%s()\n", __func__);
7733
7734         i2c = create_i2c(link->ddc, link->link_index, &res);
7735         if (!i2c) {
7736                 DRM_ERROR("Failed to create i2c adapter data\n");
7737                 return -ENOMEM;
7738         }
7739
7740         aconnector->i2c = i2c;
7741         res = i2c_add_adapter(&i2c->base);
7742
7743         if (res) {
7744                 DRM_ERROR("Failed to register hw i2c %d\n", link->link_index);
7745                 goto out_free;
7746         }
7747
7748         connector_type = to_drm_connector_type(link->connector_signal);
7749
7750         res = drm_connector_init_with_ddc(
7751                         dm->ddev,
7752                         &aconnector->base,
7753                         &amdgpu_dm_connector_funcs,
7754                         connector_type,
7755                         &i2c->base);
7756
7757         if (res) {
7758                 DRM_ERROR("connector_init failed\n");
7759                 aconnector->connector_id = -1;
7760                 goto out_free;
7761         }
7762
7763         drm_connector_helper_add(
7764                         &aconnector->base,
7765                         &amdgpu_dm_connector_helper_funcs);
7766
7767         amdgpu_dm_connector_init_helper(
7768                 dm,
7769                 aconnector,
7770                 connector_type,
7771                 link,
7772                 link_index);
7773
7774         drm_connector_attach_encoder(
7775                 &aconnector->base, &aencoder->base);
7776
7777         if (connector_type == DRM_MODE_CONNECTOR_DisplayPort
7778                 || connector_type == DRM_MODE_CONNECTOR_eDP)
7779                 amdgpu_dm_initialize_dp_connector(dm, aconnector, link->link_index);
7780
7781 out_free:
7782         if (res) {
7783                 kfree(i2c);
7784                 aconnector->i2c = NULL;
7785         }
7786         return res;
7787 }
7788
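/*
 * Return a bitmask with one bit set per possible CRTC, up to the 6 CRTCs
 * supported by the hardware, for use as drm_encoder::possible_crtcs.
 */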
7789 int amdgpu_dm_get_encoder_crtc_mask(struct amdgpu_device *adev)
7790 {
7791         switch (adev->mode_info.num_crtc) {
7792         case 1:
7793                 return 0x1;
7794         case 2:
7795                 return 0x3;
7796         case 3:
7797                 return 0x7;
7798         case 4:
7799                 return 0xf;
7800         case 5:
7801                 return 0x1f;
7802         case 6:
7803         default:
7804                 return 0x3f;
7805         }
7806 }
7807
7808 static int amdgpu_dm_encoder_init(struct drm_device *dev,
7809                                   struct amdgpu_encoder *aencoder,
7810                                   uint32_t link_index)
7811 {
7812         struct amdgpu_device *adev = drm_to_adev(dev);
7813
7814         int res = drm_encoder_init(dev,
7815                                    &aencoder->base,
7816                                    &amdgpu_dm_encoder_funcs,
7817                                    DRM_MODE_ENCODER_TMDS,
7818                                    NULL);
7819
7820         aencoder->base.possible_crtcs = amdgpu_dm_get_encoder_crtc_mask(adev);
7821
7822         if (!res)
7823                 aencoder->encoder_id = link_index;
7824         else
7825                 aencoder->encoder_id = -1;
7826
7827         drm_encoder_helper_add(&aencoder->base, &amdgpu_dm_encoder_helper_funcs);
7828
7829         return res;
7830 }
7831
7832 static void manage_dm_interrupts(struct amdgpu_device *adev,
7833                                  struct amdgpu_crtc *acrtc,
7834                                  bool enable)
7835 {
7836         /*
7837          * We have no guarantee that the frontend index maps to the same
7838          * backend index - some even map to more than one.
7839          *
7840          * TODO: Use a different interrupt or check DC itself for the mapping.
7841          */
7842         int irq_type =
7843                 amdgpu_display_crtc_idx_to_irq_type(
7844                         adev,
7845                         acrtc->crtc_id);
7846
7847         if (enable) {
7848                 drm_crtc_vblank_on(&acrtc->base);
7849                 amdgpu_irq_get(
7850                         adev,
7851                         &adev->pageflip_irq,
7852                         irq_type);
7853 #if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
7854                 amdgpu_irq_get(
7855                         adev,
7856                         &adev->vline0_irq,
7857                         irq_type);
7858 #endif
7859         } else {
7860 #if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
7861                 amdgpu_irq_put(
7862                         adev,
7863                         &adev->vline0_irq,
7864                         irq_type);
7865 #endif
7866                 amdgpu_irq_put(
7867                         adev,
7868                         &adev->pageflip_irq,
7869                         irq_type);
7870                 drm_crtc_vblank_off(&acrtc->base);
7871         }
7872 }
7873
7874 static void dm_update_pflip_irq_state(struct amdgpu_device *adev,
7875                                       struct amdgpu_crtc *acrtc)
7876 {
7877         int irq_type =
7878                 amdgpu_display_crtc_idx_to_irq_type(adev, acrtc->crtc_id);
7879
7880         /*
7881          * This reads the current state for the IRQ and forcibly reapplies
7882          * the setting to hardware.
7883          */
7884         amdgpu_irq_update(adev, &adev->pageflip_irq, irq_type);
7885 }
7886
7887 static bool
7888 is_scaling_state_different(const struct dm_connector_state *dm_state,
7889                            const struct dm_connector_state *old_dm_state)
7890 {
7891         if (dm_state->scaling != old_dm_state->scaling)
7892                 return true;
7893         if (!dm_state->underscan_enable && old_dm_state->underscan_enable) {
7894                 if (old_dm_state->underscan_hborder != 0 && old_dm_state->underscan_vborder != 0)
7895                         return true;
7896         } else  if (dm_state->underscan_enable && !old_dm_state->underscan_enable) {
7897                 if (dm_state->underscan_hborder != 0 && dm_state->underscan_vborder != 0)
7898                         return true;
7899         } else if (dm_state->underscan_hborder != old_dm_state->underscan_hborder ||
7900                    dm_state->underscan_vborder != old_dm_state->underscan_vborder)
7901                 return true;
7902         return false;
7903 }
7904
7905 #ifdef CONFIG_DRM_AMD_DC_HDCP
7906 static bool is_content_protection_different(struct drm_connector_state *state,
7907                                             const struct drm_connector_state *old_state,
7908                                             const struct drm_connector *connector, struct hdcp_workqueue *hdcp_w)
7909 {
7910         struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
7911         struct dm_connector_state *dm_con_state = to_dm_connector_state(connector->state);
7912
7913         /* Handle: Type0/1 change */
7914         if (old_state->hdcp_content_type != state->hdcp_content_type &&
7915             state->content_protection != DRM_MODE_CONTENT_PROTECTION_UNDESIRED) {
7916                 state->content_protection = DRM_MODE_CONTENT_PROTECTION_DESIRED;
7917                 return true;
7918         }
7919
7920         /* CP is being re-enabled, ignore this
7921          *
7922          * Handles:     ENABLED -> DESIRED
7923          */
7924         if (old_state->content_protection == DRM_MODE_CONTENT_PROTECTION_ENABLED &&
7925             state->content_protection == DRM_MODE_CONTENT_PROTECTION_DESIRED) {
7926                 state->content_protection = DRM_MODE_CONTENT_PROTECTION_ENABLED;
7927                 return false;
7928         }
7929
7930         /* S3 resume case: the old state will always be 0 (UNDESIRED) and the restored state will be ENABLED
7931          *
7932          * Handles:     UNDESIRED -> ENABLED
7933          */
7934         if (old_state->content_protection == DRM_MODE_CONTENT_PROTECTION_UNDESIRED &&
7935             state->content_protection == DRM_MODE_CONTENT_PROTECTION_ENABLED)
7936                 state->content_protection = DRM_MODE_CONTENT_PROTECTION_DESIRED;
7937
7938         /* Check if something is connected/enabled; otherwise we would start
7939          * HDCP with nothing connected/enabled (hot-plug, headless S3, DPMS).
7940          *
7941          * Handles:     DESIRED -> DESIRED (Special case)
7942          */
7943         if (dm_con_state->update_hdcp && state->content_protection == DRM_MODE_CONTENT_PROTECTION_DESIRED &&
7944             connector->dpms == DRM_MODE_DPMS_ON && aconnector->dc_sink != NULL) {
7945                 dm_con_state->update_hdcp = false;
7946                 return true;
7947         }
7948
7949         /*
7950          * Handles:     UNDESIRED -> UNDESIRED
7951          *              DESIRED -> DESIRED
7952          *              ENABLED -> ENABLED
7953          */
7954         if (old_state->content_protection == state->content_protection)
7955                 return false;
7956
7957         /*
7958          * Handles:     UNDESIRED -> DESIRED
7959          *              DESIRED -> UNDESIRED
7960          *              ENABLED -> UNDESIRED
7961          */
7962         if (state->content_protection != DRM_MODE_CONTENT_PROTECTION_ENABLED)
7963                 return true;
7964
7965         /*
7966          * Handles:     DESIRED -> ENABLED
7967          */
7968         return false;
7969 }
7970
7971 #endif
7972 static void remove_stream(struct amdgpu_device *adev,
7973                           struct amdgpu_crtc *acrtc,
7974                           struct dc_stream_state *stream)
7975 {
7976         /* this is the update mode case */
7977
7978         acrtc->otg_inst = -1;
7979         acrtc->enabled = false;
7980 }
7981
7982 static int get_cursor_position(struct drm_plane *plane, struct drm_crtc *crtc,
7983                                struct dc_cursor_position *position)
7984 {
7985         struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
7986         int x, y;
7987         int xorigin = 0, yorigin = 0;
7988
7989         if (!crtc || !plane->state->fb)
7990                 return 0;
7991
7992         if ((plane->state->crtc_w > amdgpu_crtc->max_cursor_width) ||
7993             (plane->state->crtc_h > amdgpu_crtc->max_cursor_height)) {
7994                 DRM_ERROR("%s: bad cursor width or height %d x %d\n",
7995                           __func__,
7996                           plane->state->crtc_w,
7997                           plane->state->crtc_h);
7998                 return -EINVAL;
7999         }
8000
8001         x = plane->state->crtc_x;
8002         y = plane->state->crtc_y;
8003
8004         if (x <= -amdgpu_crtc->max_cursor_width ||
8005             y <= -amdgpu_crtc->max_cursor_height)
8006                 return 0;
8007
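        /*
         * The cursor is partially off the top/left edge: clamp the position
         * to 0 and shift the hotspot instead, e.g. crtc_x == -16 becomes
         * x == 0 with xorigin == 16.
         */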
8008         if (x < 0) {
8009                 xorigin = min(-x, amdgpu_crtc->max_cursor_width - 1);
8010                 x = 0;
8011         }
8012         if (y < 0) {
8013                 yorigin = min(-y, amdgpu_crtc->max_cursor_height - 1);
8014                 y = 0;
8015         }
8016         position->enable = true;
8017         position->translate_by_source = true;
8018         position->x = x;
8019         position->y = y;
8020         position->x_hotspot = xorigin;
8021         position->y_hotspot = yorigin;
8022
8023         return 0;
8024 }
8025
8026 static void handle_cursor_update(struct drm_plane *plane,
8027                                  struct drm_plane_state *old_plane_state)
8028 {
8029         struct amdgpu_device *adev = drm_to_adev(plane->dev);
8030         struct amdgpu_framebuffer *afb = to_amdgpu_framebuffer(plane->state->fb);
8031         struct drm_crtc *crtc = afb ? plane->state->crtc : old_plane_state->crtc;
8032         struct dm_crtc_state *crtc_state = crtc ? to_dm_crtc_state(crtc->state) : NULL;
8033         struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
8034         uint64_t address = afb ? afb->address : 0;
8035         struct dc_cursor_position position = {0};
8036         struct dc_cursor_attributes attributes;
8037         int ret;
8038
8039         if (!plane->state->fb && !old_plane_state->fb)
8040                 return;
8041
8042         DC_LOG_CURSOR("%s: crtc_id=%d with size %d to %d\n",
8043                       __func__,
8044                       amdgpu_crtc->crtc_id,
8045                       plane->state->crtc_w,
8046                       plane->state->crtc_h);
8047
8048         ret = get_cursor_position(plane, crtc, &position);
8049         if (ret)
8050                 return;
8051
8052         if (!position.enable) {
8053                 /* turn off cursor */
8054                 if (crtc_state && crtc_state->stream) {
8055                         mutex_lock(&adev->dm.dc_lock);
8056                         dc_stream_set_cursor_position(crtc_state->stream,
8057                                                       &position);
8058                         mutex_unlock(&adev->dm.dc_lock);
8059                 }
8060                 return;
8061         }
8062
8063         amdgpu_crtc->cursor_width = plane->state->crtc_w;
8064         amdgpu_crtc->cursor_height = plane->state->crtc_h;
8065
8066         memset(&attributes, 0, sizeof(attributes));
8067         attributes.address.high_part = upper_32_bits(address);
8068         attributes.address.low_part  = lower_32_bits(address);
8069         attributes.width             = plane->state->crtc_w;
8070         attributes.height            = plane->state->crtc_h;
8071         attributes.color_format      = CURSOR_MODE_COLOR_PRE_MULTIPLIED_ALPHA;
8072         attributes.rotation_angle    = 0;
8073         attributes.attribute_flags.value = 0;
8074
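        /* Convert the byte pitch DRM stores into the pixel pitch DC expects. */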
8075         attributes.pitch = afb->base.pitches[0] / afb->base.format->cpp[0];
8076
8077         if (crtc_state->stream) {
8078                 mutex_lock(&adev->dm.dc_lock);
8079                 if (!dc_stream_set_cursor_attributes(crtc_state->stream,
8080                                                          &attributes))
8081                         DRM_ERROR("DC failed to set cursor attributes\n");
8082
8083                 if (!dc_stream_set_cursor_position(crtc_state->stream,
8084                                                    &position))
8085                         DRM_ERROR("DC failed to set cursor position\n");
8086                 mutex_unlock(&adev->dm.dc_lock);
8087         }
8088 }
8089
8090 static void prepare_flip_isr(struct amdgpu_crtc *acrtc)
8091 {
8092
8093         assert_spin_locked(&acrtc->base.dev->event_lock);
8094         WARN_ON(acrtc->event);
8095
8096         acrtc->event = acrtc->base.state->event;
8097
8098         /* Set the flip status */
8099         acrtc->pflip_status = AMDGPU_FLIP_SUBMITTED;
8100
8101         /* Mark this event as consumed */
8102         acrtc->base.state->event = NULL;
8103
8104         DC_LOG_PFLIP("crtc:%d, pflip_stat:AMDGPU_FLIP_SUBMITTED\n",
8105                      acrtc->crtc_id);
8106 }
8107
8108 static void update_freesync_state_on_stream(
8109         struct amdgpu_display_manager *dm,
8110         struct dm_crtc_state *new_crtc_state,
8111         struct dc_stream_state *new_stream,
8112         struct dc_plane_state *surface,
8113         u32 flip_timestamp_in_us)
8114 {
8115         struct mod_vrr_params vrr_params;
8116         struct dc_info_packet vrr_infopacket = {0};
8117         struct amdgpu_device *adev = dm->adev;
8118         struct amdgpu_crtc *acrtc = to_amdgpu_crtc(new_crtc_state->base.crtc);
8119         unsigned long flags;
8120         bool pack_sdp_v1_3 = false;
8121
8122         if (!new_stream)
8123                 return;
8124
8125         /*
8126          * TODO: Determine why min/max totals and vrefresh can be 0 here.
8127          * For now it's sufficient to just guard against these conditions.
8128          */
8129
8130         if (!new_stream->timing.h_total || !new_stream->timing.v_total)
8131                 return;
8132
8133         spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);
8134         vrr_params = acrtc->dm_irq_params.vrr_params;
8135
8136         if (surface) {
8137                 mod_freesync_handle_preflip(
8138                         dm->freesync_module,
8139                         surface,
8140                         new_stream,
8141                         flip_timestamp_in_us,
8142                         &vrr_params);
8143
8144                 if (adev->family < AMDGPU_FAMILY_AI &&
8145                     amdgpu_dm_vrr_active(new_crtc_state)) {
8146                         mod_freesync_handle_v_update(dm->freesync_module,
8147                                                      new_stream, &vrr_params);
8148
8149                         /* Need to call this before the frame ends. */
8150                         dc_stream_adjust_vmin_vmax(dm->dc,
8151                                                    new_crtc_state->stream,
8152                                                    &vrr_params.adjust);
8153                 }
8154         }
8155
8156         mod_freesync_build_vrr_infopacket(
8157                 dm->freesync_module,
8158                 new_stream,
8159                 &vrr_params,
8160                 PACKET_TYPE_VRR,
8161                 TRANSFER_FUNC_UNKNOWN,
8162                 &vrr_infopacket,
8163                 pack_sdp_v1_3);
8164
8165         new_crtc_state->freesync_timing_changed |=
8166                 (memcmp(&acrtc->dm_irq_params.vrr_params.adjust,
8167                         &vrr_params.adjust,
8168                         sizeof(vrr_params.adjust)) != 0);
8169
8170         new_crtc_state->freesync_vrr_info_changed |=
8171                 (memcmp(&new_crtc_state->vrr_infopacket,
8172                         &vrr_infopacket,
8173                         sizeof(vrr_infopacket)) != 0);
8174
8175         acrtc->dm_irq_params.vrr_params = vrr_params;
8176         new_crtc_state->vrr_infopacket = vrr_infopacket;
8177
8178         new_stream->adjust = acrtc->dm_irq_params.vrr_params.adjust;
8179         new_stream->vrr_infopacket = vrr_infopacket;
8180
8181         if (new_crtc_state->freesync_vrr_info_changed)
8182                 DRM_DEBUG_KMS("VRR packet update: crtc=%u enabled=%d state=%d\n",
8183                               new_crtc_state->base.crtc->base.id,
8184                               (int)new_crtc_state->base.vrr_enabled,
8185                               (int)vrr_params.state);
8186
8187         spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
8188 }
8189
8190 static void update_stream_irq_parameters(
8191         struct amdgpu_display_manager *dm,
8192         struct dm_crtc_state *new_crtc_state)
8193 {
8194         struct dc_stream_state *new_stream = new_crtc_state->stream;
8195         struct mod_vrr_params vrr_params;
8196         struct mod_freesync_config config = new_crtc_state->freesync_config;
8197         struct amdgpu_device *adev = dm->adev;
8198         struct amdgpu_crtc *acrtc = to_amdgpu_crtc(new_crtc_state->base.crtc);
8199         unsigned long flags;
8200
8201         if (!new_stream)
8202                 return;
8203
8204         /*
8205          * TODO: Determine why min/max totals and vrefresh can be 0 here.
8206          * For now it's sufficient to just guard against these conditions.
8207          */
8208         if (!new_stream->timing.h_total || !new_stream->timing.v_total)
8209                 return;
8210
8211         spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);
8212         vrr_params = acrtc->dm_irq_params.vrr_params;
8213
8214         if (new_crtc_state->vrr_supported &&
8215             config.min_refresh_in_uhz &&
8216             config.max_refresh_in_uhz) {
8217                 /*
8218                  * If a freesync-compatible mode was set, config.state will be
8219                  * set in atomic check.
8220                  */
8221                 if (config.state == VRR_STATE_ACTIVE_FIXED && config.fixed_refresh_in_uhz &&
8222                     (!drm_atomic_crtc_needs_modeset(&new_crtc_state->base) ||
8223                      new_crtc_state->freesync_config.state == VRR_STATE_ACTIVE_FIXED)) {
8224                         vrr_params.max_refresh_in_uhz = config.max_refresh_in_uhz;
8225                         vrr_params.min_refresh_in_uhz = config.min_refresh_in_uhz;
8226                         vrr_params.fixed_refresh_in_uhz = config.fixed_refresh_in_uhz;
8227                         vrr_params.state = VRR_STATE_ACTIVE_FIXED;
8228                 } else {
8229                         config.state = new_crtc_state->base.vrr_enabled ?
8230                                                      VRR_STATE_ACTIVE_VARIABLE :
8231                                                      VRR_STATE_INACTIVE;
8232                 }
8233         } else {
8234                 config.state = VRR_STATE_UNSUPPORTED;
8235         }
8236
8237         mod_freesync_build_vrr_params(dm->freesync_module,
8238                                       new_stream,
8239                                       &config, &vrr_params);
8240
8241         new_crtc_state->freesync_timing_changed |=
8242                 (memcmp(&acrtc->dm_irq_params.vrr_params.adjust,
8243                         &vrr_params.adjust, sizeof(vrr_params.adjust)) != 0);
8244
8245         new_crtc_state->freesync_config = config;
8246         /* Copy state for access from DM IRQ handler */
8247         acrtc->dm_irq_params.freesync_config = config;
8248         acrtc->dm_irq_params.active_planes = new_crtc_state->active_planes;
8249         acrtc->dm_irq_params.vrr_params = vrr_params;
8250         spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
8251 }
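
/*
 * The state selection above reduces to a small decision table (a summary
 * of this function's logic, not an external specification):
 *
 *	vrr_supported && valid fixed config   -> VRR_STATE_ACTIVE_FIXED
 *	vrr_supported && base.vrr_enabled     -> VRR_STATE_ACTIVE_VARIABLE
 *	vrr_supported && !base.vrr_enabled    -> VRR_STATE_INACTIVE
 *	!vrr_supported (or no refresh bounds) -> VRR_STATE_UNSUPPORTED
 */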
8252
8253 static void amdgpu_dm_handle_vrr_transition(struct dm_crtc_state *old_state,
8254                                             struct dm_crtc_state *new_state)
8255 {
8256         bool old_vrr_active = amdgpu_dm_vrr_active(old_state);
8257         bool new_vrr_active = amdgpu_dm_vrr_active(new_state);
8258
8259         if (!old_vrr_active && new_vrr_active) {
8260                 /* Transition VRR inactive -> active:
8261                  * While VRR is active, we must not disable the vblank irq, as a
8262                  * re-enable after a disable would compute bogus vblank/pflip
8263                  * timestamps if the re-enable happened inside the display front porch.
8264                  *
8265                  * We also need vupdate irq for the actual core vblank handling
8266                  * at end of vblank.
8267                  */
8268                 dm_set_vupdate_irq(new_state->base.crtc, true);
8269                 drm_crtc_vblank_get(new_state->base.crtc);
8270                 DRM_DEBUG_DRIVER("%s: crtc=%u VRR off->on: Get vblank ref\n",
8271                                  __func__, new_state->base.crtc->base.id);
8272         } else if (old_vrr_active && !new_vrr_active) {
8273                 /* Transition VRR active -> inactive:
8274                  * Allow vblank irq disable again for fixed refresh rate.
8275                  */
8276                 dm_set_vupdate_irq(new_state->base.crtc, false);
8277                 drm_crtc_vblank_put(new_state->base.crtc);
8278                 DRM_DEBUG_DRIVER("%s: crtc=%u VRR on->off: Drop vblank ref\n",
8279                                  __func__, new_state->base.crtc->base.id);
8280         }
8281 }
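
/*
 * A hedged usage sketch: the get/put above is reference counted, so the
 * two transitions must stay strictly paired over the life of a CRTC:
 *
 *	drm_crtc_vblank_get(crtc);	// VRR off -> on: hold vblank irq
 *	...
 *	drm_crtc_vblank_put(crtc);	// VRR on -> off: allow irq disable
 *
 * An unbalanced put would let the vblank irq be disabled while VRR
 * timestamping still depends on it.
 */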
8282
8283 static void amdgpu_dm_commit_cursors(struct drm_atomic_state *state)
8284 {
8285         struct drm_plane *plane;
8286         struct drm_plane_state *old_plane_state;
8287         int i;
8288
8289         /*
8290          * TODO: Make this per-stream so we don't issue redundant updates for
8291          * commits with multiple streams.
8292          */
8293         for_each_old_plane_in_state(state, plane, old_plane_state, i)
8294                 if (plane->type == DRM_PLANE_TYPE_CURSOR)
8295                         handle_cursor_update(plane, old_plane_state);
8296 }
8297
8298 static void amdgpu_dm_commit_planes(struct drm_atomic_state *state,
8299                                     struct dc_state *dc_state,
8300                                     struct drm_device *dev,
8301                                     struct amdgpu_display_manager *dm,
8302                                     struct drm_crtc *pcrtc,
8303                                     bool wait_for_vblank)
8304 {
8305         uint32_t i;
8306         uint64_t timestamp_ns;
8307         struct drm_plane *plane;
8308         struct drm_plane_state *old_plane_state, *new_plane_state;
8309         struct amdgpu_crtc *acrtc_attach = to_amdgpu_crtc(pcrtc);
8310         struct drm_crtc_state *new_pcrtc_state =
8311                         drm_atomic_get_new_crtc_state(state, pcrtc);
8312         struct dm_crtc_state *acrtc_state = to_dm_crtc_state(new_pcrtc_state);
8313         struct dm_crtc_state *dm_old_crtc_state =
8314                         to_dm_crtc_state(drm_atomic_get_old_crtc_state(state, pcrtc));
8315         int planes_count = 0, vpos, hpos;
8316         long r;
8317         unsigned long flags;
8318         struct amdgpu_bo *abo;
8319         uint32_t target_vblank, last_flip_vblank;
8320         bool vrr_active = amdgpu_dm_vrr_active(acrtc_state);
8321         bool pflip_present = false;
8322         struct {
8323                 struct dc_surface_update surface_updates[MAX_SURFACES];
8324                 struct dc_plane_info plane_infos[MAX_SURFACES];
8325                 struct dc_scaling_info scaling_infos[MAX_SURFACES];
8326                 struct dc_flip_addrs flip_addrs[MAX_SURFACES];
8327                 struct dc_stream_update stream_update;
8328         } *bundle;
8329
8330         bundle = kzalloc(sizeof(*bundle), GFP_KERNEL);
8331
8332         if (!bundle) {
8333                 dm_error("Failed to allocate update bundle\n");
8334                 goto cleanup;
8335         }
8336
8337         /*
8338          * Disable the cursor first if we're disabling all the planes.
8339          * It'll remain on the screen after the planes are re-enabled
8340          * if we don't.
8341          */
8342         if (acrtc_state->active_planes == 0)
8343                 amdgpu_dm_commit_cursors(state);
8344
8345         /* update planes when needed */
8346         for_each_oldnew_plane_in_state(state, plane, old_plane_state, new_plane_state, i) {
8347                 struct drm_crtc *crtc = new_plane_state->crtc;
8348                 struct drm_crtc_state *new_crtc_state;
8349                 struct drm_framebuffer *fb = new_plane_state->fb;
8350                 struct amdgpu_framebuffer *afb = (struct amdgpu_framebuffer *)fb;
8351                 bool plane_needs_flip;
8352                 struct dc_plane_state *dc_plane;
8353                 struct dm_plane_state *dm_new_plane_state = to_dm_plane_state(new_plane_state);
8354
8355                 /* Cursor plane is handled after stream updates */
8356                 if (plane->type == DRM_PLANE_TYPE_CURSOR)
8357                         continue;
8358
8359                 if (!fb || !crtc || pcrtc != crtc)
8360                         continue;
8361
8362                 new_crtc_state = drm_atomic_get_new_crtc_state(state, crtc);
8363                 if (!new_crtc_state->active)
8364                         continue;
8365
8366                 dc_plane = dm_new_plane_state->dc_state;
8367
8368                 bundle->surface_updates[planes_count].surface = dc_plane;
8369                 if (new_pcrtc_state->color_mgmt_changed) {
8370                         bundle->surface_updates[planes_count].gamma = dc_plane->gamma_correction;
8371                         bundle->surface_updates[planes_count].in_transfer_func = dc_plane->in_transfer_func;
8372                         bundle->surface_updates[planes_count].gamut_remap_matrix = &dc_plane->gamut_remap_matrix;
8373                 }
8374
8375                 fill_dc_scaling_info(new_plane_state,
8376                                      &bundle->scaling_infos[planes_count]);
8377
8378                 bundle->surface_updates[planes_count].scaling_info =
8379                         &bundle->scaling_infos[planes_count];
8380
8381                 plane_needs_flip = old_plane_state->fb && new_plane_state->fb;
8382
8383                 pflip_present = pflip_present || plane_needs_flip;
8384
8385                 if (!plane_needs_flip) {
8386                         planes_count += 1;
8387                         continue;
8388                 }
8389
8390                 abo = gem_to_amdgpu_bo(fb->obj[0]);
8391
8392                 /*
8393                  * Wait for all fences on this FB. Do limited wait to avoid
8394                  * deadlock during GPU reset when this fence will not signal
8395                  * but we hold reservation lock for the BO.
8396                  */
8397                 r = dma_resv_wait_timeout_rcu(abo->tbo.base.resv, true,
8398                                               false,
8399                                               msecs_to_jiffies(5000));
8400                 if (unlikely(r <= 0))
8401                         DRM_ERROR("Waiting for fences timed out!\n");
8402
8403                 fill_dc_plane_info_and_addr(
8404                         dm->adev, new_plane_state,
8405                         afb->tiling_flags,
8406                         &bundle->plane_infos[planes_count],
8407                         &bundle->flip_addrs[planes_count].address,
8408                         afb->tmz_surface, false);
8409
8410                 DRM_DEBUG_ATOMIC("plane: id=%d dcc_en=%d\n",
8411                                  new_plane_state->plane->index,
8412                                  bundle->plane_infos[planes_count].dcc.enable);
8413
8414                 bundle->surface_updates[planes_count].plane_info =
8415                         &bundle->plane_infos[planes_count];
8416
8417                 /*
8418                  * Only allow immediate flips for fast updates that don't
8419                  * change FB pitch, DCC state, rotation or mirroring.
8420                  */
8421                 bundle->flip_addrs[planes_count].flip_immediate =
8422                         crtc->state->async_flip &&
8423                         acrtc_state->update_type == UPDATE_TYPE_FAST;
8424
8425                 timestamp_ns = ktime_get_ns();
8426                 bundle->flip_addrs[planes_count].flip_timestamp_in_us = div_u64(timestamp_ns, 1000);
8427                 bundle->surface_updates[planes_count].flip_addr = &bundle->flip_addrs[planes_count];
8428                 bundle->surface_updates[planes_count].surface = dc_plane;
8429
8430                 if (!bundle->surface_updates[planes_count].surface) {
8431                         DRM_ERROR("No surface for CRTC: id=%d\n",
8432                                         acrtc_attach->crtc_id);
8433                         continue;
8434                 }
8435
8436                 if (plane == pcrtc->primary)
8437                         update_freesync_state_on_stream(
8438                                 dm,
8439                                 acrtc_state,
8440                                 acrtc_state->stream,
8441                                 dc_plane,
8442                                 bundle->flip_addrs[planes_count].flip_timestamp_in_us);
8443
8444                 DRM_DEBUG_ATOMIC("%s Flipping to hi: 0x%x, low: 0x%x\n",
8445                                  __func__,
8446                                  bundle->flip_addrs[planes_count].address.grph.addr.high_part,
8447                                  bundle->flip_addrs[planes_count].address.grph.addr.low_part);
8448
8449                 planes_count += 1;
8450
8451         }
8452
8453         if (pflip_present) {
8454                 if (!vrr_active) {
8455                         /* Use old throttling in non-vrr fixed refresh rate mode
8456                          * to keep flip scheduling based on target vblank counts
8457                          * working in a backwards compatible way, e.g., for
8458                          * clients using the GLX_OML_sync_control extension or
8459                          * DRI3/Present extension with defined target_msc.
8460                          */
8461                         last_flip_vblank = amdgpu_get_vblank_counter_kms(pcrtc);
8462                 } else {
8464                         /* For variable refresh rate mode only:
8465                          * Get vblank of last completed flip to avoid > 1 vrr
8466                          * flips per video frame by use of throttling, but allow
8467                          * flip programming anywhere in the possibly large
8468                          * variable vrr vblank interval for fine-grained flip
8469                          * timing control and more opportunity to avoid stutter
8470                          * on late submission of flips.
8471                          */
8472                         spin_lock_irqsave(&pcrtc->dev->event_lock, flags);
8473                         last_flip_vblank = acrtc_attach->dm_irq_params.last_flip_vblank;
8474                         spin_unlock_irqrestore(&pcrtc->dev->event_lock, flags);
8475                 }
8476
8477                 target_vblank = last_flip_vblank + wait_for_vblank;
8478
8479                 /*
8480                  * Wait until we're out of the vertical blank period before the one
8481                  * targeted by the flip
8482                  */
8483                 while ((acrtc_attach->enabled &&
8484                         (amdgpu_display_get_crtc_scanoutpos(dm->ddev, acrtc_attach->crtc_id,
8485                                                             0, &vpos, &hpos, NULL,
8486                                                             NULL, &pcrtc->hwmode)
8487                          & (DRM_SCANOUTPOS_VALID | DRM_SCANOUTPOS_IN_VBLANK)) ==
8488                         (DRM_SCANOUTPOS_VALID | DRM_SCANOUTPOS_IN_VBLANK) &&
8489                         (int)(target_vblank -
8490                           amdgpu_get_vblank_counter_kms(pcrtc)) > 0)) {
8491                         usleep_range(1000, 1100);
8492                 }
8493
8494                 /**
8495                  * Prepare the flip event for the pageflip interrupt to handle.
8496                  *
8497                  * This only works in the case where we've already turned on the
8498                  * appropriate hardware blocks (e.g. HUBP) so in the transition case
8499                  * from 0 -> n planes we have to skip a hardware generated event
8500                  * and rely on sending it from software.
8501                  */
8502                 if (acrtc_attach->base.state->event &&
8503                     acrtc_state->active_planes > 0) {
8504                         drm_crtc_vblank_get(pcrtc);
8505
8506                         spin_lock_irqsave(&pcrtc->dev->event_lock, flags);
8507
8508                         WARN_ON(acrtc_attach->pflip_status != AMDGPU_FLIP_NONE);
8509                         prepare_flip_isr(acrtc_attach);
8510
8511                         spin_unlock_irqrestore(&pcrtc->dev->event_lock, flags);
8512                 }
8513
8514                 if (acrtc_state->stream) {
8515                         if (acrtc_state->freesync_vrr_info_changed)
8516                                 bundle->stream_update.vrr_infopacket =
8517                                         &acrtc_state->stream->vrr_infopacket;
8518                 }
8519         }
8520
8521         /* Update the planes if changed or disable if we don't have any. */
8522         if ((planes_count || acrtc_state->active_planes == 0) &&
8523                 acrtc_state->stream) {
8524                 bundle->stream_update.stream = acrtc_state->stream;
8525                 if (new_pcrtc_state->mode_changed) {
8526                         bundle->stream_update.src = acrtc_state->stream->src;
8527                         bundle->stream_update.dst = acrtc_state->stream->dst;
8528                 }
8529
8530                 if (new_pcrtc_state->color_mgmt_changed) {
8531                         /*
8532                          * TODO: This isn't fully correct since we've actually
8533                          * already modified the stream in place.
8534                          */
8535                         bundle->stream_update.gamut_remap =
8536                                 &acrtc_state->stream->gamut_remap_matrix;
8537                         bundle->stream_update.output_csc_transform =
8538                                 &acrtc_state->stream->csc_color_matrix;
8539                         bundle->stream_update.out_transfer_func =
8540                                 acrtc_state->stream->out_transfer_func;
8541                 }
8542
8543                 acrtc_state->stream->abm_level = acrtc_state->abm_level;
8544                 if (acrtc_state->abm_level != dm_old_crtc_state->abm_level)
8545                         bundle->stream_update.abm_level = &acrtc_state->abm_level;
8546
8547                 /*
8548                  * If FreeSync state on the stream has changed then we need to
8549                  * re-adjust the min/max bounds now that DC doesn't handle this
8550                  * as part of commit.
8551                  */
8552                 if (is_dc_timing_adjust_needed(dm_old_crtc_state, acrtc_state)) {
8553                         spin_lock_irqsave(&pcrtc->dev->event_lock, flags);
8554                         dc_stream_adjust_vmin_vmax(
8555                                 dm->dc, acrtc_state->stream,
8556                                 &acrtc_attach->dm_irq_params.vrr_params.adjust);
8557                         spin_unlock_irqrestore(&pcrtc->dev->event_lock, flags);
8558                 }
8559                 mutex_lock(&dm->dc_lock);
8560                 if ((acrtc_state->update_type > UPDATE_TYPE_FAST) &&
8561                                 acrtc_state->stream->link->psr_settings.psr_allow_active)
8562                         amdgpu_dm_psr_disable(acrtc_state->stream);
8563
8564                 dc_commit_updates_for_stream(dm->dc,
8565                                                      bundle->surface_updates,
8566                                                      planes_count,
8567                                                      acrtc_state->stream,
8568                                                      &bundle->stream_update,
8569                                                      dc_state);
8570
8571                 /**
8572                  * Enable or disable the interrupts on the backend.
8573                  *
8574                  * Most pipes are put into power gating when unused.
8575                  *
8576                  * When power gating is enabled on a pipe we lose the
8577                  * interrupt enablement state when power gating is disabled.
8578                  *
8579                  * So we need to update the IRQ control state in hardware
8580                  * whenever the pipe turns on (since it could be previously
8581                  * power gated) or off (since some pipes can't be power gated
8582                  * on some ASICs).
8583                  */
8584                 if (dm_old_crtc_state->active_planes != acrtc_state->active_planes)
8585                         dm_update_pflip_irq_state(drm_to_adev(dev),
8586                                                   acrtc_attach);
8587
8588                 if ((acrtc_state->update_type > UPDATE_TYPE_FAST) &&
8589                                 acrtc_state->stream->link->psr_settings.psr_version != DC_PSR_VERSION_UNSUPPORTED &&
8590                                 !acrtc_state->stream->link->psr_settings.psr_feature_enabled)
8591                         amdgpu_dm_link_setup_psr(acrtc_state->stream);
8592                 else if ((acrtc_state->update_type == UPDATE_TYPE_FAST) &&
8593                                 acrtc_state->stream->link->psr_settings.psr_feature_enabled &&
8594                                 !acrtc_state->stream->link->psr_settings.psr_allow_active) {
8595                         amdgpu_dm_psr_enable(acrtc_state->stream);
8596                 }
8597
8598                 mutex_unlock(&dm->dc_lock);
8599         }
8600
8601         /*
8602          * Update cursor state *after* programming all the planes.
8603          * This avoids redundant programming in the case where we're going
8604          * to be disabling a single plane, since those pipes are being disabled.
8605          */
8606         if (acrtc_state->active_planes)
8607                 amdgpu_dm_commit_cursors(state);
8608
8609 cleanup:
8610         kfree(bundle);
8611 }
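
/*
 * A worked example of the flip throttling above (illustrative numbers):
 * with wait_for_vblank == true and last_flip_vblank == 1000, target_vblank
 * is 1001. The wait loop then sleeps in ~1 ms steps for as long as scanout
 * is still inside a vertical blank period and
 *
 *	(int)(target_vblank - amdgpu_get_vblank_counter_kms(pcrtc)) > 0
 *
 * so flip programming resumes either outside vblank or once vblank 1001
 * has started, which is intended to keep at most one flip per frame in
 * the fixed refresh path.
 */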
8612
8613 static void amdgpu_dm_commit_audio(struct drm_device *dev,
8614                                    struct drm_atomic_state *state)
8615 {
8616         struct amdgpu_device *adev = drm_to_adev(dev);
8617         struct amdgpu_dm_connector *aconnector;
8618         struct drm_connector *connector;
8619         struct drm_connector_state *old_con_state, *new_con_state;
8620         struct drm_crtc_state *new_crtc_state;
8621         struct dm_crtc_state *new_dm_crtc_state;
8622         const struct dc_stream_status *status;
8623         int i, inst;
8624
8625         /* Notify device removals. */
8626         for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) {
8627                 if (old_con_state->crtc != new_con_state->crtc) {
8628                         /* CRTC changes require notification. */
8629                         goto notify;
8630                 }
8631
8632                 if (!new_con_state->crtc)
8633                         continue;
8634
8635                 new_crtc_state = drm_atomic_get_new_crtc_state(
8636                         state, new_con_state->crtc);
8637
8638                 if (!new_crtc_state)
8639                         continue;
8640
8641                 if (!drm_atomic_crtc_needs_modeset(new_crtc_state))
8642                         continue;
8643
8644         notify:
8645                 aconnector = to_amdgpu_dm_connector(connector);
8646
8647                 mutex_lock(&adev->dm.audio_lock);
8648                 inst = aconnector->audio_inst;
8649                 aconnector->audio_inst = -1;
8650                 mutex_unlock(&adev->dm.audio_lock);
8651
8652                 amdgpu_dm_audio_eld_notify(adev, inst);
8653         }
8654
8655         /* Notify audio device additions. */
8656         for_each_new_connector_in_state(state, connector, new_con_state, i) {
8657                 if (!new_con_state->crtc)
8658                         continue;
8659
8660                 new_crtc_state = drm_atomic_get_new_crtc_state(
8661                         state, new_con_state->crtc);
8662
8663                 if (!new_crtc_state)
8664                         continue;
8665
8666                 if (!drm_atomic_crtc_needs_modeset(new_crtc_state))
8667                         continue;
8668
8669                 new_dm_crtc_state = to_dm_crtc_state(new_crtc_state);
8670                 if (!new_dm_crtc_state->stream)
8671                         continue;
8672
8673                 status = dc_stream_get_status(new_dm_crtc_state->stream);
8674                 if (!status)
8675                         continue;
8676
8677                 aconnector = to_amdgpu_dm_connector(connector);
8678
8679                 mutex_lock(&adev->dm.audio_lock);
8680                 inst = status->audio_inst;
8681                 aconnector->audio_inst = inst;
8682                 mutex_unlock(&adev->dm.audio_lock);
8683
8684                 amdgpu_dm_audio_eld_notify(adev, inst);
8685         }
8686 }
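
/*
 * A minimal sketch of the notification pattern used above (it mirrors the
 * code, not an additional API): removals are sent before additions so a
 * connector that moved between CRTCs is torn down first and then
 * re-registered with the instance reported by the new stream status:
 *
 *	mutex_lock(&adev->dm.audio_lock);
 *	aconnector->audio_inst = status->audio_inst;
 *	mutex_unlock(&adev->dm.audio_lock);
 *	amdgpu_dm_audio_eld_notify(adev, status->audio_inst);
 */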
8687
8688 /**
8689  * amdgpu_dm_crtc_copy_transient_flags - copy mirrored flags from DRM to DC
8690  * @crtc_state: the DRM CRTC state
8691  * @stream_state: the DC stream state
8692  *
8693  * Copy the mirrored transient state flags from DRM to DC. It is used to bring
8694  * a dc_stream_state's flags in sync with a drm_crtc_state's flags.
8695  */
8696 static void amdgpu_dm_crtc_copy_transient_flags(struct drm_crtc_state *crtc_state,
8697                                                 struct dc_stream_state *stream_state)
8698 {
8699         stream_state->mode_changed = drm_atomic_crtc_needs_modeset(crtc_state);
8700 }
8701
8702 /**
8703  * amdgpu_dm_atomic_commit_tail() - AMDgpu DM's commit tail implementation.
8704  * @state: The atomic state to commit
8705  *
8706  * This will tell DC to commit the constructed DC state from atomic_check,
8707  * programming the hardware. Any failure here implies a hardware failure, since
8708  * atomic check should have filtered anything non-kosher.
8709  */
8710 static void amdgpu_dm_atomic_commit_tail(struct drm_atomic_state *state)
8711 {
8712         struct drm_device *dev = state->dev;
8713         struct amdgpu_device *adev = drm_to_adev(dev);
8714         struct amdgpu_display_manager *dm = &adev->dm;
8715         struct dm_atomic_state *dm_state;
8716         struct dc_state *dc_state = NULL, *dc_state_temp = NULL;
8717         uint32_t i, j;
8718         struct drm_crtc *crtc;
8719         struct drm_crtc_state *old_crtc_state, *new_crtc_state;
8720         unsigned long flags;
8721         bool wait_for_vblank = true;
8722         struct drm_connector *connector;
8723         struct drm_connector_state *old_con_state, *new_con_state;
8724         struct dm_crtc_state *dm_old_crtc_state, *dm_new_crtc_state;
8725         int crtc_disable_count = 0;
8726         bool mode_set_reset_required = false;
8727
8728         trace_amdgpu_dm_atomic_commit_tail_begin(state);
8729
8730         drm_atomic_helper_update_legacy_modeset_state(dev, state);
8731
8732         dm_state = dm_atomic_get_new_state(state);
8733         if (dm_state && dm_state->context) {
8734                 dc_state = dm_state->context;
8735         } else {
8736                 /* No state changes, retain current state. */
8737                 dc_state_temp = dc_create_state(dm->dc);
8738                 ASSERT(dc_state_temp);
8739                 dc_state = dc_state_temp;
8740                 dc_resource_state_copy_construct_current(dm->dc, dc_state);
8741         }
8742
8743         for_each_oldnew_crtc_in_state (state, crtc, old_crtc_state,
8744                                        new_crtc_state, i) {
8745                 struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
8746
8747                 dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
8748
8749                 if (old_crtc_state->active &&
8750                     (!new_crtc_state->active ||
8751                      drm_atomic_crtc_needs_modeset(new_crtc_state))) {
8752                         manage_dm_interrupts(adev, acrtc, false);
8753                         dc_stream_release(dm_old_crtc_state->stream);
8754                 }
8755         }
8756
8757         drm_atomic_helper_calc_timestamping_constants(state);
8758
8759         /* update changed items */
8760         for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
8761                 struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
8762
8763                 dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
8764                 dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
8765
8766                 DRM_DEBUG_ATOMIC(
8767                         "amdgpu_crtc id:%d crtc_state_flags: enable:%d, active:%d, "
8768                         "planes_changed:%d, mode_changed:%d, active_changed:%d, "
8769                         "connectors_changed:%d\n",
8770                         acrtc->crtc_id,
8771                         new_crtc_state->enable,
8772                         new_crtc_state->active,
8773                         new_crtc_state->planes_changed,
8774                         new_crtc_state->mode_changed,
8775                         new_crtc_state->active_changed,
8776                         new_crtc_state->connectors_changed);
8777
8778                 /* Disable cursor if disabling crtc */
8779                 if (old_crtc_state->active && !new_crtc_state->active) {
8780                         struct dc_cursor_position position;
8781
8782                         memset(&position, 0, sizeof(position));
8783                         mutex_lock(&dm->dc_lock);
8784                         dc_stream_set_cursor_position(dm_old_crtc_state->stream, &position);
8785                         mutex_unlock(&dm->dc_lock);
8786                 }
8787
8788                 /* Copy all transient state flags into dc state */
8789                 if (dm_new_crtc_state->stream) {
8790                         amdgpu_dm_crtc_copy_transient_flags(&dm_new_crtc_state->base,
8791                                                             dm_new_crtc_state->stream);
8792                 }
8793
8794                 /* handles headless hotplug case, updating new_state and
8795                  * aconnector as needed
8796                  */
8797
8798                 if (modeset_required(new_crtc_state, dm_new_crtc_state->stream, dm_old_crtc_state->stream)) {
8799
8800                         DRM_DEBUG_ATOMIC("Atomic commit: SET crtc id %d: [%p]\n", acrtc->crtc_id, acrtc);
8801
8802                         if (!dm_new_crtc_state->stream) {
8803                                 /*
8804                                  * This could happen because of issues with
8805                                  * delivery of userspace notifications.
8806                                  * In this case userspace tries to set a mode
8807                                  * on a display which is in fact disconnected.
8808                                  * dc_sink is NULL on the aconnector in this case.
8809                                  * We expect a mode reset to come soon.
8810                                  *
8811                                  * This can also happen when an unplug is done
8812                                  * during the resume sequence.
8813                                  *
8814                                  * In this case, we want to pretend we still
8815                                  * have a sink to keep the pipe running so that
8816                                  * hw state is consistent with the sw state.
8817                                  */
8818                                 DRM_DEBUG_DRIVER("%s: Failed to create new stream for crtc %d\n",
8819                                                 __func__, acrtc->base.base.id);
8820                                 continue;
8821                         }
8822
8823                         if (dm_old_crtc_state->stream)
8824                                 remove_stream(adev, acrtc, dm_old_crtc_state->stream);
8825
8826                         pm_runtime_get_noresume(dev->dev);
8827
8828                         acrtc->enabled = true;
8829                         acrtc->hw_mode = new_crtc_state->mode;
8830                         crtc->hwmode = new_crtc_state->mode;
8831                         mode_set_reset_required = true;
8832                 } else if (modereset_required(new_crtc_state)) {
8833                         DRM_DEBUG_ATOMIC("Atomic commit: RESET. crtc id %d:[%p]\n", acrtc->crtc_id, acrtc);
8834                         /* i.e. reset mode */
8835                         if (dm_old_crtc_state->stream)
8836                                 remove_stream(adev, acrtc, dm_old_crtc_state->stream);
8837
8838                         mode_set_reset_required = true;
8839                 }
8840         } /* for_each_crtc_in_state() */
8841
8842         if (dc_state) {
8843                 /* if there is a mode set or reset, disable eDP PSR */
8844                 if (mode_set_reset_required)
8845                         amdgpu_dm_psr_disable_all(dm);
8846
8847                 dm_enable_per_frame_crtc_master_sync(dc_state);
8848                 mutex_lock(&dm->dc_lock);
8849                 WARN_ON(!dc_commit_state(dm->dc, dc_state));
8850 #if defined(CONFIG_DRM_AMD_DC_DCN)
8851                 /* Allow idle optimization when vblank count is 0 for display off */
8852                 if (dm->active_vblank_irq_count == 0)
8853                         dc_allow_idle_optimizations(dm->dc, true);
8854 #endif
8855                 mutex_unlock(&dm->dc_lock);
8856         }
8857
8858         for_each_new_crtc_in_state(state, crtc, new_crtc_state, i) {
8859                 struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
8860
8861                 dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
8862
8863                 if (dm_new_crtc_state->stream != NULL) {
8864                         const struct dc_stream_status *status =
8865                                         dc_stream_get_status(dm_new_crtc_state->stream);
8866
8867                         if (!status)
8868                                 status = dc_stream_get_status_from_state(dc_state,
8869                                                                          dm_new_crtc_state->stream);
8870                         if (!status)
8871                                 DC_ERR("got no status for stream %p on acrtc%p\n", dm_new_crtc_state->stream, acrtc);
8872                         else
8873                                 acrtc->otg_inst = status->primary_otg_inst;
8874                 }
8875         }
8876 #ifdef CONFIG_DRM_AMD_DC_HDCP
8877         for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) {
8878                 struct dm_connector_state *dm_new_con_state = to_dm_connector_state(new_con_state);
8879                 struct amdgpu_crtc *acrtc = to_amdgpu_crtc(dm_new_con_state->base.crtc);
8880                 struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
8881
8882                 new_crtc_state = NULL;
8883
8884                 if (acrtc)
8885                         new_crtc_state = drm_atomic_get_new_crtc_state(state, &acrtc->base);
8886
8887                 dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
8888
8889                 if (dm_new_crtc_state && dm_new_crtc_state->stream == NULL &&
8890                     connector->state->content_protection == DRM_MODE_CONTENT_PROTECTION_ENABLED) {
8891                         hdcp_reset_display(adev->dm.hdcp_workqueue, aconnector->dc_link->link_index);
8892                         new_con_state->content_protection = DRM_MODE_CONTENT_PROTECTION_DESIRED;
8893                         dm_new_con_state->update_hdcp = true;
8894                         continue;
8895                 }
8896
8897                 if (is_content_protection_different(new_con_state, old_con_state, connector, adev->dm.hdcp_workqueue))
8898                         hdcp_update_display(
8899                                 adev->dm.hdcp_workqueue, aconnector->dc_link->link_index, aconnector,
8900                                 new_con_state->hdcp_content_type,
8901                                 new_con_state->content_protection == DRM_MODE_CONTENT_PROTECTION_DESIRED);
8902         }
8903 #endif
8904
8905         /* Handle connector state changes */
8906         for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) {
8907                 struct dm_connector_state *dm_new_con_state = to_dm_connector_state(new_con_state);
8908                 struct dm_connector_state *dm_old_con_state = to_dm_connector_state(old_con_state);
8909                 struct amdgpu_crtc *acrtc = to_amdgpu_crtc(dm_new_con_state->base.crtc);
8910                 struct dc_surface_update dummy_updates[MAX_SURFACES];
8911                 struct dc_stream_update stream_update;
8912                 struct dc_info_packet hdr_packet;
8913                 struct dc_stream_status *status = NULL;
8914                 bool abm_changed, hdr_changed, scaling_changed;
8915
8916                 memset(&dummy_updates, 0, sizeof(dummy_updates));
8917                 memset(&stream_update, 0, sizeof(stream_update));
8918
8919                 if (acrtc) {
8920                         new_crtc_state = drm_atomic_get_new_crtc_state(state, &acrtc->base);
8921                         old_crtc_state = drm_atomic_get_old_crtc_state(state, &acrtc->base);
8922                 }
8923
8924                 /* Skip any modesets/resets */
8925                 if (!acrtc || drm_atomic_crtc_needs_modeset(new_crtc_state))
8926                         continue;
8927
8928                 dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
8929                 dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
8930
8931                 scaling_changed = is_scaling_state_different(dm_new_con_state,
8932                                                              dm_old_con_state);
8933
8934                 abm_changed = dm_new_crtc_state->abm_level !=
8935                               dm_old_crtc_state->abm_level;
8936
8937                 hdr_changed =
8938                         is_hdr_metadata_different(old_con_state, new_con_state);
8939
8940                 if (!scaling_changed && !abm_changed && !hdr_changed)
8941                         continue;
8942
8943                 stream_update.stream = dm_new_crtc_state->stream;
8944                 if (scaling_changed) {
8945                         update_stream_scaling_settings(&dm_new_con_state->base.crtc->mode,
8946                                         dm_new_con_state, dm_new_crtc_state->stream);
8947
8948                         stream_update.src = dm_new_crtc_state->stream->src;
8949                         stream_update.dst = dm_new_crtc_state->stream->dst;
8950                 }
8951
8952                 if (abm_changed) {
8953                         dm_new_crtc_state->stream->abm_level = dm_new_crtc_state->abm_level;
8954
8955                         stream_update.abm_level = &dm_new_crtc_state->abm_level;
8956                 }
8957
8958                 if (hdr_changed) {
8959                         fill_hdr_info_packet(new_con_state, &hdr_packet);
8960                         stream_update.hdr_static_metadata = &hdr_packet;
8961                 }
8962
8963                 status = dc_stream_get_status(dm_new_crtc_state->stream);
8964                 if (WARN_ON(!status || !status->plane_count))
8965                         continue;
8966
8967                 /*
8968                  * TODO: DC refuses to perform stream updates without a dc_surface_update.
8969                  * Here we create an empty update on each plane.
8970                  * To fix this, DC should permit updating only stream properties.
8971                  */
8972                 for (j = 0; j < status->plane_count; j++)
8973                         dummy_updates[j].surface = status->plane_states[0];
8974
8975
8976                 mutex_lock(&dm->dc_lock);
8977                 dc_commit_updates_for_stream(dm->dc,
8978                                                      dummy_updates,
8979                                                      status->plane_count,
8980                                                      dm_new_crtc_state->stream,
8981                                                      &stream_update,
8982                                                      dc_state);
8983                 mutex_unlock(&dm->dc_lock);
8984         }
8985
8986         /* Count number of newly disabled CRTCs for dropping PM refs later. */
8987         for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state,
8988                                       new_crtc_state, i) {
8989                 if (old_crtc_state->active && !new_crtc_state->active)
8990                         crtc_disable_count++;
8991
8992                 dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
8993                 dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
8994
8995                 /* For freesync config update on crtc state and params for irq */
8996                 update_stream_irq_parameters(dm, dm_new_crtc_state);
8997
8998                 /* Handle vrr on->off / off->on transitions */
8999                 amdgpu_dm_handle_vrr_transition(dm_old_crtc_state,
9000                                                 dm_new_crtc_state);
9001         }
9002
9003         /**
9004          * Enable interrupts for CRTCs that are newly enabled or went through
9005          * a modeset. It was intentionally deferred until after the front end
9006          * state was modified to wait until the OTG was on and so the IRQ
9007          * handlers didn't access stale or invalid state.
9008          */
9009         for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
9010                 struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
9011 #ifdef CONFIG_DEBUG_FS
9012                 bool configure_crc = false;
9013                 enum amdgpu_dm_pipe_crc_source cur_crc_src;
9014 #if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
9015                 struct crc_rd_work *crc_rd_wrk = dm->crc_rd_wrk;
9016 #endif
9017                 spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);
9018                 cur_crc_src = acrtc->dm_irq_params.crc_src;
9019                 spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
9020 #endif
9021                 dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
9022
9023                 if (new_crtc_state->active &&
9024                     (!old_crtc_state->active ||
9025                      drm_atomic_crtc_needs_modeset(new_crtc_state))) {
9026                         dc_stream_retain(dm_new_crtc_state->stream);
9027                         acrtc->dm_irq_params.stream = dm_new_crtc_state->stream;
9028                         manage_dm_interrupts(adev, acrtc, true);
9029
9030 #ifdef CONFIG_DEBUG_FS
9031                         /**
9032                          * Frontend may have changed so reapply the CRC capture
9033                          * settings for the stream.
9034                          */
9035                         dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
9036
9037                         if (amdgpu_dm_is_valid_crc_source(cur_crc_src)) {
9038                                 configure_crc = true;
9039 #if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
9040                                 if (amdgpu_dm_crc_window_is_activated(crtc)) {
9041                                         spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);
9042                                         acrtc->dm_irq_params.crc_window.update_win = true;
9043                                         acrtc->dm_irq_params.crc_window.skip_frame_cnt = 2;
9044                                         spin_lock_irq(&crc_rd_wrk->crc_rd_work_lock);
9045                                         crc_rd_wrk->crtc = crtc;
9046                                         spin_unlock_irq(&crc_rd_wrk->crc_rd_work_lock);
9047                                         spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
9048                                 }
9049 #endif
9050                         }
9051
9052                         if (configure_crc)
9053                                 if (amdgpu_dm_crtc_configure_crc_source(
9054                                         crtc, dm_new_crtc_state, cur_crc_src))
9055                                         DRM_DEBUG_DRIVER("Failed to configure crc source\n");
9056 #endif
9057                 }
9058         }
9059
9060         for_each_new_crtc_in_state(state, crtc, new_crtc_state, j)
9061                 if (new_crtc_state->async_flip)
9062                         wait_for_vblank = false;
9063
9064         /* update planes when needed per crtc */
9065         for_each_new_crtc_in_state(state, crtc, new_crtc_state, j) {
9066                 dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
9067
9068                 if (dm_new_crtc_state->stream)
9069                         amdgpu_dm_commit_planes(state, dc_state, dev,
9070                                                 dm, crtc, wait_for_vblank);
9071         }
9072
9073         /* Update audio instances for each connector. */
9074         amdgpu_dm_commit_audio(dev, state);
9075
9076         /*
9077          * send vblank event on all events not handled in flip and
9078          * mark consumed event for drm_atomic_helper_commit_hw_done
9079          */
9080         spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);
9081         for_each_new_crtc_in_state(state, crtc, new_crtc_state, i) {
9082
9083                 if (new_crtc_state->event)
9084                         drm_send_event_locked(dev, &new_crtc_state->event->base);
9085
9086                 new_crtc_state->event = NULL;
9087         }
9088         spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
9089
9090         /* Signal HW programming completion */
9091         drm_atomic_helper_commit_hw_done(state);
9092
9093         if (wait_for_vblank)
9094                 drm_atomic_helper_wait_for_flip_done(dev, state);
9095
9096         drm_atomic_helper_cleanup_planes(dev, state);
9097
9098         /* return the stolen vga memory back to VRAM */
9099         if (!adev->mman.keep_stolen_vga_memory)
9100                 amdgpu_bo_free_kernel(&adev->mman.stolen_vga_memory, NULL, NULL);
9101         amdgpu_bo_free_kernel(&adev->mman.stolen_extended_memory, NULL, NULL);
9102
9103         /*
9104          * Finally, drop a runtime PM reference for each newly disabled CRTC,
9105          * so we can put the GPU into runtime suspend if we're not driving any
9106          * displays anymore
9107          */
9108         for (i = 0; i < crtc_disable_count; i++)
9109                 pm_runtime_put_autosuspend(dev->dev);
9110         pm_runtime_mark_last_busy(dev->dev);
9111
9112         if (dc_state_temp)
9113                 dc_release_state(dc_state_temp);
9114 }
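
/*
 * For reference, a hedged skeleton of the ordering contract this commit
 * tail follows, per the DRM atomic helper conventions:
 *
 *	...program hardware (DC commits, plane and stream updates)...
 *	drm_atomic_helper_commit_hw_done(state);
 *	drm_atomic_helper_wait_for_flip_done(dev, state);
 *	drm_atomic_helper_cleanup_planes(dev, state);
 *
 * hw_done must be signalled only after hardware programming completes so
 * that a subsequent commit serialized against this one can safely proceed.
 */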
9115
9116
9117 static int dm_force_atomic_commit(struct drm_connector *connector)
9118 {
9119         int ret = 0;
9120         struct drm_device *ddev = connector->dev;
9121         struct drm_atomic_state *state = drm_atomic_state_alloc(ddev);
9122         struct amdgpu_crtc *disconnected_acrtc = to_amdgpu_crtc(connector->encoder->crtc);
9123         struct drm_plane *plane = disconnected_acrtc->base.primary;
9124         struct drm_connector_state *conn_state;
9125         struct drm_crtc_state *crtc_state;
9126         struct drm_plane_state *plane_state;
9127
9128         if (!state)
9129                 return -ENOMEM;
9130
9131         state->acquire_ctx = ddev->mode_config.acquire_ctx;
9132
9133         /* Construct an atomic state to restore previous display setting */
9134
9135         /*
9136          * Attach connectors to drm_atomic_state
9137          */
9138         conn_state = drm_atomic_get_connector_state(state, connector);
9139
9140         ret = PTR_ERR_OR_ZERO(conn_state);
9141         if (ret)
9142                 goto out;
9143
9144         /* Attach crtc to drm_atomic_state*/
9145         crtc_state = drm_atomic_get_crtc_state(state, &disconnected_acrtc->base);
9146
9147         ret = PTR_ERR_OR_ZERO(crtc_state);
9148         if (ret)
9149                 goto out;
9150
9151         /* force a restore */
9152         crtc_state->mode_changed = true;
9153
9154         /* Attach plane to drm_atomic_state */
9155         plane_state = drm_atomic_get_plane_state(state, plane);
9156
9157         ret = PTR_ERR_OR_ZERO(plane_state);
9158         if (ret)
9159                 goto out;
9160
9161         /* Call commit internally with the state we just constructed */
9162         ret = drm_atomic_commit(state);
9163
9164 out:
9165         drm_atomic_state_put(state);
9166         if (ret)
9167                 DRM_ERROR("Restoring old state failed with %i\n", ret);
9168
9169         return ret;
9170 }
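
/*
 * Usage sketch: the caller visible in this file is
 * dm_restore_drm_connector_state() below, which invokes
 *
 *	dm_force_atomic_commit(&aconnector->base);
 *
 * The constructed state borrows ddev->mode_config.acquire_ctx, so this is
 * assumed to run in a context where that acquire context is valid.
 */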
9171
9172 /*
9173  * This function handles all cases when set mode does not come upon hotplug.
9174  * This includes when a display is unplugged then plugged back into the
9175  * same port and when running without usermode desktop manager support.
9176  */
9177 void dm_restore_drm_connector_state(struct drm_device *dev,
9178                                     struct drm_connector *connector)
9179 {
9180         struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
9181         struct amdgpu_crtc *disconnected_acrtc;
9182         struct dm_crtc_state *acrtc_state;
9183
9184         if (!aconnector->dc_sink || !connector->state || !connector->encoder)
9185                 return;
9186
9187         disconnected_acrtc = to_amdgpu_crtc(connector->encoder->crtc);
9188         if (!disconnected_acrtc)
9189                 return;
9190
9191         acrtc_state = to_dm_crtc_state(disconnected_acrtc->base.state);
9192         if (!acrtc_state->stream)
9193                 return;
9194
9195         /*
9196          * If the previous sink is not released and different from the current,
9197          * we deduce we are in a state where we cannot rely on a usermode call
9198          * to turn on the display, so we do it here.
9199          */
9200         if (acrtc_state->stream->sink != aconnector->dc_sink)
9201                 dm_force_atomic_commit(&aconnector->base);
9202 }
9203
9204 /*
9205  * Grabs all modesetting locks to serialize against any blocking commits
9206  * and waits for completion of all non-blocking commits.
9207  */
9208 static int do_aquire_global_lock(struct drm_device *dev,
9209                                  struct drm_atomic_state *state)
9210 {
9211         struct drm_crtc *crtc;
9212         struct drm_crtc_commit *commit;
9213         long ret;
9214
9215         /*
9216          * Adding all modeset locks to acquire_ctx will
9217          * ensure that when the framework releases it, the
9218          * extra locks we are locking here will get released too.
9219          */
9220         ret = drm_modeset_lock_all_ctx(dev, state->acquire_ctx);
9221         if (ret)
9222                 return ret;
9223
9224         list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
9225                 spin_lock(&crtc->commit_lock);
9226                 commit = list_first_entry_or_null(&crtc->commit_list,
9227                                 struct drm_crtc_commit, commit_entry);
9228                 if (commit)
9229                         drm_crtc_commit_get(commit);
9230                 spin_unlock(&crtc->commit_lock);
9231
9232                 if (!commit)
9233                         continue;
9234
9235                 /*
9236                  * Make sure all pending HW programming completed and
9237                  * page flips done
9238                  */
9239                 ret = wait_for_completion_interruptible_timeout(&commit->hw_done, 10*HZ);
9240
9241                 if (ret > 0)
9242                         ret = wait_for_completion_interruptible_timeout(
9243                                         &commit->flip_done, 10*HZ);
9244
9245                 if (ret == 0)
9246                         DRM_ERROR("[CRTC:%d:%s] hw_done or flip_done timed out\n",
9247                                   crtc->base.id, crtc->name);
9248
9249                 drm_crtc_commit_put(commit);
9250         }
9251
9252         return ret < 0 ? ret : 0;
9253 }
9254
9255 static void get_freesync_config_for_crtc(
9256         struct dm_crtc_state *new_crtc_state,
9257         struct dm_connector_state *new_con_state)
9258 {
9259         struct mod_freesync_config config = {0};
9260         struct amdgpu_dm_connector *aconnector =
9261                         to_amdgpu_dm_connector(new_con_state->base.connector);
9262         struct drm_display_mode *mode = &new_crtc_state->base.mode;
9263         int vrefresh = drm_mode_vrefresh(mode);
9264         bool fs_vid_mode = false;
9265
9266         new_crtc_state->vrr_supported = new_con_state->freesync_capable &&
9267                                         vrefresh >= aconnector->min_vfreq &&
9268                                         vrefresh <= aconnector->max_vfreq;
9269
9270         if (new_crtc_state->vrr_supported) {
9271                 new_crtc_state->stream->ignore_msa_timing_param = true;
9272                 fs_vid_mode = new_crtc_state->freesync_config.state == VRR_STATE_ACTIVE_FIXED;
9273
9274                 config.min_refresh_in_uhz = aconnector->min_vfreq * 1000000;
9275                 config.max_refresh_in_uhz = aconnector->max_vfreq * 1000000;
9276                 config.vsif_supported = true;
9277                 config.btr = true;
9278
9279                 if (fs_vid_mode) {
9280                         config.state = VRR_STATE_ACTIVE_FIXED;
9281                         config.fixed_refresh_in_uhz = new_crtc_state->freesync_config.fixed_refresh_in_uhz;
9282                         goto out;
9283                 } else if (new_crtc_state->base.vrr_enabled) {
9284                         config.state = VRR_STATE_ACTIVE_VARIABLE;
9285                 } else {
9286                         config.state = VRR_STATE_INACTIVE;
9287                 }
9288         }
9289 out:
9290         new_crtc_state->freesync_config = config;
9291 }
9292
9293 static void reset_freesync_config_for_crtc(
9294         struct dm_crtc_state *new_crtc_state)
9295 {
9296         new_crtc_state->vrr_supported = false;
9297
9298         memset(&new_crtc_state->vrr_infopacket, 0,
9299                sizeof(new_crtc_state->vrr_infopacket));
9300 }
9301
9302 static bool
9303 is_timing_unchanged_for_freesync(struct drm_crtc_state *old_crtc_state,
9304                                  struct drm_crtc_state *new_crtc_state)
9305 {
9306         struct drm_display_mode old_mode, new_mode;
9307
9308         if (!old_crtc_state || !new_crtc_state)
9309                 return false;
9310
9311         old_mode = old_crtc_state->mode;
9312         new_mode = new_crtc_state->mode;
9313
9314         if (old_mode.clock       == new_mode.clock &&
9315             old_mode.hdisplay    == new_mode.hdisplay &&
9316             old_mode.vdisplay    == new_mode.vdisplay &&
9317             old_mode.htotal      == new_mode.htotal &&
9318             old_mode.vtotal      != new_mode.vtotal &&
9319             old_mode.hsync_start == new_mode.hsync_start &&
9320             old_mode.vsync_start != new_mode.vsync_start &&
9321             old_mode.hsync_end   == new_mode.hsync_end &&
9322             old_mode.vsync_end   != new_mode.vsync_end &&
9323             old_mode.hskew       == new_mode.hskew &&
9324             old_mode.vscan       == new_mode.vscan &&
9325             (old_mode.vsync_end - old_mode.vsync_start) ==
9326             (new_mode.vsync_end - new_mode.vsync_start))
9327                 return true;
9328
9329         return false;
9330 }
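
/*
 * Example (hypothetical numbers): lowering a 1920x1080 mode from 60Hz to
 * 50Hz purely by stretching the vertical front porch leaves clock,
 * hdisplay/vdisplay, htotal and the vsync pulse width untouched while
 * vsync_start, vsync_end and vtotal all shift by the same amount, so the
 * helper above returns true and the change qualifies for the freesync
 * fixed-refresh path instead of a full modeset.
 */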
9331
9332 static void set_freesync_fixed_config(struct dm_crtc_state *dm_new_crtc_state)
{
9333         uint64_t num, den, res;
9334         struct drm_crtc_state *new_crtc_state = &dm_new_crtc_state->base;
9335
9336         dm_new_crtc_state->freesync_config.state = VRR_STATE_ACTIVE_FIXED;
9337
9338         num = (unsigned long long)new_crtc_state->mode.clock * 1000 * 1000000;
9339         den = (unsigned long long)new_crtc_state->mode.htotal *
9340               (unsigned long long)new_crtc_state->mode.vtotal;
9341
9342         res = div_u64(num, den);
9343         dm_new_crtc_state->freesync_config.fixed_refresh_in_uhz = res;
9344 }
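
/*
 * Worked example (hypothetical mode): clock = 148500 kHz, htotal = 2200
 * and vtotal = 1125 give num = 148500 * 1000 * 1000000 and
 * den = 2200 * 1125 = 2475000, so fixed_refresh_in_uhz = 60000000,
 * i.e. a fixed 60.000000 Hz rate.
 */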
9345
9346 static int dm_update_crtc_state(struct amdgpu_display_manager *dm,
9347                                 struct drm_atomic_state *state,
9348                                 struct drm_crtc *crtc,
9349                                 struct drm_crtc_state *old_crtc_state,
9350                                 struct drm_crtc_state *new_crtc_state,
9351                                 bool enable,
9352                                 bool *lock_and_validation_needed)
9353 {
9354         struct dm_atomic_state *dm_state = NULL;
9355         struct dm_crtc_state *dm_old_crtc_state, *dm_new_crtc_state;
9356         struct dc_stream_state *new_stream;
9357         int ret = 0;
9358
9359         /*
9360          * TODO: Move this code into dm_crtc_atomic_check once we get rid of
9361          * dc_validation_set and update changed items there.
9362          */
9363         struct amdgpu_crtc *acrtc = NULL;
9364         struct amdgpu_dm_connector *aconnector = NULL;
9365         struct drm_connector_state *drm_new_conn_state = NULL, *drm_old_conn_state = NULL;
9366         struct dm_connector_state *dm_new_conn_state = NULL, *dm_old_conn_state = NULL;
9367
9368         new_stream = NULL;
9369
9370         dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
9371         dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
9372         acrtc = to_amdgpu_crtc(crtc);
9373         aconnector = amdgpu_dm_find_first_crtc_matching_connector(state, crtc);
9374
9375         /* TODO This hack should go away */
9376         if (aconnector && enable) {
9377                 /* Make sure a fake sink is created in the hotplug scenario */
9378                 drm_new_conn_state = drm_atomic_get_new_connector_state(state,
9379                                                             &aconnector->base);
9380                 drm_old_conn_state = drm_atomic_get_old_connector_state(state,
9381                                                             &aconnector->base);
9382
9383                 if (IS_ERR(drm_new_conn_state)) {
9384                         ret = PTR_ERR_OR_ZERO(drm_new_conn_state);
9385                         goto fail;
9386                 }
9387
9388                 dm_new_conn_state = to_dm_connector_state(drm_new_conn_state);
9389                 dm_old_conn_state = to_dm_connector_state(drm_old_conn_state);
9390
9391                 if (!drm_atomic_crtc_needs_modeset(new_crtc_state))
9392                         goto skip_modeset;
9393
9394                 new_stream = create_validate_stream_for_sink(aconnector,
9395                                                              &new_crtc_state->mode,
9396                                                              dm_new_conn_state,
9397                                                              dm_old_crtc_state->stream);
9398
9399                 /*
9400                  * We can have no stream on ACTION_SET if a display
9401                  * was disconnected during S3. In this case it is not an
9402                  * error: the OS will be updated after detection and
9403                  * will do the right thing on the next atomic commit.
9404                  */
9405
9406                 if (!new_stream) {
9407                         DRM_DEBUG_DRIVER("%s: Failed to create new stream for crtc %d\n",
9408                                         __func__, acrtc->base.base.id);
9409                         ret = -ENOMEM;
9410                         goto fail;
9411                 }
9412
9413                 /*
9414                  * TODO: Check VSDB bits to decide whether this should
9415                  * be enabled or not.
9416                  */
9417                 new_stream->triggered_crtc_reset.enabled =
9418                         dm->force_timing_sync;
9419
9420                 dm_new_crtc_state->abm_level = dm_new_conn_state->abm_level;
9421
9422                 ret = fill_hdr_info_packet(drm_new_conn_state,
9423                                            &new_stream->hdr_static_metadata);
9424                 if (ret)
9425                         goto fail;
9426
9427                 /*
9428                  * If we already removed the old stream from the context
9429                  * (and set the new stream to NULL) then we can't reuse
9430                  * the old stream even if the stream and scaling are unchanged.
9431                  * We'll hit the BUG_ON and black screen.
9432                  *
9433                  * TODO: Refactor this function to allow this check to work
9434                  * in all conditions.
9435                  */
9436                 if (amdgpu_freesync_vid_mode &&
9437                     dm_new_crtc_state->stream &&
9438                     is_timing_unchanged_for_freesync(new_crtc_state, old_crtc_state))
9439                         goto skip_modeset;
9440
9441                 if (dm_new_crtc_state->stream &&
9442                     dc_is_stream_unchanged(new_stream, dm_old_crtc_state->stream) &&
9443                     dc_is_stream_scaling_unchanged(new_stream, dm_old_crtc_state->stream)) {
9444                         new_crtc_state->mode_changed = false;
9445                         DRM_DEBUG_DRIVER("Mode change not required, setting mode_changed to %d\n",
9446                                          new_crtc_state->mode_changed);
9447                 }
9448         }
9449
9450         /* mode_changed flag may get updated above, need to check again */
9451         if (!drm_atomic_crtc_needs_modeset(new_crtc_state))
9452                 goto skip_modeset;
9453
9454         DRM_DEBUG_ATOMIC(
9455                 "amdgpu_crtc id:%d crtc_state_flags: enable:%d, active:%d, "
9456                 "planes_changed:%d, mode_changed:%d, active_changed:%d, "
9457                 "connectors_changed:%d\n",
9458                 acrtc->crtc_id,
9459                 new_crtc_state->enable,
9460                 new_crtc_state->active,
9461                 new_crtc_state->planes_changed,
9462                 new_crtc_state->mode_changed,
9463                 new_crtc_state->active_changed,
9464                 new_crtc_state->connectors_changed);
9465
9466         /* Remove stream for any changed/disabled CRTC */
9467         if (!enable) {
9468
9469                 if (!dm_old_crtc_state->stream)
9470                         goto skip_modeset;
9471
9472                 if (amdgpu_freesync_vid_mode && dm_new_crtc_state->stream &&
9473                     is_timing_unchanged_for_freesync(new_crtc_state,
9474                                                      old_crtc_state)) {
9475                         new_crtc_state->mode_changed = false;
9476                         DRM_DEBUG_DRIVER(
9477                                 "Mode change not required for front porch change, "
9478                                 "setting mode_changed to %d\n",
9479                                 new_crtc_state->mode_changed);
9480
9481                         set_freesync_fixed_config(dm_new_crtc_state);
9482
9483                         goto skip_modeset;
9484                 } else if (amdgpu_freesync_vid_mode && aconnector &&
9485                            is_freesync_video_mode(&new_crtc_state->mode,
9486                                                   aconnector)) {
9487                         set_freesync_fixed_config(dm_new_crtc_state);
9488                 }
9489
9490                 ret = dm_atomic_get_state(state, &dm_state);
9491                 if (ret)
9492                         goto fail;
9493
9494                 DRM_DEBUG_DRIVER("Disabling DRM crtc: %d\n",
9495                                 crtc->base.id);
9496
9497                 /* i.e. reset mode */
9498                 if (dc_remove_stream_from_ctx(
9499                                 dm->dc,
9500                                 dm_state->context,
9501                                 dm_old_crtc_state->stream) != DC_OK) {
9502                         ret = -EINVAL;
9503                         goto fail;
9504                 }
9505
9506                 dc_stream_release(dm_old_crtc_state->stream);
9507                 dm_new_crtc_state->stream = NULL;
9508
9509                 reset_freesync_config_for_crtc(dm_new_crtc_state);
9510
9511                 *lock_and_validation_needed = true;
9512
9513         } else {/* Add stream for any updated/enabled CRTC */
9514                 /*
9515                  * Quick fix to prevent a NULL pointer on new_stream when newly added
9516                  * MST connectors are not found in the existing crtc_state in chained mode.
9517                  * TODO: dig out the root cause of this.
9518                  */
9519                 if (!aconnector || (!aconnector->dc_sink && aconnector->mst_port))
9520                         goto skip_modeset;
9521
9522                 if (modereset_required(new_crtc_state))
9523                         goto skip_modeset;
9524
9525                 if (modeset_required(new_crtc_state, new_stream,
9526                                      dm_old_crtc_state->stream)) {
9527
9528                         WARN_ON(dm_new_crtc_state->stream);
9529
9530                         ret = dm_atomic_get_state(state, &dm_state);
9531                         if (ret)
9532                                 goto fail;
9533
9534                         dm_new_crtc_state->stream = new_stream;
9535
9536                         dc_stream_retain(new_stream);
9537
9538                         DRM_DEBUG_ATOMIC("Enabling DRM crtc: %d\n",
9539                                          crtc->base.id);
9540
9541                         if (dc_add_stream_to_ctx(
9542                                         dm->dc,
9543                                         dm_state->context,
9544                                         dm_new_crtc_state->stream) != DC_OK) {
9545                                 ret = -EINVAL;
9546                                 goto fail;
9547                         }
9548
9549                         *lock_and_validation_needed = true;
9550                 }
9551         }
9552
9553 skip_modeset:
9554         /* Release extra reference */
9555         if (new_stream)
9556                 dc_stream_release(new_stream);
9557
9558         /*
9559          * We want to do dc stream updates that do not require a
9560          * full modeset below.
9561          */
9562         if (!(enable && aconnector && new_crtc_state->active))
9563                 return 0;
9564         /*
9565          * Given the above conditions, the dc state cannot be NULL because:
9566          * 1. We're in the process of enabling CRTCs (the stream has just been
9567          *    added to the dc context, or is already on the context),
9568          * 2. the CRTC has a valid connector attached, and
9569          * 3. the CRTC is currently active and enabled.
9570          * => The dc stream state currently exists.
9571          */
9572         BUG_ON(dm_new_crtc_state->stream == NULL);
9573
9574         /* Scaling or underscan settings */
9575         if (is_scaling_state_different(dm_old_conn_state, dm_new_conn_state))
9576                 update_stream_scaling_settings(
9577                         &new_crtc_state->mode, dm_new_conn_state, dm_new_crtc_state->stream);
9578
9579         /* ABM settings */
9580         dm_new_crtc_state->abm_level = dm_new_conn_state->abm_level;
9581
9582         /*
9583          * Color management settings. We also update color properties
9584          * when a modeset is needed, to ensure it gets reprogrammed.
9585          */
9586         if (dm_new_crtc_state->base.color_mgmt_changed ||
9587             drm_atomic_crtc_needs_modeset(new_crtc_state)) {
9588                 ret = amdgpu_dm_update_crtc_color_mgmt(dm_new_crtc_state);
9589                 if (ret)
9590                         goto fail;
9591         }
9592
9593         /* Update Freesync settings. */
9594         get_freesync_config_for_crtc(dm_new_crtc_state,
9595                                      dm_new_conn_state);
9596
9597         return ret;
9598
9599 fail:
9600         if (new_stream)
9601                 dc_stream_release(new_stream);
9602         return ret;
9603 }
9604
9605 static bool should_reset_plane(struct drm_atomic_state *state,
9606                                struct drm_plane *plane,
9607                                struct drm_plane_state *old_plane_state,
9608                                struct drm_plane_state *new_plane_state)
9609 {
9610         struct drm_plane *other;
9611         struct drm_plane_state *old_other_state, *new_other_state;
9612         struct drm_crtc_state *new_crtc_state;
9613         int i;
9614
9615         /*
9616          * TODO: Remove this hack once the checks below are sufficient
9617          * to determine when we need to reset all the planes on
9618          * the stream.
9619          */
9620         if (state->allow_modeset)
9621                 return true;
9622
9623         /* Exit early if we know that we're adding or removing the plane. */
9624         if (old_plane_state->crtc != new_plane_state->crtc)
9625                 return true;
9626
9627         /* old crtc == new_crtc == NULL, plane not in context. */
9628         if (!new_plane_state->crtc)
9629                 return false;
9630
9631         new_crtc_state =
9632                 drm_atomic_get_new_crtc_state(state, new_plane_state->crtc);
9633
9634         if (!new_crtc_state)
9635                 return true;
9636
9637         /* CRTC Degamma changes currently require us to recreate planes. */
9638         if (new_crtc_state->color_mgmt_changed)
9639                 return true;
9640
9641         if (drm_atomic_crtc_needs_modeset(new_crtc_state))
9642                 return true;
9643
9644         /*
9645          * If there are any new primary or overlay planes being added or
9646          * removed then the z-order can potentially change. To ensure
9647          * correct z-order and pipe acquisition the current DC architecture
9648          * requires us to remove and recreate all existing planes.
9649          *
9650          * TODO: Come up with a more elegant solution for this.
9651          */
9652         for_each_oldnew_plane_in_state(state, other, old_other_state, new_other_state, i) {
9653                 struct amdgpu_framebuffer *old_afb, *new_afb;
9654                 if (other->type == DRM_PLANE_TYPE_CURSOR)
9655                         continue;
9656
9657                 if (old_other_state->crtc != new_plane_state->crtc &&
9658                     new_other_state->crtc != new_plane_state->crtc)
9659                         continue;
9660
9661                 if (old_other_state->crtc != new_other_state->crtc)
9662                         return true;
9663
9664                 /* Src/dst size and scaling updates. */
9665                 if (old_other_state->src_w != new_other_state->src_w ||
9666                     old_other_state->src_h != new_other_state->src_h ||
9667                     old_other_state->crtc_w != new_other_state->crtc_w ||
9668                     old_other_state->crtc_h != new_other_state->crtc_h)
9669                         return true;
9670
9671                 /* Rotation / mirroring updates. */
9672                 if (old_other_state->rotation != new_other_state->rotation)
9673                         return true;
9674
9675                 /* Blending updates. */
9676                 if (old_other_state->pixel_blend_mode !=
9677                     new_other_state->pixel_blend_mode)
9678                         return true;
9679
9680                 /* Alpha updates. */
9681                 if (old_other_state->alpha != new_other_state->alpha)
9682                         return true;
9683
9684                 /* Colorspace changes. */
9685                 if (old_other_state->color_range != new_other_state->color_range ||
9686                     old_other_state->color_encoding != new_other_state->color_encoding)
9687                         return true;
9688
9689                 /* Framebuffer checks fall at the end. */
9690                 if (!old_other_state->fb || !new_other_state->fb)
9691                         continue;
9692
9693                 /* Pixel format changes can require bandwidth updates. */
9694                 if (old_other_state->fb->format != new_other_state->fb->format)
9695                         return true;
9696
9697                 old_afb = (struct amdgpu_framebuffer *)old_other_state->fb;
9698                 new_afb = (struct amdgpu_framebuffer *)new_other_state->fb;
9699
9700                 /* Tiling and DCC changes also require bandwidth updates. */
9701                 if (old_afb->tiling_flags != new_afb->tiling_flags ||
9702                     old_afb->base.modifier != new_afb->base.modifier)
9703                         return true;
9704         }
9705
9706         return false;
9707 }
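
/*
 * Beyond the allow_modeset hack above, in short: a plane is reset when it
 * moves between CRTCs, when its CRTC needs a modeset or a color management
 * update, or when any non-cursor plane sharing its CRTC changes geometry,
 * rotation, blending, alpha, colorspace, pixel format, tiling or DCC state.
 */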
9708
9709 static int dm_check_cursor_fb(struct amdgpu_crtc *new_acrtc,
9710                               struct drm_plane_state *new_plane_state,
9711                               struct drm_framebuffer *fb)
9712 {
9713         struct amdgpu_device *adev = drm_to_adev(new_acrtc->base.dev);
9714         struct amdgpu_framebuffer *afb = to_amdgpu_framebuffer(fb);
9715         unsigned int pitch;
9716         bool linear;
9717
9718         if (fb->width > new_acrtc->max_cursor_width ||
9719             fb->height > new_acrtc->max_cursor_height) {
9720                 DRM_DEBUG_ATOMIC("Bad cursor FB size %dx%d\n",
9721                                  fb->width,
9722                                  fb->height);
9723                 return -EINVAL;
9724         }
9725         if (new_plane_state->src_w != fb->width << 16 ||
9726             new_plane_state->src_h != fb->height << 16) {
9727                 DRM_DEBUG_ATOMIC("Cropping not supported for cursor plane\n");
9728                 return -EINVAL;
9729         }
9730
9731         /* Pitch in pixels */
9732         pitch = fb->pitches[0] / fb->format->cpp[0];
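        /*
         * e.g. a 64x64 ARGB8888 cursor has pitches[0] = 256 bytes and
         * cpp[0] = 4 bytes per pixel, giving a pitch of 64 pixels; that
         * matches fb->width and is in the 64/128/256 set accepted below.
         */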
9733
9734         if (fb->width != pitch) {
9735                 DRM_DEBUG_ATOMIC("Cursor FB width %d doesn't match pitch %d\n",
9736                                  fb->width, pitch);
9737                 return -EINVAL;
9738         }
9739
9740         switch (pitch) {
9741         case 64:
9742         case 128:
9743         case 256:
9744                 /* FB pitch is supported by cursor plane */
9745                 break;
9746         default:
9747                 DRM_DEBUG_ATOMIC("Bad cursor FB pitch %d px\n", pitch);
9748                 return -EINVAL;
9749         }
9750
9751         /* Core DRM takes care of checking FB modifiers, so we only need to
9752          * check tiling flags when the FB doesn't have a modifier. */
9753         if (!(fb->flags & DRM_MODE_FB_MODIFIERS)) {
9754                 if (adev->family < AMDGPU_FAMILY_AI) {
9755                         linear = AMDGPU_TILING_GET(afb->tiling_flags, ARRAY_MODE) != DC_ARRAY_2D_TILED_THIN1 &&
9756                                  AMDGPU_TILING_GET(afb->tiling_flags, ARRAY_MODE) != DC_ARRAY_1D_TILED_THIN1 &&
9757                                  AMDGPU_TILING_GET(afb->tiling_flags, MICRO_TILE_MODE) == 0;
9758                 } else {
9759                         linear = AMDGPU_TILING_GET(afb->tiling_flags, SWIZZLE_MODE) == 0;
9760                 }
9761                 if (!linear) {
9762                         DRM_DEBUG_ATOMIC("Cursor FB not linear\n");
9763                         return -EINVAL;
9764                 }
9765         }
9766
9767         return 0;
9768 }
9769
9770 static int dm_update_plane_state(struct dc *dc,
9771                                  struct drm_atomic_state *state,
9772                                  struct drm_plane *plane,
9773                                  struct drm_plane_state *old_plane_state,
9774                                  struct drm_plane_state *new_plane_state,
9775                                  bool enable,
9776                                  bool *lock_and_validation_needed)
9777 {
9778
9779         struct dm_atomic_state *dm_state = NULL;
9780         struct drm_crtc *new_plane_crtc, *old_plane_crtc;
9781         struct drm_crtc_state *old_crtc_state, *new_crtc_state;
9782         struct dm_crtc_state *dm_new_crtc_state, *dm_old_crtc_state;
9783         struct dm_plane_state *dm_new_plane_state, *dm_old_plane_state;
9784         struct amdgpu_crtc *new_acrtc;
9785         bool needs_reset;
9786         int ret = 0;
9787
9788
9789         new_plane_crtc = new_plane_state->crtc;
9790         old_plane_crtc = old_plane_state->crtc;
9791         dm_new_plane_state = to_dm_plane_state(new_plane_state);
9792         dm_old_plane_state = to_dm_plane_state(old_plane_state);
9793
9794         if (plane->type == DRM_PLANE_TYPE_CURSOR) {
9795                 if (!enable || !new_plane_crtc ||
9796                         drm_atomic_plane_disabling(plane->state, new_plane_state))
9797                         return 0;
9798
9799                 new_acrtc = to_amdgpu_crtc(new_plane_crtc);
9800
9801                 if (new_plane_state->src_x != 0 || new_plane_state->src_y != 0) {
9802                         DRM_DEBUG_ATOMIC("Cropping not supported for cursor plane\n");
9803                         return -EINVAL;
9804                 }
9805
9806                 if (new_plane_state->fb) {
9807                         ret = dm_check_cursor_fb(new_acrtc, new_plane_state,
9808                                                  new_plane_state->fb);
9809                         if (ret)
9810                                 return ret;
9811                 }
9812
9813                 return 0;
9814         }
9815
9816         needs_reset = should_reset_plane(state, plane, old_plane_state,
9817                                          new_plane_state);
9818
9819         /* Remove any changed/removed planes */
9820         if (!enable) {
9821                 if (!needs_reset)
9822                         return 0;
9823
9824                 if (!old_plane_crtc)
9825                         return 0;
9826
9827                 old_crtc_state = drm_atomic_get_old_crtc_state(
9828                                 state, old_plane_crtc);
9829                 dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
9830
9831                 if (!dm_old_crtc_state->stream)
9832                         return 0;
9833
9834                 DRM_DEBUG_ATOMIC("Disabling DRM plane: %d on DRM crtc %d\n",
9835                                 plane->base.id, old_plane_crtc->base.id);
9836
9837                 ret = dm_atomic_get_state(state, &dm_state);
9838                 if (ret)
9839                         return ret;
9840
9841                 if (!dc_remove_plane_from_context(
9842                                 dc,
9843                                 dm_old_crtc_state->stream,
9844                                 dm_old_plane_state->dc_state,
9845                                 dm_state->context)) {
9846
9847                         return -EINVAL;
9848                 }
9849
9850
9851                 dc_plane_state_release(dm_old_plane_state->dc_state);
9852                 dm_new_plane_state->dc_state = NULL;
9853
9854                 *lock_and_validation_needed = true;
9855
9856         } else { /* Add new planes */
9857                 struct dc_plane_state *dc_new_plane_state;
9858
9859                 if (drm_atomic_plane_disabling(plane->state, new_plane_state))
9860                         return 0;
9861
9862                 if (!new_plane_crtc)
9863                         return 0;
9864
9865                 new_crtc_state = drm_atomic_get_new_crtc_state(state, new_plane_crtc);
9866                 dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
9867
9868                 if (!dm_new_crtc_state->stream)
9869                         return 0;
9870
9871                 if (!needs_reset)
9872                         return 0;
9873
9874                 ret = dm_plane_helper_check_state(new_plane_state, new_crtc_state);
9875                 if (ret)
9876                         return ret;
9877
9878                 WARN_ON(dm_new_plane_state->dc_state);
9879
9880                 dc_new_plane_state = dc_create_plane_state(dc);
9881                 if (!dc_new_plane_state)
9882                         return -ENOMEM;
9883
9884                 DRM_DEBUG_ATOMIC("Enabling DRM plane: %d on DRM crtc %d\n",
9885                                  plane->base.id, new_plane_crtc->base.id);
9886
9887                 ret = fill_dc_plane_attributes(
9888                         drm_to_adev(new_plane_crtc->dev),
9889                         dc_new_plane_state,
9890                         new_plane_state,
9891                         new_crtc_state);
9892                 if (ret) {
9893                         dc_plane_state_release(dc_new_plane_state);
9894                         return ret;
9895                 }
9896
9897                 ret = dm_atomic_get_state(state, &dm_state);
9898                 if (ret) {
9899                         dc_plane_state_release(dc_new_plane_state);
9900                         return ret;
9901                 }
9902
9903                 /*
9904                  * Any atomic check errors that occur after this will
9905                  * not need a release. The plane state will be attached
9906                  * to the stream, and therefore part of the atomic
9907                  * state. It'll be released when the atomic state is
9908                  * cleaned.
9909                  */
9910                 if (!dc_add_plane_to_context(
9911                                 dc,
9912                                 dm_new_crtc_state->stream,
9913                                 dc_new_plane_state,
9914                                 dm_state->context)) {
9915
9916                         dc_plane_state_release(dc_new_plane_state);
9917                         return -EINVAL;
9918                 }
9919
9920                 dm_new_plane_state->dc_state = dc_new_plane_state;
9921
9922                 /* Tell DC to do a full surface update every time there
9923                  * is a plane change. Inefficient, but works for now.
9924                  */
9925                 dm_new_plane_state->dc_state->update_flags.bits.full_update = 1;
9926
9927                 *lock_and_validation_needed = true;
9928         }
9929
9930
9931         return ret;
9932 }
9933
9934 static int dm_check_crtc_cursor(struct drm_atomic_state *state,
9935                                 struct drm_crtc *crtc,
9936                                 struct drm_crtc_state *new_crtc_state)
9937 {
9938         struct drm_plane_state *new_cursor_state, *new_primary_state;
9939         int cursor_scale_w, cursor_scale_h, primary_scale_w, primary_scale_h;
9940
9941         /* On DCE and DCN there is no dedicated hardware cursor plane. We get a
9942          * cursor per pipe but it's going to inherit the scaling and
9943          * positioning from the underlying pipe. Check that the cursor plane's
9944          * scaling matches the primary plane's. */
9945
9946         new_cursor_state = drm_atomic_get_new_plane_state(state, crtc->cursor);
9947         new_primary_state = drm_atomic_get_new_plane_state(state, crtc->primary);
9948         if (!new_cursor_state || !new_primary_state ||
9949             !new_cursor_state->fb || !new_primary_state->fb) {
9950                 return 0;
9951         }
9952
9953         cursor_scale_w = new_cursor_state->crtc_w * 1000 /
9954                          (new_cursor_state->src_w >> 16);
9955         cursor_scale_h = new_cursor_state->crtc_h * 1000 /
9956                          (new_cursor_state->src_h >> 16);
9957
9958         primary_scale_w = new_primary_state->crtc_w * 1000 /
9959                          (new_primary_state->src_w >> 16);
9960         primary_scale_h = new_primary_state->crtc_h * 1000 /
9961                          (new_primary_state->src_h >> 16);
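        /*
         * Scales are in permille, e.g. a 64x64 cursor shown at 64x64 gives
         * 1000, while a 1920-wide primary source scanned out at 3840 gives
         * 2000; such a mismatch fails the comparison below.
         */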
9962
9963         if (cursor_scale_w != primary_scale_w ||
9964             cursor_scale_h != primary_scale_h) {
9965                 DRM_DEBUG_ATOMIC("Cursor plane scaling doesn't match primary plane\n");
9966                 return -EINVAL;
9967         }
9968
9969         return 0;
9970 }
9971
9972 #if defined(CONFIG_DRM_AMD_DC_DCN)
9973 static int add_affected_mst_dsc_crtcs(struct drm_atomic_state *state, struct drm_crtc *crtc)
9974 {
9975         struct drm_connector *connector;
9976         struct drm_connector_state *conn_state;
9977         struct amdgpu_dm_connector *aconnector = NULL;
9978         int i;

9979         for_each_new_connector_in_state(state, connector, conn_state, i) {
9980                 if (conn_state->crtc != crtc)
9981                         continue;
9982
9983                 aconnector = to_amdgpu_dm_connector(connector);
9984                 if (!aconnector->port || !aconnector->mst_port)
9985                         aconnector = NULL;
9986                 else
9987                         break;
9988         }
9989
9990         if (!aconnector)
9991                 return 0;
9992
9993         return drm_dp_mst_add_affected_dsc_crtcs(state, &aconnector->mst_port->mst_mgr);
9994 }
9995 #endif
9996
9997 static int validate_overlay(struct drm_atomic_state *state)
9998 {
9999         int i;
10000         struct drm_plane *plane;
10001         struct drm_plane_state *old_plane_state, *new_plane_state;
10002         struct drm_plane_state *primary_state, *overlay_state = NULL;
10003
10004         /* Check if primary plane is contained inside overlay */
10005         for_each_oldnew_plane_in_state_reverse(state, plane, old_plane_state, new_plane_state, i) {
10006                 if (plane->type == DRM_PLANE_TYPE_OVERLAY) {
10007                         if (drm_atomic_plane_disabling(plane->state, new_plane_state))
10008                                 return 0;
10009
10010                         overlay_state = new_plane_state;
10011                         continue;
10012                 }
10013         }
10014
10015         /* check if we're making changes to the overlay plane */
10016         if (!overlay_state)
10017                 return 0;
10018
10019         /* check if overlay plane is enabled */
10020         if (!overlay_state->crtc)
10021                 return 0;
10022
10023         /* find the primary plane for the CRTC that the overlay is enabled on */
10024         primary_state = drm_atomic_get_plane_state(state, overlay_state->crtc->primary);
10025         if (IS_ERR(primary_state))
10026                 return PTR_ERR(primary_state);
10027
10028         /* check if primary plane is enabled */
10029         if (!primary_state->crtc)
10030                 return 0;
10031
10032         /* Perform the bounds check to ensure the overlay plane covers the primary */
10033         if (primary_state->crtc_x < overlay_state->crtc_x ||
10034             primary_state->crtc_y < overlay_state->crtc_y ||
10035             primary_state->crtc_x + primary_state->crtc_w > overlay_state->crtc_x + overlay_state->crtc_w ||
10036             primary_state->crtc_y + primary_state->crtc_h > overlay_state->crtc_y + overlay_state->crtc_h) {
10037                 DRM_DEBUG_ATOMIC("Overlay plane is enabled with hardware cursor but does not fully cover primary plane\n");
10038                 return -EINVAL;
10039         }
10040
10041         return 0;
10042 }
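
/*
 * e.g. with the overlay at (0,0) 1920x1080, a primary plane at (0,0)
 * 1920x1080 passes the bounds check above, while a primary shifted to
 * (100,0) would extend 100 pixels past the overlay's right edge and the
 * commit would be rejected with -EINVAL.
 */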
10043
10044 /**
10045  * amdgpu_dm_atomic_check() - Atomic check implementation for amdgpu DM.
10046  * @dev: The DRM device
10047  * @state: The atomic state to validate
10048  *
10049  * Validate that the given atomic state is programmable by DC into hardware.
10050  * This involves constructing a &struct dc_state reflecting the new hardware
10051  * state we wish to commit, then querying DC to see if it is programmable. It's
10052  * important not to modify the existing DC state. Otherwise, atomic_check
10053  * may unexpectedly commit hardware changes.
10054  *
10055  * When validating the DC state, it's important that the right locks are
10056  * acquired. For full updates, which remove/add/update streams on one
10057  * CRTC while flipping on another, acquiring the global lock guarantees
10058  * that any such full update commit will wait for completion of any outstanding
10059  * flip using DRM's synchronization events.
10060  *
10061  * Note that DM adds the affected connectors for all CRTCs in state, when that
10062  * might not seem necessary. This is because DC stream creation requires the
10063  * DC sink, which is tied to the DRM connector state. Cleaning this up should
10064  * be possible but non-trivial - a possible TODO item.
10065  *
10066  * Return: 0 on success, or a negative error code if validation failed.
10067  */
10068 static int amdgpu_dm_atomic_check(struct drm_device *dev,
10069                                   struct drm_atomic_state *state)
10070 {
10071         struct amdgpu_device *adev = drm_to_adev(dev);
10072         struct dm_atomic_state *dm_state = NULL;
10073         struct dc *dc = adev->dm.dc;
10074         struct drm_connector *connector;
10075         struct drm_connector_state *old_con_state, *new_con_state;
10076         struct drm_crtc *crtc;
10077         struct drm_crtc_state *old_crtc_state, *new_crtc_state;
10078         struct drm_plane *plane;
10079         struct drm_plane_state *old_plane_state, *new_plane_state;
10080         enum dc_status status;
10081         int ret, i;
10082         bool lock_and_validation_needed = false;
10083         struct dm_crtc_state *dm_old_crtc_state;
10084
10085         trace_amdgpu_dm_atomic_check_begin(state);
10086
10087         ret = drm_atomic_helper_check_modeset(dev, state);
10088         if (ret)
10089                 goto fail;
10090
10091         /* Check connector changes */
10092         for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) {
10093                 struct dm_connector_state *dm_old_con_state = to_dm_connector_state(old_con_state);
10094                 struct dm_connector_state *dm_new_con_state = to_dm_connector_state(new_con_state);
10095
10096                 /* Skip connectors that are disabled or already part of a modeset. */
10097                 if (!old_con_state->crtc && !new_con_state->crtc)
10098                         continue;
10099
10100                 if (!new_con_state->crtc)
10101                         continue;
10102
10103                 new_crtc_state = drm_atomic_get_crtc_state(state, new_con_state->crtc);
10104                 if (IS_ERR(new_crtc_state)) {
10105                         ret = PTR_ERR(new_crtc_state);
10106                         goto fail;
10107                 }
10108
10109                 if (dm_old_con_state->abm_level !=
10110                     dm_new_con_state->abm_level)
10111                         new_crtc_state->connectors_changed = true;
10112         }
10113
10114 #if defined(CONFIG_DRM_AMD_DC_DCN)
10115         if (dc_resource_is_dsc_encoding_supported(dc)) {
10116                 for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
10117                         if (drm_atomic_crtc_needs_modeset(new_crtc_state)) {
10118                                 ret = add_affected_mst_dsc_crtcs(state, crtc);
10119                                 if (ret)
10120                                         goto fail;
10121                         }
10122                 }
10123         }
10124 #endif
10125         for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
10126                 dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
10127
10128                 if (!drm_atomic_crtc_needs_modeset(new_crtc_state) &&
10129                     !new_crtc_state->color_mgmt_changed &&
10130                     old_crtc_state->vrr_enabled == new_crtc_state->vrr_enabled &&
10131                     !dm_old_crtc_state->dsc_force_changed)
10132                         continue;
10133
10134                 if (!new_crtc_state->enable)
10135                         continue;
10136
10137                 ret = drm_atomic_add_affected_connectors(state, crtc);
10138                 if (ret)
10139                         goto fail;
10140
10141                 ret = drm_atomic_add_affected_planes(state, crtc);
10142                 if (ret)
10143                         goto fail;
10144
10145                 if (dm_old_crtc_state->dsc_force_changed)
10146                         new_crtc_state->mode_changed = true;
10147         }
10148
10149         /*
10150          * Add all primary and overlay planes on the CRTC to the state
10151          * whenever a plane is enabled to maintain correct z-ordering
10152          * and to enable fast surface updates.
10153          */
10154         drm_for_each_crtc(crtc, dev) {
10155                 bool modified = false;
10156
10157                 for_each_oldnew_plane_in_state(state, plane, old_plane_state, new_plane_state, i) {
10158                         if (plane->type == DRM_PLANE_TYPE_CURSOR)
10159                                 continue;
10160
10161                         if (new_plane_state->crtc == crtc ||
10162                             old_plane_state->crtc == crtc) {
10163                                 modified = true;
10164                                 break;
10165                         }
10166                 }
10167
10168                 if (!modified)
10169                         continue;
10170
10171                 drm_for_each_plane_mask(plane, state->dev, crtc->state->plane_mask) {
10172                         if (plane->type == DRM_PLANE_TYPE_CURSOR)
10173                                 continue;
10174
10175                         new_plane_state =
10176                                 drm_atomic_get_plane_state(state, plane);
10177
10178                         if (IS_ERR(new_plane_state)) {
10179                                 ret = PTR_ERR(new_plane_state);
10180                                 goto fail;
10181                         }
10182                 }
10183         }
10184
10185         /* Remove existing planes if they are modified */
10186         for_each_oldnew_plane_in_state_reverse(state, plane, old_plane_state, new_plane_state, i) {
10187                 ret = dm_update_plane_state(dc, state, plane,
10188                                             old_plane_state,
10189                                             new_plane_state,
10190                                             false,
10191                                             &lock_and_validation_needed);
10192                 if (ret)
10193                         goto fail;
10194         }
10195
10196         /* Disable all crtcs which require disable */
10197         for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
10198                 ret = dm_update_crtc_state(&adev->dm, state, crtc,
10199                                            old_crtc_state,
10200                                            new_crtc_state,
10201                                            false,
10202                                            &lock_and_validation_needed);
10203                 if (ret)
10204                         goto fail;
10205         }
10206
10207         /* Enable all crtcs which require enable */
10208         for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
10209                 ret = dm_update_crtc_state(&adev->dm, state, crtc,
10210                                            old_crtc_state,
10211                                            new_crtc_state,
10212                                            true,
10213                                            &lock_and_validation_needed);
10214                 if (ret)
10215                         goto fail;
10216         }
10217
10218         ret = validate_overlay(state);
10219         if (ret)
10220                 goto fail;
10221
10222         /* Add new/modified planes */
10223         for_each_oldnew_plane_in_state_reverse(state, plane, old_plane_state, new_plane_state, i) {
10224                 ret = dm_update_plane_state(dc, state, plane,
10225                                             old_plane_state,
10226                                             new_plane_state,
10227                                             true,
10228                                             &lock_and_validation_needed);
10229                 if (ret)
10230                         goto fail;
10231         }
10232
10233         /* Run this here since we want to validate the streams we created */
10234         ret = drm_atomic_helper_check_planes(dev, state);
10235         if (ret)
10236                 goto fail;
10237
10238         /* Check cursor planes scaling */
10239         for_each_new_crtc_in_state(state, crtc, new_crtc_state, i) {
10240                 ret = dm_check_crtc_cursor(state, crtc, new_crtc_state);
10241                 if (ret)
10242                         goto fail;
10243         }
10244
10245         if (state->legacy_cursor_update) {
10246                 /*
10247                  * This is a fast cursor update coming from the plane update
10248                  * helper, check if it can be done asynchronously for better
10249                  * performance.
10250                  */
10251                 state->async_update =
10252                         !drm_atomic_helper_async_check(dev, state);
10253
10254                 /*
10255                  * Skip the remaining global validation if this is an async
10256                  * update. Cursor updates can be done without affecting
10257                  * state or bandwidth calcs and this avoids the performance
10258                  * penalty of locking the private state object and
10259                  * allocating a new dc_state.
10260                  */
10261                 if (state->async_update)
10262                         return 0;
10263         }
10264
10265         /* Check scaling and underscan changes */
10266         /* TODO: Removed scaling changes validation due to inability to commit
10267          * a new stream into the context without causing a full reset. Need to
10268          * decide how to handle this.
10269          */
10270         for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) {
10271                 struct dm_connector_state *dm_old_con_state = to_dm_connector_state(old_con_state);
10272                 struct dm_connector_state *dm_new_con_state = to_dm_connector_state(new_con_state);
10273                 struct amdgpu_crtc *acrtc = to_amdgpu_crtc(dm_new_con_state->base.crtc);
10274
10275                 /* Skip any modesets/resets */
10276                 if (!acrtc || drm_atomic_crtc_needs_modeset(
10277                                 drm_atomic_get_new_crtc_state(state, &acrtc->base)))
10278                         continue;
10279
10280                 /* Skip anything that is not a scaling or underscan change */
10281                 if (!is_scaling_state_different(dm_new_con_state, dm_old_con_state))
10282                         continue;
10283
10284                 lock_and_validation_needed = true;
10285         }
10286
10287         /*
10288          * Streams and planes are reset when there are changes that affect
10289          * bandwidth. Anything that affects bandwidth needs to go through
10290          * DC global validation to ensure that the configuration can be applied
10291          * to hardware.
10292          *
10293          * We have to currently stall out here in atomic_check for outstanding
10294          * commits to finish in this case because our IRQ handlers reference
10295          * DRM state directly - we can end up disabling interrupts too early
10296          * if we don't.
10297          *
10298          * TODO: Remove this stall and drop DM state private objects.
10299          */
10300         if (lock_and_validation_needed) {
10301                 ret = dm_atomic_get_state(state, &dm_state);
10302                 if (ret)
10303                         goto fail;
10304
10305                 ret = do_acquire_global_lock(dev, state);
10306                 if (ret)
10307                         goto fail;
10308
10309 #if defined(CONFIG_DRM_AMD_DC_DCN)
10310                 if (!compute_mst_dsc_configs_for_state(state, dm_state->context))
10311                         goto fail;
10312
10313                 ret = dm_update_mst_vcpi_slots_for_dsc(state, dm_state->context);
10314                 if (ret)
10315                         goto fail;
10316 #endif
10317
10318                 /*
10319                  * Perform validation of MST topology in the state:
10320                  * We need to perform MST atomic check before calling
10321                  * dc_validate_global_state(), or there is a chance
10322                  * of getting stuck in an infinite loop and eventually hanging.
10323                  */
10324                 ret = drm_dp_mst_atomic_check(state);
10325                 if (ret)
10326                         goto fail;
10327                 status = dc_validate_global_state(dc, dm_state->context, false);
10328                 if (status != DC_OK) {
10329                         DC_LOG_WARNING("DC global validation failure: %s (%d)",
10330                                        dc_status_to_str(status), status);
10331                         ret = -EINVAL;
10332                         goto fail;
10333                 }
10334         } else {
10335                 /*
10336                  * The commit is a fast update. Fast updates shouldn't change
10337                  * the DC context, affect global validation, and can have their
10338                  * commit work done in parallel with other commits not touching
10339                  * the same resource. If we have a new DC context as part of
10340                  * the DM atomic state from validation we need to free it and
10341                  * retain the existing one instead.
10342                  *
10343                  * Furthermore, since the DM atomic state only contains the DC
10344                  * context and can safely be annulled, we can free the state
10345                  * and clear the associated private object now to free
10346                  * some memory and avoid a possible use-after-free later.
10347                  */
10348
10349                 for (i = 0; i < state->num_private_objs; i++) {
10350                         struct drm_private_obj *obj = state->private_objs[i].ptr;
10351
10352                         if (obj->funcs == adev->dm.atomic_obj.funcs) {
10353                                 int j = state->num_private_objs-1;
10354
10355                                 dm_atomic_destroy_state(obj,
10356                                                 state->private_objs[i].state);
10357
10358                                 /* If i is not at the end of the array then the
10359                                  * last element needs to be moved to where i was
10360                                  * before the array can safely be truncated.
10361                                  */
10362                                 if (i != j)
10363                                         state->private_objs[i] =
10364                                                 state->private_objs[j];
10365
10366                                 state->private_objs[j].ptr = NULL;
10367                                 state->private_objs[j].state = NULL;
10368                                 state->private_objs[j].old_state = NULL;
10369                                 state->private_objs[j].new_state = NULL;
10370
10371                                 state->num_private_objs = j;
10372                                 break;
10373                         }
10374                 }
10375         }
10376
10377         /* Store the overall update type for use later in atomic check. */
10378         for_each_new_crtc_in_state(state, crtc, new_crtc_state, i) {
10379                 struct dm_crtc_state *dm_new_crtc_state =
10380                         to_dm_crtc_state(new_crtc_state);
10381
10382                 dm_new_crtc_state->update_type = lock_and_validation_needed ?
10383                                                          UPDATE_TYPE_FULL :
10384                                                          UPDATE_TYPE_FAST;
10385         }
10386
10387         /* Must be success (ret == 0) at this point */
10388         WARN_ON(ret);
10389
10390         trace_amdgpu_dm_atomic_check_finish(state, ret);
10391
10392         return ret;
10393
10394 fail:
10395         if (ret == -EDEADLK)
10396                 DRM_DEBUG_DRIVER("Atomic check stopped to avoid deadlock.\n");
10397         else if (ret == -EINTR || ret == -EAGAIN || ret == -ERESTARTSYS)
10398                 DRM_DEBUG_DRIVER("Atomic check stopped due to signal.\n");
10399         else
10400                 DRM_DEBUG_DRIVER("Atomic check failed with err: %d\n", ret);
10401
10402         trace_amdgpu_dm_atomic_check_finish(state, ret);
10403
10404         return ret;
10405 }
10406
10407 static bool is_dp_capable_without_timing_msa(struct dc *dc,
10408                                              struct amdgpu_dm_connector *amdgpu_dm_connector)
10409 {
10410         uint8_t dpcd_data;
10411         bool capable = false;
10412
10413         if (amdgpu_dm_connector->dc_link &&
10414                 dm_helpers_dp_read_dpcd(
10415                                 NULL,
10416                                 amdgpu_dm_connector->dc_link,
10417                                 DP_DOWN_STREAM_PORT_COUNT,
10418                                 &dpcd_data,
10419                                 sizeof(dpcd_data))) {
10420                 capable = (dpcd_data & DP_MSA_TIMING_PAR_IGNORED) != 0;
10421         }
10422
10423         return capable;
10424 }
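
/*
 * DP_MSA_TIMING_PAR_IGNORED lives in the DOWN_STREAM_PORT_COUNT DPCD
 * register read above; a sink that sets it can regenerate its own timing
 * rather than relying on the MSA parameters, which is what the caller
 * uses to gate FreeSync support on DP/eDP sinks.
 */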
10425
10426 static bool parse_edid_cea(struct amdgpu_dm_connector *aconnector,
10427                 uint8_t *edid_ext, int len,
10428                 struct amdgpu_hdmi_vsdb_info *vsdb_info)
10429 {
10430         int i;
10431         struct amdgpu_device *adev = drm_to_adev(aconnector->base.dev);
10432         struct dc *dc = adev->dm.dc;
10433
10434         /* send extension block to DMCU for parsing */
10435         for (i = 0; i < len; i += 8) {
10436                 bool res;
10437                 int offset;
10438
10439                 /* send 8 bytes at a time */
10440                 if (!dc_edid_parser_send_cea(dc, i, len, &edid_ext[i], 8))
10441                         return false;
10442
10443                 if (i + 8 == len) {
10444                         /* EDID block send completed, expect a result */
10445                         int version, min_rate, max_rate;
10446
10447                         res = dc_edid_parser_recv_amd_vsdb(dc, &version, &min_rate, &max_rate);
10448                         if (res) {
10449                                 /* amd vsdb found */
10450                                 vsdb_info->freesync_supported = 1;
10451                                 vsdb_info->amd_vsdb_version = version;
10452                                 vsdb_info->min_refresh_rate_hz = min_rate;
10453                                 vsdb_info->max_refresh_rate_hz = max_rate;
10454                                 return true;
10455                         }
10456                         /* not amd vsdb */
10457                         return false;
10458                 }
10459
10460                 /* check for ack */
10461                 res = dc_edid_parser_recv_cea_ack(dc, &offset);
10462                 if (!res)
10463                         return false;
10464         }
10465
10466         return false;
10467 }
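
/*
 * The loop above relies on len being a multiple of 8; the caller passes
 * EDID_LENGTH (128 bytes, one CEA extension block), so the final chunk
 * always satisfies i + 8 == len and triggers the VSDB result read.
 */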
10468
10469 static int parse_hdmi_amd_vsdb(struct amdgpu_dm_connector *aconnector,
10470                 struct edid *edid, struct amdgpu_hdmi_vsdb_info *vsdb_info)
10471 {
10472         uint8_t *edid_ext = NULL;
10473         int i;
10474         bool valid_vsdb_found = false;
10475
10476         /*----- drm_find_cea_extension() -----*/
10477         /* No EDID or EDID extensions */
10478         if (edid == NULL || edid->extensions == 0)
10479                 return -ENODEV;
10480
10481         /* Find CEA extension */
10482         for (i = 0; i < edid->extensions; i++) {
10483                 edid_ext = (uint8_t *)edid + EDID_LENGTH * (i + 1);
10484                 if (edid_ext[0] == CEA_EXT)
10485                         break;
10486         }
10487
10488         if (i == edid->extensions)
10489                 return -ENODEV;
10490
10491         /*----- cea_db_offsets() -----*/
10492         if (edid_ext[0] != CEA_EXT)
10493                 return -ENODEV;
10494
10495         valid_vsdb_found = parse_edid_cea(aconnector, edid_ext, EDID_LENGTH, vsdb_info);
10496
10497         return valid_vsdb_found ? i : -ENODEV;
10498 }
10499
void amdgpu_dm_update_freesync_caps(struct drm_connector *connector,
                                        struct edid *edid)
{
        int i = 0;
        struct detailed_timing *timing;
        struct detailed_non_pixel *data;
        struct detailed_data_monitor_range *range;
        struct amdgpu_dm_connector *amdgpu_dm_connector =
                        to_amdgpu_dm_connector(connector);
        struct dm_connector_state *dm_con_state = NULL;

        struct drm_device *dev = connector->dev;
        struct amdgpu_device *adev = drm_to_adev(dev);
        bool freesync_capable = false;
        struct amdgpu_hdmi_vsdb_info vsdb_info = {0};

        if (!connector->state) {
                DRM_ERROR("%s - Connector has no state\n", __func__);
                goto update;
        }

        if (!edid) {
                dm_con_state = to_dm_connector_state(connector->state);

                amdgpu_dm_connector->min_vfreq = 0;
                amdgpu_dm_connector->max_vfreq = 0;
                amdgpu_dm_connector->pixel_clock_mhz = 0;

                goto update;
        }

        dm_con_state = to_dm_connector_state(connector->state);

        if (!amdgpu_dm_connector->dc_sink) {
                DRM_ERROR("dc_sink NULL, could not add FreeSync module.\n");
                goto update;
        }
        if (!adev->dm.freesync_module)
                goto update;

        if (amdgpu_dm_connector->dc_sink->sink_signal == SIGNAL_TYPE_DISPLAY_PORT
                || amdgpu_dm_connector->dc_sink->sink_signal == SIGNAL_TYPE_EDP) {
                bool edid_check_required = false;

                if (edid) {
                        edid_check_required = is_dp_capable_without_timing_msa(
                                                adev->dm.dc,
                                                amdgpu_dm_connector);
                }

                if (edid_check_required && (edid->version > 1 ||
                   (edid->version == 1 && edid->revision > 1))) {
                        for (i = 0; i < 4; i++) {
                                timing  = &edid->detailed_timings[i];
                                data    = &timing->data.other_data;
                                range   = &data->data.range;
                                /*
                                 * Check if monitor has continuous frequency mode
                                 */
                                if (data->type != EDID_DETAIL_MONITOR_RANGE)
                                        continue;
                                /*
                                 * Check for flag range limits only. If flag == 1 then
                                 * no additional timing information is provided.
                                 * Default GTF, GTF Secondary curve and CVT are not
                                 * supported.
                                 */
                                if (range->flags != 1)
                                        continue;

                                amdgpu_dm_connector->min_vfreq = range->min_vfreq;
                                amdgpu_dm_connector->max_vfreq = range->max_vfreq;
                                amdgpu_dm_connector->pixel_clock_mhz =
                                        range->pixel_clock_mhz * 10;

                                connector->display_info.monitor_range.min_vfreq = range->min_vfreq;
                                connector->display_info.monitor_range.max_vfreq = range->max_vfreq;

                                break;
                        }

                        if (amdgpu_dm_connector->max_vfreq -
                            amdgpu_dm_connector->min_vfreq > 10)
                                freesync_capable = true;
                }
        } else if (edid && amdgpu_dm_connector->dc_sink->sink_signal == SIGNAL_TYPE_HDMI_TYPE_A) {
                i = parse_hdmi_amd_vsdb(amdgpu_dm_connector, edid, &vsdb_info);
                if (i >= 0 && vsdb_info.freesync_supported) {
                        amdgpu_dm_connector->min_vfreq = vsdb_info.min_refresh_rate_hz;
                        amdgpu_dm_connector->max_vfreq = vsdb_info.max_refresh_rate_hz;
                        if (amdgpu_dm_connector->max_vfreq - amdgpu_dm_connector->min_vfreq > 10)
                                freesync_capable = true;

                        connector->display_info.monitor_range.min_vfreq = vsdb_info.min_refresh_rate_hz;
                        connector->display_info.monitor_range.max_vfreq = vsdb_info.max_refresh_rate_hz;
                }
        }

update:
        if (dm_con_state)
                dm_con_state->freesync_capable = freesync_capable;

        if (connector->vrr_capable_property)
                drm_connector_set_vrr_capable_property(connector,
                                                       freesync_capable);
}

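/*
 * amdgpu_dm_set_psr_caps() - read PSR capabilities from the eDP sink's DPCD
 * @link: link to probe
 *
 * Caches the sink's PSR version and records whether the PSR feature can be
 * enabled on this link.
 */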
static void amdgpu_dm_set_psr_caps(struct dc_link *link)
{
        uint8_t dpcd_data[EDP_PSR_RECEIVER_CAP_SIZE];

        if (!(link->connector_signal & SIGNAL_TYPE_EDP))
                return;
        if (link->type == dc_connection_none)
                return;
        if (dm_helpers_dp_read_dpcd(NULL, link, DP_PSR_SUPPORT,
                                        dpcd_data, sizeof(dpcd_data))) {
                link->dpcd_caps.psr_caps.psr_version = dpcd_data[0];

                if (dpcd_data[0] == 0) {
                        link->psr_settings.psr_version = DC_PSR_VERSION_UNSUPPORTED;
                        link->psr_settings.psr_feature_enabled = false;
                } else {
                        link->psr_settings.psr_version = DC_PSR_VERSION_1;
                        link->psr_settings.psr_feature_enabled = true;
                }

                DRM_INFO("PSR support: %d\n", link->psr_settings.psr_feature_enabled);
        }
}

/*
 * amdgpu_dm_link_setup_psr() - configure the psr link
 * @stream: stream state
 *
 * Return: true on success
 */
static bool amdgpu_dm_link_setup_psr(struct dc_stream_state *stream)
{
        struct dc_link *link = NULL;
        struct psr_config psr_config = {0};
        struct psr_context psr_context = {0};
        bool ret = false;

        if (stream == NULL)
                return false;

        link = stream->link;

        psr_config.psr_version = link->dpcd_caps.psr_caps.psr_version;

        if (psr_config.psr_version > 0) {
                psr_config.psr_exit_link_training_required = 0x1;
                psr_config.psr_frame_capture_indication_req = 0;
                psr_config.psr_rfb_setup_time = 0x37;
                psr_config.psr_sdp_transmit_line_num_deadline = 0x20;
                psr_config.allow_smu_optimizations = 0x0;

                ret = dc_link_setup_psr(link, stream, &psr_config, &psr_context);
        }
        DRM_DEBUG_DRIVER("PSR link: %d\n", link->psr_settings.psr_feature_enabled);

        return ret;
}

/*
 * amdgpu_dm_psr_enable() - enable psr f/w
 * @stream: stream state
 *
 * Return: true on success
 */
bool amdgpu_dm_psr_enable(struct dc_stream_state *stream)
{
        struct dc_link *link = stream->link;
        unsigned int vsync_rate_hz = 0;
        struct dc_static_screen_params params = {0};
        /*
         * Calculate the number of static frames before generating the
         * interrupt to enter PSR; fail safe is 2 static frames.
         */
        unsigned int num_frames_static = 2;

        DRM_DEBUG_DRIVER("Enabling psr...\n");

        vsync_rate_hz = div64_u64(div64_u64((
                        stream->timing.pix_clk_100hz * 100),
                        stream->timing.v_total),
                        stream->timing.h_total);

        /*
         * Round up.
         * Calculate the number of frames such that at least 30 ms of time
         * has passed.
         */
        if (vsync_rate_hz != 0) {
                unsigned int frame_time_microsec = 1000000 / vsync_rate_hz;

                num_frames_static = (30000 / frame_time_microsec) + 1;
        }

        params.triggers.cursor_update = true;
        params.triggers.overlay_update = true;
        params.triggers.surface_update = true;
        params.num_frames = num_frames_static;

        dc_stream_set_static_screen_params(link->ctx->dc,
                                           &stream, 1,
                                           &params);

        return dc_link_set_psr_allow_active(link, true, false, false);
}

/*
 * amdgpu_dm_psr_disable() - disable psr f/w
 * @stream: stream state
 *
 * Return: true on success
 */
static bool amdgpu_dm_psr_disable(struct dc_stream_state *stream)
{
        DRM_DEBUG_DRIVER("Disabling psr...\n");

        return dc_link_set_psr_allow_active(stream->link, false, true, false);
}

/*
 * amdgpu_dm_psr_disable_all() - disable psr f/w
 * if psr is enabled on any stream
 *
 * Return: true on success
 */
static bool amdgpu_dm_psr_disable_all(struct amdgpu_display_manager *dm)
{
        DRM_DEBUG_DRIVER("Disabling psr if psr is enabled on any stream\n");
        return dc_set_psr_allow_active(dm->dc, false);
}

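/*
 * amdgpu_dm_trigger_timing_sync() - (re)apply CRTC timing synchronization
 * @dev: DRM device
 *
 * Propagates the force_timing_sync setting to every active stream and
 * retriggers the sync, all under the dc_lock.
 */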
void amdgpu_dm_trigger_timing_sync(struct drm_device *dev)
{
        struct amdgpu_device *adev = drm_to_adev(dev);
        struct dc *dc = adev->dm.dc;
        int i;

        mutex_lock(&adev->dm.dc_lock);
        if (dc->current_state) {
                for (i = 0; i < dc->current_state->stream_count; ++i)
                        dc->current_state->streams[i]
                                ->triggered_crtc_reset.enabled =
                                adev->dm.force_timing_sync;

                dm_enable_per_frame_crtc_master_sync(dc->current_state);
                dc_trigger_sync(dc, dc->current_state);
        }
        mutex_unlock(&adev->dm.dc_lock);
}

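/*
 * dm_write_reg_func() - register write hook used by DC, with tracing
 * @ctx: DC context
 * @address: register offset
 * @value: value to write
 * @func_name: caller name, for diagnostics
 */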
void dm_write_reg_func(const struct dc_context *ctx, uint32_t address,
                       uint32_t value, const char *func_name)
{
#ifdef DM_CHECK_ADDR_0
        if (address == 0) {
                DC_ERR("invalid register write. address = 0\n");
                return;
        }
#endif
        cgs_write_register(ctx->cgs_device, address, value);
        trace_amdgpu_dc_wreg(&ctx->perf_trace->write_count, address, value);
}

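/*
 * dm_read_reg_func() - register read hook used by DC, with tracing
 * @ctx: DC context
 * @address: register offset
 * @func_name: caller name, for diagnostics
 *
 * Return: the register value, or 0 if the read is rejected.
 */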
uint32_t dm_read_reg_func(const struct dc_context *ctx, uint32_t address,
                          const char *func_name)
{
        uint32_t value;
#ifdef DM_CHECK_ADDR_0
        if (address == 0) {
                DC_ERR("invalid register read; address = 0\n");
                return 0;
        }
#endif

        /*
         * Direct reads are not serviceable while the DMUB register helper
         * is gathering offloaded writes, unless it is burst-writing.
         */
        if (ctx->dmub_srv &&
            ctx->dmub_srv->reg_helper_offload.gather_in_progress &&
            !ctx->dmub_srv->reg_helper_offload.should_burst_write) {
                ASSERT(false);
                return 0;
        }

        value = cgs_read_register(ctx->cgs_device, address);

        trace_amdgpu_dc_rreg(&ctx->perf_trace->read_count, address, value);

        return value;
}

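/*
 * amdgpu_dm_process_dmub_aux_transfer_sync() - run an AUX transfer through
 * DMUB and wait for the result
 * @ctx: DC context
 * @linkIndex: index of the link to transfer on
 * @payload: AUX payload; reply data is copied back into it on reads
 * @operation_result: set to the AUX return code of the transfer
 *
 * Return: the length of the AUX reply, or -1 on timeout.
 */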
int amdgpu_dm_process_dmub_aux_transfer_sync(struct dc_context *ctx, unsigned int linkIndex,
                                struct aux_payload *payload, enum aux_return_code_type *operation_result)
{
        struct amdgpu_device *adev = ctx->driver_context;
        int ret = 0;

        dc_process_dmub_aux_transfer_async(ctx->dc, linkIndex, payload);
        ret = wait_for_completion_interruptible_timeout(&adev->dm.dmub_aux_transfer_done, 10 * HZ);
        if (ret == 0) {
                *operation_result = AUX_RET_ERROR_TIMEOUT;
                return -1;
        }
        *operation_result = (enum aux_return_code_type)adev->dm.dmub_notify->result;

        if (adev->dm.dmub_notify->result == AUX_RET_SUCCESS) {
                (*payload->reply) = adev->dm.dmub_notify->aux_reply.command;

                /* for a read, copy the reply data into the payload */
                if (!payload->write && adev->dm.dmub_notify->aux_reply.length &&
                    (*payload->reply == AUX_TRANSACTION_REPLY_AUX_ACK))
                        memcpy(payload->data, adev->dm.dmub_notify->aux_reply.data,
                               adev->dm.dmub_notify->aux_reply.length);
        }

        return adev->dm.dmub_notify->aux_reply.length;
}