drm/amd/display: Don't ASSERT when total_planes == AMDGPU_MAX_PLANES
[platform/kernel/linux-rpi.git] / drivers / gpu / drm / amd / display / amdgpu_dm / amdgpu_dm.c
1 /*
2  * Copyright 2015 Advanced Micro Devices, Inc.
3  *
4  * Permission is hereby granted, free of charge, to any person obtaining a
5  * copy of this software and associated documentation files (the "Software"),
6  * to deal in the Software without restriction, including without limitation
7  * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8  * and/or sell copies of the Software, and to permit persons to whom the
9  * Software is furnished to do so, subject to the following conditions:
10  *
11  * The above copyright notice and this permission notice shall be included in
12  * all copies or substantial portions of the Software.
13  *
14  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
17  * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18  * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19  * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20  * OTHER DEALINGS IN THE SOFTWARE.
21  *
22  * Authors: AMD
23  *
24  */
25
26 /* The caprices of the preprocessor require that this be declared right here */
27 #define CREATE_TRACE_POINTS
28
29 #include "dm_services_types.h"
30 #include "dc.h"
31 #include "dc/inc/core_types.h"
32
33 #include "vid.h"
34 #include "amdgpu.h"
35 #include "amdgpu_display.h"
36 #include "amdgpu_ucode.h"
37 #include "atom.h"
38 #include "amdgpu_dm.h"
39 #include "amdgpu_pm.h"
40
41 #include "amd_shared.h"
42 #include "amdgpu_dm_irq.h"
43 #include "dm_helpers.h"
44 #include "amdgpu_dm_mst_types.h"
45 #if defined(CONFIG_DEBUG_FS)
46 #include "amdgpu_dm_debugfs.h"
47 #endif
48
49 #include "ivsrcid/ivsrcid_vislands30.h"
50
51 #include <linux/module.h>
52 #include <linux/moduleparam.h>
53 #include <linux/version.h>
54 #include <linux/types.h>
55 #include <linux/pm_runtime.h>
56 #include <linux/firmware.h>
57
58 #include <drm/drmP.h>
59 #include <drm/drm_atomic.h>
60 #include <drm/drm_atomic_uapi.h>
61 #include <drm/drm_atomic_helper.h>
62 #include <drm/drm_dp_mst_helper.h>
63 #include <drm/drm_fb_helper.h>
64 #include <drm/drm_edid.h>
65
66 #if defined(CONFIG_DRM_AMD_DC_DCN1_0)
67 #include "ivsrcid/irqsrcs_dcn_1_0.h"
68
69 #include "dcn/dcn_1_0_offset.h"
70 #include "dcn/dcn_1_0_sh_mask.h"
71 #include "soc15_hw_ip.h"
72 #include "vega10_ip_offset.h"
73
74 #include "soc15_common.h"
75 #endif
76
77 #include "modules/inc/mod_freesync.h"
78 #include "modules/power/power_helpers.h"
79 #include "modules/inc/mod_info_packet.h"
80
81 #define FIRMWARE_RAVEN_DMCU             "amdgpu/raven_dmcu.bin"
82 MODULE_FIRMWARE(FIRMWARE_RAVEN_DMCU);
83
84 /**
85  * DOC: overview
86  *
87  * The AMDgpu display manager, **amdgpu_dm** (or even simpler,
 * **dm**) sits between DRM and DC. It acts as a liaison, converting DRM
89  * requests into DC requests, and DC responses into DRM responses.
90  *
91  * The root control structure is &struct amdgpu_display_manager.
92  */
93
/* basic init/fini API */
static int amdgpu_dm_init(struct amdgpu_device *adev);
static void amdgpu_dm_fini(struct amdgpu_device *adev);

/*
 * initializes drm_device display related structures, based on the information
 * provided by DAL. The drm structures are: drm_crtc, drm_connector,
 * drm_encoder, drm_mode_config
 *
 * Returns 0 on success
 */
static int amdgpu_dm_initialize_drm_device(struct amdgpu_device *adev);
/* removes and deallocates the drm structures, created by the above function */
static void amdgpu_dm_destroy_drm_device(struct amdgpu_display_manager *dm);

static void
amdgpu_dm_update_connector_after_detect(struct amdgpu_dm_connector *aconnector);

/* Per-object initializers for the DRM entities created by DM */
static int amdgpu_dm_plane_init(struct amdgpu_display_manager *dm,
				struct drm_plane *plane,
				unsigned long possible_crtcs);
static int amdgpu_dm_crtc_init(struct amdgpu_display_manager *dm,
			       struct drm_plane *plane,
			       uint32_t link_index);
static int amdgpu_dm_connector_init(struct amdgpu_display_manager *dm,
				    struct amdgpu_dm_connector *amdgpu_dm_connector,
				    uint32_t link_index,
				    struct amdgpu_encoder *amdgpu_encoder);
static int amdgpu_dm_encoder_init(struct drm_device *dev,
				  struct amdgpu_encoder *aencoder,
				  uint32_t link_index);

static int amdgpu_dm_connector_get_modes(struct drm_connector *connector);

/* Atomic check/commit entry points wired into the DRM mode-config hooks */
static int amdgpu_dm_atomic_commit(struct drm_device *dev,
				   struct drm_atomic_state *state,
				   bool nonblock);

static void amdgpu_dm_atomic_commit_tail(struct drm_atomic_state *state);

static int amdgpu_dm_atomic_check(struct drm_device *dev,
				  struct drm_atomic_state *state);

static void handle_cursor_update(struct drm_plane *plane,
				 struct drm_plane_state *old_plane_state);
139
140 /*
141  * dm_vblank_get_counter
142  *
143  * @brief
144  * Get counter for number of vertical blanks
145  *
146  * @param
147  * struct amdgpu_device *adev - [in] desired amdgpu device
148  * int disp_idx - [in] which CRTC to get the counter from
149  *
150  * @return
151  * Counter for vertical blanks
152  */
153 static u32 dm_vblank_get_counter(struct amdgpu_device *adev, int crtc)
154 {
155         if (crtc >= adev->mode_info.num_crtc)
156                 return 0;
157         else {
158                 struct amdgpu_crtc *acrtc = adev->mode_info.crtcs[crtc];
159                 struct dm_crtc_state *acrtc_state = to_dm_crtc_state(
160                                 acrtc->base.state);
161
162
163                 if (acrtc_state->stream == NULL) {
164                         DRM_ERROR("dc_stream_state is NULL for crtc '%d'!\n",
165                                   crtc);
166                         return 0;
167                 }
168
169                 return dc_stream_get_vblank_counter(acrtc_state->stream);
170         }
171 }
172
173 static int dm_crtc_get_scanoutpos(struct amdgpu_device *adev, int crtc,
174                                   u32 *vbl, u32 *position)
175 {
176         uint32_t v_blank_start, v_blank_end, h_position, v_position;
177
178         if ((crtc < 0) || (crtc >= adev->mode_info.num_crtc))
179                 return -EINVAL;
180         else {
181                 struct amdgpu_crtc *acrtc = adev->mode_info.crtcs[crtc];
182                 struct dm_crtc_state *acrtc_state = to_dm_crtc_state(
183                                                 acrtc->base.state);
184
185                 if (acrtc_state->stream ==  NULL) {
186                         DRM_ERROR("dc_stream_state is NULL for crtc '%d'!\n",
187                                   crtc);
188                         return 0;
189                 }
190
191                 /*
192                  * TODO rework base driver to use values directly.
193                  * for now parse it back into reg-format
194                  */
195                 dc_stream_get_scanoutpos(acrtc_state->stream,
196                                          &v_blank_start,
197                                          &v_blank_end,
198                                          &h_position,
199                                          &v_position);
200
201                 *position = v_position | (h_position << 16);
202                 *vbl = v_blank_start | (v_blank_end << 16);
203         }
204
205         return 0;
206 }
207
/* is_idle IP hook: DM does no idle tracking yet, so always report idle. */
static bool dm_is_idle(void *handle)
{
	/* XXX todo */
	return true;
}
213
/* wait_for_idle IP hook: nothing to wait on yet, report success. */
static int dm_wait_for_idle(void *handle)
{
	/* XXX todo */
	return 0;
}
219
/* check_soft_reset IP hook: DM never requests a soft reset. */
static bool dm_check_soft_reset(void *handle)
{
	return false;
}
224
/* soft_reset IP hook: no reset work implemented, report success. */
static int dm_soft_reset(void *handle)
{
	/* XXX todo */
	return 0;
}
230
/*
 * get_crtc_by_otg_inst() - map an OTG (output timing generator) instance
 * back to its amdgpu_crtc.
 * @adev:     amdgpu device
 * @otg_inst: OTG instance reported by the IRQ source
 *
 * Returns the matching CRTC, or NULL when none matches.  An instance of
 * -1 is unexpected: it WARNs and falls back to the first CRTC.
 */
static struct amdgpu_crtc *
get_crtc_by_otg_inst(struct amdgpu_device *adev,
		     int otg_inst)
{
	struct drm_device *dev = adev->ddev;
	struct drm_crtc *crtc;
	struct amdgpu_crtc *amdgpu_crtc;

	/* -1 means "unknown instance"; warn and use CRTC 0 as fallback */
	if (otg_inst == -1) {
		WARN_ON(1);
		return adev->mode_info.crtcs[0];
	}

	/* Linear scan over all registered CRTCs for a matching instance */
	list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
		amdgpu_crtc = to_amdgpu_crtc(crtc);

		if (amdgpu_crtc->otg_inst == otg_inst)
			return amdgpu_crtc;
	}

	return NULL;
}
253
/*
 * dm_pflip_high_irq() - page-flip completion interrupt handler.
 * @interrupt_params: &struct common_irq_params carrying adev and IRQ source.
 *
 * Runs when the hardware signals that a programmed page flip took effect:
 * delivers the pending vblank event to userspace, clears the flip status,
 * and drops the vblank reference held for the flip.
 */
static void dm_pflip_high_irq(void *interrupt_params)
{
	struct amdgpu_crtc *amdgpu_crtc;
	struct common_irq_params *irq_params = interrupt_params;
	struct amdgpu_device *adev = irq_params->adev;
	unsigned long flags;

	/* Translate the pflip IRQ source into its CRTC */
	amdgpu_crtc = get_crtc_by_otg_inst(adev, irq_params->irq_src - IRQ_TYPE_PFLIP);

	/* IRQ could occur when in initial stage */
	/* TODO work and BO cleanup */
	if (amdgpu_crtc == NULL) {
		DRM_DEBUG_DRIVER("CRTC is null, returning.\n");
		return;
	}

	/* event_lock guards pflip_status and the pending event */
	spin_lock_irqsave(&adev->ddev->event_lock, flags);

	/* Ignore spurious completions: only a SUBMITTED flip is finalized */
	if (amdgpu_crtc->pflip_status != AMDGPU_FLIP_SUBMITTED){
		DRM_DEBUG_DRIVER("amdgpu_crtc->pflip_status = %d !=AMDGPU_FLIP_SUBMITTED(%d) on crtc:%d[%p] \n",
						 amdgpu_crtc->pflip_status,
						 AMDGPU_FLIP_SUBMITTED,
						 amdgpu_crtc->crtc_id,
						 amdgpu_crtc);
		spin_unlock_irqrestore(&adev->ddev->event_lock, flags);
		return;
	}

	/* Update to correct count(s) if racing with vblank irq */
	amdgpu_crtc->last_flip_vblank = drm_crtc_accurate_vblank_count(&amdgpu_crtc->base);

	/* wake up userspace */
	if (amdgpu_crtc->event) {
		drm_crtc_send_vblank_event(&amdgpu_crtc->base, amdgpu_crtc->event);

		/* page flip completed. clean up */
		amdgpu_crtc->event = NULL;

	} else
		WARN_ON(1); /* a submitted flip should always carry an event */

	amdgpu_crtc->pflip_status = AMDGPU_FLIP_NONE;
	spin_unlock_irqrestore(&adev->ddev->event_lock, flags);

	DRM_DEBUG_DRIVER("%s - crtc :%d[%p], pflip_stat:AMDGPU_FLIP_NONE\n",
					__func__, amdgpu_crtc->crtc_id, amdgpu_crtc);

	/* release the vblank reference (presumably taken when the flip was
	 * armed — confirm against the flip submission path) */
	drm_crtc_vblank_put(&amdgpu_crtc->base);
}
303
/*
 * dm_crtc_high_irq() - vertical blank interrupt handler.
 * @interrupt_params: &struct common_irq_params carrying adev and IRQ source.
 *
 * Forwards the vblank to DRM, services pending CRC capture, and — when
 * variable refresh rate is active on the stream — lets the freesync
 * module recompute and apply the vmin/vmax timing adjustment.
 */
static void dm_crtc_high_irq(void *interrupt_params)
{
	struct common_irq_params *irq_params = interrupt_params;
	struct amdgpu_device *adev = irq_params->adev;
	struct amdgpu_crtc *acrtc;
	struct dm_crtc_state *acrtc_state;

	acrtc = get_crtc_by_otg_inst(adev, irq_params->irq_src - IRQ_TYPE_VBLANK);

	if (acrtc) {
		drm_crtc_handle_vblank(&acrtc->base);
		amdgpu_dm_crtc_handle_crc_irq(&acrtc->base);

		acrtc_state = to_dm_crtc_state(acrtc->base.state);

		/* VRR bookkeeping only when a stream exists, VRR is
		 * supported, and freesync is in the active-variable state */
		if (acrtc_state->stream &&
		    acrtc_state->vrr_params.supported &&
		    acrtc_state->freesync_config.state == VRR_STATE_ACTIVE_VARIABLE) {
			mod_freesync_handle_v_update(
				adev->dm.freesync_module,
				acrtc_state->stream,
				&acrtc_state->vrr_params);

			/* push the updated adjustment down to DC */
			dc_stream_adjust_vmin_vmax(
				adev->dm.dc,
				acrtc_state->stream,
				&acrtc_state->vrr_params.adjust);
		}
	}
}
334
335 static int dm_set_clockgating_state(void *handle,
336                   enum amd_clockgating_state state)
337 {
338         return 0;
339 }
340
341 static int dm_set_powergating_state(void *handle,
342                   enum amd_powergating_state state)
343 {
344         return 0;
345 }
346
347 /* Prototypes of private functions */
348 static int dm_early_init(void* handle);
349
350 /* Allocate memory for FBC compressed data  */
351 static void amdgpu_dm_fbc_init(struct drm_connector *connector)
352 {
353         struct drm_device *dev = connector->dev;
354         struct amdgpu_device *adev = dev->dev_private;
355         struct dm_comressor_info *compressor = &adev->dm.compressor;
356         struct amdgpu_dm_connector *aconn = to_amdgpu_dm_connector(connector);
357         struct drm_display_mode *mode;
358         unsigned long max_size = 0;
359
360         if (adev->dm.dc->fbc_compressor == NULL)
361                 return;
362
363         if (aconn->dc_link->connector_signal != SIGNAL_TYPE_EDP)
364                 return;
365
366         if (compressor->bo_ptr)
367                 return;
368
369
370         list_for_each_entry(mode, &connector->modes, head) {
371                 if (max_size < mode->htotal * mode->vtotal)
372                         max_size = mode->htotal * mode->vtotal;
373         }
374
375         if (max_size) {
376                 int r = amdgpu_bo_create_kernel(adev, max_size * 4, PAGE_SIZE,
377                             AMDGPU_GEM_DOMAIN_GTT, &compressor->bo_ptr,
378                             &compressor->gpu_addr, &compressor->cpu_addr);
379
380                 if (r)
381                         DRM_ERROR("DM: Failed to initialize FBC\n");
382                 else {
383                         adev->dm.dc->ctx->fbc_gpu_addr = compressor->gpu_addr;
384                         DRM_INFO("DM: FBC alloc %lu\n", max_size*4);
385                 }
386
387         }
388
389 }
390
/*
 * amdgpu_dm_init() - create and initialize the display manager for @adev.
 *
 * Brings up, in order: the DM IRQ manager, the CGS device, Display Core
 * (DC), the freesync module, color management, the DRM display objects
 * (via amdgpu_dm_initialize_drm_device()), and vblank support.
 *
 * Returns 0 on success; on any failure, partially created state is torn
 * down through amdgpu_dm_fini() and -EINVAL is returned.
 */
static int amdgpu_dm_init(struct amdgpu_device *adev)
{
	struct dc_init_data init_data;
	adev->dm.ddev = adev->ddev;
	adev->dm.adev = adev;

	/* Zero all the fields */
	memset(&init_data, 0, sizeof(init_data));

	mutex_init(&adev->dm.dc_lock);

	if(amdgpu_dm_irq_init(adev)) {
		DRM_ERROR("amdgpu: failed to initialize DM IRQ support.\n");
		goto error;
	}

	/* Describe the ASIC to DC */
	init_data.asic_id.chip_family = adev->family;

	init_data.asic_id.pci_revision_id = adev->rev_id;
	init_data.asic_id.hw_internal_rev = adev->external_rev_id;

	init_data.asic_id.vram_width = adev->gmc.vram_width;
	/* TODO: initialize init_data.asic_id.vram_type here!!!! */
	init_data.asic_id.atombios_base_address =
		adev->mode_info.atom_context->bios;

	init_data.driver = adev;

	adev->dm.cgs_device = amdgpu_cgs_create_device(adev);

	if (!adev->dm.cgs_device) {
		DRM_ERROR("amdgpu: failed to create cgs device.\n");
		goto error;
	}

	init_data.cgs_device = adev->dm.cgs_device;

	init_data.dce_environment = DCE_ENV_PRODUCTION_DRV;

	/*
	 * TODO debug why this doesn't work on Raven
	 */
	if (adev->flags & AMD_IS_APU &&
	    adev->asic_type >= CHIP_CARRIZO &&
	    adev->asic_type < CHIP_RAVEN)
		init_data.flags.gpu_vm_support = true;

	/* FBC is opt-in through the amdgpu_dc_feature_mask module param */
	if (amdgpu_dc_feature_mask & DC_FBC_MASK)
		init_data.flags.fbc_support = true;

	/* Display Core create. */
	adev->dm.dc = dc_create(&init_data);

	if (adev->dm.dc) {
		DRM_INFO("Display Core initialized with v%s!\n", DC_VER);
	} else {
		DRM_INFO("Display Core failed to initialize with v%s!\n", DC_VER);
		goto error;
	}

	/* A missing freesync module is logged but is not fatal */
	adev->dm.freesync_module = mod_freesync_create(adev->dm.dc);
	if (!adev->dm.freesync_module) {
		DRM_ERROR(
		"amdgpu: failed to initialize freesync_module.\n");
	} else
		DRM_DEBUG_DRIVER("amdgpu: freesync_module init done %p.\n",
				adev->dm.freesync_module);

	amdgpu_dm_init_color_mod();

	if (amdgpu_dm_initialize_drm_device(adev)) {
		DRM_ERROR(
		"amdgpu: failed to initialize sw for display support.\n");
		goto error;
	}

	/* Update the actual used number of crtc */
	adev->mode_info.num_crtc = adev->dm.display_indexes_num;

	/* TODO: Add_display_info? */

	/* TODO use dynamic cursor width */
	adev->ddev->mode_config.cursor_width = adev->dm.dc->caps.max_cursor_size;
	adev->ddev->mode_config.cursor_height = adev->dm.dc->caps.max_cursor_size;

	if (drm_vblank_init(adev->ddev, adev->dm.display_indexes_num)) {
		DRM_ERROR(
		"amdgpu: failed to initialize sw for display support.\n");
		goto error;
	}

#if defined(CONFIG_DEBUG_FS)
	if (dtn_debugfs_init(adev))
		DRM_ERROR("amdgpu: failed initialize dtn debugfs support.\n");
#endif

	DRM_DEBUG_DRIVER("KMS initialized.\n");

	return 0;
error:
	amdgpu_dm_fini(adev);

	return -EINVAL;
}
495
496 static void amdgpu_dm_fini(struct amdgpu_device *adev)
497 {
498         amdgpu_dm_destroy_drm_device(&adev->dm);
499         /*
500          * TODO: pageflip, vlank interrupt
501          *
502          * amdgpu_dm_irq_fini(adev);
503          */
504
505         if (adev->dm.cgs_device) {
506                 amdgpu_cgs_destroy_device(adev->dm.cgs_device);
507                 adev->dm.cgs_device = NULL;
508         }
509         if (adev->dm.freesync_module) {
510                 mod_freesync_destroy(adev->dm.freesync_module);
511                 adev->dm.freesync_module = NULL;
512         }
513         /* DC Destroy TODO: Replace destroy DAL */
514         if (adev->dm.dc)
515                 dc_destroy(&adev->dm.dc);
516
517         mutex_destroy(&adev->dm.dc_lock);
518
519         return;
520 }
521
/*
 * load_dmcu_fw() - request and register DMCU firmware for ASICs needing it.
 * @adev: amdgpu device
 *
 * Only Raven loads DMCU firmware here; all listed earlier ASICs return 0
 * without doing anything.  Registration only happens when firmware is
 * loaded through the PSP.  A missing firmware file is tolerated (DMCU is
 * optional); request or validation errors are returned to the caller.
 *
 * Returns 0 on success or when DMCU firmware is not applicable, negative
 * errno otherwise.
 */
static int load_dmcu_fw(struct amdgpu_device *adev)
{
	const char *fw_name_dmcu;
	int r;
	const struct dmcu_firmware_header_v1_0 *hdr;

	switch(adev->asic_type) {
	/* ASICs with no DMCU firmware to load */
	case CHIP_BONAIRE:
	case CHIP_HAWAII:
	case CHIP_KAVERI:
	case CHIP_KABINI:
	case CHIP_MULLINS:
	case CHIP_TONGA:
	case CHIP_FIJI:
	case CHIP_CARRIZO:
	case CHIP_STONEY:
	case CHIP_POLARIS11:
	case CHIP_POLARIS10:
	case CHIP_POLARIS12:
	case CHIP_VEGAM:
	case CHIP_VEGA10:
	case CHIP_VEGA12:
	case CHIP_VEGA20:
		return 0;
	case CHIP_RAVEN:
		fw_name_dmcu = FIRMWARE_RAVEN_DMCU;
		break;
	default:
		DRM_ERROR("Unsupported ASIC type: 0x%X\n", adev->asic_type);
		return -EINVAL;
	}

	/* DMCU firmware is only registered for PSP-based loading */
	if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP) {
		DRM_DEBUG_KMS("dm: DMCU firmware not supported on direct or SMU loading\n");
		return 0;
	}

	r = request_firmware_direct(&adev->dm.fw_dmcu, fw_name_dmcu, adev->dev);
	if (r == -ENOENT) {
		/* DMCU firmware is not necessary, so don't raise a fuss if it's missing */
		DRM_DEBUG_KMS("dm: DMCU firmware not found\n");
		adev->dm.fw_dmcu = NULL;
		return 0;
	}
	if (r) {
		dev_err(adev->dev, "amdgpu_dm: Can't load firmware \"%s\"\n",
			fw_name_dmcu);
		return r;
	}

	r = amdgpu_ucode_validate(adev->dm.fw_dmcu);
	if (r) {
		dev_err(adev->dev, "amdgpu_dm: Can't validate firmware \"%s\"\n",
			fw_name_dmcu);
		release_firmware(adev->dm.fw_dmcu);
		adev->dm.fw_dmcu = NULL;
		return r;
	}

	/* Register the ERAM and INTV regions of the image separately;
	 * sizes come from the firmware header */
	hdr = (const struct dmcu_firmware_header_v1_0 *)adev->dm.fw_dmcu->data;
	adev->firmware.ucode[AMDGPU_UCODE_ID_DMCU_ERAM].ucode_id = AMDGPU_UCODE_ID_DMCU_ERAM;
	adev->firmware.ucode[AMDGPU_UCODE_ID_DMCU_ERAM].fw = adev->dm.fw_dmcu;
	adev->firmware.fw_size +=
		ALIGN(le32_to_cpu(hdr->header.ucode_size_bytes) - le32_to_cpu(hdr->intv_size_bytes), PAGE_SIZE);

	adev->firmware.ucode[AMDGPU_UCODE_ID_DMCU_INTV].ucode_id = AMDGPU_UCODE_ID_DMCU_INTV;
	adev->firmware.ucode[AMDGPU_UCODE_ID_DMCU_INTV].fw = adev->dm.fw_dmcu;
	adev->firmware.fw_size +=
		ALIGN(le32_to_cpu(hdr->intv_size_bytes), PAGE_SIZE);

	adev->dm.dmcu_fw_version = le32_to_cpu(hdr->header.ucode_version);

	DRM_DEBUG_KMS("PSP loading DMCU firmware\n");

	return 0;
}
598
/* sw_init IP hook: the only software-init work today is DMCU firmware. */
static int dm_sw_init(void *handle)
{
	struct amdgpu_device *adev = handle;

	return load_dmcu_fw(adev);
}
605
606 static int dm_sw_fini(void *handle)
607 {
608         struct amdgpu_device *adev = (struct amdgpu_device *)handle;
609
610         if(adev->dm.fw_dmcu) {
611                 release_firmware(adev->dm.fw_dmcu);
612                 adev->dm.fw_dmcu = NULL;
613         }
614
615         return 0;
616 }
617
618 static int detect_mst_link_for_all_connectors(struct drm_device *dev)
619 {
620         struct amdgpu_dm_connector *aconnector;
621         struct drm_connector *connector;
622         int ret = 0;
623
624         drm_modeset_lock(&dev->mode_config.connection_mutex, NULL);
625
626         list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
627                 aconnector = to_amdgpu_dm_connector(connector);
628                 if (aconnector->dc_link->type == dc_connection_mst_branch &&
629                     aconnector->mst_mgr.aux) {
630                         DRM_DEBUG_DRIVER("DM_MST: starting TM on aconnector: %p [id: %d]\n",
631                                         aconnector, aconnector->base.base.id);
632
633                         ret = drm_dp_mst_topology_mgr_set_mst(&aconnector->mst_mgr, true);
634                         if (ret < 0) {
635                                 DRM_ERROR("DM_MST: Failed to start MST\n");
636                                 ((struct dc_link *)aconnector->dc_link)->type = dc_connection_single;
637                                 return ret;
638                                 }
639                         }
640         }
641
642         drm_modeset_unlock(&dev->mode_config.connection_mutex);
643         return ret;
644 }
645
646 static int dm_late_init(void *handle)
647 {
648         struct amdgpu_device *adev = (struct amdgpu_device *)handle;
649
650         struct dmcu_iram_parameters params;
651         unsigned int linear_lut[16];
652         int i;
653         struct dmcu *dmcu = adev->dm.dc->res_pool->dmcu;
654         bool ret;
655
656         for (i = 0; i < 16; i++)
657                 linear_lut[i] = 0xFFFF * i / 15;
658
659         params.set = 0;
660         params.backlight_ramping_start = 0xCCCC;
661         params.backlight_ramping_reduction = 0xCCCCCCCC;
662         params.backlight_lut_array_size = 16;
663         params.backlight_lut_array = linear_lut;
664
665         ret = dmcu_load_iram(dmcu, params);
666
667         if (!ret)
668                 return -EINVAL;
669
670         return detect_mst_link_for_all_connectors(adev->ddev);
671 }
672
/*
 * s3_handle_mst() - suspend or resume MST topology managers across S3.
 * @dev:     drm device
 * @suspend: true on the way into suspend, false on resume
 *
 * On resume, a topology manager that fails to come back is torn down
 * (set_mst(false)) and a hotplug event is sent so userspace re-probes.
 */
static void s3_handle_mst(struct drm_device *dev, bool suspend)
{
	struct amdgpu_dm_connector *aconnector;
	struct drm_connector *connector;
	struct drm_dp_mst_topology_mgr *mgr;
	int ret;
	bool need_hotplug = false;

	drm_modeset_lock(&dev->mode_config.connection_mutex, NULL);

	list_for_each_entry(connector, &dev->mode_config.connector_list,
			    head) {
		aconnector = to_amdgpu_dm_connector(connector);
		/* skip non-MST links and downstream MST connectors
		 * (those with an mst_port) — only the branch device
		 * owns the topology manager */
		if (aconnector->dc_link->type != dc_connection_mst_branch ||
		    aconnector->mst_port)
			continue;

		mgr = &aconnector->mst_mgr;

		if (suspend) {
			drm_dp_mst_topology_mgr_suspend(mgr);
		} else {
			ret = drm_dp_mst_topology_mgr_resume(mgr);
			if (ret < 0) {
				/* topology did not survive suspend:
				 * disable MST and re-probe via hotplug */
				drm_dp_mst_topology_mgr_set_mst(mgr, false);
				need_hotplug = true;
			}
		}
	}

	drm_modeset_unlock(&dev->mode_config.connection_mutex);

	/* fire the hotplug event outside connection_mutex */
	if (need_hotplug)
		drm_kms_helper_hotplug_event(dev);
}
708
/**
 * dm_hw_init() - Initialize DC device
 * @handle: The base driver device containing the amdgpu_dm device.
 *
 * Initialize the &struct amdgpu_display_manager device. This involves calling
 * the initializers of each DM component, then populating the struct with them.
 *
 * Although the function implies hardware initialization, both hardware and
 * software are initialized here. Splitting them out to their relevant init
 * hooks is a future TODO item.
 *
 * Some notable things that are initialized here:
 *
 * - Display Core, both software and hardware
 * - DC modules that we need (freesync and color management)
 * - DRM software states
 * - Interrupt sources and handlers
 * - Vblank support
 * - Debug FS entries, if enabled
 *
 * Returns 0 on success, or the error from amdgpu_dm_init().
 */
static int dm_hw_init(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	int r;

	/* Create DAL display manager */
	r = amdgpu_dm_init(adev);
	if (r) {
		/*
		 * Previously this error was silently dropped and 0 was
		 * returned even when DM init failed.  amdgpu_dm_init()
		 * already tore down its partial state via amdgpu_dm_fini(),
		 * so just propagate the error.
		 */
		DRM_ERROR("amdgpu: failed to initialize DM.\n");
		return r;
	}

	amdgpu_dm_hpd_init(adev);

	return 0;
}
738
/**
 * dm_hw_fini() - Teardown DC device
 * @handle: The base driver device containing the amdgpu_dm device.
 *
 * Teardown components within &struct amdgpu_display_manager that require
 * cleanup. This involves cleaning up the DRM device, DC, and any modules that
 * were loaded. Also flush IRQ workqueues and disable them.
 */
static int dm_hw_fini(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	/* stop hot-plug detection first (presumably so no HPD work runs
	 * during teardown — confirm against amdgpu_dm_hpd_fini) */
	amdgpu_dm_hpd_fini(adev);

	amdgpu_dm_irq_fini(adev);
	amdgpu_dm_fini(adev);
	return 0;
}
757
/*
 * dm_suspend() - IP-block suspend hook.
 * @handle: amdgpu device (as void *)
 *
 * Suspends MST topology managers, masks DM interrupts, snapshots the
 * atomic state into dm.cached_state for dm_resume(), and puts DC into
 * ACPI D3.  Always returns 0.
 */
static int dm_suspend(void *handle)
{
	struct amdgpu_device *adev = handle;
	struct amdgpu_display_manager *dm = &adev->dm;
	int ret = 0;

	s3_handle_mst(adev->ddev, true);

	amdgpu_dm_irq_suspend(adev);

	/* a leftover cached state would indicate an unbalanced
	 * suspend/resume cycle */
	WARN_ON(adev->dm.cached_state);
	adev->dm.cached_state = drm_atomic_helper_suspend(adev->ddev);

	dc_set_power_state(dm->dc, DC_ACPI_CM_POWER_STATE_D3);

	return ret;
}
775
776 static struct amdgpu_dm_connector *
777 amdgpu_dm_find_first_crtc_matching_connector(struct drm_atomic_state *state,
778                                              struct drm_crtc *crtc)
779 {
780         uint32_t i;
781         struct drm_connector_state *new_con_state;
782         struct drm_connector *connector;
783         struct drm_crtc *crtc_from_state;
784
785         for_each_new_connector_in_state(state, connector, new_con_state, i) {
786                 crtc_from_state = new_con_state->crtc;
787
788                 if (crtc_from_state == crtc)
789                         return to_amdgpu_dm_connector(connector);
790         }
791
792         return NULL;
793 }
794
/*
 * emulated_link_detect() - fabricate a sink for a link with no physical
 * detection.
 * @link: the dc_link to populate.
 *
 * Called (see dm_resume) when the connector is user-forced but no real
 * connection was detected.  Creates a dc_sink matching the connector's
 * signal type, attaches it as the local sink, and tries to read a local
 * EDID for it.  The link type is marked dc_connection_none.
 */
static void emulated_link_detect(struct dc_link *link)
{
	struct dc_sink_init_data sink_init_data = { 0 };
	struct display_sink_capability sink_caps = { 0 };
	enum dc_edid_status edid_status;
	struct dc_context *dc_ctx = link->ctx;
	struct dc_sink *sink = NULL;
	struct dc_sink *prev_sink = NULL;

	link->type = dc_connection_none;
	prev_sink = link->local_sink;

	/*
	 * NOTE(review): prev_sink is retained here but never released or
	 * used again in this function — looks like a leftover and a
	 * possible sink refcount leak; confirm against dc_sink_retain/
	 * dc_sink_release pairing rules.
	 */
	if (prev_sink != NULL)
		dc_sink_retain(prev_sink);

	/* Map the connector signal to the DDC transaction type and the
	 * signal the emulated sink will advertise */
	switch (link->connector_signal) {
	case SIGNAL_TYPE_HDMI_TYPE_A: {
		sink_caps.transaction_type = DDC_TRANSACTION_TYPE_I2C;
		sink_caps.signal = SIGNAL_TYPE_HDMI_TYPE_A;
		break;
	}

	case SIGNAL_TYPE_DVI_SINGLE_LINK: {
		sink_caps.transaction_type = DDC_TRANSACTION_TYPE_I2C;
		sink_caps.signal = SIGNAL_TYPE_DVI_SINGLE_LINK;
		break;
	}

	case SIGNAL_TYPE_DVI_DUAL_LINK: {
		sink_caps.transaction_type = DDC_TRANSACTION_TYPE_I2C;
		sink_caps.signal = SIGNAL_TYPE_DVI_DUAL_LINK;
		break;
	}

	case SIGNAL_TYPE_LVDS: {
		sink_caps.transaction_type = DDC_TRANSACTION_TYPE_I2C;
		sink_caps.signal = SIGNAL_TYPE_LVDS;
		break;
	}

	case SIGNAL_TYPE_EDP: {
		sink_caps.transaction_type =
			DDC_TRANSACTION_TYPE_I2C_OVER_AUX;
		sink_caps.signal = SIGNAL_TYPE_EDP;
		break;
	}

	case SIGNAL_TYPE_DISPLAY_PORT: {
		sink_caps.transaction_type =
			DDC_TRANSACTION_TYPE_I2C_OVER_AUX;
		/* DP is emulated as a VIRTUAL signal — presumably so no
		 * real DP link handling is attempted; confirm */
		sink_caps.signal = SIGNAL_TYPE_VIRTUAL;
		break;
	}

	default:
		DC_ERROR("Invalid connector type! signal:%d\n",
			link->connector_signal);
		return;
	}

	sink_init_data.link = link;
	sink_init_data.sink_signal = sink_caps.signal;

	sink = dc_sink_create(&sink_init_data);
	if (!sink) {
		DC_ERROR("Failed to create sink!\n");
		return;
	}

	/* dc_sink_create returns a new reference */
	link->local_sink = sink;

	/* Best-effort EDID read; failure only logs an error */
	edid_status = dm_helpers_read_local_edid(
			link->ctx,
			link,
			sink);

	if (edid_status != EDID_OK)
		DC_ERROR("Failed to read EDID");

}
876
/*
 * dm_resume() - amd_ip_funcs.resume hook for the DM IP block.
 * @handle: the &struct amdgpu_device this IP block belongs to.
 *
 * Brings DC back to D0, re-runs sink detection on every non-MST connector
 * and replays the atomic state cached by the suspend hook, forcing a full
 * mode set on each CRTC.
 *
 * Return: 0.  Detection/commit problems are reported via DRM_ERROR rather
 * than failing the resume.
 */
static int dm_resume(void *handle)
{
	struct amdgpu_device *adev = handle;
	struct drm_device *ddev = adev->ddev;
	struct amdgpu_display_manager *dm = &adev->dm;
	struct amdgpu_dm_connector *aconnector;
	struct drm_connector *connector;
	struct drm_crtc *crtc;
	struct drm_crtc_state *new_crtc_state;
	struct dm_crtc_state *dm_new_crtc_state;
	struct drm_plane *plane;
	struct drm_plane_state *new_plane_state;
	struct dm_plane_state *dm_new_plane_state;
	enum dc_connection_type new_connection_type = dc_connection_none;
	int i;

	/* power on hardware */
	dc_set_power_state(dm->dc, DC_ACPI_CM_POWER_STATE_D0);

	/* program HPD filter */
	dc_resume(dm->dc);

	/* On resume we need to rewrite the MSTM control bits to enable MST */
	s3_handle_mst(ddev, false);

	/*
	 * early enable HPD Rx IRQ, should be done before set mode as short
	 * pulse interrupts are used for MST
	 */
	amdgpu_dm_irq_resume_early(adev);

	/* Re-detect every sink; links may have changed while suspended. */
	list_for_each_entry(connector, &ddev->mode_config.connector_list, head) {
		aconnector = to_amdgpu_dm_connector(connector);

		/*
		 * this is the case when traversing through already created
		 * MST connectors, should be skipped
		 */
		if (aconnector->mst_port)
			continue;

		mutex_lock(&aconnector->hpd_lock);
		if (!dc_link_detect_sink(aconnector->dc_link, &new_connection_type))
			DRM_ERROR("KMS: Failed to detect connector\n");

		/* Forced connector with nothing attached: emulate the link. */
		if (aconnector->base.force && new_connection_type == dc_connection_none)
			emulated_link_detect(aconnector->dc_link);
		else
			dc_link_detect(aconnector->dc_link, DETECT_REASON_HPD);

		if (aconnector->fake_enable && aconnector->dc_link->local_sink)
			aconnector->fake_enable = false;

		/* Drop the stale pre-suspend sink before resyncing state. */
		if (aconnector->dc_sink)
			dc_sink_release(aconnector->dc_sink);
		aconnector->dc_sink = NULL;
		amdgpu_dm_update_connector_after_detect(aconnector);
		mutex_unlock(&aconnector->hpd_lock);
	}

	/* Force mode set in atomic commit */
	for_each_new_crtc_in_state(dm->cached_state, crtc, new_crtc_state, i)
		new_crtc_state->active_changed = true;

	/*
	 * atomic_check is expected to create the dc states. We need to release
	 * them here, since they were duplicated as part of the suspend
	 * procedure.
	 */
	for_each_new_crtc_in_state(dm->cached_state, crtc, new_crtc_state, i) {
		dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
		if (dm_new_crtc_state->stream) {
			/* Ours should be the last reference at this point. */
			WARN_ON(kref_read(&dm_new_crtc_state->stream->refcount) > 1);
			dc_stream_release(dm_new_crtc_state->stream);
			dm_new_crtc_state->stream = NULL;
		}
	}

	for_each_new_plane_in_state(dm->cached_state, plane, new_plane_state, i) {
		dm_new_plane_state = to_dm_plane_state(new_plane_state);
		if (dm_new_plane_state->dc_state) {
			WARN_ON(kref_read(&dm_new_plane_state->dc_state->refcount) > 1);
			dc_plane_state_release(dm_new_plane_state->dc_state);
			dm_new_plane_state->dc_state = NULL;
		}
	}

	drm_atomic_helper_resume(ddev, dm->cached_state);

	dm->cached_state = NULL;

	amdgpu_dm_irq_resume_late(adev);

	return 0;
}
973
974 /**
975  * DOC: DM Lifecycle
976  *
 * DM (and consequently DC) is registered in the amdgpu base driver as an IP
978  * block. When CONFIG_DRM_AMD_DC is enabled, the DM device IP block is added to
979  * the base driver's device list to be initialized and torn down accordingly.
980  *
981  * The functions to do so are provided as hooks in &struct amd_ip_funcs.
982  */
983
/* IP-block hooks handed to the amdgpu base driver (see &struct amd_ip_funcs). */
static const struct amd_ip_funcs amdgpu_dm_funcs = {
	.name = "dm",
	.early_init = dm_early_init,
	.late_init = dm_late_init,
	.sw_init = dm_sw_init,
	.sw_fini = dm_sw_fini,
	.hw_init = dm_hw_init,
	.hw_fini = dm_hw_fini,
	.suspend = dm_suspend,
	.resume = dm_resume,
	.is_idle = dm_is_idle,
	.wait_for_idle = dm_wait_for_idle,
	.check_soft_reset = dm_check_soft_reset,
	.soft_reset = dm_soft_reset,
	.set_clockgating_state = dm_set_clockgating_state,
	.set_powergating_state = dm_set_powergating_state,
};
1001
/* IP block version descriptor the base driver adds to its device IP list. */
const struct amdgpu_ip_block_version dm_ip_block =
{
	.type = AMD_IP_BLOCK_TYPE_DCE,
	.major = 1,
	.minor = 0,
	.rev = 0,
	.funcs = &amdgpu_dm_funcs,
};
1010
1011
1012 /**
1013  * DOC: atomic
1014  *
1015  * *WIP*
1016  */
1017
/* Top-level mode-config callbacks; atomic check/commit are DM's own paths. */
static const struct drm_mode_config_funcs amdgpu_dm_mode_funcs = {
	.fb_create = amdgpu_display_user_framebuffer_create,
	.output_poll_changed = drm_fb_helper_output_poll_changed,
	.atomic_check = amdgpu_dm_atomic_check,
	.atomic_commit = amdgpu_dm_atomic_commit,
};
1024
1025 static struct drm_mode_config_helper_funcs amdgpu_dm_mode_config_helperfuncs = {
1026         .atomic_commit_tail = amdgpu_dm_atomic_commit_tail
1027 };
1028
/*
 * amdgpu_dm_update_connector_after_detect() - propagate the result of link
 * detection to the DRM side of @aconnector.
 *
 * Syncs aconnector->dc_sink and ->edid with the dc_link's local sink,
 * updating the DRM EDID property, DP CEC and FreeSync caps, and taking or
 * dropping dc_sink references accordingly.  MST connectors are skipped;
 * the MST manager maintains those in its own context.
 */
static void
amdgpu_dm_update_connector_after_detect(struct amdgpu_dm_connector *aconnector)
{
	struct drm_connector *connector = &aconnector->base;
	struct drm_device *dev = connector->dev;
	struct dc_sink *sink;

	/* MST handled by drm_mst framework */
	if (aconnector->mst_mgr.mst_state == true)
		return;


	/* Hold a local reference while we juggle aconnector->dc_sink. */
	sink = aconnector->dc_link->local_sink;
	if (sink)
		dc_sink_retain(sink);

	/*
	 * Edid mgmt connector gets first update only in mode_valid hook and then
	 * the connector sink is set to either fake or physical sink depends on link status.
	 * Skip if already done during boot.
	 */
	if (aconnector->base.force != DRM_FORCE_UNSPECIFIED
			&& aconnector->dc_em_sink) {

		/*
		 * For S3 resume with headless use eml_sink to fake stream
		 * because on resume connector->sink is set to NULL
		 */
		mutex_lock(&dev->mode_config.mutex);

		if (sink) {
			if (aconnector->dc_sink) {
				amdgpu_dm_update_freesync_caps(connector, NULL);
				/*
				 * retain and release below are used to
				 * bump up refcount for sink because the link doesn't point
				 * to it anymore after disconnect, so on next crtc to connector
				 * reshuffle by UMD we will get into unwanted dc_sink release
				 */
				dc_sink_release(aconnector->dc_sink);
			}
			aconnector->dc_sink = sink;
			dc_sink_retain(aconnector->dc_sink);
			amdgpu_dm_update_freesync_caps(connector,
					aconnector->edid);
		} else {
			amdgpu_dm_update_freesync_caps(connector, NULL);
			if (!aconnector->dc_sink) {
				/* Headless: fall back to the emulated sink. */
				aconnector->dc_sink = aconnector->dc_em_sink;
				dc_sink_retain(aconnector->dc_sink);
			}
		}

		mutex_unlock(&dev->mode_config.mutex);

		if (sink)
			dc_sink_release(sink);
		return;
	}

	/*
	 * TODO: temporary guard to look for proper fix
	 * if this sink is MST sink, we should not do anything
	 */
	if (sink && sink->sink_signal == SIGNAL_TYPE_DISPLAY_PORT_MST) {
		dc_sink_release(sink);
		return;
	}

	if (aconnector->dc_sink == sink) {
		/*
		 * We got a DP short pulse (Link Loss, DP CTS, etc...).
		 * Do nothing!!
		 */
		DRM_DEBUG_DRIVER("DCHPD: connector_id=%d: dc_sink didn't change.\n",
				aconnector->connector_id);
		if (sink)
			dc_sink_release(sink);
		return;
	}

	DRM_DEBUG_DRIVER("DCHPD: connector_id=%d: Old sink=%p New sink=%p\n",
		aconnector->connector_id, aconnector->dc_sink, sink);

	mutex_lock(&dev->mode_config.mutex);

	/*
	 * 1. Update status of the drm connector
	 * 2. Send an event and let userspace tell us what to do
	 */
	if (sink) {
		/*
		 * TODO: check if we still need the S3 mode update workaround.
		 * If yes, put it here.
		 */
		if (aconnector->dc_sink)
			amdgpu_dm_update_freesync_caps(connector, NULL);

		aconnector->dc_sink = sink;
		dc_sink_retain(aconnector->dc_sink);
		if (sink->dc_edid.length == 0) {
			/* Sink reported no EDID: clear EDID and CEC state. */
			aconnector->edid = NULL;
			drm_dp_cec_unset_edid(&aconnector->dm_dp_aux.aux);
		} else {
			aconnector->edid =
				(struct edid *) sink->dc_edid.raw_edid;


			drm_connector_update_edid_property(connector,
					aconnector->edid);
			drm_dp_cec_set_edid(&aconnector->dm_dp_aux.aux,
					    aconnector->edid);
		}
		amdgpu_dm_update_freesync_caps(connector, aconnector->edid);

	} else {
		/* Disconnected: tear down EDID/CEC/FreeSync and drop the sink. */
		drm_dp_cec_unset_edid(&aconnector->dm_dp_aux.aux);
		amdgpu_dm_update_freesync_caps(connector, NULL);
		drm_connector_update_edid_property(connector, NULL);
		aconnector->num_modes = 0;
		dc_sink_release(aconnector->dc_sink);
		aconnector->dc_sink = NULL;
		aconnector->edid = NULL;
	}

	mutex_unlock(&dev->mode_config.mutex);

	/* Drop the local reference taken at the top. */
	if (sink)
		dc_sink_release(sink);
}
1159
/*
 * handle_hpd_irq() - deferred handler for an HPD (long pulse) interrupt.
 * @param: the &struct amdgpu_dm_connector this irq source was registered for.
 *
 * Re-runs link detection under the connector's hpd_lock, updates connector
 * state and, for non-forced connectors, notifies userspace with a hotplug
 * uevent.
 */
static void handle_hpd_irq(void *param)
{
	struct amdgpu_dm_connector *aconnector = (struct amdgpu_dm_connector *)param;
	struct drm_connector *connector = &aconnector->base;
	struct drm_device *dev = connector->dev;
	enum dc_connection_type new_connection_type = dc_connection_none;

	/*
	 * In case of failure or MST no need to update connector status or notify the OS
	 * since (for MST case) MST does this in its own context.
	 */
	mutex_lock(&aconnector->hpd_lock);

	if (aconnector->fake_enable)
		aconnector->fake_enable = false;

	if (!dc_link_detect_sink(aconnector->dc_link, &new_connection_type))
		DRM_ERROR("KMS: Failed to detect connector\n");

	/* Forced connector with nothing attached: emulate the link instead. */
	if (aconnector->base.force && new_connection_type == dc_connection_none) {
		emulated_link_detect(aconnector->dc_link);


		drm_modeset_lock_all(dev);
		dm_restore_drm_connector_state(dev, connector);
		drm_modeset_unlock_all(dev);

		if (aconnector->base.force == DRM_FORCE_UNSPECIFIED)
			drm_kms_helper_hotplug_event(dev);

	} else if (dc_link_detect(aconnector->dc_link, DETECT_REASON_HPD)) {
		amdgpu_dm_update_connector_after_detect(aconnector);


		drm_modeset_lock_all(dev);
		dm_restore_drm_connector_state(dev, connector);
		drm_modeset_unlock_all(dev);

		if (aconnector->base.force == DRM_FORCE_UNSPECIFIED)
			drm_kms_helper_hotplug_event(dev);
	}
	mutex_unlock(&aconnector->hpd_lock);

}
1204
/*
 * dm_handle_hpd_rx_irq() - service DP short-pulse (IRQ_HPD) sink events.
 *
 * Reads the sink's interrupt vector (the DPCD 0x200 range for pre-1.2
 * sinks, the ESI range otherwise), hands it to the MST manager, ACKs the
 * serviced bits back to the sink and re-reads until no new IRQ is
 * reported or the retry budget is exhausted.
 */
static void dm_handle_hpd_rx_irq(struct amdgpu_dm_connector *aconnector)
{
	uint8_t esi[DP_PSR_ERROR_STATUS - DP_SINK_COUNT_ESI] = { 0 };
	uint8_t dret;
	bool new_irq_handled = false;
	int dpcd_addr;
	int dpcd_bytes_to_read;

	/* Bound the ACK/re-read loop in case the sink keeps raising IRQs. */
	const int max_process_count = 30;
	int process_count = 0;

	const struct dc_link_status *link_status = dc_link_get_status(aconnector->dc_link);

	if (link_status->dpcd_caps->dpcd_rev.raw < 0x12) {
		dpcd_bytes_to_read = DP_LANE0_1_STATUS - DP_SINK_COUNT;
		/* DPCD 0x200 - 0x201 for downstream IRQ */
		dpcd_addr = DP_SINK_COUNT;
	} else {
		dpcd_bytes_to_read = DP_PSR_ERROR_STATUS - DP_SINK_COUNT_ESI;
		/* DPCD 0x2002 - 0x2005 for downstream IRQ */
		dpcd_addr = DP_SINK_COUNT_ESI;
	}

	dret = drm_dp_dpcd_read(
		&aconnector->dm_dp_aux.aux,
		dpcd_addr,
		esi,
		dpcd_bytes_to_read);

	while (dret == dpcd_bytes_to_read &&
		process_count < max_process_count) {
		uint8_t retry;
		dret = 0;

		process_count++;

		DRM_DEBUG_DRIVER("ESI %02x %02x %02x\n", esi[0], esi[1], esi[2]);
		/* handle HPD short pulse irq */
		if (aconnector->mst_mgr.mst_state)
			drm_dp_mst_hpd_irq(
				&aconnector->mst_mgr,
				esi,
				&new_irq_handled);

		if (new_irq_handled) {
			/* ACK at DPCD to notify down stream */
			const int ack_dpcd_bytes_to_write =
				dpcd_bytes_to_read - 1;

			/* Retry the ACK write a few times; aux can be flaky. */
			for (retry = 0; retry < 3; retry++) {
				uint8_t wret;

				wret = drm_dp_dpcd_write(
					&aconnector->dm_dp_aux.aux,
					dpcd_addr + 1,
					&esi[1],
					ack_dpcd_bytes_to_write);
				if (wret == ack_dpcd_bytes_to_write)
					break;
			}

			/* check if there is new irq to be handled */
			dret = drm_dp_dpcd_read(
				&aconnector->dm_dp_aux.aux,
				dpcd_addr,
				esi,
				dpcd_bytes_to_read);

			new_irq_handled = false;
		} else {
			break;
		}
	}

	if (process_count == max_process_count)
		DRM_DEBUG_DRIVER("Loop exceeded max iterations\n");
}
1282
/*
 * handle_hpd_rx_irq() - deferred handler for a DP short-pulse interrupt.
 * @param: the &struct amdgpu_dm_connector this irq source was registered for.
 *
 * Lets DC decode the RX IRQ; on a downstream port status change of a
 * non-MST link, re-runs detection and raises a hotplug event.  MST links
 * (and links with a trained lane count) are then forwarded to
 * dm_handle_hpd_rx_irq() for ESI servicing.
 */
static void handle_hpd_rx_irq(void *param)
{
	struct amdgpu_dm_connector *aconnector = (struct amdgpu_dm_connector *)param;
	struct drm_connector *connector = &aconnector->base;
	struct drm_device *dev = connector->dev;
	struct dc_link *dc_link = aconnector->dc_link;
	bool is_mst_root_connector = aconnector->mst_mgr.mst_state;
	enum dc_connection_type new_connection_type = dc_connection_none;

	/*
	 * TODO:Temporary add mutex to protect hpd interrupt not have a gpio
	 * conflict, after implement i2c helper, this mutex should be
	 * retired.
	 */
	if (dc_link->type != dc_connection_mst_branch)
		mutex_lock(&aconnector->hpd_lock);

	if (dc_link_handle_hpd_rx_irq(dc_link, NULL, NULL) &&
			!is_mst_root_connector) {
		/* Downstream Port status changed. */
		if (!dc_link_detect_sink(dc_link, &new_connection_type))
			DRM_ERROR("KMS: Failed to detect connector\n");

		/* Forced connector with nothing attached: emulate the link. */
		if (aconnector->base.force && new_connection_type == dc_connection_none) {
			emulated_link_detect(dc_link);

			if (aconnector->fake_enable)
				aconnector->fake_enable = false;

			amdgpu_dm_update_connector_after_detect(aconnector);


			drm_modeset_lock_all(dev);
			dm_restore_drm_connector_state(dev, connector);
			drm_modeset_unlock_all(dev);

			drm_kms_helper_hotplug_event(dev);
		} else if (dc_link_detect(dc_link, DETECT_REASON_HPDRX)) {

			if (aconnector->fake_enable)
				aconnector->fake_enable = false;

			amdgpu_dm_update_connector_after_detect(aconnector);


			drm_modeset_lock_all(dev);
			dm_restore_drm_connector_state(dev, connector);
			drm_modeset_unlock_all(dev);

			drm_kms_helper_hotplug_event(dev);
		}
	}
	if ((dc_link->cur_link_settings.lane_count != LANE_COUNT_UNKNOWN) ||
	    (dc_link->type == dc_connection_mst_branch))
		dm_handle_hpd_rx_irq(aconnector);

	if (dc_link->type != dc_connection_mst_branch) {
		drm_dp_cec_irq(&aconnector->dm_dp_aux.aux);
		mutex_unlock(&aconnector->hpd_lock);
	}
}
1344
/*
 * register_hpd_handlers() - hook up DM's HPD interrupt callbacks.
 *
 * For every connector with a valid hpd (long pulse) or hpd_rx (DP short
 * pulse) irq source, register handle_hpd_irq()/handle_hpd_rx_irq() to run
 * in low (deferred) interrupt context with the connector as argument.
 */
static void register_hpd_handlers(struct amdgpu_device *adev)
{
	struct drm_device *dev = adev->ddev;
	struct drm_connector *connector;
	struct amdgpu_dm_connector *aconnector;
	const struct dc_link *dc_link;
	struct dc_interrupt_params int_params = {0};

	int_params.requested_polarity = INTERRUPT_POLARITY_DEFAULT;
	int_params.current_polarity = INTERRUPT_POLARITY_DEFAULT;

	list_for_each_entry(connector,
			&dev->mode_config.connector_list, head) {

		aconnector = to_amdgpu_dm_connector(connector);
		dc_link = aconnector->dc_link;

		if (DC_IRQ_SOURCE_INVALID != dc_link->irq_source_hpd) {
			int_params.int_context = INTERRUPT_LOW_IRQ_CONTEXT;
			int_params.irq_source = dc_link->irq_source_hpd;

			amdgpu_dm_irq_register_interrupt(adev, &int_params,
					handle_hpd_irq,
					(void *) aconnector);
		}

		if (DC_IRQ_SOURCE_INVALID != dc_link->irq_source_hpd_rx) {

			/* Also register for DP short pulse (hpd_rx). */
			int_params.int_context = INTERRUPT_LOW_IRQ_CONTEXT;
			int_params.irq_source = dc_link->irq_source_hpd_rx;

			amdgpu_dm_irq_register_interrupt(adev, &int_params,
					handle_hpd_rx_irq,
					(void *) aconnector);
		}
	}
}
1383
/*
 * Register IRQ sources and initialize IRQ callbacks for pre-DCN (DCE-style)
 * ASICs: one VBLANK and one pageflip source per display controller plus a
 * single HPD source.  Vega/Raven route these through the SOC15 DCE client.
 */
static int dce110_register_irq_handlers(struct amdgpu_device *adev)
{
	struct dc *dc = adev->dm.dc;
	struct common_irq_params *c_irq_params;
	struct dc_interrupt_params int_params = {0};
	int r;
	int i;
	unsigned client_id = AMDGPU_IRQ_CLIENTID_LEGACY;

	if (adev->asic_type == CHIP_VEGA10 ||
	    adev->asic_type == CHIP_VEGA12 ||
	    adev->asic_type == CHIP_VEGA20 ||
	    adev->asic_type == CHIP_RAVEN)
		client_id = SOC15_IH_CLIENTID_DCE;

	int_params.requested_polarity = INTERRUPT_POLARITY_DEFAULT;
	int_params.current_polarity = INTERRUPT_POLARITY_DEFAULT;

	/*
	 * Actions of amdgpu_irq_add_id():
	 * 1. Register a set() function with base driver.
	 *    Base driver will call set() function to enable/disable an
	 *    interrupt in DC hardware.
	 * 2. Register amdgpu_dm_irq_handler().
	 *    Base driver will call amdgpu_dm_irq_handler() for ALL interrupts
	 *    coming from DC hardware.
	 *    amdgpu_dm_irq_handler() will re-direct the interrupt to DC
	 *    for acknowledging and handling. */

	/* Use VBLANK interrupt */
	for (i = VISLANDS30_IV_SRCID_D1_VERTICAL_INTERRUPT0; i <= VISLANDS30_IV_SRCID_D6_VERTICAL_INTERRUPT0; i++) {
		r = amdgpu_irq_add_id(adev, client_id, i, &adev->crtc_irq);
		if (r) {
			DRM_ERROR("Failed to add crtc irq id!\n");
			return r;
		}

		int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
		int_params.irq_source =
			dc_interrupt_to_irq_source(dc, i, 0);

		c_irq_params = &adev->dm.vblank_params[int_params.irq_source - DC_IRQ_SOURCE_VBLANK1];

		c_irq_params->adev = adev;
		c_irq_params->irq_src = int_params.irq_source;

		amdgpu_dm_irq_register_interrupt(adev, &int_params,
				dm_crtc_high_irq, c_irq_params);
	}

	/* Use GRPH_PFLIP interrupt; pflip srcids are spaced two apart here. */
	for (i = VISLANDS30_IV_SRCID_D1_GRPH_PFLIP;
			i <= VISLANDS30_IV_SRCID_D6_GRPH_PFLIP; i += 2) {
		r = amdgpu_irq_add_id(adev, client_id, i, &adev->pageflip_irq);
		if (r) {
			DRM_ERROR("Failed to add page flip irq id!\n");
			return r;
		}

		int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
		int_params.irq_source =
			dc_interrupt_to_irq_source(dc, i, 0);

		c_irq_params = &adev->dm.pflip_params[int_params.irq_source - DC_IRQ_SOURCE_PFLIP_FIRST];

		c_irq_params->adev = adev;
		c_irq_params->irq_src = int_params.irq_source;

		amdgpu_dm_irq_register_interrupt(adev, &int_params,
				dm_pflip_high_irq, c_irq_params);

	}

	/* HPD */
	r = amdgpu_irq_add_id(adev, client_id,
			VISLANDS30_IV_SRCID_HOTPLUG_DETECT_A, &adev->hpd_irq);
	if (r) {
		DRM_ERROR("Failed to add hpd irq id!\n");
		return r;
	}

	register_hpd_handlers(adev);

	return 0;
}
1470
#if defined(CONFIG_DRM_AMD_DC_DCN1_0)
/*
 * Register IRQ sources and initialize IRQ callbacks for DCN 1.0 ASICs:
 * one VSTARTUP and one HUBP flip source per CRTC, plus a single HPD
 * source, all routed through the SOC15 DCE interrupt client.
 */
static int dcn10_register_irq_handlers(struct amdgpu_device *adev)
{
	struct dc *dc = adev->dm.dc;
	struct common_irq_params *c_irq_params;
	struct dc_interrupt_params int_params = {0};
	int r;
	int i;

	int_params.requested_polarity = INTERRUPT_POLARITY_DEFAULT;
	int_params.current_polarity = INTERRUPT_POLARITY_DEFAULT;

	/*
	 * Actions of amdgpu_irq_add_id():
	 * 1. Register a set() function with base driver.
	 *    Base driver will call set() function to enable/disable an
	 *    interrupt in DC hardware.
	 * 2. Register amdgpu_dm_irq_handler().
	 *    Base driver will call amdgpu_dm_irq_handler() for ALL interrupts
	 *    coming from DC hardware.
	 *    amdgpu_dm_irq_handler() will re-direct the interrupt to DC
	 *    for acknowledging and handling.
	 */

	/* Use VSTARTUP interrupt */
	for (i = DCN_1_0__SRCID__DC_D1_OTG_VSTARTUP;
			i <= DCN_1_0__SRCID__DC_D1_OTG_VSTARTUP + adev->mode_info.num_crtc - 1;
			i++) {
		r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_DCE, i, &adev->crtc_irq);

		if (r) {
			DRM_ERROR("Failed to add crtc irq id!\n");
			return r;
		}

		int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
		int_params.irq_source =
			dc_interrupt_to_irq_source(dc, i, 0);

		c_irq_params = &adev->dm.vblank_params[int_params.irq_source - DC_IRQ_SOURCE_VBLANK1];

		c_irq_params->adev = adev;
		c_irq_params->irq_src = int_params.irq_source;

		amdgpu_dm_irq_register_interrupt(adev, &int_params,
				dm_crtc_high_irq, c_irq_params);
	}

	/* Use GRPH_PFLIP interrupt */
	for (i = DCN_1_0__SRCID__HUBP0_FLIP_INTERRUPT;
			i <= DCN_1_0__SRCID__HUBP0_FLIP_INTERRUPT + adev->mode_info.num_crtc - 1;
			i++) {
		r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_DCE, i, &adev->pageflip_irq);
		if (r) {
			DRM_ERROR("Failed to add page flip irq id!\n");
			return r;
		}

		int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
		int_params.irq_source =
			dc_interrupt_to_irq_source(dc, i, 0);

		c_irq_params = &adev->dm.pflip_params[int_params.irq_source - DC_IRQ_SOURCE_PFLIP_FIRST];

		c_irq_params->adev = adev;
		c_irq_params->irq_src = int_params.irq_source;

		amdgpu_dm_irq_register_interrupt(adev, &int_params,
				dm_pflip_high_irq, c_irq_params);

	}

	/* HPD */
	r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_DCE, DCN_1_0__SRCID__DC_HPD1_INT,
			&adev->hpd_irq);
	if (r) {
		DRM_ERROR("Failed to add hpd irq id!\n");
		return r;
	}

	register_hpd_handlers(adev);

	return 0;
}
#endif
1557
1558 /*
1559  * Acquires the lock for the atomic state object and returns
1560  * the new atomic state.
1561  *
1562  * This should only be called during atomic check.
1563  */
1564 static int dm_atomic_get_state(struct drm_atomic_state *state,
1565                                struct dm_atomic_state **dm_state)
1566 {
1567         struct drm_device *dev = state->dev;
1568         struct amdgpu_device *adev = dev->dev_private;
1569         struct amdgpu_display_manager *dm = &adev->dm;
1570         struct drm_private_state *priv_state;
1571
1572         if (*dm_state)
1573                 return 0;
1574
1575         priv_state = drm_atomic_get_private_obj_state(state, &dm->atomic_obj);
1576         if (IS_ERR(priv_state))
1577                 return PTR_ERR(priv_state);
1578
1579         *dm_state = to_dm_atomic_state(priv_state);
1580
1581         return 0;
1582 }
1583
1584 struct dm_atomic_state *
1585 dm_atomic_get_new_state(struct drm_atomic_state *state)
1586 {
1587         struct drm_device *dev = state->dev;
1588         struct amdgpu_device *adev = dev->dev_private;
1589         struct amdgpu_display_manager *dm = &adev->dm;
1590         struct drm_private_obj *obj;
1591         struct drm_private_state *new_obj_state;
1592         int i;
1593
1594         for_each_new_private_obj_in_state(state, obj, new_obj_state, i) {
1595                 if (obj->funcs == dm->atomic_obj.funcs)
1596                         return to_dm_atomic_state(new_obj_state);
1597         }
1598
1599         return NULL;
1600 }
1601
1602 struct dm_atomic_state *
1603 dm_atomic_get_old_state(struct drm_atomic_state *state)
1604 {
1605         struct drm_device *dev = state->dev;
1606         struct amdgpu_device *adev = dev->dev_private;
1607         struct amdgpu_display_manager *dm = &adev->dm;
1608         struct drm_private_obj *obj;
1609         struct drm_private_state *old_obj_state;
1610         int i;
1611
1612         for_each_old_private_obj_in_state(state, obj, old_obj_state, i) {
1613                 if (obj->funcs == dm->atomic_obj.funcs)
1614                         return to_dm_atomic_state(old_obj_state);
1615         }
1616
1617         return NULL;
1618 }
1619
1620 static struct drm_private_state *
1621 dm_atomic_duplicate_state(struct drm_private_obj *obj)
1622 {
1623         struct dm_atomic_state *old_state, *new_state;
1624
1625         new_state = kzalloc(sizeof(*new_state), GFP_KERNEL);
1626         if (!new_state)
1627                 return NULL;
1628
1629         __drm_atomic_helper_private_obj_duplicate_state(obj, &new_state->base);
1630
1631         new_state->context = dc_create_state();
1632         if (!new_state->context) {
1633                 kfree(new_state);
1634                 return NULL;
1635         }
1636
1637         old_state = to_dm_atomic_state(obj->state);
1638         if (old_state && old_state->context)
1639                 dc_resource_state_copy_construct(old_state->context,
1640                                                  new_state->context);
1641
1642         return &new_state->base;
1643 }
1644
/*
 * .atomic_destroy_state hook: release the DC state context held by a DM
 * private state and free the state object itself.
 */
static void dm_atomic_destroy_state(struct drm_private_obj *obj,
                                    struct drm_private_state *state)
{
        struct dm_atomic_state *dm_state = to_dm_atomic_state(state);

        if (dm_state && dm_state->context)
                dc_release_state(dm_state->context);

        /* kfree(NULL) is a no-op, so no extra guard is needed. */
        kfree(dm_state);
}
1655
/* Private-object vtable wiring DM atomic state into the DRM atomic core. */
static struct drm_private_state_funcs dm_atomic_state_funcs = {
        .atomic_duplicate_state = dm_atomic_duplicate_state,
        .atomic_destroy_state = dm_atomic_destroy_state,
};
1660
/*
 * Configure DRM mode-config limits and hooks, then register the DM atomic
 * private object seeded with a copy of DC's current resource state.
 *
 * Returns 0 on success, -ENOMEM on allocation failure, or the error code
 * from amdgpu_display_modeset_create_props().
 */
static int amdgpu_dm_mode_config_init(struct amdgpu_device *adev)
{
        struct dm_atomic_state *state;
        int r;

        adev->mode_info.mode_config_initialized = true;

        adev->ddev->mode_config.funcs = (void *)&amdgpu_dm_mode_funcs;
        adev->ddev->mode_config.helper_private = &amdgpu_dm_mode_config_helperfuncs;

        adev->ddev->mode_config.max_width = 16384;
        adev->ddev->mode_config.max_height = 16384;

        adev->ddev->mode_config.preferred_depth = 24;
        adev->ddev->mode_config.prefer_shadow = 1;
        /* indicates support for immediate flip */
        adev->ddev->mode_config.async_page_flip = true;

        adev->ddev->mode_config.fb_base = adev->gmc.aper_base;

        state = kzalloc(sizeof(*state), GFP_KERNEL);
        if (!state)
                return -ENOMEM;

        state->context = dc_create_state();
        if (!state->context) {
                kfree(state);
                return -ENOMEM;
        }

        /* Seed the initial atomic state from DC's current configuration. */
        dc_resource_state_copy_construct_current(adev->dm.dc, state->context);

        /* Ownership of 'state' passes to the private object here. */
        drm_atomic_private_obj_init(adev->ddev,
                                    &adev->dm.atomic_obj,
                                    &state->base,
                                    &dm_atomic_state_funcs);

        /*
         * NOTE(review): on props failure the private obj stays registered;
         * presumably cleaned up by the caller's teardown path — confirm.
         */
        r = amdgpu_display_modeset_create_props(adev);
        if (r)
                return r;

        return 0;
}
1704
1705 #define AMDGPU_DM_DEFAULT_MIN_BACKLIGHT 12
1706 #define AMDGPU_DM_DEFAULT_MAX_BACKLIGHT 255
1707
1708 #if defined(CONFIG_BACKLIGHT_CLASS_DEVICE) ||\
1709         defined(CONFIG_BACKLIGHT_CLASS_DEVICE_MODULE)
1710
1711 static void amdgpu_dm_update_backlight_caps(struct amdgpu_display_manager *dm)
1712 {
1713 #if defined(CONFIG_ACPI)
1714         struct amdgpu_dm_backlight_caps caps;
1715
1716         if (dm->backlight_caps.caps_valid)
1717                 return;
1718
1719         amdgpu_acpi_get_backlight_caps(dm->adev, &caps);
1720         if (caps.caps_valid) {
1721                 dm->backlight_caps.min_input_signal = caps.min_input_signal;
1722                 dm->backlight_caps.max_input_signal = caps.max_input_signal;
1723                 dm->backlight_caps.caps_valid = true;
1724         } else {
1725                 dm->backlight_caps.min_input_signal =
1726                                 AMDGPU_DM_DEFAULT_MIN_BACKLIGHT;
1727                 dm->backlight_caps.max_input_signal =
1728                                 AMDGPU_DM_DEFAULT_MAX_BACKLIGHT;
1729         }
1730 #else
1731         dm->backlight_caps.min_input_signal = AMDGPU_DM_DEFAULT_MIN_BACKLIGHT;
1732         dm->backlight_caps.max_input_signal = AMDGPU_DM_DEFAULT_MAX_BACKLIGHT;
1733 #endif
1734 }
1735
1736 static int amdgpu_dm_backlight_update_status(struct backlight_device *bd)
1737 {
1738         struct amdgpu_display_manager *dm = bl_get_data(bd);
1739         struct amdgpu_dm_backlight_caps caps;
1740         uint32_t brightness = bd->props.brightness;
1741
1742         amdgpu_dm_update_backlight_caps(dm);
1743         caps = dm->backlight_caps;
1744         /*
1745          * The brightness input is in the range 0-255
1746          * It needs to be rescaled to be between the
1747          * requested min and max input signal
1748          *
1749          * It also needs to be scaled up by 0x101 to
1750          * match the DC interface which has a range of
1751          * 0 to 0xffff
1752          */
1753         brightness =
1754                 brightness
1755                 * 0x101
1756                 * (caps.max_input_signal - caps.min_input_signal)
1757                 / AMDGPU_MAX_BL_LEVEL
1758                 + caps.min_input_signal * 0x101;
1759
1760         if (dc_link_set_backlight_level(dm->backlight_link,
1761                         brightness, 0))
1762                 return 0;
1763         else
1764                 return 1;
1765 }
1766
1767 static int amdgpu_dm_backlight_get_brightness(struct backlight_device *bd)
1768 {
1769         struct amdgpu_display_manager *dm = bl_get_data(bd);
1770         int ret = dc_link_get_backlight_level(dm->backlight_link);
1771
1772         if (ret == DC_ERROR_UNEXPECTED)
1773                 return bd->props.brightness;
1774         return ret;
1775 }
1776
/* Backlight class device operations exposed to the backlight subsystem. */
static const struct backlight_ops amdgpu_dm_backlight_ops = {
        .get_brightness = amdgpu_dm_backlight_get_brightness,
        .update_status  = amdgpu_dm_backlight_update_status,
};
1781
1782 static void
1783 amdgpu_dm_register_backlight_device(struct amdgpu_display_manager *dm)
1784 {
1785         char bl_name[16];
1786         struct backlight_properties props = { 0 };
1787
1788         amdgpu_dm_update_backlight_caps(dm);
1789
1790         props.max_brightness = AMDGPU_MAX_BL_LEVEL;
1791         props.brightness = AMDGPU_MAX_BL_LEVEL;
1792         props.type = BACKLIGHT_RAW;
1793
1794         snprintf(bl_name, sizeof(bl_name), "amdgpu_bl%d",
1795                         dm->adev->ddev->primary->index);
1796
1797         dm->backlight_dev = backlight_device_register(bl_name,
1798                         dm->adev->ddev->dev,
1799                         dm,
1800                         &amdgpu_dm_backlight_ops,
1801                         &props);
1802
1803         if (IS_ERR(dm->backlight_dev))
1804                 DRM_ERROR("DM: Backlight registration failed!\n");
1805         else
1806                 DRM_DEBUG_DRIVER("DM: Registered Backlight device: %s\n", bl_name);
1807 }
1808
1809 #endif
1810
1811 static int initialize_plane(struct amdgpu_display_manager *dm,
1812                             struct amdgpu_mode_info *mode_info, int plane_id,
1813                             enum drm_plane_type plane_type)
1814 {
1815         struct drm_plane *plane;
1816         unsigned long possible_crtcs;
1817         int ret = 0;
1818
1819         plane = kzalloc(sizeof(struct drm_plane), GFP_KERNEL);
1820         mode_info->planes[plane_id] = plane;
1821
1822         if (!plane) {
1823                 DRM_ERROR("KMS: Failed to allocate plane\n");
1824                 return -ENOMEM;
1825         }
1826         plane->type = plane_type;
1827
1828         /*
1829          * HACK: IGT tests expect that the primary plane for a CRTC
1830          * can only have one possible CRTC. Only expose support for
1831          * any CRTC if they're not going to be used as a primary plane
1832          * for a CRTC - like overlay or underlay planes.
1833          */
1834         possible_crtcs = 1 << plane_id;
1835         if (plane_id >= dm->dc->caps.max_streams)
1836                 possible_crtcs = 0xff;
1837
1838         ret = amdgpu_dm_plane_init(dm, mode_info->planes[plane_id], possible_crtcs);
1839
1840         if (ret) {
1841                 DRM_ERROR("KMS: Failed to initialize plane\n");
1842                 return ret;
1843         }
1844
1845         return ret;
1846 }
1847
1848
/*
 * Attach the backlight device to an internal panel link (eDP/LVDS) once
 * that link has a confirmed connection. No-op when the backlight class
 * is not built in.
 */
static void register_backlight_device(struct amdgpu_display_manager *dm,
                                      struct dc_link *link)
{
#if defined(CONFIG_BACKLIGHT_CLASS_DEVICE) ||\
        defined(CONFIG_BACKLIGHT_CLASS_DEVICE_MODULE)

        if ((link->connector_signal & (SIGNAL_TYPE_EDP | SIGNAL_TYPE_LVDS)) &&
            link->type != dc_connection_none) {
                /*
                 * Even if registration failed, we should continue with
                 * DM initialization because not having a backlight control
                 * is better than a black screen.
                 */
                amdgpu_dm_register_backlight_device(dm);

                /* Only remember the link if registration actually worked. */
                if (dm->backlight_dev)
                        dm->backlight_link = link;
        }
#endif
}
1869
1870
1871 /*
1872  * In this architecture, the association
1873  * connector -> encoder -> crtc
1874  * id not really requried. The crtc and connector will hold the
1875  * display_index as an abstraction to use with DAL component
1876  *
1877  * Returns 0 on success
1878  */
static int amdgpu_dm_initialize_drm_device(struct amdgpu_device *adev)
{
        struct amdgpu_display_manager *dm = &adev->dm;
        int32_t i;
        struct amdgpu_dm_connector *aconnector = NULL;
        struct amdgpu_encoder *aencoder = NULL;
        struct amdgpu_mode_info *mode_info = &adev->mode_info;
        uint32_t link_cnt;
        int32_t overlay_planes, primary_planes, total_planes;
        enum dc_connection_type new_connection_type = dc_connection_none;

        link_cnt = dm->dc->caps.max_links;
        if (amdgpu_dm_mode_config_init(dm->adev)) {
                DRM_ERROR("DM: Failed to initialize mode config\n");
                return -EINVAL;
        }

        /*
         * Determine the number of overlay planes supported.
         * Only support DCN for now, and cap so we don't encourage
         * userspace to use up all the planes.
         */
        overlay_planes = 0;

        for (i = 0; i < dm->dc->caps.max_planes; ++i) {
                struct dc_plane_cap *plane = &dm->dc->caps.planes[i];

                if (plane->type == DC_PLANE_TYPE_DCN_UNIVERSAL &&
                    plane->blends_with_above && plane->blends_with_below &&
                    plane->supports_argb8888)
                        overlay_planes += 1;
        }

        /* Expose at most one overlay regardless of hardware capability. */
        overlay_planes = min(overlay_planes, 1);

        /* There is one primary plane per CRTC */
        primary_planes = dm->dc->caps.max_streams;

        total_planes = primary_planes + overlay_planes;
        /* total_planes == AMDGPU_MAX_PLANES is a valid configuration. */
        ASSERT(total_planes <= AMDGPU_MAX_PLANES);

        /*
         * Initialize primary planes, implicit planes for legacy IOCTLS.
         * Order is reversed to match iteration order in atomic check.
         */
        for (i = (primary_planes - 1); i >= 0; i--) {
                if (initialize_plane(dm, mode_info, i,
                                     DRM_PLANE_TYPE_PRIMARY)) {
                        DRM_ERROR("KMS: Failed to initialize primary plane\n");
                        goto fail;
                }
        }

        /*
         * Initialize overlay planes, index starting after primary planes.
         * These planes have a higher DRM index than the primary planes since
         * they should be considered as having a higher z-order.
         * Order is reversed to match iteration order in atomic check.
         */
        for (i = (overlay_planes - 1); i >= 0; i--) {
                if (initialize_plane(dm, mode_info, primary_planes + i,
                                     DRM_PLANE_TYPE_OVERLAY)) {
                        DRM_ERROR("KMS: Failed to initialize overlay plane\n");
                        goto fail;
                }
        }

        /* One CRTC per stream, each bound to its primary plane. */
        for (i = 0; i < dm->dc->caps.max_streams; i++)
                if (amdgpu_dm_crtc_init(dm, mode_info->planes[i], i)) {
                        DRM_ERROR("KMS: Failed to initialize crtc\n");
                        goto fail;
                }

        dm->display_indexes_num = dm->dc->caps.max_streams;

        /* loops over all connectors on the board */
        for (i = 0; i < link_cnt; i++) {
                struct dc_link *link = NULL;

                if (i > AMDGPU_DM_MAX_DISPLAY_INDEX) {
                        DRM_ERROR(
                                "KMS: Cannot support more than %d display indexes\n",
                                        AMDGPU_DM_MAX_DISPLAY_INDEX);
                        continue;
                }

                aconnector = kzalloc(sizeof(*aconnector), GFP_KERNEL);
                if (!aconnector)
                        goto fail;

                aencoder = kzalloc(sizeof(*aencoder), GFP_KERNEL);
                if (!aencoder)
                        goto fail;

                if (amdgpu_dm_encoder_init(dm->ddev, aencoder, i)) {
                        DRM_ERROR("KMS: Failed to initialize encoder\n");
                        goto fail;
                }

                if (amdgpu_dm_connector_init(dm, aconnector, i, aencoder)) {
                        DRM_ERROR("KMS: Failed to initialize connector\n");
                        goto fail;
                }

                link = dc_get_link_at_index(dm->dc, i);

                if (!dc_link_detect_sink(link, &new_connection_type))
                        DRM_ERROR("KMS: Failed to detect connector\n");

                /*
                 * Forced connectors with nothing physically attached get an
                 * emulated link detection instead of a hardware probe.
                 */
                if (aconnector->base.force && new_connection_type == dc_connection_none) {
                        emulated_link_detect(link);
                        amdgpu_dm_update_connector_after_detect(aconnector);

                } else if (dc_link_detect(link, DETECT_REASON_BOOT)) {
                        amdgpu_dm_update_connector_after_detect(aconnector);
                        register_backlight_device(dm, link);
                }


        }

        /* Software is initialized. Now we can register interrupt handlers. */
        switch (adev->asic_type) {
        case CHIP_BONAIRE:
        case CHIP_HAWAII:
        case CHIP_KAVERI:
        case CHIP_KABINI:
        case CHIP_MULLINS:
        case CHIP_TONGA:
        case CHIP_FIJI:
        case CHIP_CARRIZO:
        case CHIP_STONEY:
        case CHIP_POLARIS11:
        case CHIP_POLARIS10:
        case CHIP_POLARIS12:
        case CHIP_VEGAM:
        case CHIP_VEGA10:
        case CHIP_VEGA12:
        case CHIP_VEGA20:
                if (dce110_register_irq_handlers(dm->adev)) {
                        DRM_ERROR("DM: Failed to initialize IRQ\n");
                        goto fail;
                }
                break;
#if defined(CONFIG_DRM_AMD_DC_DCN1_0)
        case CHIP_RAVEN:
                if (dcn10_register_irq_handlers(dm->adev)) {
                        DRM_ERROR("DM: Failed to initialize IRQ\n");
                        goto fail;
                }
                break;
#endif
        default:
                DRM_ERROR("Unsupported ASIC type: 0x%X\n", adev->asic_type);
                goto fail;
        }

        if (adev->asic_type != CHIP_CARRIZO && adev->asic_type != CHIP_STONEY)
                dm->dc->debug.disable_stutter = amdgpu_pp_feature_mask & PP_STUTTER_MODE ? false : true;

        return 0;
fail:
        kfree(aencoder);
        kfree(aconnector);
        /*
         * NOTE(review): only primary-plane slots are freed here; overlay
         * slots (indices >= primary_planes) appear to be left unfreed on
         * failure — confirm against the driver teardown path.
         */
        for (i = 0; i < primary_planes; i++)
                kfree(mode_info->planes[i]);
        return -EINVAL;
}
2047
2048 static void amdgpu_dm_destroy_drm_device(struct amdgpu_display_manager *dm)
2049 {
2050         drm_mode_config_cleanup(dm->ddev);
2051         drm_atomic_private_obj_fini(&dm->atomic_obj);
2052         return;
2053 }
2054
2055 /******************************************************************************
2056  * amdgpu_display_funcs functions
2057  *****************************************************************************/
2058
2059 /*
2060  * dm_bandwidth_update - program display watermarks
2061  *
2062  * @adev: amdgpu_device pointer
2063  *
2064  * Calculate and program the display watermarks and line buffer allocation.
2065  */
static void dm_bandwidth_update(struct amdgpu_device *adev)
{
        /* TODO: implement later */
        /* Intentionally a stub; see the kernel-doc comment above. */
}
2070
/*
 * amdgpu display callback table. Most hooks are NULL because the
 * corresponding work is handled inside DC/DAL, as noted per entry.
 */
static const struct amdgpu_display_funcs dm_display_funcs = {
        .bandwidth_update = dm_bandwidth_update, /* called unconditionally */
        .vblank_get_counter = dm_vblank_get_counter,/* called unconditionally */
        .backlight_set_level = NULL, /* never called for DC */
        .backlight_get_level = NULL, /* never called for DC */
        .hpd_sense = NULL,/* called unconditionally */
        .hpd_set_polarity = NULL, /* called unconditionally */
        .hpd_get_gpio_reg = NULL, /* VBIOS parsing. DAL does it. */
        .page_flip_get_scanoutpos =
                dm_crtc_get_scanoutpos,/* called unconditionally */
        .add_encoder = NULL, /* VBIOS parsing. DAL does it. */
        .add_connector = NULL, /* VBIOS parsing. DAL does it. */
};
2084
2085 #if defined(CONFIG_DEBUG_KERNEL_DC)
2086
2087 static ssize_t s3_debug_store(struct device *device,
2088                               struct device_attribute *attr,
2089                               const char *buf,
2090                               size_t count)
2091 {
2092         int ret;
2093         int s3_state;
2094         struct pci_dev *pdev = to_pci_dev(device);
2095         struct drm_device *drm_dev = pci_get_drvdata(pdev);
2096         struct amdgpu_device *adev = drm_dev->dev_private;
2097
2098         ret = kstrtoint(buf, 0, &s3_state);
2099
2100         if (ret == 0) {
2101                 if (s3_state) {
2102                         dm_resume(adev);
2103                         drm_kms_helper_hotplug_event(adev->ddev);
2104                 } else
2105                         dm_suspend(adev);
2106         }
2107
2108         return ret == 0 ? count : 0;
2109 }
2110
2111 DEVICE_ATTR_WO(s3_debug);
2112
2113 #endif
2114
/*
 * Early IP-block init: record per-ASIC CRTC/HPD/DIG counts and install
 * the IRQ and display function tables before full DM initialization.
 * Returns 0 on success, -EINVAL for an unsupported ASIC.
 */
static int dm_early_init(void *handle)
{
        struct amdgpu_device *adev = (struct amdgpu_device *)handle;

        /* Per-ASIC limits for CRTCs, HPD lines and digital encoders. */
        switch (adev->asic_type) {
        case CHIP_BONAIRE:
        case CHIP_HAWAII:
                adev->mode_info.num_crtc = 6;
                adev->mode_info.num_hpd = 6;
                adev->mode_info.num_dig = 6;
                break;
        case CHIP_KAVERI:
                adev->mode_info.num_crtc = 4;
                adev->mode_info.num_hpd = 6;
                adev->mode_info.num_dig = 7;
                break;
        case CHIP_KABINI:
        case CHIP_MULLINS:
                adev->mode_info.num_crtc = 2;
                adev->mode_info.num_hpd = 6;
                adev->mode_info.num_dig = 6;
                break;
        case CHIP_FIJI:
        case CHIP_TONGA:
                adev->mode_info.num_crtc = 6;
                adev->mode_info.num_hpd = 6;
                adev->mode_info.num_dig = 7;
                break;
        case CHIP_CARRIZO:
                adev->mode_info.num_crtc = 3;
                adev->mode_info.num_hpd = 6;
                adev->mode_info.num_dig = 9;
                break;
        case CHIP_STONEY:
                adev->mode_info.num_crtc = 2;
                adev->mode_info.num_hpd = 6;
                adev->mode_info.num_dig = 9;
                break;
        case CHIP_POLARIS11:
        case CHIP_POLARIS12:
                adev->mode_info.num_crtc = 5;
                adev->mode_info.num_hpd = 5;
                adev->mode_info.num_dig = 5;
                break;
        case CHIP_POLARIS10:
        case CHIP_VEGAM:
                adev->mode_info.num_crtc = 6;
                adev->mode_info.num_hpd = 6;
                adev->mode_info.num_dig = 6;
                break;
        case CHIP_VEGA10:
        case CHIP_VEGA12:
        case CHIP_VEGA20:
                adev->mode_info.num_crtc = 6;
                adev->mode_info.num_hpd = 6;
                adev->mode_info.num_dig = 6;
                break;
#if defined(CONFIG_DRM_AMD_DC_DCN1_0)
        case CHIP_RAVEN:
                adev->mode_info.num_crtc = 4;
                adev->mode_info.num_hpd = 4;
                adev->mode_info.num_dig = 4;
                break;
#endif
        default:
                DRM_ERROR("Unsupported ASIC type: 0x%X\n", adev->asic_type);
                return -EINVAL;
        }

        amdgpu_dm_set_irq_funcs(adev);

        /* Keep any funcs table a platform may have installed earlier. */
        if (adev->mode_info.funcs == NULL)
                adev->mode_info.funcs = &dm_display_funcs;

        /*
         * Note: Do NOT change adev->audio_endpt_rreg and
         * adev->audio_endpt_wreg because they are initialised in
         * amdgpu_device_init()
         */
#if defined(CONFIG_DEBUG_KERNEL_DC)
        device_create_file(
                adev->ddev->dev,
                &dev_attr_s3_debug);
#endif

        return 0;
}
2202
2203 static bool modeset_required(struct drm_crtc_state *crtc_state,
2204                              struct dc_stream_state *new_stream,
2205                              struct dc_stream_state *old_stream)
2206 {
2207         if (!drm_atomic_crtc_needs_modeset(crtc_state))
2208                 return false;
2209
2210         if (!crtc_state->enable)
2211                 return false;
2212
2213         return crtc_state->active;
2214 }
2215
2216 static bool modereset_required(struct drm_crtc_state *crtc_state)
2217 {
2218         if (!drm_atomic_crtc_needs_modeset(crtc_state))
2219                 return false;
2220
2221         return !crtc_state->enable || !crtc_state->active;
2222 }
2223
/* Encoder .destroy hook: unregister from the DRM core, then free. */
static void amdgpu_dm_encoder_destroy(struct drm_encoder *encoder)
{
        drm_encoder_cleanup(encoder);
        kfree(encoder);
}
2229
/* Encoder vtable: only a destroy callback is provided. */
static const struct drm_encoder_funcs amdgpu_dm_encoder_funcs = {
        .destroy = amdgpu_dm_encoder_destroy,
};
2233
2234 static bool fill_rects_from_plane_state(const struct drm_plane_state *state,
2235                                         struct dc_plane_state *plane_state)
2236 {
2237         plane_state->src_rect.x = state->src_x >> 16;
2238         plane_state->src_rect.y = state->src_y >> 16;
2239         /* we ignore the mantissa for now and do not deal with floating pixels :( */
2240         plane_state->src_rect.width = state->src_w >> 16;
2241
2242         if (plane_state->src_rect.width == 0)
2243                 return false;
2244
2245         plane_state->src_rect.height = state->src_h >> 16;
2246         if (plane_state->src_rect.height == 0)
2247                 return false;
2248
2249         plane_state->dst_rect.x = state->crtc_x;
2250         plane_state->dst_rect.y = state->crtc_y;
2251
2252         if (state->crtc_w == 0)
2253                 return false;
2254
2255         plane_state->dst_rect.width = state->crtc_w;
2256
2257         if (state->crtc_h == 0)
2258                 return false;
2259
2260         plane_state->dst_rect.height = state->crtc_h;
2261
2262         plane_state->clip_rect = plane_state->dst_rect;
2263
2264         switch (state->rotation & DRM_MODE_ROTATE_MASK) {
2265         case DRM_MODE_ROTATE_0:
2266                 plane_state->rotation = ROTATION_ANGLE_0;
2267                 break;
2268         case DRM_MODE_ROTATE_90:
2269                 plane_state->rotation = ROTATION_ANGLE_90;
2270                 break;
2271         case DRM_MODE_ROTATE_180:
2272                 plane_state->rotation = ROTATION_ANGLE_180;
2273                 break;
2274         case DRM_MODE_ROTATE_270:
2275                 plane_state->rotation = ROTATION_ANGLE_270;
2276                 break;
2277         default:
2278                 plane_state->rotation = ROTATION_ANGLE_0;
2279                 break;
2280         }
2281
2282         return true;
2283 }
2284 static int get_fb_info(const struct amdgpu_framebuffer *amdgpu_fb,
2285                        uint64_t *tiling_flags)
2286 {
2287         struct amdgpu_bo *rbo = gem_to_amdgpu_bo(amdgpu_fb->base.obj[0]);
2288         int r = amdgpu_bo_reserve(rbo, false);
2289
2290         if (unlikely(r)) {
2291                 /* Don't show error message when returning -ERESTARTSYS */
2292                 if (r != -ERESTARTSYS)
2293                         DRM_ERROR("Unable to reserve buffer: %d\n", r);
2294                 return r;
2295         }
2296
2297         if (tiling_flags)
2298                 amdgpu_bo_get_tiling_flags(rbo, tiling_flags);
2299
2300         amdgpu_bo_unreserve(rbo);
2301
2302         return r;
2303 }
2304
2305 static inline uint64_t get_dcc_address(uint64_t address, uint64_t tiling_flags)
2306 {
2307         uint32_t offset = AMDGPU_TILING_GET(tiling_flags, DCC_OFFSET_256B);
2308
2309         return offset ? (address + offset * 256) : 0;
2310 }
2311
/*
 * Fill DCC (delta color compression) parameters and the DCC metadata
 * address for a plane, based on the BO tiling flags in 'info'.
 *
 * Returns 0 when DCC is simply not applicable (no offset, or a
 * non-graphics address type) or when the attributes were filled;
 * -EINVAL when DCC is requested but the DC capability check fails.
 */
static int fill_plane_dcc_attributes(struct amdgpu_device *adev,
                                      const struct amdgpu_framebuffer *afb,
                                      const struct dc_plane_state *plane_state,
                                      struct dc_plane_dcc_param *dcc,
                                      struct dc_plane_address *address,
                                      uint64_t info)
{
        struct dc *dc = adev->dm.dc;
        struct dc_dcc_surface_param input;
        struct dc_surface_dcc_cap output;
        uint32_t offset = AMDGPU_TILING_GET(info, DCC_OFFSET_256B);
        uint32_t i64b = AMDGPU_TILING_GET(info, DCC_INDEPENDENT_64B) != 0;
        uint64_t dcc_address;

        memset(&input, 0, sizeof(input));
        memset(&output, 0, sizeof(output));

        /* No DCC metadata on this BO — nothing to do. */
        if (!offset)
                return 0;

        /* DCC is only handled for plain graphics surfaces here. */
        if (plane_state->address.type != PLN_ADDR_TYPE_GRAPHICS)
                return 0;

        if (!dc->cap_funcs.get_dcc_compression_cap)
                return -EINVAL;

        /* Describe the surface so DC can judge DCC compatibility. */
        input.format = plane_state->format;
        input.surface_size.width =
                plane_state->plane_size.grph.surface_size.width;
        input.surface_size.height =
                plane_state->plane_size.grph.surface_size.height;
        input.swizzle_mode = plane_state->tiling_info.gfx9.swizzle;

        if (plane_state->rotation == ROTATION_ANGLE_0 ||
            plane_state->rotation == ROTATION_ANGLE_180)
                input.scan = SCAN_DIRECTION_HORIZONTAL;
        else if (plane_state->rotation == ROTATION_ANGLE_90 ||
                 plane_state->rotation == ROTATION_ANGLE_270)
                input.scan = SCAN_DIRECTION_VERTICAL;

        if (!dc->cap_funcs.get_dcc_compression_cap(dc, &input, &output))
                return -EINVAL;

        if (!output.capable)
                return -EINVAL;

        /* BO requires independent 64B blocks but hardware can't comply. */
        if (i64b == 0 && output.grph.rgb.independent_64b_blks != 0)
                return -EINVAL;

        dcc->enable = 1;
        dcc->grph.meta_pitch =
                AMDGPU_TILING_GET(info, DCC_PITCH_MAX) + 1;
        dcc->grph.independent_64b_blks = i64b;

        dcc_address = get_dcc_address(afb->address, info);
        address->grph.meta_addr.low_part = lower_32_bits(dcc_address);
        address->grph.meta_addr.high_part = upper_32_bits(dcc_address);

        return 0;
}
2372
/*
 * Translate amdgpu BO tiling flags into DC tiling info, and — on GFX9
 * ASICs — also fill DCC parameters and metadata address.
 *
 * Returns 0 on success or the error from fill_plane_dcc_attributes().
 */
static int
fill_plane_tiling_attributes(struct amdgpu_device *adev,
                             const struct amdgpu_framebuffer *afb,
                             const struct dc_plane_state *plane_state,
                             union dc_tiling_info *tiling_info,
                             struct dc_plane_dcc_param *dcc,
                             struct dc_plane_address *address,
                             uint64_t tiling_flags)
{
        int ret;

        memset(tiling_info, 0, sizeof(*tiling_info));
        memset(dcc, 0, sizeof(*dcc));

        /* Fill GFX8 params */
        if (AMDGPU_TILING_GET(tiling_flags, ARRAY_MODE) == DC_ARRAY_2D_TILED_THIN1) {
                unsigned int bankw, bankh, mtaspect, tile_split, num_banks;

                bankw = AMDGPU_TILING_GET(tiling_flags, BANK_WIDTH);
                bankh = AMDGPU_TILING_GET(tiling_flags, BANK_HEIGHT);
                mtaspect = AMDGPU_TILING_GET(tiling_flags, MACRO_TILE_ASPECT);
                tile_split = AMDGPU_TILING_GET(tiling_flags, TILE_SPLIT);
                num_banks = AMDGPU_TILING_GET(tiling_flags, NUM_BANKS);

                /* XXX fix me for VI */
                tiling_info->gfx8.num_banks = num_banks;
                tiling_info->gfx8.array_mode =
                                DC_ARRAY_2D_TILED_THIN1;
                tiling_info->gfx8.tile_split = tile_split;
                tiling_info->gfx8.bank_width = bankw;
                tiling_info->gfx8.bank_height = bankh;
                tiling_info->gfx8.tile_aspect = mtaspect;
                tiling_info->gfx8.tile_mode =
                                DC_ADDR_SURF_MICRO_TILING_DISPLAY;
        } else if (AMDGPU_TILING_GET(tiling_flags, ARRAY_MODE)
                        == DC_ARRAY_1D_TILED_THIN1) {
                tiling_info->gfx8.array_mode = DC_ARRAY_1D_TILED_THIN1;
        }

        tiling_info->gfx8.pipe_config =
                        AMDGPU_TILING_GET(tiling_flags, PIPE_CONFIG);

        /* GFX9 ASICs additionally need pipe/bank layout and DCC state. */
        if (adev->asic_type == CHIP_VEGA10 ||
            adev->asic_type == CHIP_VEGA12 ||
            adev->asic_type == CHIP_VEGA20 ||
            adev->asic_type == CHIP_RAVEN) {
                /* Fill GFX9 params */
                tiling_info->gfx9.num_pipes =
                        adev->gfx.config.gb_addr_config_fields.num_pipes;
                tiling_info->gfx9.num_banks =
                        adev->gfx.config.gb_addr_config_fields.num_banks;
                tiling_info->gfx9.pipe_interleave =
                        adev->gfx.config.gb_addr_config_fields.pipe_interleave_size;
                tiling_info->gfx9.num_shader_engines =
                        adev->gfx.config.gb_addr_config_fields.num_se;
                tiling_info->gfx9.max_compressed_frags =
                        adev->gfx.config.gb_addr_config_fields.max_compress_frags;
                tiling_info->gfx9.num_rb_per_se =
                        adev->gfx.config.gb_addr_config_fields.num_rb_per_se;
                tiling_info->gfx9.swizzle =
                        AMDGPU_TILING_GET(tiling_flags, SWIZZLE_MODE);
                tiling_info->gfx9.shaderEnable = 1;

                ret = fill_plane_dcc_attributes(adev, afb, plane_state, dcc,
                                                address, tiling_flags);
                if (ret)
                        return ret;
        }

        return 0;
}
2444
2445 static int fill_plane_attributes_from_fb(struct amdgpu_device *adev,
2446                                          struct dc_plane_state *plane_state,
2447                                          const struct amdgpu_framebuffer *amdgpu_fb)
2448 {
2449         uint64_t tiling_flags;
2450         unsigned int awidth;
2451         const struct drm_framebuffer *fb = &amdgpu_fb->base;
2452         int ret = 0;
2453         struct drm_format_name_buf format_name;
2454
2455         ret = get_fb_info(
2456                 amdgpu_fb,
2457                 &tiling_flags);
2458
2459         if (ret)
2460                 return ret;
2461
2462         switch (fb->format->format) {
2463         case DRM_FORMAT_C8:
2464                 plane_state->format = SURFACE_PIXEL_FORMAT_GRPH_PALETA_256_COLORS;
2465                 break;
2466         case DRM_FORMAT_RGB565:
2467                 plane_state->format = SURFACE_PIXEL_FORMAT_GRPH_RGB565;
2468                 break;
2469         case DRM_FORMAT_XRGB8888:
2470         case DRM_FORMAT_ARGB8888:
2471                 plane_state->format = SURFACE_PIXEL_FORMAT_GRPH_ARGB8888;
2472                 break;
2473         case DRM_FORMAT_XRGB2101010:
2474         case DRM_FORMAT_ARGB2101010:
2475                 plane_state->format = SURFACE_PIXEL_FORMAT_GRPH_ARGB2101010;
2476                 break;
2477         case DRM_FORMAT_XBGR2101010:
2478         case DRM_FORMAT_ABGR2101010:
2479                 plane_state->format = SURFACE_PIXEL_FORMAT_GRPH_ABGR2101010;
2480                 break;
2481         case DRM_FORMAT_XBGR8888:
2482         case DRM_FORMAT_ABGR8888:
2483                 plane_state->format = SURFACE_PIXEL_FORMAT_GRPH_ABGR8888;
2484                 break;
2485         case DRM_FORMAT_NV21:
2486                 plane_state->format = SURFACE_PIXEL_FORMAT_VIDEO_420_YCbCr;
2487                 break;
2488         case DRM_FORMAT_NV12:
2489                 plane_state->format = SURFACE_PIXEL_FORMAT_VIDEO_420_YCrCb;
2490                 break;
2491         default:
2492                 DRM_ERROR("Unsupported screen format %s\n",
2493                           drm_get_format_name(fb->format->format, &format_name));
2494                 return -EINVAL;
2495         }
2496
2497         memset(&plane_state->address, 0, sizeof(plane_state->address));
2498
2499         if (plane_state->format < SURFACE_PIXEL_FORMAT_VIDEO_BEGIN) {
2500                 plane_state->address.type = PLN_ADDR_TYPE_GRAPHICS;
2501                 plane_state->plane_size.grph.surface_size.x = 0;
2502                 plane_state->plane_size.grph.surface_size.y = 0;
2503                 plane_state->plane_size.grph.surface_size.width = fb->width;
2504                 plane_state->plane_size.grph.surface_size.height = fb->height;
2505                 plane_state->plane_size.grph.surface_pitch =
2506                                 fb->pitches[0] / fb->format->cpp[0];
2507                 /* TODO: unhardcode */
2508                 plane_state->color_space = COLOR_SPACE_SRGB;
2509
2510         } else {
2511                 awidth = ALIGN(fb->width, 64);
2512                 plane_state->address.type = PLN_ADDR_TYPE_VIDEO_PROGRESSIVE;
2513                 plane_state->plane_size.video.luma_size.x = 0;
2514                 plane_state->plane_size.video.luma_size.y = 0;
2515                 plane_state->plane_size.video.luma_size.width = awidth;
2516                 plane_state->plane_size.video.luma_size.height = fb->height;
2517                 /* TODO: unhardcode */
2518                 plane_state->plane_size.video.luma_pitch = awidth;
2519
2520                 plane_state->plane_size.video.chroma_size.x = 0;
2521                 plane_state->plane_size.video.chroma_size.y = 0;
2522                 plane_state->plane_size.video.chroma_size.width = awidth;
2523                 plane_state->plane_size.video.chroma_size.height = fb->height;
2524                 plane_state->plane_size.video.chroma_pitch = awidth / 2;
2525
2526                 /* TODO: unhardcode */
2527                 plane_state->color_space = COLOR_SPACE_YCBCR709;
2528         }
2529
2530         fill_plane_tiling_attributes(adev, amdgpu_fb, plane_state,
2531                                      &plane_state->tiling_info,
2532                                      &plane_state->dcc,
2533                                      &plane_state->address,
2534                                      tiling_flags);
2535
2536         plane_state->visible = true;
2537         plane_state->scaling_quality.h_taps_c = 0;
2538         plane_state->scaling_quality.v_taps_c = 0;
2539
2540         /* is this needed? is plane_state zeroed at allocation? */
2541         plane_state->scaling_quality.h_taps = 0;
2542         plane_state->scaling_quality.v_taps = 0;
2543         plane_state->stereo_format = PLANE_STEREO_FORMAT_NONE;
2544
2545         return ret;
2546
2547 }
2548
2549 static int fill_plane_attributes(struct amdgpu_device *adev,
2550                                  struct dc_plane_state *dc_plane_state,
2551                                  struct drm_plane_state *plane_state,
2552                                  struct drm_crtc_state *crtc_state)
2553 {
2554         const struct amdgpu_framebuffer *amdgpu_fb =
2555                 to_amdgpu_framebuffer(plane_state->fb);
2556         const struct drm_crtc *crtc = plane_state->crtc;
2557         int ret = 0;
2558
2559         if (!fill_rects_from_plane_state(plane_state, dc_plane_state))
2560                 return -EINVAL;
2561
2562         ret = fill_plane_attributes_from_fb(
2563                 crtc->dev->dev_private,
2564                 dc_plane_state,
2565                 amdgpu_fb);
2566
2567         if (ret)
2568                 return ret;
2569
2570         /*
2571          * Always set input transfer function, since plane state is refreshed
2572          * every time.
2573          */
2574         ret = amdgpu_dm_set_degamma_lut(crtc_state, dc_plane_state);
2575         if (ret) {
2576                 dc_transfer_func_release(dc_plane_state->in_transfer_func);
2577                 dc_plane_state->in_transfer_func = NULL;
2578         }
2579
2580         return ret;
2581 }
2582
2583 static void update_stream_scaling_settings(const struct drm_display_mode *mode,
2584                                            const struct dm_connector_state *dm_state,
2585                                            struct dc_stream_state *stream)
2586 {
2587         enum amdgpu_rmx_type rmx_type;
2588
2589         struct rect src = { 0 }; /* viewport in composition space*/
2590         struct rect dst = { 0 }; /* stream addressable area */
2591
2592         /* no mode. nothing to be done */
2593         if (!mode)
2594                 return;
2595
2596         /* Full screen scaling by default */
2597         src.width = mode->hdisplay;
2598         src.height = mode->vdisplay;
2599         dst.width = stream->timing.h_addressable;
2600         dst.height = stream->timing.v_addressable;
2601
2602         if (dm_state) {
2603                 rmx_type = dm_state->scaling;
2604                 if (rmx_type == RMX_ASPECT || rmx_type == RMX_OFF) {
2605                         if (src.width * dst.height <
2606                                         src.height * dst.width) {
2607                                 /* height needs less upscaling/more downscaling */
2608                                 dst.width = src.width *
2609                                                 dst.height / src.height;
2610                         } else {
2611                                 /* width needs less upscaling/more downscaling */
2612                                 dst.height = src.height *
2613                                                 dst.width / src.width;
2614                         }
2615                 } else if (rmx_type == RMX_CENTER) {
2616                         dst = src;
2617                 }
2618
2619                 dst.x = (stream->timing.h_addressable - dst.width) / 2;
2620                 dst.y = (stream->timing.v_addressable - dst.height) / 2;
2621
2622                 if (dm_state->underscan_enable) {
2623                         dst.x += dm_state->underscan_hborder / 2;
2624                         dst.y += dm_state->underscan_vborder / 2;
2625                         dst.width -= dm_state->underscan_hborder;
2626                         dst.height -= dm_state->underscan_vborder;
2627                 }
2628         }
2629
2630         stream->src = src;
2631         stream->dst = dst;
2632
2633         DRM_DEBUG_DRIVER("Destination Rectangle x:%d  y:%d  width:%d  height:%d\n",
2634                         dst.x, dst.y, dst.width, dst.height);
2635
2636 }
2637
2638 static enum dc_color_depth
2639 convert_color_depth_from_display_info(const struct drm_connector *connector)
2640 {
2641         struct dm_connector_state *dm_conn_state =
2642                 to_dm_connector_state(connector->state);
2643         uint32_t bpc = connector->display_info.bpc;
2644
2645         /* TODO: Remove this when there's support for max_bpc in drm */
2646         if (dm_conn_state && bpc > dm_conn_state->max_bpc)
2647                 /* Round down to nearest even number. */
2648                 bpc = dm_conn_state->max_bpc - (dm_conn_state->max_bpc & 1);
2649
2650         switch (bpc) {
2651         case 0:
2652                 /*
2653                  * Temporary Work around, DRM doesn't parse color depth for
2654                  * EDID revision before 1.4
2655                  * TODO: Fix edid parsing
2656                  */
2657                 return COLOR_DEPTH_888;
2658         case 6:
2659                 return COLOR_DEPTH_666;
2660         case 8:
2661                 return COLOR_DEPTH_888;
2662         case 10:
2663                 return COLOR_DEPTH_101010;
2664         case 12:
2665                 return COLOR_DEPTH_121212;
2666         case 14:
2667                 return COLOR_DEPTH_141414;
2668         case 16:
2669                 return COLOR_DEPTH_161616;
2670         default:
2671                 return COLOR_DEPTH_UNDEFINED;
2672         }
2673 }
2674
2675 static enum dc_aspect_ratio
2676 get_aspect_ratio(const struct drm_display_mode *mode_in)
2677 {
2678         /* 1-1 mapping, since both enums follow the HDMI spec. */
2679         return (enum dc_aspect_ratio) mode_in->picture_aspect_ratio;
2680 }
2681
2682 static enum dc_color_space
2683 get_output_color_space(const struct dc_crtc_timing *dc_crtc_timing)
2684 {
2685         enum dc_color_space color_space = COLOR_SPACE_SRGB;
2686
2687         switch (dc_crtc_timing->pixel_encoding) {
2688         case PIXEL_ENCODING_YCBCR422:
2689         case PIXEL_ENCODING_YCBCR444:
2690         case PIXEL_ENCODING_YCBCR420:
2691         {
2692                 /*
2693                  * 27030khz is the separation point between HDTV and SDTV
2694                  * according to HDMI spec, we use YCbCr709 and YCbCr601
2695                  * respectively
2696                  */
2697                 if (dc_crtc_timing->pix_clk_100hz > 270300) {
2698                         if (dc_crtc_timing->flags.Y_ONLY)
2699                                 color_space =
2700                                         COLOR_SPACE_YCBCR709_LIMITED;
2701                         else
2702                                 color_space = COLOR_SPACE_YCBCR709;
2703                 } else {
2704                         if (dc_crtc_timing->flags.Y_ONLY)
2705                                 color_space =
2706                                         COLOR_SPACE_YCBCR601_LIMITED;
2707                         else
2708                                 color_space = COLOR_SPACE_YCBCR601;
2709                 }
2710
2711         }
2712         break;
2713         case PIXEL_ENCODING_RGB:
2714                 color_space = COLOR_SPACE_SRGB;
2715                 break;
2716
2717         default:
2718                 WARN_ON(1);
2719                 break;
2720         }
2721
2722         return color_space;
2723 }
2724
2725 static void reduce_mode_colour_depth(struct dc_crtc_timing *timing_out)
2726 {
2727         if (timing_out->display_color_depth <= COLOR_DEPTH_888)
2728                 return;
2729
2730         timing_out->display_color_depth--;
2731 }
2732
2733 static void adjust_colour_depth_from_display_info(struct dc_crtc_timing *timing_out,
2734                                                 const struct drm_display_info *info)
2735 {
2736         int normalized_clk;
2737         if (timing_out->display_color_depth <= COLOR_DEPTH_888)
2738                 return;
2739         do {
2740                 normalized_clk = timing_out->pix_clk_100hz / 10;
2741                 /* YCbCr 4:2:0 requires additional adjustment of 1/2 */
2742                 if (timing_out->pixel_encoding == PIXEL_ENCODING_YCBCR420)
2743                         normalized_clk /= 2;
2744                 /* Adjusting pix clock following on HDMI spec based on colour depth */
2745                 switch (timing_out->display_color_depth) {
2746                 case COLOR_DEPTH_101010:
2747                         normalized_clk = (normalized_clk * 30) / 24;
2748                         break;
2749                 case COLOR_DEPTH_121212:
2750                         normalized_clk = (normalized_clk * 36) / 24;
2751                         break;
2752                 case COLOR_DEPTH_161616:
2753                         normalized_clk = (normalized_clk * 48) / 24;
2754                         break;
2755                 default:
2756                         return;
2757                 }
2758                 if (normalized_clk <= info->max_tmds_clock)
2759                         return;
2760                 reduce_mode_colour_depth(timing_out);
2761
2762         } while (timing_out->display_color_depth > COLOR_DEPTH_888);
2763
2764 }
2765
/*
 * Fill the stream's dc_crtc_timing from a DRM display mode and the
 * connector's display info. When @old_stream is given (scaled mode with
 * unchanged refresh), its VIC and sync polarities are reused instead of
 * being re-derived from @mode_in.
 */
static void
fill_stream_properties_from_drm_display_mode(struct dc_stream_state *stream,
					     const struct drm_display_mode *mode_in,
					     const struct drm_connector *connector,
					     const struct dc_stream_state *old_stream)
{
	struct dc_crtc_timing *timing_out = &stream->timing;
	const struct drm_display_info *info = &connector->display_info;

	memset(timing_out, 0, sizeof(struct dc_crtc_timing));

	timing_out->h_border_left = 0;
	timing_out->h_border_right = 0;
	timing_out->v_border_top = 0;
	timing_out->v_border_bottom = 0;
	/* TODO: un-hardcode */
	/* Pixel encoding (HDMI only): forced 4:2:0 > 4:4:4 > RGB fallback. */
	if (drm_mode_is_420_only(info, mode_in)
			&& stream->signal == SIGNAL_TYPE_HDMI_TYPE_A)
		timing_out->pixel_encoding = PIXEL_ENCODING_YCBCR420;
	else if ((connector->display_info.color_formats & DRM_COLOR_FORMAT_YCRCB444)
			&& stream->signal == SIGNAL_TYPE_HDMI_TYPE_A)
		timing_out->pixel_encoding = PIXEL_ENCODING_YCBCR444;
	else
		timing_out->pixel_encoding = PIXEL_ENCODING_RGB;

	timing_out->timing_3d_format = TIMING_3D_FORMAT_NONE;
	timing_out->display_color_depth = convert_color_depth_from_display_info(
			connector);
	timing_out->scan_type = SCANNING_TYPE_NODATA;
	timing_out->hdmi_vic = 0;

	/* Keep VIC/polarities from the old stream when one is supplied. */
	if(old_stream) {
		timing_out->vic = old_stream->timing.vic;
		timing_out->flags.HSYNC_POSITIVE_POLARITY = old_stream->timing.flags.HSYNC_POSITIVE_POLARITY;
		timing_out->flags.VSYNC_POSITIVE_POLARITY = old_stream->timing.flags.VSYNC_POSITIVE_POLARITY;
	} else {
		timing_out->vic = drm_match_cea_mode(mode_in);
		if (mode_in->flags & DRM_MODE_FLAG_PHSYNC)
			timing_out->flags.HSYNC_POSITIVE_POLARITY = 1;
		if (mode_in->flags & DRM_MODE_FLAG_PVSYNC)
			timing_out->flags.VSYNC_POSITIVE_POLARITY = 1;
	}

	/* Copy crtc_* timing fields (already set up by drm_mode_set_crtcinfo). */
	timing_out->h_addressable = mode_in->crtc_hdisplay;
	timing_out->h_total = mode_in->crtc_htotal;
	timing_out->h_sync_width =
		mode_in->crtc_hsync_end - mode_in->crtc_hsync_start;
	timing_out->h_front_porch =
		mode_in->crtc_hsync_start - mode_in->crtc_hdisplay;
	timing_out->v_total = mode_in->crtc_vtotal;
	timing_out->v_addressable = mode_in->crtc_vdisplay;
	timing_out->v_front_porch =
		mode_in->crtc_vsync_start - mode_in->crtc_vdisplay;
	timing_out->v_sync_width =
		mode_in->crtc_vsync_end - mode_in->crtc_vsync_start;
	timing_out->pix_clk_100hz = mode_in->crtc_clock * 10;
	timing_out->aspect_ratio = get_aspect_ratio(mode_in);

	stream->output_color_space = get_output_color_space(timing_out);

	stream->out_transfer_func->type = TF_TYPE_PREDEFINED;
	stream->out_transfer_func->tf = TRANSFER_FUNCTION_SRGB;
	/* HDMI sinks may need a lower depth to fit the TMDS clock budget. */
	if (stream->signal == SIGNAL_TYPE_HDMI_TYPE_A)
		adjust_colour_depth_from_display_info(timing_out, info);
}
2831
/*
 * Copy the sink's EDID audio capabilities (and, where available, the
 * connector's reported latencies) into the stream's audio_info.
 */
static void fill_audio_info(struct audio_info *audio_info,
			    const struct drm_connector *drm_connector,
			    const struct dc_sink *dc_sink)
{
	int i = 0;
	int cea_revision = 0;
	const struct dc_edid_caps *edid_caps = &dc_sink->edid_caps;

	audio_info->manufacture_id = edid_caps->manufacturer_id;
	audio_info->product_id = edid_caps->product_id;

	cea_revision = drm_connector->display_info.cea_rev;

	strscpy(audio_info->display_name,
		edid_caps->display_name,
		AUDIO_INFO_DISPLAY_NAME_SIZE_IN_CHARS);

	/* Audio descriptors are only present in CEA extension rev 3+. */
	if (cea_revision >= 3) {
		audio_info->mode_count = edid_caps->audio_mode_count;

		for (i = 0; i < audio_info->mode_count; ++i) {
			audio_info->modes[i].format_code =
					(enum audio_format_code)
					(edid_caps->audio_modes[i].format_code);
			audio_info->modes[i].channel_count =
					edid_caps->audio_modes[i].channel_count;
			audio_info->modes[i].sample_rates.all =
					edid_caps->audio_modes[i].sample_rate;
			audio_info->modes[i].sample_size =
					edid_caps->audio_modes[i].sample_size;
		}
	}

	audio_info->flags.all = edid_caps->speaker_flags;

	/* TODO: We only check for the progressive mode, check for interlace mode too */
	if (drm_connector->latency_present[0]) {
		audio_info->video_latency = drm_connector->video_latency[0];
		audio_info->audio_latency = drm_connector->audio_latency[0];
	}

	/* TODO: For DP, video and audio latency should be calculated from DPCD caps */

}
2876
/*
 * Copy only the crtc_* (hardware) timing fields from @src_mode into
 * @dst_mode, leaving the user-visible mode fields untouched.
 */
static void
copy_crtc_timing_for_drm_display_mode(const struct drm_display_mode *src_mode,
				      struct drm_display_mode *dst_mode)
{
	dst_mode->crtc_hdisplay = src_mode->crtc_hdisplay;
	dst_mode->crtc_vdisplay = src_mode->crtc_vdisplay;
	dst_mode->crtc_clock = src_mode->crtc_clock;
	dst_mode->crtc_hblank_start = src_mode->crtc_hblank_start;
	dst_mode->crtc_hblank_end = src_mode->crtc_hblank_end;
	dst_mode->crtc_hsync_start =  src_mode->crtc_hsync_start;
	dst_mode->crtc_hsync_end = src_mode->crtc_hsync_end;
	dst_mode->crtc_htotal = src_mode->crtc_htotal;
	dst_mode->crtc_hskew = src_mode->crtc_hskew;
	dst_mode->crtc_vblank_start = src_mode->crtc_vblank_start;
	dst_mode->crtc_vblank_end = src_mode->crtc_vblank_end;
	dst_mode->crtc_vsync_start = src_mode->crtc_vsync_start;
	dst_mode->crtc_vsync_end = src_mode->crtc_vsync_end;
	dst_mode->crtc_vtotal = src_mode->crtc_vtotal;
}
2896
2897 static void
2898 decide_crtc_timing_for_drm_display_mode(struct drm_display_mode *drm_mode,
2899                                         const struct drm_display_mode *native_mode,
2900                                         bool scale_enabled)
2901 {
2902         if (scale_enabled) {
2903                 copy_crtc_timing_for_drm_display_mode(native_mode, drm_mode);
2904         } else if (native_mode->clock == drm_mode->clock &&
2905                         native_mode->htotal == drm_mode->htotal &&
2906                         native_mode->vtotal == drm_mode->vtotal) {
2907                 copy_crtc_timing_for_drm_display_mode(native_mode, drm_mode);
2908         } else {
2909                 /* no scaling nor amdgpu inserted, no need to patch */
2910         }
2911 }
2912
2913 static struct dc_sink *
2914 create_fake_sink(struct amdgpu_dm_connector *aconnector)
2915 {
2916         struct dc_sink_init_data sink_init_data = { 0 };
2917         struct dc_sink *sink = NULL;
2918         sink_init_data.link = aconnector->dc_link;
2919         sink_init_data.sink_signal = aconnector->dc_link->connector_signal;
2920
2921         sink = dc_sink_create(&sink_init_data);
2922         if (!sink) {
2923                 DRM_ERROR("Failed to create sink!\n");
2924                 return NULL;
2925         }
2926         sink->sink_signal = SIGNAL_TYPE_VIRTUAL;
2927
2928         return sink;
2929 }
2930
2931 static void set_multisync_trigger_params(
2932                 struct dc_stream_state *stream)
2933 {
2934         if (stream->triggered_crtc_reset.enabled) {
2935                 stream->triggered_crtc_reset.event = CRTC_EVENT_VSYNC_RISING;
2936                 stream->triggered_crtc_reset.delay = TRIGGER_DELAY_NEXT_LINE;
2937         }
2938 }
2939
2940 static void set_master_stream(struct dc_stream_state *stream_set[],
2941                               int stream_count)
2942 {
2943         int j, highest_rfr = 0, master_stream = 0;
2944
2945         for (j = 0;  j < stream_count; j++) {
2946                 if (stream_set[j] && stream_set[j]->triggered_crtc_reset.enabled) {
2947                         int refresh_rate = 0;
2948
2949                         refresh_rate = (stream_set[j]->timing.pix_clk_100hz*100)/
2950                                 (stream_set[j]->timing.h_total*stream_set[j]->timing.v_total);
2951                         if (refresh_rate > highest_rfr) {
2952                                 highest_rfr = refresh_rate;
2953                                 master_stream = j;
2954                         }
2955                 }
2956         }
2957         for (j = 0;  j < stream_count; j++) {
2958                 if (stream_set[j])
2959                         stream_set[j]->triggered_crtc_reset.event_source = stream_set[master_stream];
2960         }
2961 }
2962
2963 static void dm_enable_per_frame_crtc_master_sync(struct dc_state *context)
2964 {
2965         int i = 0;
2966
2967         if (context->stream_count < 2)
2968                 return;
2969         for (i = 0; i < context->stream_count ; i++) {
2970                 if (!context->streams[i])
2971                         continue;
2972                 /*
2973                  * TODO: add a function to read AMD VSDB bits and set
2974                  * crtc_sync_master.multi_sync_enabled flag
2975                  * For now it's set to false
2976                  */
2977                 set_multisync_trigger_params(context->streams[i]);
2978         }
2979         set_master_stream(context->streams, context->stream_count);
2980 }
2981
/*
 * Build a dc_stream_state for @aconnector using @drm_mode. When there is
 * no physical sink, a fake (virtual) sink is created so the stream can
 * still be constructed. If scaling is active and the refresh rate is
 * unchanged, VIC/polarities are carried over from @old_stream.
 *
 * Returns a new stream reference, or NULL on failure.
 */
static struct dc_stream_state *
create_stream_for_sink(struct amdgpu_dm_connector *aconnector,
		       const struct drm_display_mode *drm_mode,
		       const struct dm_connector_state *dm_state,
		       const struct dc_stream_state *old_stream)
{
	struct drm_display_mode *preferred_mode = NULL;
	struct drm_connector *drm_connector;
	struct dc_stream_state *stream = NULL;
	struct drm_display_mode mode = *drm_mode;
	bool native_mode_found = false;
	bool scale = dm_state ? (dm_state->scaling != RMX_OFF) : false;
	int mode_refresh;
	int preferred_refresh = 0;

	struct dc_sink *sink = NULL;
	if (aconnector == NULL) {
		DRM_ERROR("aconnector is NULL!\n");
		return stream;
	}

	drm_connector = &aconnector->base;

	/* Take a sink reference: real sink if present, fake sink otherwise. */
	if (!aconnector->dc_sink) {
		sink = create_fake_sink(aconnector);
		if (!sink)
			return stream;
	} else {
		sink = aconnector->dc_sink;
		dc_sink_retain(sink);
	}

	stream = dc_create_stream_for_sink(sink);

	if (stream == NULL) {
		DRM_ERROR("Failed to create stream for sink!\n");
		goto finish;
	}

	stream->dm_stream_context = aconnector;

	list_for_each_entry(preferred_mode, &aconnector->base.modes, head) {
		/* Search for preferred mode */
		if (preferred_mode->type & DRM_MODE_TYPE_PREFERRED) {
			native_mode_found = true;
			break;
		}
	}
	if (!native_mode_found)
		preferred_mode = list_first_entry_or_null(
				&aconnector->base.modes,
				struct drm_display_mode,
				head);

	mode_refresh = drm_mode_vrefresh(&mode);

	if (preferred_mode == NULL) {
		/*
		 * This may not be an error, the use case is when we have no
		 * usermode calls to reset and set mode upon hotplug. In this
		 * case, we call set mode ourselves to restore the previous mode
		 * and the modelist may not be filled in in time.
		 */
		DRM_DEBUG_DRIVER("No preferred mode found\n");
	} else {
		decide_crtc_timing_for_drm_display_mode(
				&mode, preferred_mode,
				dm_state ? (dm_state->scaling != RMX_OFF) : false);
		preferred_refresh = drm_mode_vrefresh(preferred_mode);
	}

	if (!dm_state)
		drm_mode_set_crtcinfo(&mode, 0);

	/*
	* If scaling is enabled and refresh rate didn't change
	* we copy the vic and polarities of the old timings
	*/
	if (!scale || mode_refresh != preferred_refresh)
		fill_stream_properties_from_drm_display_mode(stream,
			&mode, &aconnector->base, NULL);
	else
		fill_stream_properties_from_drm_display_mode(stream,
			&mode, &aconnector->base, old_stream);

	update_stream_scaling_settings(&mode, dm_state, stream);

	fill_audio_info(
		&stream->audio_info,
		drm_connector,
		sink);

	update_stream_signal(stream, sink);

finish:
	/* Balance the retain/create above; the stream holds its own ref. */
	dc_sink_release(sink);

	return stream;
}
3081
/* drm_crtc_funcs.destroy: tear down DRM bookkeeping and free the CRTC. */
static void amdgpu_dm_crtc_destroy(struct drm_crtc *crtc)
{
	drm_crtc_cleanup(crtc);
	kfree(crtc);
}
3087
3088 static void dm_crtc_destroy_state(struct drm_crtc *crtc,
3089                                   struct drm_crtc_state *state)
3090 {
3091         struct dm_crtc_state *cur = to_dm_crtc_state(state);
3092
3093         /* TODO Destroy dc_stream objects are stream object is flattened */
3094         if (cur->stream)
3095                 dc_stream_release(cur->stream);
3096
3097
3098         __drm_atomic_helper_crtc_destroy_state(state);
3099
3100
3101         kfree(state);
3102 }
3103
3104 static void dm_crtc_reset_state(struct drm_crtc *crtc)
3105 {
3106         struct dm_crtc_state *state;
3107
3108         if (crtc->state)
3109                 dm_crtc_destroy_state(crtc, crtc->state);
3110
3111         state = kzalloc(sizeof(*state), GFP_KERNEL);
3112         if (WARN_ON(!state))
3113                 return;
3114
3115         crtc->state = &state->base;
3116         crtc->state->crtc = crtc;
3117
3118 }
3119
3120 static struct drm_crtc_state *
3121 dm_crtc_duplicate_state(struct drm_crtc *crtc)
3122 {
3123         struct dm_crtc_state *state, *cur;
3124
3125         cur = to_dm_crtc_state(crtc->state);
3126
3127         if (WARN_ON(!crtc->state))
3128                 return NULL;
3129
3130         state = kzalloc(sizeof(*state), GFP_KERNEL);
3131         if (!state)
3132                 return NULL;
3133
3134         __drm_atomic_helper_crtc_duplicate_state(crtc, &state->base);
3135
3136         if (cur->stream) {
3137                 state->stream = cur->stream;
3138                 dc_stream_retain(state->stream);
3139         }
3140
3141         state->vrr_params = cur->vrr_params;
3142         state->vrr_infopacket = cur->vrr_infopacket;
3143         state->abm_level = cur->abm_level;
3144         state->vrr_supported = cur->vrr_supported;
3145         state->freesync_config = cur->freesync_config;
3146         state->crc_enabled = cur->crc_enabled;
3147
3148         /* TODO Duplicate dc_stream after objects are stream object is flattened */
3149
3150         return &state->base;
3151 }
3152
3153
3154 static inline int dm_set_vblank(struct drm_crtc *crtc, bool enable)
3155 {
3156         enum dc_irq_source irq_source;
3157         struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
3158         struct amdgpu_device *adev = crtc->dev->dev_private;
3159
3160         irq_source = IRQ_TYPE_VBLANK + acrtc->otg_inst;
3161         return dc_interrupt_set(adev->dm.dc, irq_source, enable) ? 0 : -EBUSY;
3162 }
3163
/* drm_crtc_funcs.enable_vblank: turn on the vblank interrupt. */
static int dm_enable_vblank(struct drm_crtc *crtc)
{
	return dm_set_vblank(crtc, true);
}
3168
/* drm_crtc_funcs.disable_vblank: turn off the vblank interrupt. */
static void dm_disable_vblank(struct drm_crtc *crtc)
{
	dm_set_vblank(crtc, false);
}
3173
/* Implemented only the options currently available for the driver */
static const struct drm_crtc_funcs amdgpu_dm_crtc_funcs = {
	.reset = dm_crtc_reset_state,
	.destroy = amdgpu_dm_crtc_destroy,
	.gamma_set = drm_atomic_helper_legacy_gamma_set,
	.set_config = drm_atomic_helper_set_config,
	.page_flip = drm_atomic_helper_page_flip,
	.atomic_duplicate_state = dm_crtc_duplicate_state,
	.atomic_destroy_state = dm_crtc_destroy_state,
	.set_crc_source = amdgpu_dm_crtc_set_crc_source,
	.verify_crc_source = amdgpu_dm_crtc_verify_crc_source,
	.enable_vblank = dm_enable_vblank,
	.disable_vblank = dm_disable_vblank,
};
3188
3189 static enum drm_connector_status
3190 amdgpu_dm_connector_detect(struct drm_connector *connector, bool force)
3191 {
3192         bool connected;
3193         struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
3194
3195         /*
3196          * Notes:
3197          * 1. This interface is NOT called in context of HPD irq.
3198          * 2. This interface *is called* in context of user-mode ioctl. Which
3199          * makes it a bad place for *any* MST-related activity.
3200          */
3201
3202         if (aconnector->base.force == DRM_FORCE_UNSPECIFIED &&
3203             !aconnector->fake_enable)
3204                 connected = (aconnector->dc_sink != NULL);
3205         else
3206                 connected = (aconnector->base.force == DRM_FORCE_ON);
3207
3208         return (connected ? connector_status_connected :
3209                         connector_status_disconnected);
3210 }
3211
3212 int amdgpu_dm_connector_atomic_set_property(struct drm_connector *connector,
3213                                             struct drm_connector_state *connector_state,
3214                                             struct drm_property *property,
3215                                             uint64_t val)
3216 {
3217         struct drm_device *dev = connector->dev;
3218         struct amdgpu_device *adev = dev->dev_private;
3219         struct dm_connector_state *dm_old_state =
3220                 to_dm_connector_state(connector->state);
3221         struct dm_connector_state *dm_new_state =
3222                 to_dm_connector_state(connector_state);
3223
3224         int ret = -EINVAL;
3225
3226         if (property == dev->mode_config.scaling_mode_property) {
3227                 enum amdgpu_rmx_type rmx_type;
3228
3229                 switch (val) {
3230                 case DRM_MODE_SCALE_CENTER:
3231                         rmx_type = RMX_CENTER;
3232                         break;
3233                 case DRM_MODE_SCALE_ASPECT:
3234                         rmx_type = RMX_ASPECT;
3235                         break;
3236                 case DRM_MODE_SCALE_FULLSCREEN:
3237                         rmx_type = RMX_FULL;
3238                         break;
3239                 case DRM_MODE_SCALE_NONE:
3240                 default:
3241                         rmx_type = RMX_OFF;
3242                         break;
3243                 }
3244
3245                 if (dm_old_state->scaling == rmx_type)
3246                         return 0;
3247
3248                 dm_new_state->scaling = rmx_type;
3249                 ret = 0;
3250         } else if (property == adev->mode_info.underscan_hborder_property) {
3251                 dm_new_state->underscan_hborder = val;
3252                 ret = 0;
3253         } else if (property == adev->mode_info.underscan_vborder_property) {
3254                 dm_new_state->underscan_vborder = val;
3255                 ret = 0;
3256         } else if (property == adev->mode_info.underscan_property) {
3257                 dm_new_state->underscan_enable = val;
3258                 ret = 0;
3259         } else if (property == adev->mode_info.max_bpc_property) {
3260                 dm_new_state->max_bpc = val;
3261                 ret = 0;
3262         } else if (property == adev->mode_info.abm_level_property) {
3263                 dm_new_state->abm_level = val;
3264                 ret = 0;
3265         }
3266
3267         return ret;
3268 }
3269
3270 int amdgpu_dm_connector_atomic_get_property(struct drm_connector *connector,
3271                                             const struct drm_connector_state *state,
3272                                             struct drm_property *property,
3273                                             uint64_t *val)
3274 {
3275         struct drm_device *dev = connector->dev;
3276         struct amdgpu_device *adev = dev->dev_private;
3277         struct dm_connector_state *dm_state =
3278                 to_dm_connector_state(state);
3279         int ret = -EINVAL;
3280
3281         if (property == dev->mode_config.scaling_mode_property) {
3282                 switch (dm_state->scaling) {
3283                 case RMX_CENTER:
3284                         *val = DRM_MODE_SCALE_CENTER;
3285                         break;
3286                 case RMX_ASPECT:
3287                         *val = DRM_MODE_SCALE_ASPECT;
3288                         break;
3289                 case RMX_FULL:
3290                         *val = DRM_MODE_SCALE_FULLSCREEN;
3291                         break;
3292                 case RMX_OFF:
3293                 default:
3294                         *val = DRM_MODE_SCALE_NONE;
3295                         break;
3296                 }
3297                 ret = 0;
3298         } else if (property == adev->mode_info.underscan_hborder_property) {
3299                 *val = dm_state->underscan_hborder;
3300                 ret = 0;
3301         } else if (property == adev->mode_info.underscan_vborder_property) {
3302                 *val = dm_state->underscan_vborder;
3303                 ret = 0;
3304         } else if (property == adev->mode_info.underscan_property) {
3305                 *val = dm_state->underscan_enable;
3306                 ret = 0;
3307         } else if (property == adev->mode_info.max_bpc_property) {
3308                 *val = dm_state->max_bpc;
3309                 ret = 0;
3310         } else if (property == adev->mode_info.abm_level_property) {
3311                 *val = dm_state->abm_level;
3312                 ret = 0;
3313         }
3314
3315         return ret;
3316 }
3317
/*
 * drm_connector_funcs.destroy hook: tear down an amdgpu_dm connector.
 *
 * Unregisters the eDP/LVDS backlight device (when configured), drops the
 * emulated and real DC sink references, detaches DP CEC, and finally
 * unregisters/cleans up and frees the DRM connector itself.
 */
static void amdgpu_dm_connector_destroy(struct drm_connector *connector)
{
        struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
        const struct dc_link *link = aconnector->dc_link;
        struct amdgpu_device *adev = connector->dev->dev_private;
        struct amdgpu_display_manager *dm = &adev->dm;

#if defined(CONFIG_BACKLIGHT_CLASS_DEVICE) ||\
        defined(CONFIG_BACKLIGHT_CLASS_DEVICE_MODULE)

        /* Only eDP/LVDS links own the backlight device registered by this driver. */
        if ((link->connector_signal & (SIGNAL_TYPE_EDP | SIGNAL_TYPE_LVDS)) &&
            link->type != dc_connection_none &&
            dm->backlight_dev) {
                backlight_device_unregister(dm->backlight_dev);
                dm->backlight_dev = NULL;
        }
#endif

        /* Drop the emulated (forced-EDID) sink reference, if any. */
        if (aconnector->dc_em_sink)
                dc_sink_release(aconnector->dc_em_sink);
        aconnector->dc_em_sink = NULL;
        /* Drop the detected sink reference, if any. */
        if (aconnector->dc_sink)
                dc_sink_release(aconnector->dc_sink);
        aconnector->dc_sink = NULL;

        drm_dp_cec_unregister_connector(&aconnector->dm_dp_aux.aux);
        drm_connector_unregister(connector);
        drm_connector_cleanup(connector);
        kfree(connector);
}
3348
3349 void amdgpu_dm_connector_funcs_reset(struct drm_connector *connector)
3350 {
3351         struct dm_connector_state *state =
3352                 to_dm_connector_state(connector->state);
3353
3354         if (connector->state)
3355                 __drm_atomic_helper_connector_destroy_state(connector->state);
3356
3357         kfree(state);
3358
3359         state = kzalloc(sizeof(*state), GFP_KERNEL);
3360
3361         if (state) {
3362                 state->scaling = RMX_OFF;
3363                 state->underscan_enable = false;
3364                 state->underscan_hborder = 0;
3365                 state->underscan_vborder = 0;
3366                 state->max_bpc = 8;
3367
3368                 __drm_atomic_helper_connector_reset(connector, &state->base);
3369         }
3370 }
3371
/*
 * drm_connector_funcs.atomic_duplicate_state hook: clone the current
 * connector state for a new atomic transaction.
 *
 * Returns the base of the new state, or NULL on allocation failure.
 */
struct drm_connector_state *
amdgpu_dm_connector_atomic_duplicate_state(struct drm_connector *connector)
{
        struct dm_connector_state *state =
                to_dm_connector_state(connector->state);

        struct dm_connector_state *new_state =
                        kmemdup(state, sizeof(*state), GFP_KERNEL);

        if (!new_state)
                return NULL;

        /* Re-initialize the embedded base state for the new transaction. */
        __drm_atomic_helper_connector_duplicate_state(connector, &new_state->base);

        /*
         * NOTE(review): kmemdup above already copied the whole struct, so
         * these per-field copies are redundant (but harmless).
         */
        new_state->freesync_capable = state->freesync_capable;
        new_state->abm_level = state->abm_level;
        new_state->scaling = state->scaling;
        new_state->underscan_enable = state->underscan_enable;
        new_state->underscan_hborder = state->underscan_hborder;
        new_state->underscan_vborder = state->underscan_vborder;
        new_state->max_bpc = state->max_bpc;

        return &new_state->base;
}
3396
/* Connector vtable shared by all amdgpu_dm connectors. */
static const struct drm_connector_funcs amdgpu_dm_connector_funcs = {
        .reset = amdgpu_dm_connector_funcs_reset,
        .detect = amdgpu_dm_connector_detect,
        .fill_modes = drm_helper_probe_single_connector_modes,
        .destroy = amdgpu_dm_connector_destroy,
        .atomic_duplicate_state = amdgpu_dm_connector_atomic_duplicate_state,
        .atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
        .atomic_set_property = amdgpu_dm_connector_atomic_set_property,
        .atomic_get_property = amdgpu_dm_connector_atomic_get_property
};
3407
/* drm_connector_helper_funcs.get_modes hook: thin wrapper over the dm helper. */
static int get_modes(struct drm_connector *connector)
{
        return amdgpu_dm_connector_get_modes(connector);
}
3412
/*
 * Create an emulated DC sink from the connector's user-supplied EDID blob.
 *
 * Used when the connector state is forced: the EDID from the blob is handed
 * to DC as a remote sink. If no EDID blob exists, the force is downgraded to
 * DRM_FORCE_OFF. When forced ON, dc_sink is set to the link's local sink if
 * present, otherwise to the emulated sink.
 */
static void create_eml_sink(struct amdgpu_dm_connector *aconnector)
{
        struct dc_sink_init_data init_params = {
                        .link = aconnector->dc_link,
                        .sink_signal = SIGNAL_TYPE_VIRTUAL
        };
        struct edid *edid;

        if (!aconnector->base.edid_blob_ptr) {
                DRM_ERROR("No EDID firmware found on connector: %s ,forcing to OFF!\n",
                                aconnector->base.name);

                aconnector->base.force = DRM_FORCE_OFF;
                aconnector->base.override_edid = false;
                return;
        }

        edid = (struct edid *) aconnector->base.edid_blob_ptr->data;

        aconnector->edid = edid;

        /* EDID length is (1 + extension count) 128-byte blocks. */
        aconnector->dc_em_sink = dc_link_add_remote_sink(
                aconnector->dc_link,
                (uint8_t *)edid,
                (edid->extensions + 1) * EDID_LENGTH,
                &init_params);

        if (aconnector->base.force == DRM_FORCE_ON) {
                aconnector->dc_sink = aconnector->dc_link->local_sink ?
                aconnector->dc_link->local_sink :
                aconnector->dc_em_sink;
                dc_sink_retain(aconnector->dc_sink);
        }
}
3447
3448 static void handle_edid_mgmt(struct amdgpu_dm_connector *aconnector)
3449 {
3450         struct dc_link *link = (struct dc_link *)aconnector->dc_link;
3451
3452         /*
3453          * In case of headless boot with force on for DP managed connector
3454          * Those settings have to be != 0 to get initial modeset
3455          */
3456         if (link->connector_signal == SIGNAL_TYPE_DISPLAY_PORT) {
3457                 link->verified_link_cap.lane_count = LANE_COUNT_FOUR;
3458                 link->verified_link_cap.link_rate = LINK_RATE_HIGH2;
3459         }
3460
3461
3462         aconnector->base.override_edid = true;
3463         create_eml_sink(aconnector);
3464 }
3465
3466 enum drm_mode_status amdgpu_dm_connector_mode_valid(struct drm_connector *connector,
3467                                    struct drm_display_mode *mode)
3468 {
3469         int result = MODE_ERROR;
3470         struct dc_sink *dc_sink;
3471         struct amdgpu_device *adev = connector->dev->dev_private;
3472         /* TODO: Unhardcode stream count */
3473         struct dc_stream_state *stream;
3474         struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
3475         enum dc_status dc_result = DC_OK;
3476
3477         if ((mode->flags & DRM_MODE_FLAG_INTERLACE) ||
3478                         (mode->flags & DRM_MODE_FLAG_DBLSCAN))
3479                 return result;
3480
3481         /*
3482          * Only run this the first time mode_valid is called to initilialize
3483          * EDID mgmt
3484          */
3485         if (aconnector->base.force != DRM_FORCE_UNSPECIFIED &&
3486                 !aconnector->dc_em_sink)
3487                 handle_edid_mgmt(aconnector);
3488
3489         dc_sink = to_amdgpu_dm_connector(connector)->dc_sink;
3490
3491         if (dc_sink == NULL) {
3492                 DRM_ERROR("dc_sink is NULL!\n");
3493                 goto fail;
3494         }
3495
3496         stream = create_stream_for_sink(aconnector, mode, NULL, NULL);
3497         if (stream == NULL) {
3498                 DRM_ERROR("Failed to create stream for sink!\n");
3499                 goto fail;
3500         }
3501
3502         dc_result = dc_validate_stream(adev->dm.dc, stream);
3503
3504         if (dc_result == DC_OK)
3505                 result = MODE_OK;
3506         else
3507                 DRM_DEBUG_KMS("Mode %dx%d (clk %d) failed DC validation with error %d\n",
3508                               mode->vdisplay,
3509                               mode->hdisplay,
3510                               mode->clock,
3511                               dc_result);
3512
3513         dc_stream_release(stream);
3514
3515 fail:
3516         /* TODO: error handling*/
3517         return result;
3518 }
3519
static const struct drm_connector_helper_funcs
amdgpu_dm_connector_helper_funcs = {
        /*
         * If hotplugging a second bigger display in FB Con mode, bigger resolution
         * modes will be filtered by drm_mode_validate_size(), and those modes
         * are missing after the user starts lightdm. So we need to renew the modes
         * list in the get_modes callback, not just return the modes count.
         */
        .get_modes = get_modes,
        .mode_valid = amdgpu_dm_connector_mode_valid,
};
3531
/* Intentionally empty: CRTC disable is handled through the atomic commit path. */
static void dm_crtc_helper_disable(struct drm_crtc *crtc)
{
}
3535
3536 static int dm_crtc_helper_atomic_check(struct drm_crtc *crtc,
3537                                        struct drm_crtc_state *state)
3538 {
3539         struct amdgpu_device *adev = crtc->dev->dev_private;
3540         struct dc *dc = adev->dm.dc;
3541         struct dm_crtc_state *dm_crtc_state = to_dm_crtc_state(state);
3542         int ret = -EINVAL;
3543
3544         if (unlikely(!dm_crtc_state->stream &&
3545                      modeset_required(state, NULL, dm_crtc_state->stream))) {
3546                 WARN_ON(1);
3547                 return ret;
3548         }
3549
3550         /* In some use cases, like reset, no stream is attached */
3551         if (!dm_crtc_state->stream)
3552                 return 0;
3553
3554         if (dc_validate_stream(dc, dm_crtc_state->stream) == DC_OK)
3555                 return 0;
3556
3557         return ret;
3558 }
3559
/* No driver-side mode fixup needed; accept every mode as-is. */
static bool dm_crtc_helper_mode_fixup(struct drm_crtc *crtc,
                                      const struct drm_display_mode *mode,
                                      struct drm_display_mode *adjusted_mode)
{
        return true;
}
3566
/* CRTC helper vtable shared by all amdgpu_dm CRTCs. */
static const struct drm_crtc_helper_funcs amdgpu_dm_crtc_helper_funcs = {
        .disable = dm_crtc_helper_disable,
        .atomic_check = dm_crtc_helper_atomic_check,
        .mode_fixup = dm_crtc_helper_mode_fixup
};
3572
/* Intentionally empty: encoder disable is handled through the atomic commit path. */
static void dm_encoder_helper_disable(struct drm_encoder *encoder)
{

}
3577
/* No encoder-level validation needed; always succeed. */
static int dm_encoder_helper_atomic_check(struct drm_encoder *encoder,
                                          struct drm_crtc_state *crtc_state,
                                          struct drm_connector_state *conn_state)
{
        return 0;
}
3584
/* Encoder helper vtable shared by all amdgpu_dm encoders. */
const struct drm_encoder_helper_funcs amdgpu_dm_encoder_helper_funcs = {
        .disable = dm_encoder_helper_disable,
        .atomic_check = dm_encoder_helper_atomic_check
};
3589
3590 static void dm_drm_plane_reset(struct drm_plane *plane)
3591 {
3592         struct dm_plane_state *amdgpu_state = NULL;
3593
3594         if (plane->state)
3595                 plane->funcs->atomic_destroy_state(plane, plane->state);
3596
3597         amdgpu_state = kzalloc(sizeof(*amdgpu_state), GFP_KERNEL);
3598         WARN_ON(amdgpu_state == NULL);
3599
3600         if (amdgpu_state) {
3601                 plane->state = &amdgpu_state->base;
3602                 plane->state->plane = plane;
3603                 plane->state->rotation = DRM_MODE_ROTATE_0;
3604         }
3605 }
3606
3607 static struct drm_plane_state *
3608 dm_drm_plane_duplicate_state(struct drm_plane *plane)
3609 {
3610         struct dm_plane_state *dm_plane_state, *old_dm_plane_state;
3611
3612         old_dm_plane_state = to_dm_plane_state(plane->state);
3613         dm_plane_state = kzalloc(sizeof(*dm_plane_state), GFP_KERNEL);
3614         if (!dm_plane_state)
3615                 return NULL;
3616
3617         __drm_atomic_helper_plane_duplicate_state(plane, &dm_plane_state->base);
3618
3619         if (old_dm_plane_state->dc_state) {
3620                 dm_plane_state->dc_state = old_dm_plane_state->dc_state;
3621                 dc_plane_state_retain(dm_plane_state->dc_state);
3622         }
3623
3624         return &dm_plane_state->base;
3625 }
3626
/*
 * drm_plane_funcs.atomic_destroy_state hook: drop the DC plane state
 * reference (if any) and free the DRM plane state.
 */
void dm_drm_plane_destroy_state(struct drm_plane *plane,
                                struct drm_plane_state *state)
{
        struct dm_plane_state *dm_plane_state = to_dm_plane_state(state);

        if (dm_plane_state->dc_state)
                dc_plane_state_release(dm_plane_state->dc_state);

        drm_atomic_helper_plane_destroy_state(plane, state);
}
3637
/* Plane vtable shared by all amdgpu_dm planes. */
static const struct drm_plane_funcs dm_plane_funcs = {
        .update_plane   = drm_atomic_helper_update_plane,
        .disable_plane  = drm_atomic_helper_disable_plane,
        .destroy        = drm_primary_helper_destroy,
        .reset = dm_drm_plane_reset,
        .atomic_duplicate_state = dm_drm_plane_duplicate_state,
        .atomic_destroy_state = dm_drm_plane_destroy_state,
};
3646
/*
 * drm_plane_helper_funcs.prepare_fb hook.
 *
 * Pins the framebuffer BO for scanout, binds it to GART, and programs its
 * GPU addresses into the new state's DC plane state. Takes a BO reference
 * that is dropped in dm_plane_helper_cleanup_fb().
 *
 * Returns 0 on success or a negative errno if the BO could not be reserved,
 * pinned, or bound.
 */
static int dm_plane_helper_prepare_fb(struct drm_plane *plane,
                                      struct drm_plane_state *new_state)
{
        struct amdgpu_framebuffer *afb;
        struct drm_gem_object *obj;
        struct amdgpu_device *adev;
        struct amdgpu_bo *rbo;
        uint64_t chroma_addr = 0;
        struct dm_plane_state *dm_plane_state_new, *dm_plane_state_old;
        uint64_t tiling_flags, dcc_address;
        unsigned int awidth;
        uint32_t domain;
        int r;

        dm_plane_state_old = to_dm_plane_state(plane->state);
        dm_plane_state_new = to_dm_plane_state(new_state);

        /* Nothing to pin when the plane is being disabled. */
        if (!new_state->fb) {
                DRM_DEBUG_DRIVER("No FB bound\n");
                return 0;
        }

        afb = to_amdgpu_framebuffer(new_state->fb);
        obj = new_state->fb->obj[0];
        rbo = gem_to_amdgpu_bo(obj);
        adev = amdgpu_ttm_adev(rbo->tbo.bdev);
        r = amdgpu_bo_reserve(rbo, false);
        if (unlikely(r != 0))
                return r;

        /* Cursor BOs are pinned to VRAM; others use any supported display domain. */
        if (plane->type != DRM_PLANE_TYPE_CURSOR)
                domain = amdgpu_display_supported_domains(adev);
        else
                domain = AMDGPU_GEM_DOMAIN_VRAM;

        r = amdgpu_bo_pin(rbo, domain);
        if (unlikely(r != 0)) {
                /* -ERESTARTSYS means an interrupted wait, not a real failure. */
                if (r != -ERESTARTSYS)
                        DRM_ERROR("Failed to pin framebuffer with error %d\n", r);
                amdgpu_bo_unreserve(rbo);
                return r;
        }

        r = amdgpu_ttm_alloc_gart(&rbo->tbo);
        if (unlikely(r != 0)) {
                amdgpu_bo_unpin(rbo);
                amdgpu_bo_unreserve(rbo);
                DRM_ERROR("%p bind failed\n", rbo);
                return r;
        }

        amdgpu_bo_get_tiling_flags(rbo, &tiling_flags);

        amdgpu_bo_unreserve(rbo);

        afb->address = amdgpu_bo_gpu_offset(rbo);

        /* Reference held while the fb is prepared; dropped in cleanup_fb. */
        amdgpu_bo_ref(rbo);

        /* Only (re)program addresses when this commit carries a new DC plane state. */
        if (dm_plane_state_new->dc_state &&
                        dm_plane_state_old->dc_state != dm_plane_state_new->dc_state) {
                struct dc_plane_state *plane_state = dm_plane_state_new->dc_state;

                if (plane_state->format < SURFACE_PIXEL_FORMAT_VIDEO_BEGIN) {
                        /* Graphics surface: base address plus DCC metadata address. */
                        plane_state->address.grph.addr.low_part = lower_32_bits(afb->address);
                        plane_state->address.grph.addr.high_part = upper_32_bits(afb->address);

                        dcc_address =
                                get_dcc_address(afb->address, tiling_flags);
                        plane_state->address.grph.meta_addr.low_part =
                                lower_32_bits(dcc_address);
                        plane_state->address.grph.meta_addr.high_part =
                                upper_32_bits(dcc_address);
                } else {
                        /*
                         * Video surface: luma at the fb base, chroma placed after
                         * the luma plane using a 64-pixel-aligned pitch.
                         */
                        awidth = ALIGN(new_state->fb->width, 64);
                        plane_state->address.type = PLN_ADDR_TYPE_VIDEO_PROGRESSIVE;
                        plane_state->address.video_progressive.luma_addr.low_part
                                                        = lower_32_bits(afb->address);
                        plane_state->address.video_progressive.luma_addr.high_part
                                                        = upper_32_bits(afb->address);
                        chroma_addr = afb->address + (u64)awidth * new_state->fb->height;
                        plane_state->address.video_progressive.chroma_addr.low_part
                                                        = lower_32_bits(chroma_addr);
                        plane_state->address.video_progressive.chroma_addr.high_part
                                                        = upper_32_bits(chroma_addr);
                }
        }

        return 0;
}
3737
/*
 * drm_plane_helper_funcs.cleanup_fb hook: unpin the framebuffer BO and drop
 * the reference taken in dm_plane_helper_prepare_fb().
 */
static void dm_plane_helper_cleanup_fb(struct drm_plane *plane,
                                       struct drm_plane_state *old_state)
{
        struct amdgpu_bo *rbo;
        int r;

        if (!old_state->fb)
                return;

        rbo = gem_to_amdgpu_bo(old_state->fb->obj[0]);
        r = amdgpu_bo_reserve(rbo, false);
        if (unlikely(r)) {
                /* Can't unpin without the reservation; leak rather than corrupt. */
                DRM_ERROR("failed to reserve rbo before unpin\n");
                return;
        }

        amdgpu_bo_unpin(rbo);
        amdgpu_bo_unreserve(rbo);
        amdgpu_bo_unref(&rbo);
}
3758
3759 static int dm_plane_atomic_check(struct drm_plane *plane,
3760                                  struct drm_plane_state *state)
3761 {
3762         struct amdgpu_device *adev = plane->dev->dev_private;
3763         struct dc *dc = adev->dm.dc;
3764         struct dm_plane_state *dm_plane_state = to_dm_plane_state(state);
3765
3766         if (!dm_plane_state->dc_state)
3767                 return 0;
3768
3769         if (!fill_rects_from_plane_state(state, dm_plane_state->dc_state))
3770                 return -EINVAL;
3771
3772         if (dc_validate_plane(dc, dm_plane_state->dc_state) == DC_OK)
3773                 return 0;
3774
3775         return -EINVAL;
3776 }
3777
3778 static int dm_plane_atomic_async_check(struct drm_plane *plane,
3779                                        struct drm_plane_state *new_plane_state)
3780 {
3781         struct drm_plane_state *old_plane_state =
3782                 drm_atomic_get_old_plane_state(new_plane_state->state, plane);
3783
3784         /* Only support async updates on cursor planes. */
3785         if (plane->type != DRM_PLANE_TYPE_CURSOR)
3786                 return -EINVAL;
3787
3788         /*
3789          * DRM calls prepare_fb and cleanup_fb on new_plane_state for
3790          * async commits so don't allow fb changes.
3791          */
3792         if (old_plane_state->fb != new_plane_state->fb)
3793                 return -EINVAL;
3794
3795         return 0;
3796 }
3797
/*
 * drm_plane_helper_funcs.atomic_async_update hook: copy the new cursor
 * position/size into the live plane state and push the cursor update to
 * hardware immediately.
 */
static void dm_plane_atomic_async_update(struct drm_plane *plane,
                                         struct drm_plane_state *new_state)
{
        struct drm_plane_state *old_state =
                drm_atomic_get_old_plane_state(new_state->state, plane);

        if (plane->state->fb != new_state->fb)
                drm_atomic_set_fb_for_plane(plane->state, new_state->fb);

        plane->state->src_x = new_state->src_x;
        plane->state->src_y = new_state->src_y;
        plane->state->src_w = new_state->src_w;
        plane->state->src_h = new_state->src_h;
        plane->state->crtc_x = new_state->crtc_x;
        plane->state->crtc_y = new_state->crtc_y;
        plane->state->crtc_w = new_state->crtc_w;
        plane->state->crtc_h = new_state->crtc_h;

        handle_cursor_update(plane, old_state);
}
3818
/* Plane helper vtable shared by all amdgpu_dm planes. */
static const struct drm_plane_helper_funcs dm_plane_helper_funcs = {
        .prepare_fb = dm_plane_helper_prepare_fb,
        .cleanup_fb = dm_plane_helper_cleanup_fb,
        .atomic_check = dm_plane_atomic_check,
        .atomic_async_check = dm_plane_atomic_async_check,
        .atomic_async_update = dm_plane_atomic_async_update
};
3826
3827 /*
3828  * TODO: these are currently initialized to rgb formats only.
3829  * For future use cases we should either initialize them dynamically based on
3830  * plane capabilities, or initialize this array to all formats, so internal drm
3831  * check will succeed, and let DC implement proper check
3832  */
3833 static const uint32_t rgb_formats[] = {
3834         DRM_FORMAT_XRGB8888,
3835         DRM_FORMAT_ARGB8888,
3836         DRM_FORMAT_RGBA8888,
3837         DRM_FORMAT_XRGB2101010,
3838         DRM_FORMAT_XBGR2101010,
3839         DRM_FORMAT_ARGB2101010,
3840         DRM_FORMAT_ABGR2101010,
3841         DRM_FORMAT_XBGR8888,
3842         DRM_FORMAT_ABGR8888,
3843 };
3844
/* Pixel formats advertised for overlay planes. */
static const uint32_t overlay_formats[] = {
        DRM_FORMAT_XRGB8888,
        DRM_FORMAT_ARGB8888,
        DRM_FORMAT_RGBA8888,
        DRM_FORMAT_XBGR8888,
        DRM_FORMAT_ABGR8888,
};
3852
/* Pixel formats advertised for cursor planes. */
static const u32 cursor_formats[] = {
        DRM_FORMAT_ARGB8888
};
3856
3857 static int amdgpu_dm_plane_init(struct amdgpu_display_manager *dm,
3858                                 struct drm_plane *plane,
3859                                 unsigned long possible_crtcs)
3860 {
3861         int res = -EPERM;
3862
3863         switch (plane->type) {
3864         case DRM_PLANE_TYPE_PRIMARY:
3865                 res = drm_universal_plane_init(
3866                                 dm->adev->ddev,
3867                                 plane,
3868                                 possible_crtcs,
3869                                 &dm_plane_funcs,
3870                                 rgb_formats,
3871                                 ARRAY_SIZE(rgb_formats),
3872                                 NULL, plane->type, NULL);
3873                 break;
3874         case DRM_PLANE_TYPE_OVERLAY:
3875                 res = drm_universal_plane_init(
3876                                 dm->adev->ddev,
3877                                 plane,
3878                                 possible_crtcs,
3879                                 &dm_plane_funcs,
3880                                 overlay_formats,
3881                                 ARRAY_SIZE(overlay_formats),
3882                                 NULL, plane->type, NULL);
3883                 break;
3884         case DRM_PLANE_TYPE_CURSOR:
3885                 res = drm_universal_plane_init(
3886                                 dm->adev->ddev,
3887                                 plane,
3888                                 possible_crtcs,
3889                                 &dm_plane_funcs,
3890                                 cursor_formats,
3891                                 ARRAY_SIZE(cursor_formats),
3892                                 NULL, plane->type, NULL);
3893                 break;
3894         }
3895
3896         drm_plane_helper_add(plane, &dm_plane_helper_funcs);
3897
3898         /* Create (reset) the plane state */
3899         if (plane->funcs->reset)
3900                 plane->funcs->reset(plane);
3901
3902
3903         return res;
3904 }
3905
3906 static int amdgpu_dm_crtc_init(struct amdgpu_display_manager *dm,
3907                                struct drm_plane *plane,
3908                                uint32_t crtc_index)
3909 {
3910         struct amdgpu_crtc *acrtc = NULL;
3911         struct drm_plane *cursor_plane;
3912
3913         int res = -ENOMEM;
3914
3915         cursor_plane = kzalloc(sizeof(*cursor_plane), GFP_KERNEL);
3916         if (!cursor_plane)
3917                 goto fail;
3918
3919         cursor_plane->type = DRM_PLANE_TYPE_CURSOR;
3920         res = amdgpu_dm_plane_init(dm, cursor_plane, 0);
3921
3922         acrtc = kzalloc(sizeof(struct amdgpu_crtc), GFP_KERNEL);
3923         if (!acrtc)
3924                 goto fail;
3925
3926         res = drm_crtc_init_with_planes(
3927                         dm->ddev,
3928                         &acrtc->base,
3929                         plane,
3930                         cursor_plane,
3931                         &amdgpu_dm_crtc_funcs, NULL);
3932
3933         if (res)
3934                 goto fail;
3935
3936         drm_crtc_helper_add(&acrtc->base, &amdgpu_dm_crtc_helper_funcs);
3937
3938         /* Create (reset) the plane state */
3939         if (acrtc->base.funcs->reset)
3940                 acrtc->base.funcs->reset(&acrtc->base);
3941
3942         acrtc->max_cursor_width = dm->adev->dm.dc->caps.max_cursor_size;
3943         acrtc->max_cursor_height = dm->adev->dm.dc->caps.max_cursor_size;
3944
3945         acrtc->crtc_id = crtc_index;
3946         acrtc->base.enabled = false;
3947         acrtc->otg_inst = -1;
3948
3949         dm->adev->mode_info.crtcs[crtc_index] = acrtc;
3950         drm_crtc_enable_color_mgmt(&acrtc->base, MAX_COLOR_LUT_ENTRIES,
3951                                    true, MAX_COLOR_LUT_ENTRIES);
3952         drm_mode_crtc_set_gamma_size(&acrtc->base, MAX_COLOR_LEGACY_LUT_ENTRIES);
3953
3954         return 0;
3955
3956 fail:
3957         kfree(acrtc);
3958         kfree(cursor_plane);
3959         return res;
3960 }
3961
3962
/* Map a DC signal type onto the corresponding DRM connector type. */
static int to_drm_connector_type(enum signal_type st)
{
        switch (st) {
        case SIGNAL_TYPE_HDMI_TYPE_A:
                return DRM_MODE_CONNECTOR_HDMIA;
        case SIGNAL_TYPE_EDP:
                return DRM_MODE_CONNECTOR_eDP;
        case SIGNAL_TYPE_LVDS:
                return DRM_MODE_CONNECTOR_LVDS;
        case SIGNAL_TYPE_RGB:
                return DRM_MODE_CONNECTOR_VGA;
        case SIGNAL_TYPE_DISPLAY_PORT:
        case SIGNAL_TYPE_DISPLAY_PORT_MST:
                return DRM_MODE_CONNECTOR_DisplayPort;
        case SIGNAL_TYPE_DVI_DUAL_LINK:
        case SIGNAL_TYPE_DVI_SINGLE_LINK:
                return DRM_MODE_CONNECTOR_DVID;
        case SIGNAL_TYPE_VIRTUAL:
                return DRM_MODE_CONNECTOR_VIRTUAL;

        default:
                return DRM_MODE_CONNECTOR_Unknown;
        }
}
3987
3988 static struct drm_encoder *amdgpu_dm_connector_to_encoder(struct drm_connector *connector)
3989 {
3990         return drm_encoder_find(connector->dev, NULL, connector->encoder_ids[0]);
3991 }
3992
/*
 * Cache the connector's preferred mode as the encoder's native mode.
 *
 * Resets native_mode.clock to 0 first so a stale native mode is never
 * left behind when no preferred mode is found.
 */
static void amdgpu_dm_get_native_mode(struct drm_connector *connector)
{
	struct drm_encoder *encoder;
	struct amdgpu_encoder *amdgpu_encoder;

	encoder = amdgpu_dm_connector_to_encoder(connector);

	/* Connector without an encoder: nothing to record. */
	if (encoder == NULL)
		return;

	amdgpu_encoder = to_amdgpu_encoder(encoder);

	/* Invalidate any previously cached native mode. */
	amdgpu_encoder->native_mode.clock = 0;

	if (!list_empty(&connector->probed_modes)) {
		struct drm_display_mode *preferred_mode = NULL;

		/*
		 * NOTE(review): the unconditional break means only the first
		 * entry of probed_modes is ever examined; this relies on the
		 * preferred mode being sorted first — confirm that invariant
		 * holds for all callers before changing.
		 */
		list_for_each_entry(preferred_mode,
				    &connector->probed_modes,
				    head) {
			if (preferred_mode->type & DRM_MODE_TYPE_PREFERRED)
				amdgpu_encoder->native_mode = *preferred_mode;

			break;
		}

	}
}
4021
4022 static struct drm_display_mode *
4023 amdgpu_dm_create_common_mode(struct drm_encoder *encoder,
4024                              char *name,
4025                              int hdisplay, int vdisplay)
4026 {
4027         struct drm_device *dev = encoder->dev;
4028         struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
4029         struct drm_display_mode *mode = NULL;
4030         struct drm_display_mode *native_mode = &amdgpu_encoder->native_mode;
4031
4032         mode = drm_mode_duplicate(dev, native_mode);
4033
4034         if (mode == NULL)
4035                 return NULL;
4036
4037         mode->hdisplay = hdisplay;
4038         mode->vdisplay = vdisplay;
4039         mode->type &= ~DRM_MODE_TYPE_PREFERRED;
4040         strscpy(mode->name, name, DRM_DISPLAY_MODE_LEN);
4041
4042         return mode;
4043
4044 }
4045
4046 static void amdgpu_dm_connector_add_common_modes(struct drm_encoder *encoder,
4047                                                  struct drm_connector *connector)
4048 {
4049         struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
4050         struct drm_display_mode *mode = NULL;
4051         struct drm_display_mode *native_mode = &amdgpu_encoder->native_mode;
4052         struct amdgpu_dm_connector *amdgpu_dm_connector =
4053                                 to_amdgpu_dm_connector(connector);
4054         int i;
4055         int n;
4056         struct mode_size {
4057                 char name[DRM_DISPLAY_MODE_LEN];
4058                 int w;
4059                 int h;
4060         } common_modes[] = {
4061                 {  "640x480",  640,  480},
4062                 {  "800x600",  800,  600},
4063                 { "1024x768", 1024,  768},
4064                 { "1280x720", 1280,  720},
4065                 { "1280x800", 1280,  800},
4066                 {"1280x1024", 1280, 1024},
4067                 { "1440x900", 1440,  900},
4068                 {"1680x1050", 1680, 1050},
4069                 {"1600x1200", 1600, 1200},
4070                 {"1920x1080", 1920, 1080},
4071                 {"1920x1200", 1920, 1200}
4072         };
4073
4074         n = ARRAY_SIZE(common_modes);
4075
4076         for (i = 0; i < n; i++) {
4077                 struct drm_display_mode *curmode = NULL;
4078                 bool mode_existed = false;
4079
4080                 if (common_modes[i].w > native_mode->hdisplay ||
4081                     common_modes[i].h > native_mode->vdisplay ||
4082                    (common_modes[i].w == native_mode->hdisplay &&
4083                     common_modes[i].h == native_mode->vdisplay))
4084                         continue;
4085
4086                 list_for_each_entry(curmode, &connector->probed_modes, head) {
4087                         if (common_modes[i].w == curmode->hdisplay &&
4088                             common_modes[i].h == curmode->vdisplay) {
4089                                 mode_existed = true;
4090                                 break;
4091                         }
4092                 }
4093
4094                 if (mode_existed)
4095                         continue;
4096
4097                 mode = amdgpu_dm_create_common_mode(encoder,
4098                                 common_modes[i].name, common_modes[i].w,
4099                                 common_modes[i].h);
4100                 drm_mode_probed_add(connector, mode);
4101                 amdgpu_dm_connector->num_modes++;
4102         }
4103 }
4104
4105 static void amdgpu_dm_connector_ddc_get_modes(struct drm_connector *connector,
4106                                               struct edid *edid)
4107 {
4108         struct amdgpu_dm_connector *amdgpu_dm_connector =
4109                         to_amdgpu_dm_connector(connector);
4110
4111         if (edid) {
4112                 /* empty probed_modes */
4113                 INIT_LIST_HEAD(&connector->probed_modes);
4114                 amdgpu_dm_connector->num_modes =
4115                                 drm_add_edid_modes(connector, edid);
4116
4117                 amdgpu_dm_get_native_mode(connector);
4118         } else {
4119                 amdgpu_dm_connector->num_modes = 0;
4120         }
4121 }
4122
4123 static int amdgpu_dm_connector_get_modes(struct drm_connector *connector)
4124 {
4125         struct amdgpu_dm_connector *amdgpu_dm_connector =
4126                         to_amdgpu_dm_connector(connector);
4127         struct drm_encoder *encoder;
4128         struct edid *edid = amdgpu_dm_connector->edid;
4129
4130         encoder = amdgpu_dm_connector_to_encoder(connector);
4131
4132         if (!edid || !drm_edid_is_valid(edid)) {
4133                 amdgpu_dm_connector->num_modes =
4134                                 drm_add_modes_noedid(connector, 640, 480);
4135         } else {
4136                 amdgpu_dm_connector_ddc_get_modes(connector, edid);
4137                 amdgpu_dm_connector_add_common_modes(encoder, connector);
4138         }
4139         amdgpu_dm_fbc_init(connector);
4140
4141         return amdgpu_dm_connector->num_modes;
4142 }
4143
4144 void amdgpu_dm_connector_init_helper(struct amdgpu_display_manager *dm,
4145                                      struct amdgpu_dm_connector *aconnector,
4146                                      int connector_type,
4147                                      struct dc_link *link,
4148                                      int link_index)
4149 {
4150         struct amdgpu_device *adev = dm->ddev->dev_private;
4151
4152         aconnector->connector_id = link_index;
4153         aconnector->dc_link = link;
4154         aconnector->base.interlace_allowed = false;
4155         aconnector->base.doublescan_allowed = false;
4156         aconnector->base.stereo_allowed = false;
4157         aconnector->base.dpms = DRM_MODE_DPMS_OFF;
4158         aconnector->hpd.hpd = AMDGPU_HPD_NONE; /* not used */
4159         mutex_init(&aconnector->hpd_lock);
4160
4161         /*
4162          * configure support HPD hot plug connector_>polled default value is 0
4163          * which means HPD hot plug not supported
4164          */
4165         switch (connector_type) {
4166         case DRM_MODE_CONNECTOR_HDMIA:
4167                 aconnector->base.polled = DRM_CONNECTOR_POLL_HPD;
4168                 aconnector->base.ycbcr_420_allowed =
4169                         link->link_enc->features.hdmi_ycbcr420_supported ? true : false;
4170                 break;
4171         case DRM_MODE_CONNECTOR_DisplayPort:
4172                 aconnector->base.polled = DRM_CONNECTOR_POLL_HPD;
4173                 aconnector->base.ycbcr_420_allowed =
4174                         link->link_enc->features.dp_ycbcr420_supported ? true : false;
4175                 break;
4176         case DRM_MODE_CONNECTOR_DVID:
4177                 aconnector->base.polled = DRM_CONNECTOR_POLL_HPD;
4178                 break;
4179         default:
4180                 break;
4181         }
4182
4183         drm_object_attach_property(&aconnector->base.base,
4184                                 dm->ddev->mode_config.scaling_mode_property,
4185                                 DRM_MODE_SCALE_NONE);
4186
4187         drm_object_attach_property(&aconnector->base.base,
4188                                 adev->mode_info.underscan_property,
4189                                 UNDERSCAN_OFF);
4190         drm_object_attach_property(&aconnector->base.base,
4191                                 adev->mode_info.underscan_hborder_property,
4192                                 0);
4193         drm_object_attach_property(&aconnector->base.base,
4194                                 adev->mode_info.underscan_vborder_property,
4195                                 0);
4196         drm_object_attach_property(&aconnector->base.base,
4197                                 adev->mode_info.max_bpc_property,
4198                                 0);
4199
4200         if (connector_type == DRM_MODE_CONNECTOR_eDP &&
4201             dc_is_dmcu_initialized(adev->dm.dc)) {
4202                 drm_object_attach_property(&aconnector->base.base,
4203                                 adev->mode_info.abm_level_property, 0);
4204         }
4205
4206         if (connector_type == DRM_MODE_CONNECTOR_HDMIA ||
4207             connector_type == DRM_MODE_CONNECTOR_DisplayPort ||
4208             connector_type == DRM_MODE_CONNECTOR_eDP) {
4209                 drm_connector_attach_vrr_capable_property(
4210                         &aconnector->base);
4211         }
4212 }
4213
4214 static int amdgpu_dm_i2c_xfer(struct i2c_adapter *i2c_adap,
4215                               struct i2c_msg *msgs, int num)
4216 {
4217         struct amdgpu_i2c_adapter *i2c = i2c_get_adapdata(i2c_adap);
4218         struct ddc_service *ddc_service = i2c->ddc_service;
4219         struct i2c_command cmd;
4220         int i;
4221         int result = -EIO;
4222
4223         cmd.payloads = kcalloc(num, sizeof(struct i2c_payload), GFP_KERNEL);
4224
4225         if (!cmd.payloads)
4226                 return result;
4227
4228         cmd.number_of_payloads = num;
4229         cmd.engine = I2C_COMMAND_ENGINE_DEFAULT;
4230         cmd.speed = 100;
4231
4232         for (i = 0; i < num; i++) {
4233                 cmd.payloads[i].write = !(msgs[i].flags & I2C_M_RD);
4234                 cmd.payloads[i].address = msgs[i].addr;
4235                 cmd.payloads[i].length = msgs[i].len;
4236                 cmd.payloads[i].data = msgs[i].buf;
4237         }
4238
4239         if (dc_submit_i2c(
4240                         ddc_service->ctx->dc,
4241                         ddc_service->ddc_pin->hw_info.ddc_channel,
4242                         &cmd))
4243                 result = num;
4244
4245         kfree(cmd.payloads);
4246         return result;
4247 }
4248
/* Report plain I2C plus SMBus-emulation capability for the DDC bus. */
static u32 amdgpu_dm_i2c_func(struct i2c_adapter *adap)
{
	return I2C_FUNC_I2C | I2C_FUNC_SMBUS_EMUL;
}
4253
/* i2c algorithm backed by DC's DDC engine (see amdgpu_dm_i2c_xfer). */
static const struct i2c_algorithm amdgpu_dm_i2c_algo = {
	.master_xfer = amdgpu_dm_i2c_xfer,
	.functionality = amdgpu_dm_i2c_func,
};
4258
4259 static struct amdgpu_i2c_adapter *
4260 create_i2c(struct ddc_service *ddc_service,
4261            int link_index,
4262            int *res)
4263 {
4264         struct amdgpu_device *adev = ddc_service->ctx->driver_context;
4265         struct amdgpu_i2c_adapter *i2c;
4266
4267         i2c = kzalloc(sizeof(struct amdgpu_i2c_adapter), GFP_KERNEL);
4268         if (!i2c)
4269                 return NULL;
4270         i2c->base.owner = THIS_MODULE;
4271         i2c->base.class = I2C_CLASS_DDC;
4272         i2c->base.dev.parent = &adev->pdev->dev;
4273         i2c->base.algo = &amdgpu_dm_i2c_algo;
4274         snprintf(i2c->base.name, sizeof(i2c->base.name), "AMDGPU DM i2c hw bus %d", link_index);
4275         i2c_set_adapdata(&i2c->base, i2c);
4276         i2c->ddc_service = ddc_service;
4277         i2c->ddc_service->ddc_pin->hw_info.ddc_channel = link_index;
4278
4279         return i2c;
4280 }
4281
4282
4283 /*
4284  * Note: this function assumes that dc_link_detect() was called for the
4285  * dc_link which will be represented by this aconnector.
4286  */
/*
 * Create and register the DRM connector for the dc_link at @link_index,
 * wiring up its i2c adapter, helper funcs, properties, encoder, and
 * (for DP/eDP) MST support.  Returns 0 on success or a negative errno.
 */
static int amdgpu_dm_connector_init(struct amdgpu_display_manager *dm,
				    struct amdgpu_dm_connector *aconnector,
				    uint32_t link_index,
				    struct amdgpu_encoder *aencoder)
{
	int res = 0;
	int connector_type;
	struct dc *dc = dm->dc;
	struct dc_link *link = dc_get_link_at_index(dc, link_index);
	struct amdgpu_i2c_adapter *i2c;

	/* Let the dc_link find its way back to this connector. */
	link->priv = aconnector;

	DRM_DEBUG_DRIVER("%s()\n", __func__);

	i2c = create_i2c(link->ddc, link->link_index, &res);
	if (!i2c) {
		DRM_ERROR("Failed to create i2c adapter data\n");
		return -ENOMEM;
	}

	aconnector->i2c = i2c;
	res = i2c_add_adapter(&i2c->base);

	if (res) {
		DRM_ERROR("Failed to register hw i2c %d\n", link->link_index);
		goto out_free;
	}

	connector_type = to_drm_connector_type(link->connector_signal);

	res = drm_connector_init(
			dm->ddev,
			&aconnector->base,
			&amdgpu_dm_connector_funcs,
			connector_type);

	if (res) {
		DRM_ERROR("connector_init failed\n");
		aconnector->connector_id = -1;
		goto out_free;
	}

	drm_connector_helper_add(
			&aconnector->base,
			&amdgpu_dm_connector_helper_funcs);

	/* Create the initial (reset) connector state. */
	if (aconnector->base.funcs->reset)
		aconnector->base.funcs->reset(&aconnector->base);

	amdgpu_dm_connector_init_helper(
		dm,
		aconnector,
		connector_type,
		link,
		link_index);

	drm_connector_attach_encoder(
		&aconnector->base, &aencoder->base);

	drm_connector_register(&aconnector->base);
#if defined(CONFIG_DEBUG_FS)
	res = connector_debugfs_init(aconnector);
	if (res) {
		DRM_ERROR("Failed to create debugfs for connector");
		goto out_free;
	}
#endif

	if (connector_type == DRM_MODE_CONNECTOR_DisplayPort
		|| connector_type == DRM_MODE_CONNECTOR_eDP)
		amdgpu_dm_initialize_dp_connector(dm, aconnector);

out_free:
	/*
	 * NOTE(review): failures taken after i2c_add_adapter() succeeded
	 * (drm_connector_init / debugfs) free the adapter here without a
	 * matching i2c_del_adapter() — verify this doesn't leak the
	 * registered adapter.
	 */
	if (res) {
		kfree(i2c);
		aconnector->i2c = NULL;
	}
	return res;
}
4367
4368 int amdgpu_dm_get_encoder_crtc_mask(struct amdgpu_device *adev)
4369 {
4370         switch (adev->mode_info.num_crtc) {
4371         case 1:
4372                 return 0x1;
4373         case 2:
4374                 return 0x3;
4375         case 3:
4376                 return 0x7;
4377         case 4:
4378                 return 0xf;
4379         case 5:
4380                 return 0x1f;
4381         case 6:
4382         default:
4383                 return 0x3f;
4384         }
4385 }
4386
4387 static int amdgpu_dm_encoder_init(struct drm_device *dev,
4388                                   struct amdgpu_encoder *aencoder,
4389                                   uint32_t link_index)
4390 {
4391         struct amdgpu_device *adev = dev->dev_private;
4392
4393         int res = drm_encoder_init(dev,
4394                                    &aencoder->base,
4395                                    &amdgpu_dm_encoder_funcs,
4396                                    DRM_MODE_ENCODER_TMDS,
4397                                    NULL);
4398
4399         aencoder->base.possible_crtcs = amdgpu_dm_get_encoder_crtc_mask(adev);
4400
4401         if (!res)
4402                 aencoder->encoder_id = link_index;
4403         else
4404                 aencoder->encoder_id = -1;
4405
4406         drm_encoder_helper_add(&aencoder->base, &amdgpu_dm_encoder_helper_funcs);
4407
4408         return res;
4409 }
4410
4411 static void manage_dm_interrupts(struct amdgpu_device *adev,
4412                                  struct amdgpu_crtc *acrtc,
4413                                  bool enable)
4414 {
4415         /*
4416          * this is not correct translation but will work as soon as VBLANK
4417          * constant is the same as PFLIP
4418          */
4419         int irq_type =
4420                 amdgpu_display_crtc_idx_to_irq_type(
4421                         adev,
4422                         acrtc->crtc_id);
4423
4424         if (enable) {
4425                 drm_crtc_vblank_on(&acrtc->base);
4426                 amdgpu_irq_get(
4427                         adev,
4428                         &adev->pageflip_irq,
4429                         irq_type);
4430         } else {
4431
4432                 amdgpu_irq_put(
4433                         adev,
4434                         &adev->pageflip_irq,
4435                         irq_type);
4436                 drm_crtc_vblank_off(&acrtc->base);
4437         }
4438 }
4439
4440 static bool
4441 is_scaling_state_different(const struct dm_connector_state *dm_state,
4442                            const struct dm_connector_state *old_dm_state)
4443 {
4444         if (dm_state->scaling != old_dm_state->scaling)
4445                 return true;
4446         if (!dm_state->underscan_enable && old_dm_state->underscan_enable) {
4447                 if (old_dm_state->underscan_hborder != 0 && old_dm_state->underscan_vborder != 0)
4448                         return true;
4449         } else  if (dm_state->underscan_enable && !old_dm_state->underscan_enable) {
4450                 if (dm_state->underscan_hborder != 0 && dm_state->underscan_vborder != 0)
4451                         return true;
4452         } else if (dm_state->underscan_hborder != old_dm_state->underscan_hborder ||
4453                    dm_state->underscan_vborder != old_dm_state->underscan_vborder)
4454                 return true;
4455         return false;
4456 }
4457
4458 static void remove_stream(struct amdgpu_device *adev,
4459                           struct amdgpu_crtc *acrtc,
4460                           struct dc_stream_state *stream)
4461 {
4462         /* this is the update mode case */
4463
4464         acrtc->otg_inst = -1;
4465         acrtc->enabled = false;
4466 }
4467
4468 static int get_cursor_position(struct drm_plane *plane, struct drm_crtc *crtc,
4469                                struct dc_cursor_position *position)
4470 {
4471         struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
4472         int x, y;
4473         int xorigin = 0, yorigin = 0;
4474
4475         if (!crtc || !plane->state->fb) {
4476                 position->enable = false;
4477                 position->x = 0;
4478                 position->y = 0;
4479                 return 0;
4480         }
4481
4482         if ((plane->state->crtc_w > amdgpu_crtc->max_cursor_width) ||
4483             (plane->state->crtc_h > amdgpu_crtc->max_cursor_height)) {
4484                 DRM_ERROR("%s: bad cursor width or height %d x %d\n",
4485                           __func__,
4486                           plane->state->crtc_w,
4487                           plane->state->crtc_h);
4488                 return -EINVAL;
4489         }
4490
4491         x = plane->state->crtc_x;
4492         y = plane->state->crtc_y;
4493         /* avivo cursor are offset into the total surface */
4494         x += crtc->primary->state->src_x >> 16;
4495         y += crtc->primary->state->src_y >> 16;
4496         if (x < 0) {
4497                 xorigin = min(-x, amdgpu_crtc->max_cursor_width - 1);
4498                 x = 0;
4499         }
4500         if (y < 0) {
4501                 yorigin = min(-y, amdgpu_crtc->max_cursor_height - 1);
4502                 y = 0;
4503         }
4504         position->enable = true;
4505         position->x = x;
4506         position->y = y;
4507         position->x_hotspot = xorigin;
4508         position->y_hotspot = yorigin;
4509
4510         return 0;
4511 }
4512
/*
 * Program the hardware cursor from the plane state: turn it off when
 * the computed position is disabled, otherwise push both attributes
 * (address, size, format) and position to DC under dc_lock.
 */
static void handle_cursor_update(struct drm_plane *plane,
				 struct drm_plane_state *old_plane_state)
{
	struct amdgpu_device *adev = plane->dev->dev_private;
	struct amdgpu_framebuffer *afb = to_amdgpu_framebuffer(plane->state->fb);
	/* Falls back to the old CRTC when the cursor fb was just removed. */
	struct drm_crtc *crtc = afb ? plane->state->crtc : old_plane_state->crtc;
	struct dm_crtc_state *crtc_state = crtc ? to_dm_crtc_state(crtc->state) : NULL;
	/*
	 * NOTE(review): crtc may be NULL here; amdgpu_crtc is then invalid
	 * and is dereferenced in the debug print below — confirm callers
	 * guarantee a CRTC whenever either fb is set.
	 */
	struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
	uint64_t address = afb ? afb->address : 0;
	struct dc_cursor_position position;
	struct dc_cursor_attributes attributes;
	int ret;

	/* Neither old nor new state has a cursor fb: nothing to do. */
	if (!plane->state->fb && !old_plane_state->fb)
		return;

	DRM_DEBUG_DRIVER("%s: crtc_id=%d with size %d to %d\n",
			 __func__,
			 amdgpu_crtc->crtc_id,
			 plane->state->crtc_w,
			 plane->state->crtc_h);

	ret = get_cursor_position(plane, crtc, &position);
	if (ret)
		return;

	if (!position.enable) {
		/* turn off cursor */
		if (crtc_state && crtc_state->stream) {
			mutex_lock(&adev->dm.dc_lock);
			dc_stream_set_cursor_position(crtc_state->stream,
						      &position);
			mutex_unlock(&adev->dm.dc_lock);
		}
		return;
	}

	amdgpu_crtc->cursor_width = plane->state->crtc_w;
	amdgpu_crtc->cursor_height = plane->state->crtc_h;

	attributes.address.high_part = upper_32_bits(address);
	attributes.address.low_part  = lower_32_bits(address);
	attributes.width             = plane->state->crtc_w;
	attributes.height            = plane->state->crtc_h;
	attributes.color_format      = CURSOR_MODE_COLOR_PRE_MULTIPLIED_ALPHA;
	attributes.rotation_angle    = 0;
	attributes.attribute_flags.value = 0;

	attributes.pitch = attributes.width;

	if (crtc_state->stream) {
		mutex_lock(&adev->dm.dc_lock);
		if (!dc_stream_set_cursor_attributes(crtc_state->stream,
							 &attributes))
			DRM_ERROR("DC failed to set cursor attributes\n");

		if (!dc_stream_set_cursor_position(crtc_state->stream,
						   &position))
			DRM_ERROR("DC failed to set cursor position\n");
		mutex_unlock(&adev->dm.dc_lock);
	}
}
4575
/*
 * Move the pending pageflip event from the CRTC state onto @acrtc so
 * the pageflip interrupt handler can deliver it.  Must be called with
 * the device's event_lock held.
 */
static void prepare_flip_isr(struct amdgpu_crtc *acrtc)
{

	assert_spin_locked(&acrtc->base.dev->event_lock);
	/* A previously submitted flip must have completed by now. */
	WARN_ON(acrtc->event);

	acrtc->event = acrtc->base.state->event;

	/* Set the flip status */
	acrtc->pflip_status = AMDGPU_FLIP_SUBMITTED;

	/* Mark this event as consumed */
	acrtc->base.state->event = NULL;

	DRM_DEBUG_DRIVER("crtc:%d, pflip_stat:AMDGPU_FLIP_SUBMITTED\n",
						 acrtc->crtc_id);
}
4593
/*
 * Recompute the VRR (FreeSync) parameters and infopacket for
 * @new_stream from the CRTC's freesync config, record what changed on
 * @new_crtc_state, and push the new adjust/infopacket into the stream.
 */
static void update_freesync_state_on_stream(
	struct amdgpu_display_manager *dm,
	struct dm_crtc_state *new_crtc_state,
	struct dc_stream_state *new_stream,
	struct dc_plane_state *surface,
	u32 flip_timestamp_in_us)
{
	struct mod_vrr_params vrr_params = new_crtc_state->vrr_params;
	struct dc_info_packet vrr_infopacket = {0};
	struct mod_freesync_config config = new_crtc_state->freesync_config;

	if (!new_stream)
		return;

	/*
	 * TODO: Determine why min/max totals and vrefresh can be 0 here.
	 * For now it's sufficient to just guard against these conditions.
	 */

	if (!new_stream->timing.h_total || !new_stream->timing.v_total)
		return;

	/* VRR is only active when supported and a valid refresh range exists. */
	if (new_crtc_state->vrr_supported &&
	    config.min_refresh_in_uhz &&
	    config.max_refresh_in_uhz) {
		config.state = new_crtc_state->base.vrr_enabled ?
			VRR_STATE_ACTIVE_VARIABLE :
			VRR_STATE_INACTIVE;
	} else {
		config.state = VRR_STATE_UNSUPPORTED;
	}

	mod_freesync_build_vrr_params(dm->freesync_module,
				      new_stream,
				      &config, &vrr_params);

	/* Flips feed the freesync module so it can track frame pacing. */
	if (surface) {
		mod_freesync_handle_preflip(
			dm->freesync_module,
			surface,
			new_stream,
			flip_timestamp_in_us,
			&vrr_params);
	}

	mod_freesync_build_vrr_infopacket(
		dm->freesync_module,
		new_stream,
		&vrr_params,
		PACKET_TYPE_VRR,
		TRANSFER_FUNC_UNKNOWN,
		&vrr_infopacket);

	/* Record deltas before overwriting the cached params/infopacket. */
	new_crtc_state->freesync_timing_changed |=
		(memcmp(&new_crtc_state->vrr_params.adjust,
			&vrr_params.adjust,
			sizeof(vrr_params.adjust)) != 0);

	new_crtc_state->freesync_vrr_info_changed |=
		(memcmp(&new_crtc_state->vrr_infopacket,
			&vrr_infopacket,
			sizeof(vrr_infopacket)) != 0);

	new_crtc_state->vrr_params = vrr_params;
	new_crtc_state->vrr_infopacket = vrr_infopacket;

	new_stream->adjust = new_crtc_state->vrr_params.adjust;
	new_stream->vrr_infopacket = vrr_infopacket;

	if (new_crtc_state->freesync_vrr_info_changed)
		DRM_DEBUG_KMS("VRR packet update: crtc=%u enabled=%d state=%d",
			      new_crtc_state->base.crtc->base.id,
			      (int)new_crtc_state->base.vrr_enabled,
			      (int)vrr_params.state);
}
4669
4670 static void amdgpu_dm_commit_planes(struct drm_atomic_state *state,
4671                                     struct dc_state *dc_state,
4672                                     struct drm_device *dev,
4673                                     struct amdgpu_display_manager *dm,
4674                                     struct drm_crtc *pcrtc,
4675                                     bool wait_for_vblank)
4676 {
4677         uint32_t i, r;
4678         uint64_t timestamp_ns;
4679         struct drm_plane *plane;
4680         struct drm_plane_state *old_plane_state, *new_plane_state;
4681         struct amdgpu_crtc *acrtc_attach = to_amdgpu_crtc(pcrtc);
4682         struct drm_crtc_state *new_pcrtc_state =
4683                         drm_atomic_get_new_crtc_state(state, pcrtc);
4684         struct dm_crtc_state *acrtc_state = to_dm_crtc_state(new_pcrtc_state);
4685         struct dm_crtc_state *dm_old_crtc_state =
4686                         to_dm_crtc_state(drm_atomic_get_old_crtc_state(state, pcrtc));
4687         int planes_count = 0, vpos, hpos;
4688         unsigned long flags;
4689         struct amdgpu_bo *abo;
4690         uint64_t tiling_flags;
4691         uint32_t target, target_vblank;
4692         uint64_t last_flip_vblank;
4693         bool vrr_active = acrtc_state->freesync_config.state == VRR_STATE_ACTIVE_VARIABLE;
4694         bool pflip_present = false;
4695
4696         struct {
4697                 struct dc_surface_update surface_updates[MAX_SURFACES];
4698                 struct dc_plane_info plane_infos[MAX_SURFACES];
4699                 struct dc_scaling_info scaling_infos[MAX_SURFACES];
4700                 struct dc_flip_addrs flip_addrs[MAX_SURFACES];
4701                 struct dc_stream_update stream_update;
4702         } *bundle;
4703
4704         bundle = kzalloc(sizeof(*bundle), GFP_KERNEL);
4705
4706         if (!bundle) {
4707                 dm_error("Failed to allocate update bundle\n");
4708                 goto cleanup;
4709         }
4710
4711         /* update planes when needed */
4712         for_each_oldnew_plane_in_state(state, plane, old_plane_state, new_plane_state, i) {
4713                 struct drm_crtc *crtc = new_plane_state->crtc;
4714                 struct drm_crtc_state *new_crtc_state;
4715                 struct drm_framebuffer *fb = new_plane_state->fb;
4716                 struct amdgpu_framebuffer *afb = to_amdgpu_framebuffer(fb);
4717                 bool plane_needs_flip;
4718                 struct dc_plane_state *dc_plane;
4719                 struct dm_plane_state *dm_new_plane_state = to_dm_plane_state(new_plane_state);
4720
4721                 /* Cursor plane is handled after stream updates */
4722                 if (plane->type == DRM_PLANE_TYPE_CURSOR)
4723                         continue;
4724
4725                 if (!fb || !crtc || pcrtc != crtc)
4726                         continue;
4727
4728                 new_crtc_state = drm_atomic_get_new_crtc_state(state, crtc);
4729                 if (!new_crtc_state->active)
4730                         continue;
4731
4732                 dc_plane = dm_new_plane_state->dc_state;
4733
4734                 bundle->surface_updates[planes_count].surface = dc_plane;
4735                 if (new_pcrtc_state->color_mgmt_changed) {
4736                         bundle->surface_updates[planes_count].gamma = dc_plane->gamma_correction;
4737                         bundle->surface_updates[planes_count].in_transfer_func = dc_plane->in_transfer_func;
4738                 }
4739
4740
4741                 bundle->scaling_infos[planes_count].scaling_quality = dc_plane->scaling_quality;
4742                 bundle->scaling_infos[planes_count].src_rect = dc_plane->src_rect;
4743                 bundle->scaling_infos[planes_count].dst_rect = dc_plane->dst_rect;
4744                 bundle->scaling_infos[planes_count].clip_rect = dc_plane->clip_rect;
4745                 bundle->surface_updates[planes_count].scaling_info = &bundle->scaling_infos[planes_count];
4746
4747
4748                 bundle->plane_infos[planes_count].color_space = dc_plane->color_space;
4749                 bundle->plane_infos[planes_count].format = dc_plane->format;
4750                 bundle->plane_infos[planes_count].plane_size = dc_plane->plane_size;
4751                 bundle->plane_infos[planes_count].rotation = dc_plane->rotation;
4752                 bundle->plane_infos[planes_count].horizontal_mirror = dc_plane->horizontal_mirror;
4753                 bundle->plane_infos[planes_count].stereo_format = dc_plane->stereo_format;
4754                 bundle->plane_infos[planes_count].tiling_info = dc_plane->tiling_info;
4755                 bundle->plane_infos[planes_count].visible = dc_plane->visible;
4756                 bundle->plane_infos[planes_count].per_pixel_alpha = dc_plane->per_pixel_alpha;
4757                 bundle->plane_infos[planes_count].dcc = dc_plane->dcc;
4758                 bundle->surface_updates[planes_count].plane_info = &bundle->plane_infos[planes_count];
4759
4760                 plane_needs_flip = old_plane_state->fb && new_plane_state->fb;
4761
4762                 pflip_present = pflip_present || plane_needs_flip;
4763
4764                 if (!plane_needs_flip) {
4765                         planes_count += 1;
4766                         continue;
4767                 }
4768
4769                 /*
4770                  * TODO This might fail and hence better not used, wait
4771                  * explicitly on fences instead
4772                  * and in general should be called for
4773                  * blocking commit to as per framework helpers
4774                  */
4775                 abo = gem_to_amdgpu_bo(fb->obj[0]);
4776                 r = amdgpu_bo_reserve(abo, true);
4777                 if (unlikely(r != 0)) {
4778                         DRM_ERROR("failed to reserve buffer before flip\n");
4779                         WARN_ON(1);
4780                 }
4781
4782                 /* Wait for all fences on this FB */
4783                 WARN_ON(reservation_object_wait_timeout_rcu(abo->tbo.resv, true, false,
4784                                                                             MAX_SCHEDULE_TIMEOUT) < 0);
4785
4786                 amdgpu_bo_get_tiling_flags(abo, &tiling_flags);
4787
4788                 amdgpu_bo_unreserve(abo);
4789
4790                 bundle->flip_addrs[planes_count].address.grph.addr.low_part = lower_32_bits(afb->address);
4791                 bundle->flip_addrs[planes_count].address.grph.addr.high_part = upper_32_bits(afb->address);
4792
4793                 fill_plane_tiling_attributes(dm->adev, afb, dc_plane,
4794                         &bundle->plane_infos[planes_count].tiling_info,
4795                         &bundle->plane_infos[planes_count].dcc,
4796                         &bundle->flip_addrs[planes_count].address,
4797                         tiling_flags);
4798
4799                 bundle->flip_addrs[planes_count].flip_immediate =
4800                                 (crtc->state->pageflip_flags & DRM_MODE_PAGE_FLIP_ASYNC) != 0;
4801
4802                 timestamp_ns = ktime_get_ns();
4803                 bundle->flip_addrs[planes_count].flip_timestamp_in_us = div_u64(timestamp_ns, 1000);
4804                 bundle->surface_updates[planes_count].flip_addr = &bundle->flip_addrs[planes_count];
4805                 bundle->surface_updates[planes_count].surface = dc_plane;
4806
4807                 if (!bundle->surface_updates[planes_count].surface) {
4808                         DRM_ERROR("No surface for CRTC: id=%d\n",
4809                                         acrtc_attach->crtc_id);
4810                         continue;
4811                 }
4812
4813                 if (plane == pcrtc->primary)
4814                         update_freesync_state_on_stream(
4815                                 dm,
4816                                 acrtc_state,
4817                                 acrtc_state->stream,
4818                                 dc_plane,
4819                                 bundle->flip_addrs[planes_count].flip_timestamp_in_us);
4820
4821                 DRM_DEBUG_DRIVER("%s Flipping to hi: 0x%x, low: 0x%x\n",
4822                                  __func__,
4823                                  bundle->flip_addrs[planes_count].address.grph.addr.high_part,
4824                                  bundle->flip_addrs[planes_count].address.grph.addr.low_part);
4825
4826                 planes_count += 1;
4827
4828         }
4829
4830         if (pflip_present) {
4831                 if (!vrr_active) {
4832                         /* Use old throttling in non-vrr fixed refresh rate mode
4833                          * to keep flip scheduling based on target vblank counts
4834                          * working in a backwards compatible way, e.g., for
4835                          * clients using the GLX_OML_sync_control extension or
4836                          * DRI3/Present extension with defined target_msc.
4837                          */
4838                         last_flip_vblank = drm_crtc_vblank_count(pcrtc);
4839                 }
4840                 else {
4841                         /* For variable refresh rate mode only:
4842                          * Get vblank of last completed flip to avoid > 1 vrr
4843                          * flips per video frame by use of throttling, but allow
4844                          * flip programming anywhere in the possibly large
4845                          * variable vrr vblank interval for fine-grained flip
4846                          * timing control and more opportunity to avoid stutter
4847                          * on late submission of flips.
4848                          */
4849                         spin_lock_irqsave(&pcrtc->dev->event_lock, flags);
4850                         last_flip_vblank = acrtc_attach->last_flip_vblank;
4851                         spin_unlock_irqrestore(&pcrtc->dev->event_lock, flags);
4852                 }
4853
4854                 target = (uint32_t)last_flip_vblank + wait_for_vblank;
4855
4856                 /* Prepare wait for target vblank early - before the fence-waits */
4857                 target_vblank = target - (uint32_t)drm_crtc_vblank_count(pcrtc) +
4858                                 amdgpu_get_vblank_counter_kms(pcrtc->dev, acrtc_attach->crtc_id);
4859
4860                 /*
4861                  * Wait until we're out of the vertical blank period before the one
4862                  * targeted by the flip
4863                  */
4864                 while ((acrtc_attach->enabled &&
4865                         (amdgpu_display_get_crtc_scanoutpos(dm->ddev, acrtc_attach->crtc_id,
4866                                                             0, &vpos, &hpos, NULL,
4867                                                             NULL, &pcrtc->hwmode)
4868                          & (DRM_SCANOUTPOS_VALID | DRM_SCANOUTPOS_IN_VBLANK)) ==
4869                         (DRM_SCANOUTPOS_VALID | DRM_SCANOUTPOS_IN_VBLANK) &&
4870                         (int)(target_vblank -
4871                           amdgpu_get_vblank_counter_kms(dm->ddev, acrtc_attach->crtc_id)) > 0)) {
4872                         usleep_range(1000, 1100);
4873                 }
4874
4875                 if (acrtc_attach->base.state->event) {
4876                         drm_crtc_vblank_get(pcrtc);
4877
4878                         spin_lock_irqsave(&pcrtc->dev->event_lock, flags);
4879
4880                         WARN_ON(acrtc_attach->pflip_status != AMDGPU_FLIP_NONE);
4881                         prepare_flip_isr(acrtc_attach);
4882
4883                         spin_unlock_irqrestore(&pcrtc->dev->event_lock, flags);
4884                 }
4885
4886                 if (acrtc_state->stream) {
4887
4888                         if (acrtc_state->freesync_timing_changed)
4889                                 bundle->stream_update.adjust =
4890                                         &acrtc_state->stream->adjust;
4891
4892                         if (acrtc_state->freesync_vrr_info_changed)
4893                                 bundle->stream_update.vrr_infopacket =
4894                                         &acrtc_state->stream->vrr_infopacket;
4895                 }
4896         }
4897
4898         if (planes_count) {
4899                 if (new_pcrtc_state->mode_changed) {
4900                         bundle->stream_update.src = acrtc_state->stream->src;
4901                         bundle->stream_update.dst = acrtc_state->stream->dst;
4902                 }
4903
4904                 if (new_pcrtc_state->color_mgmt_changed)
4905                         bundle->stream_update.out_transfer_func = acrtc_state->stream->out_transfer_func;
4906
4907                 acrtc_state->stream->abm_level = acrtc_state->abm_level;
4908                 if (acrtc_state->abm_level != dm_old_crtc_state->abm_level)
4909                         bundle->stream_update.abm_level = &acrtc_state->abm_level;
4910
4911                 mutex_lock(&dm->dc_lock);
4912                 dc_commit_updates_for_stream(dm->dc,
4913                                                      bundle->surface_updates,
4914                                                      planes_count,
4915                                                      acrtc_state->stream,
4916                                                      &bundle->stream_update,
4917                                                      dc_state);
4918                 mutex_unlock(&dm->dc_lock);
4919         }
4920
4921         for_each_oldnew_plane_in_state(state, plane, old_plane_state, new_plane_state, i)
4922                 if (plane->type == DRM_PLANE_TYPE_CURSOR)
4923                         handle_cursor_update(plane, old_plane_state);
4924
4925 cleanup:
4926         kfree(bundle);
4927 }
4928
4929 /*
4930  * amdgpu_dm_crtc_copy_transient_flags - copy mirrored flags from DRM to DC
4931  * @crtc_state: the DRM CRTC state
4932  * @stream_state: the DC stream state.
4933  *
4934  * Copy the mirrored transient state flags from DRM, to DC. It is used to bring
4935  * a dc_stream_state's flags in sync with a drm_crtc_state's flags.
4936  */
4937 static void amdgpu_dm_crtc_copy_transient_flags(struct drm_crtc_state *crtc_state,
4938                                                 struct dc_stream_state *stream_state)
4939 {
4940         stream_state->mode_changed = drm_atomic_crtc_needs_modeset(crtc_state);
4941 }
4942
4943 static int amdgpu_dm_atomic_commit(struct drm_device *dev,
4944                                    struct drm_atomic_state *state,
4945                                    bool nonblock)
4946 {
4947         struct drm_crtc *crtc;
4948         struct drm_crtc_state *old_crtc_state, *new_crtc_state;
4949         struct amdgpu_device *adev = dev->dev_private;
4950         int i;
4951
4952         /*
4953          * We evade vblanks and pflips on crtc that
4954          * should be changed. We do it here to flush & disable
4955          * interrupts before drm_swap_state is called in drm_atomic_helper_commit
4956          * it will update crtc->dm_crtc_state->stream pointer which is used in
4957          * the ISRs.
4958          */
4959         for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
4960                 struct dm_crtc_state *dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
4961                 struct dm_crtc_state *dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
4962                 struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
4963
4964                 if (drm_atomic_crtc_needs_modeset(new_crtc_state)
4965                     && dm_old_crtc_state->stream) {
4966                         /*
4967                          * If the stream is removed and CRC capture was
4968                          * enabled on the CRTC the extra vblank reference
4969                          * needs to be dropped since CRC capture will be
4970                          * disabled.
4971                          */
4972                         if (!dm_new_crtc_state->stream
4973                             && dm_new_crtc_state->crc_enabled) {
4974                                 drm_crtc_vblank_put(crtc);
4975                                 dm_new_crtc_state->crc_enabled = false;
4976                         }
4977
4978                         manage_dm_interrupts(adev, acrtc, false);
4979                 }
4980         }
4981         /*
4982          * Add check here for SoC's that support hardware cursor plane, to
4983          * unset legacy_cursor_update
4984          */
4985
4986         return drm_atomic_helper_commit(dev, state, nonblock);
4987
4988         /*TODO Handle EINTR, reenable IRQ*/
4989 }
4990
4991 /**
4992  * amdgpu_dm_atomic_commit_tail() - AMDgpu DM's commit tail implementation.
4993  * @state: The atomic state to commit
4994  *
4995  * This will tell DC to commit the constructed DC state from atomic_check,
4996  * programming the hardware. Any failures here implies a hardware failure, since
4997  * atomic check should have filtered anything non-kosher.
4998  */
4999 static void amdgpu_dm_atomic_commit_tail(struct drm_atomic_state *state)
5000 {
5001         struct drm_device *dev = state->dev;
5002         struct amdgpu_device *adev = dev->dev_private;
5003         struct amdgpu_display_manager *dm = &adev->dm;
5004         struct dm_atomic_state *dm_state;
5005         struct dc_state *dc_state = NULL, *dc_state_temp = NULL;
5006         uint32_t i, j;
5007         struct drm_crtc *crtc;
5008         struct drm_crtc_state *old_crtc_state, *new_crtc_state;
5009         unsigned long flags;
5010         bool wait_for_vblank = true;
5011         struct drm_connector *connector;
5012         struct drm_connector_state *old_con_state, *new_con_state;
5013         struct dm_crtc_state *dm_old_crtc_state, *dm_new_crtc_state;
5014         int crtc_disable_count = 0;
5015
5016         drm_atomic_helper_update_legacy_modeset_state(dev, state);
5017
5018         dm_state = dm_atomic_get_new_state(state);
5019         if (dm_state && dm_state->context) {
5020                 dc_state = dm_state->context;
5021         } else {
5022                 /* No state changes, retain current state. */
5023                 dc_state_temp = dc_create_state();
5024                 ASSERT(dc_state_temp);
5025                 dc_state = dc_state_temp;
5026                 dc_resource_state_copy_construct_current(dm->dc, dc_state);
5027         }
5028
5029         /* update changed items */
5030         for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
5031                 struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
5032
5033                 dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
5034                 dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
5035
5036                 DRM_DEBUG_DRIVER(
5037                         "amdgpu_crtc id:%d crtc_state_flags: enable:%d, active:%d, "
5038                         "planes_changed:%d, mode_changed:%d,active_changed:%d,"
5039                         "connectors_changed:%d\n",
5040                         acrtc->crtc_id,
5041                         new_crtc_state->enable,
5042                         new_crtc_state->active,
5043                         new_crtc_state->planes_changed,
5044                         new_crtc_state->mode_changed,
5045                         new_crtc_state->active_changed,
5046                         new_crtc_state->connectors_changed);
5047
5048                 /* Copy all transient state flags into dc state */
5049                 if (dm_new_crtc_state->stream) {
5050                         amdgpu_dm_crtc_copy_transient_flags(&dm_new_crtc_state->base,
5051                                                             dm_new_crtc_state->stream);
5052                 }
5053
5054                 /* handles headless hotplug case, updating new_state and
5055                  * aconnector as needed
5056                  */
5057
5058                 if (modeset_required(new_crtc_state, dm_new_crtc_state->stream, dm_old_crtc_state->stream)) {
5059
5060                         DRM_DEBUG_DRIVER("Atomic commit: SET crtc id %d: [%p]\n", acrtc->crtc_id, acrtc);
5061
5062                         if (!dm_new_crtc_state->stream) {
5063                                 /*
5064                                  * this could happen because of issues with
5065                                  * userspace notifications delivery.
5066                                  * In this case userspace tries to set mode on
5067                                  * display which is disconnected in fact.
5068                                  * dc_sink is NULL in this case on aconnector.
5069                                  * We expect reset mode will come soon.
5070                                  *
5071                                  * This can also happen when unplug is done
5072                                  * during resume sequence ended
5073                                  *
5074                                  * In this case, we want to pretend we still
5075                                  * have a sink to keep the pipe running so that
5076                                  * hw state is consistent with the sw state
5077                                  */
5078                                 DRM_DEBUG_DRIVER("%s: Failed to create new stream for crtc %d\n",
5079                                                 __func__, acrtc->base.base.id);
5080                                 continue;
5081                         }
5082
5083                         if (dm_old_crtc_state->stream)
5084                                 remove_stream(adev, acrtc, dm_old_crtc_state->stream);
5085
5086                         pm_runtime_get_noresume(dev->dev);
5087
5088                         acrtc->enabled = true;
5089                         acrtc->hw_mode = new_crtc_state->mode;
5090                         crtc->hwmode = new_crtc_state->mode;
5091                 } else if (modereset_required(new_crtc_state)) {
5092                         DRM_DEBUG_DRIVER("Atomic commit: RESET. crtc id %d:[%p]\n", acrtc->crtc_id, acrtc);
5093
5094                         /* i.e. reset mode */
5095                         if (dm_old_crtc_state->stream)
5096                                 remove_stream(adev, acrtc, dm_old_crtc_state->stream);
5097                 }
5098         } /* for_each_crtc_in_state() */
5099
5100         if (dc_state) {
5101                 dm_enable_per_frame_crtc_master_sync(dc_state);
5102                 mutex_lock(&dm->dc_lock);
5103                 WARN_ON(!dc_commit_state(dm->dc, dc_state));
5104                 mutex_unlock(&dm->dc_lock);
5105         }
5106
5107         for_each_new_crtc_in_state(state, crtc, new_crtc_state, i) {
5108                 struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
5109
5110                 dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
5111
5112                 if (dm_new_crtc_state->stream != NULL) {
5113                         const struct dc_stream_status *status =
5114                                         dc_stream_get_status(dm_new_crtc_state->stream);
5115
5116                         if (!status)
5117                                 status = dc_stream_get_status_from_state(dc_state,
5118                                                                          dm_new_crtc_state->stream);
5119
5120                         if (!status)
5121                                 DC_ERR("got no status for stream %p on acrtc%p\n", dm_new_crtc_state->stream, acrtc);
5122                         else
5123                                 acrtc->otg_inst = status->primary_otg_inst;
5124                 }
5125         }
5126
5127         /* Handle connector state changes */
5128         for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) {
5129                 struct dm_connector_state *dm_new_con_state = to_dm_connector_state(new_con_state);
5130                 struct dm_connector_state *dm_old_con_state = to_dm_connector_state(old_con_state);
5131                 struct amdgpu_crtc *acrtc = to_amdgpu_crtc(dm_new_con_state->base.crtc);
5132                 struct dc_surface_update dummy_updates[MAX_SURFACES];
5133                 struct dc_stream_update stream_update;
5134                 struct dc_stream_status *status = NULL;
5135
5136                 memset(&dummy_updates, 0, sizeof(dummy_updates));
5137                 memset(&stream_update, 0, sizeof(stream_update));
5138
5139                 if (acrtc) {
5140                         new_crtc_state = drm_atomic_get_new_crtc_state(state, &acrtc->base);
5141                         old_crtc_state = drm_atomic_get_old_crtc_state(state, &acrtc->base);
5142                 }
5143
5144                 /* Skip any modesets/resets */
5145                 if (!acrtc || drm_atomic_crtc_needs_modeset(new_crtc_state))
5146                         continue;
5147
5148                 dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
5149                 dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
5150
5151                 if (!is_scaling_state_different(dm_new_con_state, dm_old_con_state) &&
5152                                 (dm_new_crtc_state->abm_level == dm_old_crtc_state->abm_level))
5153                         continue;
5154
5155                 if (is_scaling_state_different(dm_new_con_state, dm_old_con_state)) {
5156                         update_stream_scaling_settings(&dm_new_con_state->base.crtc->mode,
5157                                         dm_new_con_state, (struct dc_stream_state *)dm_new_crtc_state->stream);
5158
5159                         stream_update.src = dm_new_crtc_state->stream->src;
5160                         stream_update.dst = dm_new_crtc_state->stream->dst;
5161                 }
5162
5163                 if (dm_new_crtc_state->abm_level != dm_old_crtc_state->abm_level) {
5164                         dm_new_crtc_state->stream->abm_level = dm_new_crtc_state->abm_level;
5165
5166                         stream_update.abm_level = &dm_new_crtc_state->abm_level;
5167                 }
5168
5169                 status = dc_stream_get_status(dm_new_crtc_state->stream);
5170                 WARN_ON(!status);
5171                 WARN_ON(!status->plane_count);
5172
5173                 /*
5174                  * TODO: DC refuses to perform stream updates without a dc_surface_update.
5175                  * Here we create an empty update on each plane.
5176                  * To fix this, DC should permit updating only stream properties.
5177                  */
5178                 for (j = 0; j < status->plane_count; j++)
5179                         dummy_updates[j].surface = status->plane_states[0];
5180
5181
5182                 mutex_lock(&dm->dc_lock);
5183                 dc_commit_updates_for_stream(dm->dc,
5184                                                      dummy_updates,
5185                                                      status->plane_count,
5186                                                      dm_new_crtc_state->stream,
5187                                                      &stream_update,
5188                                                      dc_state);
5189                 mutex_unlock(&dm->dc_lock);
5190         }
5191
5192         for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state,
5193                         new_crtc_state, i) {
5194                 /*
5195                  * loop to enable interrupts on newly arrived crtc
5196                  */
5197                 struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
5198                 bool modeset_needed;
5199
5200                 if (old_crtc_state->active && !new_crtc_state->active)
5201                         crtc_disable_count++;
5202
5203                 dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
5204                 dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
5205                 modeset_needed = modeset_required(
5206                                 new_crtc_state,
5207                                 dm_new_crtc_state->stream,
5208                                 dm_old_crtc_state->stream);
5209
5210                 if (dm_new_crtc_state->stream == NULL || !modeset_needed)
5211                         continue;
5212
5213                 manage_dm_interrupts(adev, acrtc, true);
5214
5215 #ifdef CONFIG_DEBUG_FS
5216                 /* The stream has changed so CRC capture needs to re-enabled. */
5217                 if (dm_new_crtc_state->crc_enabled)
5218                         amdgpu_dm_crtc_set_crc_source(crtc, "auto");
5219 #endif
5220         }
5221
5222         for_each_new_crtc_in_state(state, crtc, new_crtc_state, j)
5223                 if (new_crtc_state->pageflip_flags & DRM_MODE_PAGE_FLIP_ASYNC)
5224                         wait_for_vblank = false;
5225
5226         /* update planes when needed per crtc*/
5227         for_each_new_crtc_in_state(state, crtc, new_crtc_state, j) {
5228                 dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
5229
5230                 if (dm_new_crtc_state->stream)
5231                         amdgpu_dm_commit_planes(state, dc_state, dev,
5232                                                 dm, crtc, wait_for_vblank);
5233         }
5234
5235
5236         /*
5237          * send vblank event on all events not handled in flip and
5238          * mark consumed event for drm_atomic_helper_commit_hw_done
5239          */
5240         spin_lock_irqsave(&adev->ddev->event_lock, flags);
5241         for_each_new_crtc_in_state(state, crtc, new_crtc_state, i) {
5242
5243                 if (new_crtc_state->event)
5244                         drm_send_event_locked(dev, &new_crtc_state->event->base);
5245
5246                 new_crtc_state->event = NULL;
5247         }
5248         spin_unlock_irqrestore(&adev->ddev->event_lock, flags);
5249
5250         /* Signal HW programming completion */
5251         drm_atomic_helper_commit_hw_done(state);
5252
5253         if (wait_for_vblank)
5254                 drm_atomic_helper_wait_for_flip_done(dev, state);
5255
5256         drm_atomic_helper_cleanup_planes(dev, state);
5257
5258         /*
5259          * Finally, drop a runtime PM reference for each newly disabled CRTC,
5260          * so we can put the GPU into runtime suspend if we're not driving any
5261          * displays anymore
5262          */
5263         for (i = 0; i < crtc_disable_count; i++)
5264                 pm_runtime_put_autosuspend(dev->dev);
5265         pm_runtime_mark_last_busy(dev->dev);
5266
5267         if (dc_state_temp)
5268                 dc_release_state(dc_state_temp);
5269 }
5270
5271
5272 static int dm_force_atomic_commit(struct drm_connector *connector)
5273 {
5274         int ret = 0;
5275         struct drm_device *ddev = connector->dev;
5276         struct drm_atomic_state *state = drm_atomic_state_alloc(ddev);
5277         struct amdgpu_crtc *disconnected_acrtc = to_amdgpu_crtc(connector->encoder->crtc);
5278         struct drm_plane *plane = disconnected_acrtc->base.primary;
5279         struct drm_connector_state *conn_state;
5280         struct drm_crtc_state *crtc_state;
5281         struct drm_plane_state *plane_state;
5282
5283         if (!state)
5284                 return -ENOMEM;
5285
5286         state->acquire_ctx = ddev->mode_config.acquire_ctx;
5287
5288         /* Construct an atomic state to restore previous display setting */
5289
5290         /*
5291          * Attach connectors to drm_atomic_state
5292          */
5293         conn_state = drm_atomic_get_connector_state(state, connector);
5294
5295         ret = PTR_ERR_OR_ZERO(conn_state);
5296         if (ret)
5297                 goto err;
5298
5299         /* Attach crtc to drm_atomic_state*/
5300         crtc_state = drm_atomic_get_crtc_state(state, &disconnected_acrtc->base);
5301
5302         ret = PTR_ERR_OR_ZERO(crtc_state);
5303         if (ret)
5304                 goto err;
5305
5306         /* force a restore */
5307         crtc_state->mode_changed = true;
5308
5309         /* Attach plane to drm_atomic_state */
5310         plane_state = drm_atomic_get_plane_state(state, plane);
5311
5312         ret = PTR_ERR_OR_ZERO(plane_state);
5313         if (ret)
5314                 goto err;
5315
5316
5317         /* Call commit internally with the state we just constructed */
5318         ret = drm_atomic_commit(state);
5319         if (!ret)
5320                 return 0;
5321
5322 err:
5323         DRM_ERROR("Restoring old state failed with %i\n", ret);
5324         drm_atomic_state_put(state);
5325
5326         return ret;
5327 }
5328
5329 /*
5330  * This function handles all cases when set mode does not come upon hotplug.
5331  * This includes when a display is unplugged then plugged back into the
5332  * same port and when running without usermode desktop manager supprot
5333  */
5334 void dm_restore_drm_connector_state(struct drm_device *dev,
5335                                     struct drm_connector *connector)
5336 {
5337         struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
5338         struct amdgpu_crtc *disconnected_acrtc;
5339         struct dm_crtc_state *acrtc_state;
5340
5341         if (!aconnector->dc_sink || !connector->state || !connector->encoder)
5342                 return;
5343
5344         disconnected_acrtc = to_amdgpu_crtc(connector->encoder->crtc);
5345         if (!disconnected_acrtc)
5346                 return;
5347
5348         acrtc_state = to_dm_crtc_state(disconnected_acrtc->base.state);
5349         if (!acrtc_state->stream)
5350                 return;
5351
5352         /*
5353          * If the previous sink is not released and different from the current,
5354          * we deduce we are in a state where we can not rely on usermode call
5355          * to turn on the display, so we do it here
5356          */
5357         if (acrtc_state->stream->sink != aconnector->dc_sink)
5358                 dm_force_atomic_commit(&aconnector->base);
5359 }
5360
5361 /*
5362  * Grabs all modesetting locks to serialize against any blocking commits,
5363  * Waits for completion of all non blocking commits.
5364  */
5365 static int do_aquire_global_lock(struct drm_device *dev,
5366                                  struct drm_atomic_state *state)
5367 {
5368         struct drm_crtc *crtc;
5369         struct drm_crtc_commit *commit;
5370         long ret;
5371
5372         /*
5373          * Adding all modeset locks to aquire_ctx will
5374          * ensure that when the framework release it the
5375          * extra locks we are locking here will get released to
5376          */
5377         ret = drm_modeset_lock_all_ctx(dev, state->acquire_ctx);
5378         if (ret)
5379                 return ret;
5380
5381         list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
5382                 spin_lock(&crtc->commit_lock);
5383                 commit = list_first_entry_or_null(&crtc->commit_list,
5384                                 struct drm_crtc_commit, commit_entry);
5385                 if (commit)
5386                         drm_crtc_commit_get(commit);
5387                 spin_unlock(&crtc->commit_lock);
5388
5389                 if (!commit)
5390                         continue;
5391
5392                 /*
5393                  * Make sure all pending HW programming completed and
5394                  * page flips done
5395                  */
5396                 ret = wait_for_completion_interruptible_timeout(&commit->hw_done, 10*HZ);
5397
5398                 if (ret > 0)
5399                         ret = wait_for_completion_interruptible_timeout(
5400                                         &commit->flip_done, 10*HZ);
5401
5402                 if (ret == 0)
5403                         DRM_ERROR("[CRTC:%d:%s] hw_done or flip_done "
5404                                   "timed out\n", crtc->base.id, crtc->name);
5405
5406                 drm_crtc_commit_put(commit);
5407         }
5408
5409         return ret < 0 ? ret : 0;
5410 }
5411
5412 static void get_freesync_config_for_crtc(
5413         struct dm_crtc_state *new_crtc_state,
5414         struct dm_connector_state *new_con_state)
5415 {
5416         struct mod_freesync_config config = {0};
5417         struct amdgpu_dm_connector *aconnector =
5418                         to_amdgpu_dm_connector(new_con_state->base.connector);
5419         struct drm_display_mode *mode = &new_crtc_state->base.mode;
5420
5421         new_crtc_state->vrr_supported = new_con_state->freesync_capable &&
5422                 aconnector->min_vfreq <= drm_mode_vrefresh(mode);
5423
5424         if (new_crtc_state->vrr_supported) {
5425                 new_crtc_state->stream->ignore_msa_timing_param = true;
5426                 config.state = new_crtc_state->base.vrr_enabled ?
5427                                 VRR_STATE_ACTIVE_VARIABLE :
5428                                 VRR_STATE_INACTIVE;
5429                 config.min_refresh_in_uhz =
5430                                 aconnector->min_vfreq * 1000000;
5431                 config.max_refresh_in_uhz =
5432                                 aconnector->max_vfreq * 1000000;
5433                 config.vsif_supported = true;
5434                 config.btr = true;
5435         }
5436
5437         new_crtc_state->freesync_config = config;
5438 }
5439
5440 static void reset_freesync_config_for_crtc(
5441         struct dm_crtc_state *new_crtc_state)
5442 {
5443         new_crtc_state->vrr_supported = false;
5444
5445         memset(&new_crtc_state->vrr_params, 0,
5446                sizeof(new_crtc_state->vrr_params));
5447         memset(&new_crtc_state->vrr_infopacket, 0,
5448                sizeof(new_crtc_state->vrr_infopacket));
5449 }
5450
/*
 * dm_update_crtc_state() - Keep the DC stream bound to @crtc in sync with
 * its new DRM atomic state.
 *
 * Called twice from atomic check: once with @enable == false to remove
 * streams for disabled/changed CRTCs, then with @enable == true to create
 * and add streams for enabled ones.  Non-modeset stream updates (scaling,
 * color management, freesync config) are applied at the bottom of the
 * enable pass for active CRTCs.
 *
 * @dm: Display manager owning the DC instance.
 * @state: Overall atomic state being checked.
 * @crtc: The DRM CRTC to process.
 * @old_crtc_state: Its current (old) DRM state.
 * @new_crtc_state: Its proposed (new) DRM state.
 * @enable: Selects the removal (false) or addition (true) pass.
 * @lock_and_validation_needed: Set to true when the change requires full
 * DC validation (and the global lock) at commit time.
 *
 * Return: 0 on success, negative errno on failure.
 */
static int dm_update_crtc_state(struct amdgpu_display_manager *dm,
				struct drm_atomic_state *state,
				struct drm_crtc *crtc,
				struct drm_crtc_state *old_crtc_state,
				struct drm_crtc_state *new_crtc_state,
				bool enable,
				bool *lock_and_validation_needed)
{
	struct dm_atomic_state *dm_state = NULL;
	struct dm_crtc_state *dm_old_crtc_state, *dm_new_crtc_state;
	struct dc_stream_state *new_stream;
	int ret = 0;

	/*
	 * TODO Move this code into dm_crtc_atomic_check once we get rid of dc_validation_set
	 * update changed items
	 */
	struct amdgpu_crtc *acrtc = NULL;
	struct amdgpu_dm_connector *aconnector = NULL;
	struct drm_connector_state *drm_new_conn_state = NULL, *drm_old_conn_state = NULL;
	struct dm_connector_state *dm_new_conn_state = NULL, *dm_old_conn_state = NULL;
	struct drm_plane_state *new_plane_state = NULL;

	new_stream = NULL;

	dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
	dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
	acrtc = to_amdgpu_crtc(crtc);

	new_plane_state = drm_atomic_get_new_plane_state(state, new_crtc_state->crtc->primary);

	/* An enabled CRTC's primary plane must carry a framebuffer. */
	if (new_crtc_state->enable && new_plane_state && !new_plane_state->fb) {
		ret = -EINVAL;
		goto fail;
	}

	aconnector = amdgpu_dm_find_first_crtc_matching_connector(state, crtc);

	/* TODO This hack should go away */
	if (aconnector && enable) {
		/* Make sure fake sink is created in plug-in scenario */
		drm_new_conn_state = drm_atomic_get_new_connector_state(state,
							    &aconnector->base);
		drm_old_conn_state = drm_atomic_get_old_connector_state(state,
							    &aconnector->base);

		if (IS_ERR(drm_new_conn_state)) {
			ret = PTR_ERR_OR_ZERO(drm_new_conn_state);
			goto fail;
		}

		dm_new_conn_state = to_dm_connector_state(drm_new_conn_state);
		dm_old_conn_state = to_dm_connector_state(drm_old_conn_state);

		if (!drm_atomic_crtc_needs_modeset(new_crtc_state))
			goto skip_modeset;

		new_stream = create_stream_for_sink(aconnector,
						     &new_crtc_state->mode,
						    dm_new_conn_state,
						    dm_old_crtc_state->stream);

		/*
		 * we can have no stream on ACTION_SET if a display
		 * was disconnected during S3, in this case it is not an
		 * error, the OS will be updated after detection, and
		 * will do the right thing on next atomic commit
		 */

		if (!new_stream) {
			DRM_DEBUG_DRIVER("%s: Failed to create new stream for crtc %d\n",
					__func__, acrtc->base.base.id);
			ret = -ENOMEM;
			goto fail;
		}

		dm_new_crtc_state->abm_level = dm_new_conn_state->abm_level;

		/*
		 * If the new stream is effectively identical to the old one,
		 * clear mode_changed so this becomes a fast update instead
		 * of a full modeset.
		 */
		if (dc_is_stream_unchanged(new_stream, dm_old_crtc_state->stream) &&
		    dc_is_stream_scaling_unchanged(new_stream, dm_old_crtc_state->stream)) {
			new_crtc_state->mode_changed = false;
			DRM_DEBUG_DRIVER("Mode change not required, setting mode_changed to %d",
					 new_crtc_state->mode_changed);
		}
	}

	/* mode_changed flag may get updated above, need to check again */
	if (!drm_atomic_crtc_needs_modeset(new_crtc_state))
		goto skip_modeset;

	DRM_DEBUG_DRIVER(
		"amdgpu_crtc id:%d crtc_state_flags: enable:%d, active:%d, "
		"planes_changed:%d, mode_changed:%d,active_changed:%d,"
		"connectors_changed:%d\n",
		acrtc->crtc_id,
		new_crtc_state->enable,
		new_crtc_state->active,
		new_crtc_state->planes_changed,
		new_crtc_state->mode_changed,
		new_crtc_state->active_changed,
		new_crtc_state->connectors_changed);

	/* Remove stream for any changed/disabled CRTC */
	if (!enable) {

		if (!dm_old_crtc_state->stream)
			goto skip_modeset;

		ret = dm_atomic_get_state(state, &dm_state);
		if (ret)
			goto fail;

		DRM_DEBUG_DRIVER("Disabling DRM crtc: %d\n",
				crtc->base.id);

		/* i.e. reset mode */
		if (dc_remove_stream_from_ctx(
				dm->dc,
				dm_state->context,
				dm_old_crtc_state->stream) != DC_OK) {
			ret = -EINVAL;
			goto fail;
		}

		/* Drop the reference the CRTC state held on the old stream. */
		dc_stream_release(dm_old_crtc_state->stream);
		dm_new_crtc_state->stream = NULL;

		reset_freesync_config_for_crtc(dm_new_crtc_state);

		*lock_and_validation_needed = true;

	} else {/* Add stream for any updated/enabled CRTC */
		/*
		 * Quick fix to prevent NULL pointer on new_stream when
		 * added MST connectors not found in existing crtc_state in the chained mode
		 * TODO: need to dig out the root cause of that
		 */
		if (!aconnector || (!aconnector->dc_sink && aconnector->mst_port))
			goto skip_modeset;

		if (modereset_required(new_crtc_state))
			goto skip_modeset;

		if (modeset_required(new_crtc_state, new_stream,
				     dm_old_crtc_state->stream)) {

			WARN_ON(dm_new_crtc_state->stream);

			ret = dm_atomic_get_state(state, &dm_state);
			if (ret)
				goto fail;

			dm_new_crtc_state->stream = new_stream;

			/* The CRTC state takes its own reference on the stream. */
			dc_stream_retain(new_stream);

			DRM_DEBUG_DRIVER("Enabling DRM crtc: %d\n",
						crtc->base.id);

			if (dc_add_stream_to_ctx(
					dm->dc,
					dm_state->context,
					dm_new_crtc_state->stream) != DC_OK) {
				ret = -EINVAL;
				goto fail;
			}

			*lock_and_validation_needed = true;
		}
	}

skip_modeset:
	/* Release extra reference */
	if (new_stream)
		 dc_stream_release(new_stream);

	/*
	 * We want to do dc stream updates that do not require a
	 * full modeset below.
	 */
	if (!(enable && aconnector && new_crtc_state->enable &&
	      new_crtc_state->active))
		return 0;
	/*
	 * Given above conditions, the dc state cannot be NULL because:
	 * 1. We're in the process of enabling CRTCs (just been added
	 *    to the dc context, or already is on the context)
	 * 2. Has a valid connector attached, and
	 * 3. Is currently active and enabled.
	 * => The dc stream state currently exists.
	 */
	BUG_ON(dm_new_crtc_state->stream == NULL);

	/* Scaling or underscan settings */
	if (is_scaling_state_different(dm_old_conn_state, dm_new_conn_state))
		update_stream_scaling_settings(
			&new_crtc_state->mode, dm_new_conn_state, dm_new_crtc_state->stream);

	/*
	 * Color management settings. We also update color properties
	 * when a modeset is needed, to ensure it gets reprogrammed.
	 */
	if (dm_new_crtc_state->base.color_mgmt_changed ||
	    drm_atomic_crtc_needs_modeset(new_crtc_state)) {
		ret = amdgpu_dm_set_regamma_lut(dm_new_crtc_state);
		if (ret)
			goto fail;
		amdgpu_dm_set_ctm(dm_new_crtc_state);
	}

	/* Update Freesync settings. */
	get_freesync_config_for_crtc(dm_new_crtc_state,
				     dm_new_conn_state);

	return ret;

fail:
	if (new_stream)
		dc_stream_release(new_stream);
	return ret;
}
5672
5673 static int dm_update_plane_state(struct dc *dc,
5674                                  struct drm_atomic_state *state,
5675                                  struct drm_plane *plane,
5676                                  struct drm_plane_state *old_plane_state,
5677                                  struct drm_plane_state *new_plane_state,
5678                                  bool enable,
5679                                  bool *lock_and_validation_needed)
5680 {
5681
5682         struct dm_atomic_state *dm_state = NULL;
5683         struct drm_crtc *new_plane_crtc, *old_plane_crtc;
5684         struct drm_crtc_state *old_crtc_state, *new_crtc_state;
5685         struct dm_crtc_state *dm_new_crtc_state, *dm_old_crtc_state;
5686         struct dm_plane_state *dm_new_plane_state, *dm_old_plane_state;
5687         /* TODO return page_flip_needed() function */
5688         bool pflip_needed  = !state->allow_modeset;
5689         int ret = 0;
5690
5691
5692         new_plane_crtc = new_plane_state->crtc;
5693         old_plane_crtc = old_plane_state->crtc;
5694         dm_new_plane_state = to_dm_plane_state(new_plane_state);
5695         dm_old_plane_state = to_dm_plane_state(old_plane_state);
5696
5697         /*TODO Implement atomic check for cursor plane */
5698         if (plane->type == DRM_PLANE_TYPE_CURSOR)
5699                 return 0;
5700
5701         /* Remove any changed/removed planes */
5702         if (!enable) {
5703                 if (pflip_needed &&
5704                     plane->type != DRM_PLANE_TYPE_OVERLAY)
5705                         return 0;
5706
5707                 if (!old_plane_crtc)
5708                         return 0;
5709
5710                 old_crtc_state = drm_atomic_get_old_crtc_state(
5711                                 state, old_plane_crtc);
5712                 dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
5713
5714                 if (!dm_old_crtc_state->stream)
5715                         return 0;
5716
5717                 DRM_DEBUG_ATOMIC("Disabling DRM plane: %d on DRM crtc %d\n",
5718                                 plane->base.id, old_plane_crtc->base.id);
5719
5720                 ret = dm_atomic_get_state(state, &dm_state);
5721                 if (ret)
5722                         return ret;
5723
5724                 if (!dc_remove_plane_from_context(
5725                                 dc,
5726                                 dm_old_crtc_state->stream,
5727                                 dm_old_plane_state->dc_state,
5728                                 dm_state->context)) {
5729
5730                         ret = EINVAL;
5731                         return ret;
5732                 }
5733
5734
5735                 dc_plane_state_release(dm_old_plane_state->dc_state);
5736                 dm_new_plane_state->dc_state = NULL;
5737
5738                 *lock_and_validation_needed = true;
5739
5740         } else { /* Add new planes */
5741                 struct dc_plane_state *dc_new_plane_state;
5742
5743                 if (drm_atomic_plane_disabling(plane->state, new_plane_state))
5744                         return 0;
5745
5746                 if (!new_plane_crtc)
5747                         return 0;
5748
5749                 new_crtc_state = drm_atomic_get_new_crtc_state(state, new_plane_crtc);
5750                 dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
5751
5752                 if (!dm_new_crtc_state->stream)
5753                         return 0;
5754
5755                 if (pflip_needed && plane->type != DRM_PLANE_TYPE_OVERLAY)
5756                         return 0;
5757
5758                 WARN_ON(dm_new_plane_state->dc_state);
5759
5760                 dc_new_plane_state = dc_create_plane_state(dc);
5761                 if (!dc_new_plane_state)
5762                         return -ENOMEM;
5763
5764                 DRM_DEBUG_DRIVER("Enabling DRM plane: %d on DRM crtc %d\n",
5765                                 plane->base.id, new_plane_crtc->base.id);
5766
5767                 ret = fill_plane_attributes(
5768                         new_plane_crtc->dev->dev_private,
5769                         dc_new_plane_state,
5770                         new_plane_state,
5771                         new_crtc_state);
5772                 if (ret) {
5773                         dc_plane_state_release(dc_new_plane_state);
5774                         return ret;
5775                 }
5776
5777                 ret = dm_atomic_get_state(state, &dm_state);
5778                 if (ret) {
5779                         dc_plane_state_release(dc_new_plane_state);
5780                         return ret;
5781                 }
5782
5783                 /*
5784                  * Any atomic check errors that occur after this will
5785                  * not need a release. The plane state will be attached
5786                  * to the stream, and therefore part of the atomic
5787                  * state. It'll be released when the atomic state is
5788                  * cleaned.
5789                  */
5790                 if (!dc_add_plane_to_context(
5791                                 dc,
5792                                 dm_new_crtc_state->stream,
5793                                 dc_new_plane_state,
5794                                 dm_state->context)) {
5795
5796                         dc_plane_state_release(dc_new_plane_state);
5797                         return -EINVAL;
5798                 }
5799
5800                 dm_new_plane_state->dc_state = dc_new_plane_state;
5801
5802                 /* Tell DC to do a full surface update every time there
5803                  * is a plane change. Inefficient, but works for now.
5804                  */
5805                 dm_new_plane_state->dc_state->update_flags.bits.full_update = 1;
5806
5807                 *lock_and_validation_needed = true;
5808         }
5809
5810
5811         return ret;
5812 }
5813
/*
 * dm_determine_update_type_for_commit() - Classify how heavy a commit is.
 *
 * Walks every CRTC and its planes in @state, builds the corresponding DC
 * surface/stream updates and asks DC whether they can be applied as a
 * fast update or need a heavier one.  A changed stream pointer or a
 * changed DC plane state is conservatively reported as UPDATE_TYPE_FULL.
 *
 * @dc: Core display driver instance.
 * @state: The atomic state being checked.
 * @out_type: On return, the highest surface_update_type required.
 *
 * Return: 0 on success, negative errno on failure (@out_type is written
 * in either case).
 */
static int
dm_determine_update_type_for_commit(struct dc *dc,
				    struct drm_atomic_state *state,
				    enum surface_update_type *out_type)
{
	struct dm_atomic_state *dm_state = NULL, *old_dm_state = NULL;
	int i, j, num_plane, ret = 0;
	struct drm_plane_state *old_plane_state, *new_plane_state;
	struct dm_plane_state *new_dm_plane_state, *old_dm_plane_state;
	struct drm_crtc *new_plane_crtc, *old_plane_crtc;
	struct drm_plane *plane;

	struct drm_crtc *crtc;
	struct drm_crtc_state *new_crtc_state, *old_crtc_state;
	struct dm_crtc_state *new_dm_crtc_state, *old_dm_crtc_state;
	struct dc_stream_status *status = NULL;

	struct dc_surface_update *updates;
	struct dc_plane_state *surface;
	enum surface_update_type update_type = UPDATE_TYPE_FAST;

	/* Scratch arrays handed to DC, sized MAX_SURFACES per stream. */
	updates = kcalloc(MAX_SURFACES, sizeof(*updates), GFP_KERNEL);
	surface = kcalloc(MAX_SURFACES, sizeof(*surface), GFP_KERNEL);

	if (!updates || !surface) {
		DRM_ERROR("Plane or surface update failed to allocate");
		/* Set type to FULL to avoid crashing in DC*/
		update_type = UPDATE_TYPE_FULL;
		goto cleanup;
	}

	for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
		struct dc_stream_update stream_update = { 0 };

		new_dm_crtc_state = to_dm_crtc_state(new_crtc_state);
		old_dm_crtc_state = to_dm_crtc_state(old_crtc_state);
		num_plane = 0;

		/* Stream pointer changed: treat as a full update. */
		if (new_dm_crtc_state->stream != old_dm_crtc_state->stream) {
			update_type = UPDATE_TYPE_FULL;
			goto cleanup;
		}

		if (!new_dm_crtc_state->stream)
			continue;

		for_each_oldnew_plane_in_state(state, plane, old_plane_state, new_plane_state, j) {
			new_plane_crtc = new_plane_state->crtc;
			old_plane_crtc = old_plane_state->crtc;
			new_dm_plane_state = to_dm_plane_state(new_plane_state);
			old_dm_plane_state = to_dm_plane_state(old_plane_state);

			if (plane->type == DRM_PLANE_TYPE_CURSOR)
				continue;

			/* DC plane state swapped: also a full update. */
			if (new_dm_plane_state->dc_state != old_dm_plane_state->dc_state) {
				update_type = UPDATE_TYPE_FULL;
				goto cleanup;
			}

			if (!state->allow_modeset)
				continue;

			/* Only gather planes bound to the current CRTC. */
			if (crtc != new_plane_crtc)
				continue;

			updates[num_plane].surface = &surface[num_plane];

			if (new_crtc_state->mode_changed) {
				updates[num_plane].surface->src_rect =
						new_dm_plane_state->dc_state->src_rect;
				updates[num_plane].surface->dst_rect =
						new_dm_plane_state->dc_state->dst_rect;
				updates[num_plane].surface->rotation =
						new_dm_plane_state->dc_state->rotation;
				updates[num_plane].surface->in_transfer_func =
						new_dm_plane_state->dc_state->in_transfer_func;
				stream_update.dst = new_dm_crtc_state->stream->dst;
				stream_update.src = new_dm_crtc_state->stream->src;
			}

			if (new_crtc_state->color_mgmt_changed) {
				updates[num_plane].gamma =
						new_dm_plane_state->dc_state->gamma_correction;
				updates[num_plane].in_transfer_func =
						new_dm_plane_state->dc_state->in_transfer_func;
				stream_update.gamut_remap =
						&new_dm_crtc_state->stream->gamut_remap_matrix;
				stream_update.out_transfer_func =
						new_dm_crtc_state->stream->out_transfer_func;
			}

			num_plane++;
		}

		if (num_plane == 0)
			continue;

		ret = dm_atomic_get_state(state, &dm_state);
		if (ret)
			goto cleanup;

		old_dm_state = dm_atomic_get_old_state(state);
		if (!old_dm_state) {
			ret = -EINVAL;
			goto cleanup;
		}

		/*
		 * NOTE(review): status may be NULL if the stream is not in
		 * the old context; this assumes
		 * dc_check_update_surfaces_for_stream() tolerates a NULL
		 * status — confirm.
		 */
		status = dc_stream_get_status_from_state(old_dm_state->context,
							 new_dm_crtc_state->stream);

		update_type = dc_check_update_surfaces_for_stream(dc, updates, num_plane,
								  &stream_update, status);

		if (update_type > UPDATE_TYPE_MED) {
			update_type = UPDATE_TYPE_FULL;
			goto cleanup;
		}
	}

cleanup:
	kfree(updates);
	kfree(surface);

	*out_type = update_type;
	return ret;
}
5941
5942 /**
5943  * amdgpu_dm_atomic_check() - Atomic check implementation for AMDgpu DM.
5944  * @dev: The DRM device
5945  * @state: The atomic state to commit
5946  *
5947  * Validate that the given atomic state is programmable by DC into hardware.
5948  * This involves constructing a &struct dc_state reflecting the new hardware
5949  * state we wish to commit, then querying DC to see if it is programmable. It's
5950  * important not to modify the existing DC state. Otherwise, atomic_check
5951  * may unexpectedly commit hardware changes.
5952  *
5953  * When validating the DC state, it's important that the right locks are
5954  * acquired. For full updates case which removes/adds/updates streams on one
5955  * CRTC while flipping on another CRTC, acquiring global lock will guarantee
5956  * that any such full update commit will wait for completion of any outstanding
5957  * flip using DRMs synchronization events. See
5958  * dm_determine_update_type_for_commit()
5959  *
5960  * Note that DM adds the affected connectors for all CRTCs in state, when that
5961  * might not seem necessary. This is because DC stream creation requires the
5962  * DC sink, which is tied to the DRM connector state. Cleaning this up should
5963  * be possible but non-trivial - a possible TODO item.
5964  *
5965  * Return: -Error code if validation failed.
5966  */
5967 static int amdgpu_dm_atomic_check(struct drm_device *dev,
5968                                   struct drm_atomic_state *state)
5969 {
5970         struct amdgpu_device *adev = dev->dev_private;
5971         struct dm_atomic_state *dm_state = NULL;
5972         struct dc *dc = adev->dm.dc;
5973         struct drm_connector *connector;
5974         struct drm_connector_state *old_con_state, *new_con_state;
5975         struct drm_crtc *crtc;
5976         struct drm_crtc_state *old_crtc_state, *new_crtc_state;
5977         struct drm_plane *plane;
5978         struct drm_plane_state *old_plane_state, *new_plane_state;
5979         enum surface_update_type update_type = UPDATE_TYPE_FAST;
5980         enum surface_update_type overall_update_type = UPDATE_TYPE_FAST;
5981
5982         int ret, i;
5983
5984         /*
5985          * This bool will be set for true for any modeset/reset
5986          * or plane update which implies non fast surface update.
5987          */
5988         bool lock_and_validation_needed = false;
5989
5990         ret = drm_atomic_helper_check_modeset(dev, state);
5991         if (ret)
5992                 goto fail;
5993
5994         for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
5995                 if (!drm_atomic_crtc_needs_modeset(new_crtc_state) &&
5996                     !new_crtc_state->color_mgmt_changed &&
5997                     old_crtc_state->vrr_enabled == new_crtc_state->vrr_enabled)
5998                         continue;
5999
6000                 if (!new_crtc_state->enable)
6001                         continue;
6002
6003                 ret = drm_atomic_add_affected_connectors(state, crtc);
6004                 if (ret)
6005                         return ret;
6006
6007                 ret = drm_atomic_add_affected_planes(state, crtc);
6008                 if (ret)
6009                         goto fail;
6010         }
6011
6012         /*
6013          * Add all primary and overlay planes on the CRTC to the state
6014          * whenever a plane is enabled to maintain correct z-ordering
6015          * and to enable fast surface updates.
6016          */
6017         drm_for_each_crtc(crtc, dev) {
6018                 bool modified = false;
6019
6020                 for_each_oldnew_plane_in_state(state, plane, old_plane_state, new_plane_state, i) {
6021                         if (plane->type == DRM_PLANE_TYPE_CURSOR)
6022                                 continue;
6023
6024                         if (new_plane_state->crtc == crtc ||
6025                             old_plane_state->crtc == crtc) {
6026                                 modified = true;
6027                                 break;
6028                         }
6029                 }
6030
6031                 if (!modified)
6032                         continue;
6033
6034                 drm_for_each_plane_mask(plane, state->dev, crtc->state->plane_mask) {
6035                         if (plane->type == DRM_PLANE_TYPE_CURSOR)
6036                                 continue;
6037
6038                         new_plane_state =
6039                                 drm_atomic_get_plane_state(state, plane);
6040
6041                         if (IS_ERR(new_plane_state)) {
6042                                 ret = PTR_ERR(new_plane_state);
6043                                 goto fail;
6044                         }
6045                 }
6046         }
6047
6048         /* Remove exiting planes if they are modified */
6049         for_each_oldnew_plane_in_state_reverse(state, plane, old_plane_state, new_plane_state, i) {
6050                 ret = dm_update_plane_state(dc, state, plane,
6051                                             old_plane_state,
6052                                             new_plane_state,
6053                                             false,
6054                                             &lock_and_validation_needed);
6055                 if (ret)
6056                         goto fail;
6057         }
6058
6059         /* Disable all crtcs which require disable */
6060         for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
6061                 ret = dm_update_crtc_state(&adev->dm, state, crtc,
6062                                            old_crtc_state,
6063                                            new_crtc_state,
6064                                            false,
6065                                            &lock_and_validation_needed);
6066                 if (ret)
6067                         goto fail;
6068         }
6069
6070         /* Enable all crtcs which require enable */
6071         for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
6072                 ret = dm_update_crtc_state(&adev->dm, state, crtc,
6073                                            old_crtc_state,
6074                                            new_crtc_state,
6075                                            true,
6076                                            &lock_and_validation_needed);
6077                 if (ret)
6078                         goto fail;
6079         }
6080
6081         /* Add new/modified planes */
6082         for_each_oldnew_plane_in_state_reverse(state, plane, old_plane_state, new_plane_state, i) {
6083                 ret = dm_update_plane_state(dc, state, plane,
6084                                             old_plane_state,
6085                                             new_plane_state,
6086                                             true,
6087                                             &lock_and_validation_needed);
6088                 if (ret)
6089                         goto fail;
6090         }
6091
6092         /* Run this here since we want to validate the streams we created */
6093         ret = drm_atomic_helper_check_planes(dev, state);
6094         if (ret)
6095                 goto fail;
6096
6097         /* Check scaling and underscan changes*/
6098         /* TODO Removed scaling changes validation due to inability to commit
6099          * new stream into context w\o causing full reset. Need to
6100          * decide how to handle.
6101          */
6102         for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) {
6103                 struct dm_connector_state *dm_old_con_state = to_dm_connector_state(old_con_state);
6104                 struct dm_connector_state *dm_new_con_state = to_dm_connector_state(new_con_state);
6105                 struct amdgpu_crtc *acrtc = to_amdgpu_crtc(dm_new_con_state->base.crtc);
6106
6107                 /* Skip any modesets/resets */
6108                 if (!acrtc || drm_atomic_crtc_needs_modeset(
6109                                 drm_atomic_get_new_crtc_state(state, &acrtc->base)))
6110                         continue;
6111
6112                 /* Skip any thing not scale or underscan changes */
6113                 if (!is_scaling_state_different(dm_new_con_state, dm_old_con_state))
6114                         continue;
6115
6116                 overall_update_type = UPDATE_TYPE_FULL;
6117                 lock_and_validation_needed = true;
6118         }
6119
6120         ret = dm_determine_update_type_for_commit(dc, state, &update_type);
6121         if (ret)
6122                 goto fail;
6123
6124         if (overall_update_type < update_type)
6125                 overall_update_type = update_type;
6126
6127         /*
6128          * lock_and_validation_needed was an old way to determine if we need to set
6129          * the global lock. Leaving it in to check if we broke any corner cases
6130          * lock_and_validation_needed true = UPDATE_TYPE_FULL or UPDATE_TYPE_MED
6131          * lock_and_validation_needed false = UPDATE_TYPE_FAST
6132          */
6133         if (lock_and_validation_needed && overall_update_type <= UPDATE_TYPE_FAST)
6134                 WARN(1, "Global lock should be Set, overall_update_type should be UPDATE_TYPE_MED or UPDATE_TYPE_FULL");
6135         else if (!lock_and_validation_needed && overall_update_type > UPDATE_TYPE_FAST)
6136                 WARN(1, "Global lock should NOT be set, overall_update_type should be UPDATE_TYPE_FAST");
6137
6138
6139         if (overall_update_type > UPDATE_TYPE_FAST) {
6140                 ret = dm_atomic_get_state(state, &dm_state);
6141                 if (ret)
6142                         goto fail;
6143
6144                 ret = do_aquire_global_lock(dev, state);
6145                 if (ret)
6146                         goto fail;
6147
6148                 if (dc_validate_global_state(dc, dm_state->context) != DC_OK) {
6149                         ret = -EINVAL;
6150                         goto fail;
6151                 }
6152         } else if (state->legacy_cursor_update) {
6153                 /*
6154                  * This is a fast cursor update coming from the plane update
6155                  * helper, check if it can be done asynchronously for better
6156                  * performance.
6157                  */
6158                 state->async_update = !drm_atomic_helper_async_check(dev, state);
6159         }
6160
6161         /* Must be success */
6162         WARN_ON(ret);
6163         return ret;
6164
6165 fail:
6166         if (ret == -EDEADLK)
6167                 DRM_DEBUG_DRIVER("Atomic check stopped to avoid deadlock.\n");
6168         else if (ret == -EINTR || ret == -EAGAIN || ret == -ERESTARTSYS)
6169                 DRM_DEBUG_DRIVER("Atomic check stopped due to signal.\n");
6170         else
6171                 DRM_DEBUG_DRIVER("Atomic check failed with err: %d \n", ret);
6172
6173         return ret;
6174 }
6175
6176 static bool is_dp_capable_without_timing_msa(struct dc *dc,
6177                                              struct amdgpu_dm_connector *amdgpu_dm_connector)
6178 {
6179         uint8_t dpcd_data;
6180         bool capable = false;
6181
6182         if (amdgpu_dm_connector->dc_link &&
6183                 dm_helpers_dp_read_dpcd(
6184                                 NULL,
6185                                 amdgpu_dm_connector->dc_link,
6186                                 DP_DOWN_STREAM_PORT_COUNT,
6187                                 &dpcd_data,
6188                                 sizeof(dpcd_data))) {
6189                 capable = (dpcd_data & DP_MSA_TIMING_PAR_IGNORED) ? true:false;
6190         }
6191
6192         return capable;
6193 }
6194 void amdgpu_dm_update_freesync_caps(struct drm_connector *connector,
6195                                         struct edid *edid)
6196 {
6197         int i;
6198         bool edid_check_required;
6199         struct detailed_timing *timing;
6200         struct detailed_non_pixel *data;
6201         struct detailed_data_monitor_range *range;
6202         struct amdgpu_dm_connector *amdgpu_dm_connector =
6203                         to_amdgpu_dm_connector(connector);
6204         struct dm_connector_state *dm_con_state = NULL;
6205
6206         struct drm_device *dev = connector->dev;
6207         struct amdgpu_device *adev = dev->dev_private;
6208         bool freesync_capable = false;
6209
6210         if (!connector->state) {
6211                 DRM_ERROR("%s - Connector has no state", __func__);
6212                 goto update;
6213         }
6214
6215         if (!edid) {
6216                 dm_con_state = to_dm_connector_state(connector->state);
6217
6218                 amdgpu_dm_connector->min_vfreq = 0;
6219                 amdgpu_dm_connector->max_vfreq = 0;
6220                 amdgpu_dm_connector->pixel_clock_mhz = 0;
6221
6222                 goto update;
6223         }
6224
6225         dm_con_state = to_dm_connector_state(connector->state);
6226
6227         edid_check_required = false;
6228         if (!amdgpu_dm_connector->dc_sink) {
6229                 DRM_ERROR("dc_sink NULL, could not add free_sync module.\n");
6230                 goto update;
6231         }
6232         if (!adev->dm.freesync_module)
6233                 goto update;
6234         /*
6235          * if edid non zero restrict freesync only for dp and edp
6236          */
6237         if (edid) {
6238                 if (amdgpu_dm_connector->dc_sink->sink_signal == SIGNAL_TYPE_DISPLAY_PORT
6239                         || amdgpu_dm_connector->dc_sink->sink_signal == SIGNAL_TYPE_EDP) {
6240                         edid_check_required = is_dp_capable_without_timing_msa(
6241                                                 adev->dm.dc,
6242                                                 amdgpu_dm_connector);
6243                 }
6244         }
6245         if (edid_check_required == true && (edid->version > 1 ||
6246            (edid->version == 1 && edid->revision > 1))) {
6247                 for (i = 0; i < 4; i++) {
6248
6249                         timing  = &edid->detailed_timings[i];
6250                         data    = &timing->data.other_data;
6251                         range   = &data->data.range;
6252                         /*
6253                          * Check if monitor has continuous frequency mode
6254                          */
6255                         if (data->type != EDID_DETAIL_MONITOR_RANGE)
6256                                 continue;
6257                         /*
6258                          * Check for flag range limits only. If flag == 1 then
6259                          * no additional timing information provided.
6260                          * Default GTF, GTF Secondary curve and CVT are not
6261                          * supported
6262                          */
6263                         if (range->flags != 1)
6264                                 continue;
6265
6266                         amdgpu_dm_connector->min_vfreq = range->min_vfreq;
6267                         amdgpu_dm_connector->max_vfreq = range->max_vfreq;
6268                         amdgpu_dm_connector->pixel_clock_mhz =
6269                                 range->pixel_clock_mhz * 10;
6270                         break;
6271                 }
6272
6273                 if (amdgpu_dm_connector->max_vfreq -
6274                     amdgpu_dm_connector->min_vfreq > 10) {
6275
6276                         freesync_capable = true;
6277                 }
6278         }
6279
6280 update:
6281         if (dm_con_state)
6282                 dm_con_state->freesync_capable = freesync_capable;
6283
6284         if (connector->vrr_capable_property)
6285                 drm_connector_set_vrr_capable_property(connector,
6286                                                        freesync_capable);
6287 }
6288