/*
 * Copyright 2015 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: AMD
 */

#include "dm_services.h"

#include "dc.h"

#include "core_status.h"
#include "core_types.h"
#include "hw_sequencer.h"
#include "dce/dce_hwseq.h"

#include "resource.h"

#include "clk_mgr.h"
#include "clock_source.h"
#include "dc_bios_types.h"

#include "bios_parser_interface.h"
#include "bios/bios_parser_helper.h"
#include "include/irq_service_interface.h"
#include "transform.h"
#include "dmcu.h"
#include "dpp.h"
#include "timing_generator.h"
#include "abm.h"
#include "virtual/virtual_link_encoder.h"
#include "hubp.h"

#include "link_hwss.h"
#include "link_encoder.h"
#include "link_enc_cfg.h"

#include "dc_link.h"
#include "dc_link_ddc.h"
#include "dm_helpers.h"
#include "mem_input.h"

#include "dc_link_dp.h"
#include "dc_dmub_srv.h"

#include "dsc.h"

#include "vm_helper.h"

#include "dce/dce_i2c.h"

#include "dmub/dmub_srv.h"

#include "i2caux_interface.h"

#include "dce/dmub_psr.h"

#include "dce/dmub_hw_lock_mgr.h"

#include "dc_trace.h"

#include "dce/dmub_outbox.h"

#define CTX \
	dc->ctx

#define DC_LOGGER \
	dc->ctx->logger

static const char DC_BUILD_ID[] = "production-build";

/**
 * DOC: Overview
 *
 * DC is the OS-agnostic component of the amdgpu DC driver.
 *
 * DC maintains and validates a set of structs representing the state of the
 * driver and writes that state to AMD hardware.
 *
 * Main DC HW structs:
 *
 * struct dc - The central struct.  One per driver.  Created on driver load,
 * destroyed on driver unload.
 *
 * struct dc_context - One per driver.
 * Used as a backpointer by most other structs in dc.
 *
 * struct dc_link - One per connector (the physical DP, HDMI, miniDP, or eDP
 * plugpoints).  Created on driver load, destroyed on driver unload.
 *
 * struct dc_sink - One per display.  Created on boot or hotplug.
 * Destroyed on shutdown or hotunplug.  A dc_link can have a local sink
 * (the display directly attached).  It may also have one or more remote
 * sinks (in the Multi-Stream Transport case).
 *
 * struct resource_pool - One per driver.  Represents the hw blocks not in the
 * main pipeline.  Not directly accessible by dm.
 *
 * Main dc state structs:
 *
 * These structs can be created and destroyed as needed.  There is a full set of
 * these structs in dc->current_state representing the currently programmed state.
 *
 * struct dc_state - The global DC state to track global state information,
 * such as bandwidth values.
 *
 * struct dc_stream_state - Represents the hw configuration for the pipeline from
 * a framebuffer to a display.  Maps one-to-one with dc_sink.
 *
 * struct dc_plane_state - Represents a framebuffer.  Each stream has at least one,
 * and may have more in the Multi-Plane Overlay case.
 *
 * struct resource_context - Represents the programmable state of everything in
 * the resource_pool.  Not directly accessible by dm.
 *
 * struct pipe_ctx - A member of struct resource_context.  Represents the
 * internal hardware pipeline components.  Each dc_plane_state has either
 * one or two (in the pipe-split case).
 */
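
/*
 * Rough ownership sketch (illustrative only; the struct descriptions above
 * are authoritative):
 *
 *	dc
 *	 +-- dc_context (backpointer used by most structs)
 *	 +-- dc_link[]              one per connector
 *	 |        +-- dc_sink[]     one per detected display (local or MST remote)
 *	 +-- resource_pool          hw blocks outside the main pipeline
 *	 +-- current_state (dc_state)
 *	          +-- dc_stream_state[]   1:1 with dc_sink
 *	          |        +-- dc_plane_state[]  one or more per stream
 *	          +-- resource_context
 *	                   +-- pipe_ctx[]  one or two per dc_plane_state
 */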

/*******************************************************************************
 * Private functions
 ******************************************************************************/

static inline void elevate_update_type(enum surface_update_type *original, enum surface_update_type new)
{
	if (new > *original)
		*original = new;
}

static void destroy_links(struct dc *dc)
{
	uint32_t i;

	for (i = 0; i < dc->link_count; i++) {
		if (NULL != dc->links[i])
			link_destroy(&dc->links[i]);
	}
}

static uint32_t get_num_of_internal_disp(struct dc_link **links, uint32_t num_links)
{
	int i;
	uint32_t count = 0;

	for (i = 0; i < num_links; i++) {
		if (links[i]->connector_signal == SIGNAL_TYPE_EDP ||
				links[i]->is_internal_display)
			count++;
	}

	return count;
}

static int get_seamless_boot_stream_count(struct dc_state *ctx)
{
	uint8_t i;
	uint8_t seamless_boot_stream_count = 0;

	for (i = 0; i < ctx->stream_count; i++)
		if (ctx->streams[i]->apply_seamless_boot_optimization)
			seamless_boot_stream_count++;

	return seamless_boot_stream_count;
}

static bool create_links(
		struct dc *dc,
		uint32_t num_virtual_links)
{
	int i;
	int connectors_num;
	struct dc_bios *bios = dc->ctx->dc_bios;

	dc->link_count = 0;

	connectors_num = bios->funcs->get_connectors_number(bios);

	DC_LOG_DC("BIOS object table - number of connectors: %d", connectors_num);

	if (connectors_num > ENUM_ID_COUNT) {
		dm_error(
			"DC: Number of connectors %d exceeds maximum of %d!\n",
			connectors_num,
			ENUM_ID_COUNT);
		return false;
	}

	dm_output_to_console(
		"DC: %s: connectors_num: physical:%d, virtual:%d\n",
		__func__,
		connectors_num,
		num_virtual_links);

	for (i = 0; i < connectors_num; i++) {
		struct link_init_data link_init_params = {0};
		struct dc_link *link;

		DC_LOG_DC("BIOS object table - printing link object info for connector number: %d, link_index: %d", i, dc->link_count);

		link_init_params.ctx = dc->ctx;
		/* next BIOS object table connector */
		link_init_params.connector_index = i;
		link_init_params.link_index = dc->link_count;
		link_init_params.dc = dc;
		link = link_create(&link_init_params);

		if (link) {
			dc->links[dc->link_count] = link;
			link->dc = dc;
			++dc->link_count;
		}
	}

	DC_LOG_DC("BIOS object table - end");

	/* Create a link for each usb4 dpia port */
	for (i = 0; i < dc->res_pool->usb4_dpia_count; i++) {
		struct link_init_data link_init_params = {0};
		struct dc_link *link;

		link_init_params.ctx = dc->ctx;
		link_init_params.connector_index = i;
		link_init_params.link_index = dc->link_count;
		link_init_params.dc = dc;
		link_init_params.is_dpia_link = true;

		link = link_create(&link_init_params);
		if (link) {
			dc->links[dc->link_count] = link;
			link->dc = dc;
			++dc->link_count;
		}
	}

	for (i = 0; i < num_virtual_links; i++) {
		struct dc_link *link = kzalloc(sizeof(*link), GFP_KERNEL);
		struct encoder_init_data enc_init = {0};

		if (link == NULL) {
			BREAK_TO_DEBUGGER();
			goto failed_alloc;
		}

		link->link_index = dc->link_count;
		dc->links[dc->link_count] = link;
		dc->link_count++;

		link->ctx = dc->ctx;
		link->dc = dc;
		link->connector_signal = SIGNAL_TYPE_VIRTUAL;
		link->link_id.type = OBJECT_TYPE_CONNECTOR;
		link->link_id.id = CONNECTOR_ID_VIRTUAL;
		link->link_id.enum_id = ENUM_ID_1;
		link->link_enc = kzalloc(sizeof(*link->link_enc), GFP_KERNEL);

		if (!link->link_enc) {
			BREAK_TO_DEBUGGER();
			goto failed_alloc;
		}

		link->link_status.dpcd_caps = &link->dpcd_caps;

		enc_init.ctx = dc->ctx;
		enc_init.channel = CHANNEL_ID_UNKNOWN;
		enc_init.hpd_source = HPD_SOURCEID_UNKNOWN;
		enc_init.transmitter = TRANSMITTER_UNKNOWN;
		enc_init.connector = link->link_id;
		enc_init.encoder.type = OBJECT_TYPE_ENCODER;
		enc_init.encoder.id = ENCODER_ID_INTERNAL_VIRTUAL;
		enc_init.encoder.enum_id = ENUM_ID_1;
		virtual_link_encoder_construct(link->link_enc, &enc_init);
	}

	dc->caps.num_of_internal_disp = get_num_of_internal_disp(dc->links, dc->link_count);

	return true;

failed_alloc:
	return false;
}

/* Create additional DIG link encoder objects if fewer than the platform
 * supports were created during link construction. This can happen if the
 * number of physical connectors is less than the number of DIGs.
 */
static bool create_link_encoders(struct dc *dc)
{
	bool res = true;
	unsigned int num_usb4_dpia = dc->res_pool->res_cap->num_usb4_dpia;
	unsigned int num_dig_link_enc = dc->res_pool->res_cap->num_dig_link_enc;
	int i;

	/* A platform without USB4 DPIA endpoints has a fixed mapping between DIG
	 * link encoders and physical display endpoints and does not require
	 * additional link encoder objects.
	 */
	if (num_usb4_dpia == 0)
		return res;

	/* Create as many link encoder objects as the platform supports. DPIA
	 * endpoints can be programmably mapped to any DIG.
	 */
	if (num_dig_link_enc > dc->res_pool->dig_link_enc_count) {
		for (i = 0; i < num_dig_link_enc; i++) {
			struct link_encoder *link_enc = dc->res_pool->link_encoders[i];

			if (!link_enc && dc->res_pool->funcs->link_enc_create_minimal) {
				link_enc = dc->res_pool->funcs->link_enc_create_minimal(dc->ctx,
						(enum engine_id)(ENGINE_ID_DIGA + i));
				if (link_enc) {
					dc->res_pool->link_encoders[i] = link_enc;
					dc->res_pool->dig_link_enc_count++;
				} else {
					res = false;
				}
			}
		}
	}

	return res;
}

/* Destroy any additional DIG link encoder objects created by
 * create_link_encoders().
 * NB: Must only be called after destroy_links().
 */
static void destroy_link_encoders(struct dc *dc)
{
	unsigned int num_usb4_dpia;
	unsigned int num_dig_link_enc;
	int i;

	if (!dc->res_pool)
		return;

	num_usb4_dpia = dc->res_pool->res_cap->num_usb4_dpia;
	num_dig_link_enc = dc->res_pool->res_cap->num_dig_link_enc;

	/* A platform without USB4 DPIA endpoints has a fixed mapping between DIG
	 * link encoders and physical display endpoints and does not require
	 * additional link encoder objects.
	 */
	if (num_usb4_dpia == 0)
		return;

	for (i = 0; i < num_dig_link_enc; i++) {
		struct link_encoder *link_enc = dc->res_pool->link_encoders[i];

		if (link_enc) {
			link_enc->funcs->destroy(&link_enc);
			dc->res_pool->link_encoders[i] = NULL;
			dc->res_pool->dig_link_enc_count--;
		}
	}
}

static struct dc_perf_trace *dc_perf_trace_create(void)
{
	return kzalloc(sizeof(struct dc_perf_trace), GFP_KERNEL);
}

static void dc_perf_trace_destroy(struct dc_perf_trace **perf_trace)
{
	kfree(*perf_trace);
	*perf_trace = NULL;
}

/**
 * dc_stream_adjust_vmin_vmax - Adjust the DRR vertical-total range for a stream
 * @dc:     dc reference
 * @stream: Initial dc stream state
 * @adjust: Updated parameters for vertical_total_min and vertical_total_max
 *
 * Looks up the pipe context of dc_stream_state and updates the
 * vertical_total_min and vertical_total_max of DRR (Dynamic Refresh Rate),
 * a power-saving feature that targets reducing panel refresh rate while
 * the screen is static.
 */
bool dc_stream_adjust_vmin_vmax(struct dc *dc,
		struct dc_stream_state *stream,
		struct dc_crtc_timing_adjust *adjust)
{
	int i;

	stream->adjust.v_total_max = adjust->v_total_max;
	stream->adjust.v_total_mid = adjust->v_total_mid;
	stream->adjust.v_total_mid_frame_num = adjust->v_total_mid_frame_num;
	stream->adjust.v_total_min = adjust->v_total_min;

	for (i = 0; i < MAX_PIPES; i++) {
		struct pipe_ctx *pipe = &dc->current_state->res_ctx.pipe_ctx[i];

		if (pipe->stream == stream && pipe->stream_res.tg) {
			dc->hwss.set_drr(&pipe,
					1,
					*adjust);

			return true;
		}
	}
	return false;
}
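
/* Example (illustrative sketch, not part of the driver): a DM-layer caller
 * would typically copy the stream's current adjust parameters, change the
 * vertical-total bounds, and hand them back; min_vtotal/max_vtotal here are
 * hypothetical values computed by the caller:
 *
 *	struct dc_crtc_timing_adjust adjust = stream->adjust;
 *
 *	adjust.v_total_min = min_vtotal;
 *	adjust.v_total_max = max_vtotal;
 *	dc_stream_adjust_vmin_vmax(dc, stream, &adjust);
 */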

/**
 * dc_stream_get_last_used_drr_vtotal - Looks up the pipe context of
 * dc_stream_state and gets the last VTOTAL used by DRR (Dynamic Refresh Rate)
 *
 * @dc:           [in] dc reference
 * @stream:       [in] Initial dc stream state
 * @refresh_rate: [out] last VTOTAL used by DRR for this stream
 */
bool dc_stream_get_last_used_drr_vtotal(struct dc *dc,
		struct dc_stream_state *stream,
		uint32_t *refresh_rate)
{
	bool status = false;

	int i = 0;

	for (i = 0; i < MAX_PIPES; i++) {
		struct pipe_ctx *pipe = &dc->current_state->res_ctx.pipe_ctx[i];

		if (pipe->stream == stream && pipe->stream_res.tg) {
			/* Only execute if a function pointer has been defined for
			 * the DC version in question
			 */
			if (pipe->stream_res.tg->funcs->get_last_used_drr_vtotal) {
				pipe->stream_res.tg->funcs->get_last_used_drr_vtotal(pipe->stream_res.tg, refresh_rate);

				status = true;

				break;
			}
		}
	}

	return status;
}

bool dc_stream_get_crtc_position(struct dc *dc,
		struct dc_stream_state **streams, int num_streams,
		unsigned int *v_pos, unsigned int *nom_v_pos)
{
	/* TODO: Support multiple streams */
	const struct dc_stream_state *stream = streams[0];
	int i;
	bool ret = false;
	struct crtc_position position;

	for (i = 0; i < MAX_PIPES; i++) {
		struct pipe_ctx *pipe =
				&dc->current_state->res_ctx.pipe_ctx[i];

		if (pipe->stream == stream && pipe->stream_res.stream_enc) {
			dc->hwss.get_position(&pipe, 1, &position);

			*v_pos = position.vertical_count;
			*nom_v_pos = position.nominal_vcount;
			ret = true;
		}
	}
	return ret;
}

#if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
bool dc_stream_forward_dmcu_crc_window(struct dc *dc, struct dc_stream_state *stream,
			     struct crc_params *crc_window)
{
	int i;
	struct dmcu *dmcu = dc->res_pool->dmcu;
	struct pipe_ctx *pipe;
	struct crc_region tmp_win, *crc_win;
	struct otg_phy_mux mapping_tmp, *mux_mapping;

	/* crc window can't be null */
	if (!crc_window)
		return false;

	if ((dmcu != NULL && dmcu->funcs->is_dmcu_initialized(dmcu))) {
		crc_win = &tmp_win;
		mux_mapping = &mapping_tmp;
		/* set crc window */
		tmp_win.x_start = crc_window->windowa_x_start;
		tmp_win.y_start = crc_window->windowa_y_start;
		tmp_win.x_end = crc_window->windowa_x_end;
		tmp_win.y_end = crc_window->windowa_y_end;

		for (i = 0; i < MAX_PIPES; i++) {
			pipe = &dc->current_state->res_ctx.pipe_ctx[i];
			if (pipe->stream == stream && !pipe->top_pipe && !pipe->prev_odm_pipe)
				break;
		}

		/* Stream not found */
		if (i == MAX_PIPES)
			return false;

		/* set mux routing info */
		mapping_tmp.phy_output_num = stream->link->link_enc_hw_inst;
		mapping_tmp.otg_output_num = pipe->stream_res.tg->inst;

		dmcu->funcs->forward_crc_window(dmcu, crc_win, mux_mapping);
	} else {
		DC_LOG_DC("dmcu is not initialized");
		return false;
	}

	return true;
}

bool dc_stream_stop_dmcu_crc_win_update(struct dc *dc, struct dc_stream_state *stream)
{
	int i;
	struct dmcu *dmcu = dc->res_pool->dmcu;
	struct pipe_ctx *pipe;
	struct otg_phy_mux mapping_tmp, *mux_mapping;

	if ((dmcu != NULL && dmcu->funcs->is_dmcu_initialized(dmcu))) {
		mux_mapping = &mapping_tmp;

		for (i = 0; i < MAX_PIPES; i++) {
			pipe = &dc->current_state->res_ctx.pipe_ctx[i];
			if (pipe->stream == stream && !pipe->top_pipe && !pipe->prev_odm_pipe)
				break;
		}

		/* Stream not found */
		if (i == MAX_PIPES)
			return false;

		/* set mux routing info */
		mapping_tmp.phy_output_num = stream->link->link_enc_hw_inst;
		mapping_tmp.otg_output_num = pipe->stream_res.tg->inst;

		dmcu->funcs->stop_crc_win_update(dmcu, mux_mapping);
	} else {
		DC_LOG_DC("dmcu is not initialized");
		return false;
	}

	return true;
}
#endif

/**
 * dc_stream_configure_crc() - Configure CRC capture for the given stream.
 * @dc: DC Object
 * @stream: The stream to configure CRC on.
 * @enable: Enable CRC if true, disable otherwise.
 * @crc_window: CRC window (x/y start/end) information
 * @continuous: Capture CRC on every frame if true. Otherwise, only capture
 *              once.
 *
 * By default, only CRC0 is configured, and the entire frame is used to
 * calculate the crc.
 */
bool dc_stream_configure_crc(struct dc *dc, struct dc_stream_state *stream,
			     struct crc_params *crc_window, bool enable, bool continuous)
{
	int i;
	struct pipe_ctx *pipe;
	struct crc_params param;
	struct timing_generator *tg;

	for (i = 0; i < MAX_PIPES; i++) {
		pipe = &dc->current_state->res_ctx.pipe_ctx[i];
		if (pipe->stream == stream && !pipe->top_pipe && !pipe->prev_odm_pipe)
			break;
	}
	/* Stream not found */
	if (i == MAX_PIPES)
		return false;

	/* By default, capture the full frame */
	param.windowa_x_start = 0;
	param.windowa_y_start = 0;
	param.windowa_x_end = pipe->stream->timing.h_addressable;
	param.windowa_y_end = pipe->stream->timing.v_addressable;
	param.windowb_x_start = 0;
	param.windowb_y_start = 0;
	param.windowb_x_end = pipe->stream->timing.h_addressable;
	param.windowb_y_end = pipe->stream->timing.v_addressable;

	if (crc_window) {
		param.windowa_x_start = crc_window->windowa_x_start;
		param.windowa_y_start = crc_window->windowa_y_start;
		param.windowa_x_end = crc_window->windowa_x_end;
		param.windowa_y_end = crc_window->windowa_y_end;
		param.windowb_x_start = crc_window->windowb_x_start;
		param.windowb_y_start = crc_window->windowb_y_start;
		param.windowb_x_end = crc_window->windowb_x_end;
		param.windowb_y_end = crc_window->windowb_y_end;
	}

	param.dsc_mode = pipe->stream->timing.flags.DSC ? 1 : 0;
	param.odm_mode = pipe->next_odm_pipe ? 1 : 0;

	/* Default to the union of both windows */
	param.selection = UNION_WINDOW_A_B;
	param.continuous_mode = continuous;
	param.enable = enable;

	tg = pipe->stream_res.tg;

	/* Only call if supported */
	if (tg->funcs->configure_crc)
		return tg->funcs->configure_crc(tg, &param);
	DC_LOG_WARNING("CRC capture not supported.");
	return false;
}

/**
 * dc_stream_get_crc() - Get CRC values for the given stream.
 * @dc: DC object
 * @stream: The DC stream state of the stream to get CRCs from.
 * @r_cr: CRC value for the first of the 3 channels stored here.
 * @g_y:  CRC value for the second of the 3 channels stored here.
 * @b_cb: CRC value for the third of the 3 channels stored here.
 *
 * dc_stream_configure_crc needs to be called beforehand to enable CRCs.
 * Return false if stream is not found, or if CRCs are not enabled.
 */
bool dc_stream_get_crc(struct dc *dc, struct dc_stream_state *stream,
		       uint32_t *r_cr, uint32_t *g_y, uint32_t *b_cb)
{
	int i;
	struct pipe_ctx *pipe;
	struct timing_generator *tg;

	for (i = 0; i < MAX_PIPES; i++) {
		pipe = &dc->current_state->res_ctx.pipe_ctx[i];
		if (pipe->stream == stream)
			break;
	}
	/* Stream not found */
	if (i == MAX_PIPES)
		return false;

	tg = pipe->stream_res.tg;

	if (tg->funcs->get_crc)
		return tg->funcs->get_crc(tg, r_cr, g_y, b_cb);
	DC_LOG_WARNING("CRC capture not supported.");
	return false;
}
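
/* Example (illustrative sketch, not part of the driver): enabling continuous
 * full-frame CRC capture and reading the values back once at least one frame
 * has completed:
 *
 *	uint32_t r_cr, g_y, b_cb;
 *
 *	if (dc_stream_configure_crc(dc, stream, NULL, true, true) &&
 *	    dc_stream_get_crc(dc, stream, &r_cr, &g_y, &b_cb))
 *		DC_LOG_DC("CRC: %08x %08x %08x", r_cr, g_y, b_cb);
 */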

void dc_stream_set_dyn_expansion(struct dc *dc, struct dc_stream_state *stream,
		enum dc_dynamic_expansion option)
{
	/* OPP FMT dyn expansion updates */
	int i;
	struct pipe_ctx *pipe_ctx;

	for (i = 0; i < MAX_PIPES; i++) {
		if (dc->current_state->res_ctx.pipe_ctx[i].stream
				== stream) {
			pipe_ctx = &dc->current_state->res_ctx.pipe_ctx[i];
			pipe_ctx->stream_res.opp->dyn_expansion = option;
			pipe_ctx->stream_res.opp->funcs->opp_set_dyn_expansion(
					pipe_ctx->stream_res.opp,
					COLOR_SPACE_YCBCR601,
					stream->timing.display_color_depth,
					stream->signal);
		}
	}
}

void dc_stream_set_dither_option(struct dc_stream_state *stream,
		enum dc_dither_option option)
{
	struct bit_depth_reduction_params params;
	struct dc_link *link = stream->link;
	struct pipe_ctx *pipes = NULL;
	int i;

	for (i = 0; i < MAX_PIPES; i++) {
		if (link->dc->current_state->res_ctx.pipe_ctx[i].stream ==
				stream) {
			pipes = &link->dc->current_state->res_ctx.pipe_ctx[i];
			break;
		}
	}

	if (!pipes)
		return;
	if (option > DITHER_OPTION_MAX)
		return;

	stream->dither_option = option;

	memset(&params, 0, sizeof(params));
	resource_build_bit_depth_reduction_params(stream, &params);
	stream->bit_depth_params = params;

	if (pipes->plane_res.xfm &&
	    pipes->plane_res.xfm->funcs->transform_set_pixel_storage_depth) {
		pipes->plane_res.xfm->funcs->transform_set_pixel_storage_depth(
			pipes->plane_res.xfm,
			pipes->plane_res.scl_data.lb_params.depth,
			&stream->bit_depth_params);
	}

	pipes->stream_res.opp->funcs->
		opp_program_bit_depth_reduction(pipes->stream_res.opp, &params);
}

bool dc_stream_set_gamut_remap(struct dc *dc, const struct dc_stream_state *stream)
{
	int i;
	bool ret = false;
	struct pipe_ctx *pipes;

	for (i = 0; i < MAX_PIPES; i++) {
		if (dc->current_state->res_ctx.pipe_ctx[i].stream == stream) {
			pipes = &dc->current_state->res_ctx.pipe_ctx[i];
			dc->hwss.program_gamut_remap(pipes);
			ret = true;
		}
	}

	return ret;
}

bool dc_stream_program_csc_matrix(struct dc *dc, struct dc_stream_state *stream)
{
	int i;
	bool ret = false;
	struct pipe_ctx *pipes;

	for (i = 0; i < MAX_PIPES; i++) {
		if (dc->current_state->res_ctx.pipe_ctx[i].stream
				== stream) {

			pipes = &dc->current_state->res_ctx.pipe_ctx[i];
			dc->hwss.program_output_csc(dc,
					pipes,
					stream->output_color_space,
					stream->csc_color_matrix.matrix,
					pipes->stream_res.opp->inst);
			ret = true;
		}
	}

	return ret;
}

void dc_stream_set_static_screen_params(struct dc *dc,
		struct dc_stream_state **streams,
		int num_streams,
		const struct dc_static_screen_params *params)
{
	int i, j;
	struct pipe_ctx *pipes_affected[MAX_PIPES];
	int num_pipes_affected = 0;

	for (i = 0; i < num_streams; i++) {
		struct dc_stream_state *stream = streams[i];

		for (j = 0; j < MAX_PIPES; j++) {
			if (dc->current_state->res_ctx.pipe_ctx[j].stream
					== stream) {
				pipes_affected[num_pipes_affected++] =
						&dc->current_state->res_ctx.pipe_ctx[j];
			}
		}
	}

	dc->hwss.set_static_screen_control(pipes_affected, num_pipes_affected, params);
}

static void dc_destruct(struct dc *dc)
{
	// reset link encoder assignment table on destruct
	if (dc->res_pool && dc->res_pool->funcs->link_encs_assign)
		link_enc_cfg_init(dc, dc->current_state);

	if (dc->current_state) {
		dc_release_state(dc->current_state);
		dc->current_state = NULL;
	}

	destroy_links(dc);

	destroy_link_encoders(dc);

	if (dc->clk_mgr) {
		dc_destroy_clk_mgr(dc->clk_mgr);
		dc->clk_mgr = NULL;
	}

	dc_destroy_resource_pool(dc);

	if (dc->ctx->gpio_service)
		dal_gpio_service_destroy(&dc->ctx->gpio_service);

	if (dc->ctx->created_bios)
		dal_bios_parser_destroy(&dc->ctx->dc_bios);

	dc_perf_trace_destroy(&dc->ctx->perf_trace);

	kfree(dc->ctx);
	dc->ctx = NULL;

	kfree(dc->bw_vbios);
	dc->bw_vbios = NULL;

	kfree(dc->bw_dceip);
	dc->bw_dceip = NULL;

	kfree(dc->dcn_soc);
	dc->dcn_soc = NULL;

	kfree(dc->dcn_ip);
	dc->dcn_ip = NULL;

	kfree(dc->vm_helper);
	dc->vm_helper = NULL;
}

static bool dc_construct_ctx(struct dc *dc,
		const struct dc_init_data *init_params)
{
	struct dc_context *dc_ctx;
	enum dce_version dc_version = DCE_VERSION_UNKNOWN;

	dc_ctx = kzalloc(sizeof(*dc_ctx), GFP_KERNEL);
	if (!dc_ctx)
		return false;

	dc_ctx->cgs_device = init_params->cgs_device;
	dc_ctx->driver_context = init_params->driver;
	dc_ctx->dc = dc;
	dc_ctx->asic_id = init_params->asic_id;
	dc_ctx->dc_sink_id_count = 0;
	dc_ctx->dc_stream_id_count = 0;
	dc_ctx->dce_environment = init_params->dce_environment;
	dc_ctx->dcn_reg_offsets = init_params->dcn_reg_offsets;
	dc_ctx->nbio_reg_offsets = init_params->nbio_reg_offsets;

	/* Create logger */

	dc_version = resource_parse_asic_id(init_params->asic_id);
	dc_ctx->dce_version = dc_version;

	dc_ctx->perf_trace = dc_perf_trace_create();
	if (!dc_ctx->perf_trace) {
		ASSERT_CRITICAL(false);
		return false;
	}

	dc->ctx = dc_ctx;

	return true;
}

static bool dc_construct(struct dc *dc,
		const struct dc_init_data *init_params)
{
	struct dc_context *dc_ctx;
	struct bw_calcs_dceip *dc_dceip;
	struct bw_calcs_vbios *dc_vbios;
	struct dcn_soc_bounding_box *dcn_soc;
	struct dcn_ip_params *dcn_ip;

	dc->config = init_params->flags;

	// Allocate memory for the vm_helper
	dc->vm_helper = kzalloc(sizeof(struct vm_helper), GFP_KERNEL);
	if (!dc->vm_helper) {
		dm_error("%s: failed to create dc->vm_helper\n", __func__);
		goto fail;
	}

	memcpy(&dc->bb_overrides, &init_params->bb_overrides, sizeof(dc->bb_overrides));

	dc_dceip = kzalloc(sizeof(*dc_dceip), GFP_KERNEL);
	if (!dc_dceip) {
		dm_error("%s: failed to create dceip\n", __func__);
		goto fail;
	}

	dc->bw_dceip = dc_dceip;

	dc_vbios = kzalloc(sizeof(*dc_vbios), GFP_KERNEL);
	if (!dc_vbios) {
		dm_error("%s: failed to create vbios\n", __func__);
		goto fail;
	}

	dc->bw_vbios = dc_vbios;
	dcn_soc = kzalloc(sizeof(*dcn_soc), GFP_KERNEL);
	if (!dcn_soc) {
		dm_error("%s: failed to create dcn_soc\n", __func__);
		goto fail;
	}

	dc->dcn_soc = dcn_soc;

	dcn_ip = kzalloc(sizeof(*dcn_ip), GFP_KERNEL);
	if (!dcn_ip) {
		dm_error("%s: failed to create dcn_ip\n", __func__);
		goto fail;
	}

	dc->dcn_ip = dcn_ip;

	if (!dc_construct_ctx(dc, init_params)) {
		dm_error("%s: failed to create ctx\n", __func__);
		goto fail;
	}

	dc_ctx = dc->ctx;

	/* Resource should construct all asic specific resources.
	 * This should be the only place where we need to parse the asic id
	 */
	if (init_params->vbios_override)
		dc_ctx->dc_bios = init_params->vbios_override;
	else {
		/* Create BIOS parser */
		struct bp_init_data bp_init_data;

		bp_init_data.ctx = dc_ctx;
		bp_init_data.bios = init_params->asic_id.atombios_base_address;

		dc_ctx->dc_bios = dal_bios_parser_create(
				&bp_init_data, dc_ctx->dce_version);

		if (!dc_ctx->dc_bios) {
			ASSERT_CRITICAL(false);
			goto fail;
		}

		dc_ctx->created_bios = true;
	}

	dc->vendor_signature = init_params->vendor_signature;

	/* Create GPIO service */
	dc_ctx->gpio_service = dal_gpio_service_create(
			dc_ctx->dce_version,
			dc_ctx->dce_environment,
			dc_ctx);

	if (!dc_ctx->gpio_service) {
		ASSERT_CRITICAL(false);
		goto fail;
	}

	dc->res_pool = dc_create_resource_pool(dc, init_params, dc_ctx->dce_version);
	if (!dc->res_pool)
		goto fail;

	/* set i2c speed if not done by the respective dcnxxx_resource.c */
	if (dc->caps.i2c_speed_in_khz_hdcp == 0)
		dc->caps.i2c_speed_in_khz_hdcp = dc->caps.i2c_speed_in_khz;

	dc->clk_mgr = dc_clk_mgr_create(dc->ctx, dc->res_pool->pp_smu, dc->res_pool->dccg);
	if (!dc->clk_mgr)
		goto fail;
#ifdef CONFIG_DRM_AMD_DC_DCN
	dc->clk_mgr->force_smu_not_present = init_params->force_smu_not_present;

	if (dc->res_pool->funcs->update_bw_bounding_box) {
		DC_FP_START();
		dc->res_pool->funcs->update_bw_bounding_box(dc, dc->clk_mgr->bw_params);
		DC_FP_END();
	}
#endif

	/* Creation of current_state must occur after dc->dml
	 * is initialized in dc_create_resource_pool because
	 * on creation it copies the contents of dc->dml
	 */

	dc->current_state = dc_create_state(dc);

	if (!dc->current_state) {
		dm_error("%s: failed to create validate ctx\n", __func__);
		goto fail;
	}

	if (!create_links(dc, init_params->num_virtual_links))
		goto fail;

	/* Create additional DIG link encoder objects if fewer than the platform
	 * supports were created during link construction.
	 */
	if (!create_link_encoders(dc))
		goto fail;

	dc_resource_state_construct(dc, dc->current_state);

	return true;

fail:
	return false;
}

static void disable_all_writeback_pipes_for_stream(
		const struct dc *dc,
		struct dc_stream_state *stream,
		struct dc_state *context)
{
	int i;

	for (i = 0; i < stream->num_wb_info; i++)
		stream->writeback_info[i].wb_enabled = false;
}

static void apply_ctx_interdependent_lock(struct dc *dc, struct dc_state *context,
					  struct dc_stream_state *stream, bool lock)
{
	int i;

	/* Checks if interdependent update function pointer is NULL or not, takes care of DCE110 case */
	if (dc->hwss.interdependent_update_lock)
		dc->hwss.interdependent_update_lock(dc, context, lock);
	else {
		for (i = 0; i < dc->res_pool->pipe_count; i++) {
			struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[i];
			struct pipe_ctx *old_pipe_ctx = &dc->current_state->res_ctx.pipe_ctx[i];

			// Copied conditions that were previously in dce110_apply_ctx_for_surface
			if (stream == pipe_ctx->stream) {
				if (!pipe_ctx->top_pipe &&
					(pipe_ctx->plane_state || old_pipe_ctx->plane_state))
					dc->hwss.pipe_control_lock(dc, pipe_ctx, lock);
			}
		}
	}
}

static void disable_dangling_plane(struct dc *dc, struct dc_state *context)
{
	int i, j;
	struct dc_state *dangling_context = dc_create_state(dc);
	struct dc_state *current_ctx;

	if (dangling_context == NULL)
		return;

	dc_resource_state_copy_construct(dc->current_state, dangling_context);

	for (i = 0; i < dc->res_pool->pipe_count; i++) {
		struct dc_stream_state *old_stream =
				dc->current_state->res_ctx.pipe_ctx[i].stream;
		bool should_disable = true;
		bool pipe_split_change =
			context->res_ctx.pipe_ctx[i].top_pipe != dc->current_state->res_ctx.pipe_ctx[i].top_pipe;

		for (j = 0; j < context->stream_count; j++) {
			if (old_stream == context->streams[j]) {
				should_disable = false;
				break;
			}
		}
		if (!should_disable && pipe_split_change &&
				dc->current_state->stream_count != context->stream_count)
			should_disable = true;

		if (old_stream && !dc->current_state->res_ctx.pipe_ctx[i].top_pipe) {
			struct pipe_ctx *old_pipe, *new_pipe;

			old_pipe = &dc->current_state->res_ctx.pipe_ctx[i];
			new_pipe = &context->res_ctx.pipe_ctx[i];

			if (old_pipe->plane_state && !new_pipe->plane_state)
				should_disable = true;
		}

		if (should_disable && old_stream) {
			dc_rem_all_planes_for_stream(dc, old_stream, dangling_context);
			disable_all_writeback_pipes_for_stream(dc, old_stream, dangling_context);

			if (dc->hwss.apply_ctx_for_surface) {
				apply_ctx_interdependent_lock(dc, dc->current_state, old_stream, true);
				dc->hwss.apply_ctx_for_surface(dc, old_stream, 0, dangling_context);
				apply_ctx_interdependent_lock(dc, dc->current_state, old_stream, false);
				dc->hwss.post_unlock_program_front_end(dc, dangling_context);
			}
			if (dc->hwss.program_front_end_for_ctx) {
				dc->hwss.interdependent_update_lock(dc, dc->current_state, true);
				dc->hwss.program_front_end_for_ctx(dc, dangling_context);
				dc->hwss.interdependent_update_lock(dc, dc->current_state, false);
				dc->hwss.post_unlock_program_front_end(dc, dangling_context);
			}
		}
	}

	current_ctx = dc->current_state;
	dc->current_state = dangling_context;
	dc_release_state(current_ctx);
}

static void disable_vbios_mode_if_required(
		struct dc *dc,
		struct dc_state *context)
{
	unsigned int i, j;

	/* If the timing has changed, disable the stream */
	for (i = 0; i < dc->res_pool->pipe_count; i++) {
		struct dc_stream_state *stream = NULL;
		struct dc_link *link = NULL;
		struct pipe_ctx *pipe = NULL;

		pipe = &context->res_ctx.pipe_ctx[i];
		stream = pipe->stream;
		if (stream == NULL)
			continue;

		// only looking for first odm pipe
		if (pipe->prev_odm_pipe)
			continue;

		if (stream->link->local_sink &&
			stream->link->local_sink->sink_signal == SIGNAL_TYPE_EDP) {
			link = stream->link;
		}

		if (link != NULL && link->link_enc->funcs->is_dig_enabled(link->link_enc)) {
			unsigned int enc_inst, tg_inst = 0;
			unsigned int pix_clk_100hz;

			enc_inst = link->link_enc->funcs->get_dig_frontend(link->link_enc);
			if (enc_inst != ENGINE_ID_UNKNOWN) {
				for (j = 0; j < dc->res_pool->stream_enc_count; j++) {
					if (dc->res_pool->stream_enc[j]->id == enc_inst) {
						tg_inst = dc->res_pool->stream_enc[j]->funcs->dig_source_otg(
							dc->res_pool->stream_enc[j]);
						break;
					}
				}

				dc->res_pool->dp_clock_source->funcs->get_pixel_clk_frequency_100hz(
					dc->res_pool->dp_clock_source,
					tg_inst, &pix_clk_100hz);

				if (link->link_status.link_active) {
					uint32_t requested_pix_clk_100hz =
						pipe->stream_res.pix_clk_params.requested_pix_clk_100hz;

					if (pix_clk_100hz != requested_pix_clk_100hz) {
						core_link_disable_stream(pipe);
						pipe->stream->dpms_off = false;
					}
				}
			}
		}
	}
}

static void wait_for_no_pipes_pending(struct dc *dc, struct dc_state *context)
{
	int i;
	PERF_TRACE();
	for (i = 0; i < MAX_PIPES; i++) {
		int count = 0;
		struct pipe_ctx *pipe = &context->res_ctx.pipe_ctx[i];

		if (!pipe->plane_state)
			continue;

		/* Timeout 100 ms */
		while (count < 100000) {
			/* Must set to false to start with, due to OR in update function */
			pipe->plane_state->status.is_flip_pending = false;
			dc->hwss.update_pending_status(pipe);
			if (!pipe->plane_state->status.is_flip_pending)
				break;
			udelay(1);
			count++;
		}
		ASSERT(!pipe->plane_state->status.is_flip_pending);
	}
	PERF_TRACE();
}

/*******************************************************************************
 * Public functions
 ******************************************************************************/

struct dc *dc_create(const struct dc_init_data *init_params)
{
	struct dc *dc = kzalloc(sizeof(*dc), GFP_KERNEL);
	unsigned int full_pipe_count;

	if (!dc)
		return NULL;

	if (init_params->dce_environment == DCE_ENV_VIRTUAL_HW) {
		if (!dc_construct_ctx(dc, init_params))
			goto destruct_dc;
	} else {
		if (!dc_construct(dc, init_params))
			goto destruct_dc;

		full_pipe_count = dc->res_pool->pipe_count;
		if (dc->res_pool->underlay_pipe_index != NO_UNDERLAY_PIPE)
			full_pipe_count--;
		dc->caps.max_streams = min(
				full_pipe_count,
				dc->res_pool->stream_enc_count);

		dc->caps.max_links = dc->link_count;
		dc->caps.max_audios = dc->res_pool->audio_count;
		dc->caps.linear_pitch_alignment = 64;

		dc->caps.max_dp_protocol_version = DP_VERSION_1_4;

		dc->caps.max_otg_num = dc->res_pool->res_cap->num_timing_generator;

		if (dc->res_pool->dmcu != NULL)
			dc->versions.dmcu_version = dc->res_pool->dmcu->dmcu_version;
	}

	dc->dcn_reg_offsets = init_params->dcn_reg_offsets;
	dc->nbio_reg_offsets = init_params->nbio_reg_offsets;

	/* Populate versioning information */
	dc->versions.dc_ver = DC_VER;

	dc->build_id = DC_BUILD_ID;

	DC_LOG_DC("Display Core initialized\n");

	return dc;

destruct_dc:
	dc_destruct(dc);
	kfree(dc);
	return NULL;
}
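
/* Example (illustrative only): the DM layer typically brings DC up with
 * dc_create() followed by dc_hardware_init(), and tears it down with
 * dc_destroy(); "init_params" is assumed to have been filled in by the DM:
 *
 *	struct dc *dc = dc_create(&init_params);
 *
 *	if (dc) {
 *		dc_hardware_init(dc);
 *		...
 *		dc_destroy(&dc);
 *	}
 */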

static void detect_edp_presence(struct dc *dc)
{
	struct dc_link *edp_links[MAX_NUM_EDP];
	struct dc_link *edp_link = NULL;
	enum dc_connection_type type;
	int i;
	int edp_num;

	get_edp_links(dc, edp_links, &edp_num);
	if (!edp_num)
		return;

	for (i = 0; i < edp_num; i++) {
		edp_link = edp_links[i];
		if (dc->config.edp_not_connected) {
			edp_link->edp_sink_present = false;
		} else {
			dc_link_detect_sink(edp_link, &type);
			edp_link->edp_sink_present = (type != dc_connection_none);
		}
	}
}

void dc_hardware_init(struct dc *dc)
{
	detect_edp_presence(dc);
	if (dc->ctx->dce_environment != DCE_ENV_VIRTUAL_HW)
		dc->hwss.init_hw(dc);
}

void dc_init_callbacks(struct dc *dc,
		const struct dc_callback_init *init_params)
{
#ifdef CONFIG_DRM_AMD_DC_HDCP
	dc->ctx->cp_psp = init_params->cp_psp;
#endif
}

void dc_deinit_callbacks(struct dc *dc)
{
#ifdef CONFIG_DRM_AMD_DC_HDCP
	memset(&dc->ctx->cp_psp, 0, sizeof(dc->ctx->cp_psp));
#endif
}

void dc_destroy(struct dc **dc)
{
	dc_destruct(*dc);
	kfree(*dc);
	*dc = NULL;
}

static void enable_timing_multisync(
		struct dc *dc,
		struct dc_state *ctx)
{
	int i, multisync_count = 0;
	int pipe_count = dc->res_pool->pipe_count;
	struct pipe_ctx *multisync_pipes[MAX_PIPES] = { NULL };

	for (i = 0; i < pipe_count; i++) {
		if (!ctx->res_ctx.pipe_ctx[i].stream ||
				!ctx->res_ctx.pipe_ctx[i].stream->triggered_crtc_reset.enabled)
			continue;
		if (ctx->res_ctx.pipe_ctx[i].stream == ctx->res_ctx.pipe_ctx[i].stream->triggered_crtc_reset.event_source)
			continue;
		multisync_pipes[multisync_count] = &ctx->res_ctx.pipe_ctx[i];
		multisync_count++;
	}

	if (multisync_count > 0) {
		dc->hwss.enable_per_frame_crtc_position_reset(
			dc, multisync_count, multisync_pipes);
	}
}

static void program_timing_sync(
		struct dc *dc,
		struct dc_state *ctx)
{
	int i, j, k;
	int group_index = 0;
	int num_group = 0;
	int pipe_count = dc->res_pool->pipe_count;
	struct pipe_ctx *unsynced_pipes[MAX_PIPES] = { NULL };

	for (i = 0; i < pipe_count; i++) {
		if (!ctx->res_ctx.pipe_ctx[i].stream
				|| ctx->res_ctx.pipe_ctx[i].top_pipe
				|| ctx->res_ctx.pipe_ctx[i].prev_odm_pipe)
			continue;

		unsynced_pipes[i] = &ctx->res_ctx.pipe_ctx[i];
	}

	for (i = 0; i < pipe_count; i++) {
		int group_size = 1;
		enum timing_synchronization_type sync_type = NOT_SYNCHRONIZABLE;
		struct pipe_ctx *pipe_set[MAX_PIPES];

		if (!unsynced_pipes[i])
			continue;

		pipe_set[0] = unsynced_pipes[i];
		unsynced_pipes[i] = NULL;

		/* Add tg to the set, search rest of the tg's for ones with
		 * same timing, add all tgs with same timing to the group
		 */
		for (j = i + 1; j < pipe_count; j++) {
			if (!unsynced_pipes[j])
				continue;
			if (sync_type != TIMING_SYNCHRONIZABLE &&
				dc->hwss.enable_vblanks_synchronization &&
				unsynced_pipes[j]->stream_res.tg->funcs->align_vblanks &&
				resource_are_vblanks_synchronizable(
					unsynced_pipes[j]->stream,
					pipe_set[0]->stream)) {
				sync_type = VBLANK_SYNCHRONIZABLE;
				pipe_set[group_size] = unsynced_pipes[j];
				unsynced_pipes[j] = NULL;
				group_size++;
			} else if (sync_type != VBLANK_SYNCHRONIZABLE &&
					resource_are_streams_timing_synchronizable(
						unsynced_pipes[j]->stream,
						pipe_set[0]->stream)) {
				sync_type = TIMING_SYNCHRONIZABLE;
				pipe_set[group_size] = unsynced_pipes[j];
				unsynced_pipes[j] = NULL;
				group_size++;
			}
		}

		/* set first unblanked pipe as master */
		for (j = 0; j < group_size; j++) {
			bool is_blanked;

			if (pipe_set[j]->stream_res.opp->funcs->dpg_is_blanked)
				is_blanked =
					pipe_set[j]->stream_res.opp->funcs->dpg_is_blanked(pipe_set[j]->stream_res.opp);
			else
				is_blanked =
					pipe_set[j]->stream_res.tg->funcs->is_blanked(pipe_set[j]->stream_res.tg);
			if (!is_blanked) {
				if (j == 0)
					break;

				swap(pipe_set[0], pipe_set[j]);
				break;
			}
		}

		for (k = 0; k < group_size; k++) {
			struct dc_stream_status *status = dc_stream_get_status_from_state(ctx, pipe_set[k]->stream);

			status->timing_sync_info.group_id = num_group;
			status->timing_sync_info.group_size = group_size;
			if (k == 0)
				status->timing_sync_info.master = true;
			else
				status->timing_sync_info.master = false;
		}

		/* remove any other pipes that have already been synced */
		if (dc->config.use_pipe_ctx_sync_logic) {
			/* check each pipe's syncd to decide which pipe should be removed */
1435                         for (j = 1; j < group_size; j++) {
1436                                 if (pipe_set[j]->pipe_idx_syncd == pipe_set[0]->pipe_idx_syncd) {
1437                                         group_size--;
1438                                         pipe_set[j] = pipe_set[group_size];
1439                                         j--;
1440                                 } else
1441                                         /* link slave pipe's syncd with master pipe */
1442                                         pipe_set[j]->pipe_idx_syncd = pipe_set[0]->pipe_idx_syncd;
1443                         }
1444                 } else {
1445                         for (j = j + 1; j < group_size; j++) {
1446                                 bool is_blanked;
1447
1448                                 if (pipe_set[j]->stream_res.opp->funcs->dpg_is_blanked)
1449                                         is_blanked =
1450                                                 pipe_set[j]->stream_res.opp->funcs->dpg_is_blanked(pipe_set[j]->stream_res.opp);
1451                                 else
1452                                         is_blanked =
1453                                                 pipe_set[j]->stream_res.tg->funcs->is_blanked(pipe_set[j]->stream_res.tg);
1454                                 if (!is_blanked) {
1455                                         group_size--;
1456                                         pipe_set[j] = pipe_set[group_size];
1457                                         j--;
1458                                 }
1459                         }
1460                 }
1461
1462                 if (group_size > 1) {
1463                         if (sync_type == TIMING_SYNCHRONIZABLE) {
1464                                 dc->hwss.enable_timing_synchronization(
1465                                         dc, group_index, group_size, pipe_set);
1466                         } else
1467                                 if (sync_type == VBLANK_SYNCHRONIZABLE) {
1468                                 dc->hwss.enable_vblanks_synchronization(
1469                                         dc, group_index, group_size, pipe_set);
1470                                 }
1471                         group_index++;
1472                 }
1473                 num_group++;
1474         }
1475 }
1476
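/*
 * Compare a candidate state against dc->current_state by stream count and
 * stream pointer identity only; changes within a stream do not count as a
 * context change here.
 */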
static bool context_changed(
		struct dc *dc,
		struct dc_state *context)
{
	uint8_t i;

	if (context->stream_count != dc->current_state->stream_count)
		return true;

	for (i = 0; i < dc->current_state->stream_count; i++) {
		if (dc->current_state->streams[i] != context->streams[i])
			return true;
	}

	return false;
}

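/*
 * dc_validate_boot_timing - check whether the timing currently programmed
 * in HW (presumably left by VBIOS/GOP) matches @crtc_timing, so that the
 * first mode set can be skipped for seamless boot.
 *
 * Finds the enabled DIG frontend, its stream encoder and OTG, reads back
 * the HW timing and compares it field by field. DSC timings, sinks with
 * VSC SDP colorimetry support and links needing eDP ILR optimization are
 * excluded; only eDP sinks are considered at all.
 */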
bool dc_validate_boot_timing(const struct dc *dc,
				const struct dc_sink *sink,
				struct dc_crtc_timing *crtc_timing)
{
	struct timing_generator *tg;
	struct stream_encoder *se = NULL;

	struct dc_crtc_timing hw_crtc_timing = {0};

	struct dc_link *link = sink->link;
	unsigned int i, enc_inst, tg_inst = 0;

	/* Support seamless boot on eDP displays only */
	if (sink->sink_signal != SIGNAL_TYPE_EDP)
		return false;

	/* Check for enabled DIG to identify enabled display */
	if (!link->link_enc->funcs->is_dig_enabled(link->link_enc))
		return false;

	enc_inst = link->link_enc->funcs->get_dig_frontend(link->link_enc);

	if (enc_inst == ENGINE_ID_UNKNOWN)
		return false;

	for (i = 0; i < dc->res_pool->stream_enc_count; i++) {
		if (dc->res_pool->stream_enc[i]->id == enc_inst) {

			se = dc->res_pool->stream_enc[i];

			tg_inst = dc->res_pool->stream_enc[i]->funcs->dig_source_otg(
				dc->res_pool->stream_enc[i]);
			break;
		}
	}

	// tg_inst not found
	if (i == dc->res_pool->stream_enc_count)
		return false;

	if (tg_inst >= dc->res_pool->timing_generator_count)
		return false;

	tg = dc->res_pool->timing_generators[tg_inst];

	if (!tg->funcs->get_hw_timing)
		return false;

	if (!tg->funcs->get_hw_timing(tg, &hw_crtc_timing))
		return false;

	if (crtc_timing->h_total != hw_crtc_timing.h_total)
		return false;

	if (crtc_timing->h_border_left != hw_crtc_timing.h_border_left)
		return false;

	if (crtc_timing->h_addressable != hw_crtc_timing.h_addressable)
		return false;

	if (crtc_timing->h_border_right != hw_crtc_timing.h_border_right)
		return false;

	if (crtc_timing->h_front_porch != hw_crtc_timing.h_front_porch)
		return false;

	if (crtc_timing->h_sync_width != hw_crtc_timing.h_sync_width)
		return false;

	if (crtc_timing->v_total != hw_crtc_timing.v_total)
		return false;

	if (crtc_timing->v_border_top != hw_crtc_timing.v_border_top)
		return false;

	if (crtc_timing->v_addressable != hw_crtc_timing.v_addressable)
		return false;

	if (crtc_timing->v_border_bottom != hw_crtc_timing.v_border_bottom)
		return false;

	if (crtc_timing->v_front_porch != hw_crtc_timing.v_front_porch)
		return false;

	if (crtc_timing->v_sync_width != hw_crtc_timing.v_sync_width)
		return false;

	/* block DSC for now, as VBIOS does not currently support DSC timings */
	if (crtc_timing->flags.DSC)
		return false;

	if (dc_is_dp_signal(link->connector_signal)) {
		unsigned int pix_clk_100hz;
		uint32_t numOdmPipes = 1;
		uint32_t id_src[4] = {0};

		dc->res_pool->dp_clock_source->funcs->get_pixel_clk_frequency_100hz(
			dc->res_pool->dp_clock_source,
			tg_inst, &pix_clk_100hz);

		if (tg->funcs->get_optc_source)
			tg->funcs->get_optc_source(tg,
						&numOdmPipes, &id_src[0], &id_src[1]);

		if (numOdmPipes == 2)
			pix_clk_100hz *= 2;
		if (numOdmPipes == 4)
			pix_clk_100hz *= 4;

		// Note: In rare cases, HW pixclk may differ from crtc's pixclk
		// slightly due to rounding issues in 10 kHz units.
		if (crtc_timing->pix_clk_100hz != pix_clk_100hz)
			return false;

		if (!se->funcs->dp_get_pixel_format)
			return false;

		if (!se->funcs->dp_get_pixel_format(
			se,
			&hw_crtc_timing.pixel_encoding,
			&hw_crtc_timing.display_color_depth))
			return false;

		if (hw_crtc_timing.display_color_depth != crtc_timing->display_color_depth)
			return false;

		if (hw_crtc_timing.pixel_encoding != crtc_timing->pixel_encoding)
			return false;
	}

	if (link->dpcd_caps.dprx_feature.bits.VSC_SDP_COLORIMETRY_SUPPORTED)
		return false;

	if (is_edp_ilr_optimization_required(link, crtc_timing)) {
		DC_LOG_EVENT_LINK_TRAINING("Seamless boot disabled to optimize eDP link rate\n");
		return false;
	}

	return true;
}

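/*
 * Helpers for walking pipe contexts during updates: a pipe qualifies for a
 * stream update when it carries the given stream, and for a plane update
 * when it carries the given plane. The context argument is currently
 * unused.
 */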
static inline bool should_update_pipe_for_stream(
		struct dc_state *context,
		struct pipe_ctx *pipe_ctx,
		struct dc_stream_state *stream)
{
	return (pipe_ctx->stream && pipe_ctx->stream == stream);
}

static inline bool should_update_pipe_for_plane(
		struct dc_state *context,
		struct pipe_ctx *pipe_ctx,
		struct dc_plane_state *plane_state)
{
	return (pipe_ctx->plane_state == plane_state);
}

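/*
 * (Re)program stereo for every pipe in @context (or in the current state
 * when @context is NULL) that drives one of the given streams, using the
 * hwss setup_stereo hook where the sequencer provides one.
 */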
void dc_enable_stereo(
	struct dc *dc,
	struct dc_state *context,
	struct dc_stream_state *streams[],
	uint8_t stream_count)
{
	int i, j;
	struct pipe_ctx *pipe;

	for (i = 0; i < MAX_PIPES; i++) {
		if (context != NULL) {
			pipe = &context->res_ctx.pipe_ctx[i];
		} else {
			context = dc->current_state;
			pipe = &dc->current_state->res_ctx.pipe_ctx[i];
		}

		for (j = 0; pipe && j < stream_count; j++) {
			if (should_update_pipe_for_stream(context, pipe, streams[j]) &&
				dc->hwss.setup_stereo)
				dc->hwss.setup_stereo(pipe, dc);
		}
	}
}

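/*
 * With more than one stream, and unless disabled via the debug option,
 * group streams with compatible timings and synchronize their timing
 * generators.
 */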
void dc_trigger_sync(struct dc *dc, struct dc_state *context)
{
	if (context->stream_count > 1 && !dc->debug.disable_timing_sync) {
		enable_timing_multisync(dc, context);
		program_timing_sync(dc, context);
	}
}

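/* Build a bitmask with one bit set for each pipe that has a stream. */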
static uint8_t get_stream_mask(struct dc *dc, struct dc_state *context)
{
	int i;
	unsigned int stream_mask = 0;

	for (i = 0; i < dc->res_pool->pipe_count; i++) {
		if (context->res_ctx.pipe_ctx[i].stream)
			stream_mask |= 1 << i;
	}

	return stream_mask;
}

void dc_z10_restore(const struct dc *dc)
{
	if (dc->hwss.z10_restore)
		dc->hwss.z10_restore(dc);
}

void dc_z10_save_init(struct dc *dc)
{
	if (dc->hwss.z10_save_init)
		dc->hwss.z10_save_init(dc);
}

/*
 * Applies the given context to HW and copies it into the current context.
 * It's up to the caller to release the src context afterwards.
 */
static enum dc_status dc_commit_state_no_check(struct dc *dc, struct dc_state *context)
{
	struct dc_bios *dcb = dc->ctx->dc_bios;
	enum dc_status result = DC_ERROR_UNEXPECTED;
	struct pipe_ctx *pipe;
	int i, k, l;
	struct dc_stream_state *dc_streams[MAX_STREAMS] = {0};
	struct dc_state *old_state;

	dc_z10_restore(dc);
	dc_allow_idle_optimizations(dc, false);

	for (i = 0; i < context->stream_count; i++)
		dc_streams[i] = context->streams[i];

	if (!dcb->funcs->is_accelerated_mode(dcb)) {
		disable_vbios_mode_if_required(dc, context);
		dc->hwss.enable_accelerated_mode(dc, context);
	}

	if (context->stream_count > get_seamless_boot_stream_count(context) ||
		context->stream_count == 0)
		dc->hwss.prepare_bandwidth(dc, context);

	disable_dangling_plane(dc, context);
	/* re-program planes for existing streams, in case we need to
	 * free up plane resources for later use
	 */
	if (dc->hwss.apply_ctx_for_surface) {
		for (i = 0; i < context->stream_count; i++) {
			if (context->streams[i]->mode_changed)
				continue;
			apply_ctx_interdependent_lock(dc, context, context->streams[i], true);
			dc->hwss.apply_ctx_for_surface(
				dc, context->streams[i],
				context->stream_status[i].plane_count,
				context); /* use new pipe config in new context */
			apply_ctx_interdependent_lock(dc, context, context->streams[i], false);
			dc->hwss.post_unlock_program_front_end(dc, context);
		}
	}

	/* Program hardware */
	for (i = 0; i < dc->res_pool->pipe_count; i++) {
		pipe = &context->res_ctx.pipe_ctx[i];
		dc->hwss.wait_for_mpcc_disconnect(dc, dc->res_pool, pipe);
	}

	result = dc->hwss.apply_ctx_to_hw(dc, context);

	if (result != DC_OK) {
		/* Application of dc_state to hardware stopped. */
		dc->current_state->res_ctx.link_enc_cfg_ctx.mode = LINK_ENC_CFG_STEADY;
		return result;
	}

	dc_trigger_sync(dc, context);

	/* Program all planes within the new context */
	if (dc->hwss.program_front_end_for_ctx) {
		dc->hwss.interdependent_update_lock(dc, context, true);
		dc->hwss.program_front_end_for_ctx(dc, context);
		dc->hwss.interdependent_update_lock(dc, context, false);
		dc->hwss.post_unlock_program_front_end(dc, context);
	}
	for (i = 0; i < context->stream_count; i++) {
		const struct dc_link *link = context->streams[i]->link;

		if (!context->streams[i]->mode_changed)
			continue;

		if (dc->hwss.apply_ctx_for_surface) {
			apply_ctx_interdependent_lock(dc, context, context->streams[i], true);
			dc->hwss.apply_ctx_for_surface(
					dc, context->streams[i],
					context->stream_status[i].plane_count,
					context);
			apply_ctx_interdependent_lock(dc, context, context->streams[i], false);
			dc->hwss.post_unlock_program_front_end(dc, context);
		}

		/*
		 * enable stereo
		 * TODO rework dc_enable_stereo call to work with validation sets?
		 */
		for (k = 0; k < MAX_PIPES; k++) {
			pipe = &context->res_ctx.pipe_ctx[k];

			for (l = 0; pipe && l < context->stream_count; l++) {
				if (context->streams[l] &&
					context->streams[l] == pipe->stream &&
					dc->hwss.setup_stereo)
					dc->hwss.setup_stereo(pipe, dc);
			}
		}

		CONN_MSG_MODE(link, "{%dx%d, %dx%d@%dkHz}",
				context->streams[i]->timing.h_addressable,
				context->streams[i]->timing.v_addressable,
				context->streams[i]->timing.h_total,
				context->streams[i]->timing.v_total,
				context->streams[i]->timing.pix_clk_100hz / 10);
	}

	dc_enable_stereo(dc, context, dc_streams, context->stream_count);

	if (context->stream_count > get_seamless_boot_stream_count(context) ||
		context->stream_count == 0) {
		/* Must wait for no flips to be pending before doing optimize bw */
		wait_for_no_pipes_pending(dc, context);
		/* pplib is notified if disp_num changed */
		dc->hwss.optimize_bandwidth(dc, context);
	}

	if (dc->ctx->dce_version >= DCE_VERSION_MAX)
		TRACE_DCN_CLOCK_STATE(&context->bw_ctx.bw.dcn.clk);
	else
		TRACE_DCE_CLOCK_STATE(&context->bw_ctx.bw.dce);

	context->stream_mask = get_stream_mask(dc, context);

	if (context->stream_mask != dc->current_state->stream_mask)
		dc_dmub_srv_notify_stream_mask(dc->ctx->dmub_srv, context->stream_mask);

	for (i = 0; i < context->stream_count; i++)
		context->streams[i]->mode_changed = false;

	old_state = dc->current_state;
	dc->current_state = context;

	dc_release_state(old_state);

	dc_retain_state(dc->current_state);

	return result;
}

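/*
 * Fully validate the given state (fast_validation was used earlier) and
 * commit it to HW via dc_commit_state_no_check(). Returns true on success;
 * a state identical to the current one counts as success without any
 * programming.
 */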
bool dc_commit_state(struct dc *dc, struct dc_state *context)
{
	enum dc_status result = DC_ERROR_UNEXPECTED;
	int i;

	if (!context_changed(dc, context))
		return true;

	DC_LOG_DC("%s: %d streams\n",
				__func__, context->stream_count);

	for (i = 0; i < context->stream_count; i++) {
		struct dc_stream_state *stream = context->streams[i];

		dc_stream_log(dc, stream);
	}

	/*
	 * The previous validation was performed with fast_validation = true,
	 * so the full DML state required for hardware programming was skipped.
	 *
	 * Re-validate here to calculate these parameters / watermarks.
	 */
	result = dc_validate_global_state(dc, context, false);
	if (result != DC_OK) {
		DC_LOG_ERROR("DC commit global validation failure: %s (%d)",
			     dc_status_to_str(result), result);
		return false;
	}

	result = dc_commit_state_no_check(dc, context);

	return (result == DC_OK);
}

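/*
 * Acquire or release a post-blend MPC 3D LUT and shaper for @stream.
 * On acquire, the MPCC id is taken from the HUBP instance of the first
 * pipe found driving the stream; on release no pipe lookup is needed.
 */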
bool dc_acquire_release_mpc_3dlut(
		struct dc *dc, bool acquire,
		struct dc_stream_state *stream,
		struct dc_3dlut **lut,
		struct dc_transfer_func **shaper)
{
	int pipe_idx;
	bool ret = false;
	bool found_pipe_idx = false;
	const struct resource_pool *pool = dc->res_pool;
	struct resource_context *res_ctx = &dc->current_state->res_ctx;
	int mpcc_id = 0;

	if (pool && res_ctx) {
		if (acquire) {
			/* find the pipe idx for the given stream */
			for (pipe_idx = 0; pipe_idx < pool->pipe_count; pipe_idx++) {
				if (res_ctx->pipe_ctx[pipe_idx].stream == stream) {
					found_pipe_idx = true;
					mpcc_id = res_ctx->pipe_ctx[pipe_idx].plane_res.hubp->inst;
					break;
				}
			}
		} else
			found_pipe_idx = true; /* for release, pipe_idx is not required */

		if (found_pipe_idx) {
			if (acquire && pool->funcs->acquire_post_bldn_3dlut)
				ret = pool->funcs->acquire_post_bldn_3dlut(res_ctx, pool, mpcc_id, lut, shaper);
			else if (!acquire && pool->funcs->release_post_bldn_3dlut)
				ret = pool->funcs->release_post_bldn_3dlut(res_ctx, pool, lut, shaper);
		}
	}
	return ret;
}

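/*
 * Poll the flip-pending status of every pipe that has a plane, skipping
 * SubVP phantom pipes; used below to postpone bandwidth optimization
 * until scanout has consumed the last flip.
 */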
static bool is_flip_pending_in_pipes(struct dc *dc, struct dc_state *context)
{
	int i;
	struct pipe_ctx *pipe;

	for (i = 0; i < MAX_PIPES; i++) {
		pipe = &context->res_ctx.pipe_ctx[i];

		// Don't check flip pending on phantom pipes
		if (!pipe->plane_state || (pipe->stream && pipe->stream->mall_stream_config.type == SUBVP_PHANTOM))
			continue;

		/* Must set to false to start with, due to OR in update function */
		pipe->plane_state->status.is_flip_pending = false;
		dc->hwss.update_pending_status(pipe);
		if (pipe->plane_state->status.is_flip_pending)
			return true;
	}
	return false;
}

/* Perform updates here which need to be deferred until the next vupdate.
 *
 * For example, the blend LUT, 3D LUT, and shaper LUT bypass registers are
 * double buffered, but forcing the LUT memory into its shutdown state takes
 * effect immediately. This causes single-frame corruption, as the LUT is
 * disabled mid-frame, unless shutdown is deferred until after bypass has
 * been entered.
 */
static void process_deferred_updates(struct dc *dc)
{
	int i = 0;

	if (dc->debug.enable_mem_low_power.bits.cm) {
		ASSERT(dc->dcn_ip->max_num_dpp);
		for (i = 0; i < dc->dcn_ip->max_num_dpp; i++)
			if (dc->res_pool->dpps[i]->funcs->dpp_deferred_update)
				dc->res_pool->dpps[i]->funcs->dpp_deferred_update(dc->res_pool->dpps[i]);
	}
}

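/*
 * The optimizing half of a surface update: once no flips are pending,
 * disable any pipe that lost its stream or plane, flush deferred DPP
 * updates and let optimize_bandwidth() drop clocks to what the current
 * state actually needs. A no-op unless an optimization was flagged and
 * no seamless-boot stream remains.
 */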
void dc_post_update_surfaces_to_stream(struct dc *dc)
{
	int i;
	struct dc_state *context = dc->current_state;

	if ((!dc->optimized_required) || get_seamless_boot_stream_count(context) > 0)
		return;

	post_surface_trace(dc);

	if (dc->ctx->dce_version >= DCE_VERSION_MAX)
		TRACE_DCN_CLOCK_STATE(&context->bw_ctx.bw.dcn.clk);
	else
		TRACE_DCE_CLOCK_STATE(&context->bw_ctx.bw.dce);

	if (is_flip_pending_in_pipes(dc, context))
		return;

	for (i = 0; i < dc->res_pool->pipe_count; i++)
		if (context->res_ctx.pipe_ctx[i].stream == NULL ||
		    context->res_ctx.pipe_ctx[i].plane_state == NULL) {
			context->res_ctx.pipe_ctx[i].pipe_idx = i;
			dc->hwss.disable_plane(dc, &context->res_ctx.pipe_ctx[i]);
		}

	process_deferred_updates(dc);

	dc->hwss.optimize_bandwidth(dc, context);

	dc->optimized_required = false;
	dc->wm_optimized_required = false;
}

static void init_state(struct dc *dc, struct dc_state *context)
{
	/* Each context must have its own instance of VBA, and in order to
	 * initialize and obtain IP and SOC, the base DML instance from DC is
	 * initially copied into every context.
	 */
	memcpy(&context->bw_ctx.dml, &dc->dml, sizeof(struct display_mode_lib));
}

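/*
 * Allocate a zeroed dc_state (kvzalloc, since the struct is large), seed
 * its DML instance from dc and initialize the refcount to 1.
 */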
struct dc_state *dc_create_state(struct dc *dc)
{
	struct dc_state *context = kvzalloc(sizeof(struct dc_state),
					    GFP_KERNEL);

	if (!context)
		return NULL;

	init_state(dc, context);

	kref_init(&context->refcount);

	return context;
}

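/*
 * Copy a dc_state: memcpy the whole struct, retarget the intra-state pipe
 * pointers (top/bottom and ODM neighbours) at the new copy, and take a
 * reference on every stream and plane so the copy owns them independently.
 * The copy starts with a refcount of 1.
 */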
struct dc_state *dc_copy_state(struct dc_state *src_ctx)
{
	int i, j;
	struct dc_state *new_ctx = kvmalloc(sizeof(struct dc_state), GFP_KERNEL);

	if (!new_ctx)
		return NULL;
	memcpy(new_ctx, src_ctx, sizeof(struct dc_state));

	for (i = 0; i < MAX_PIPES; i++) {
		struct pipe_ctx *cur_pipe = &new_ctx->res_ctx.pipe_ctx[i];

		if (cur_pipe->top_pipe)
			cur_pipe->top_pipe = &new_ctx->res_ctx.pipe_ctx[cur_pipe->top_pipe->pipe_idx];

		if (cur_pipe->bottom_pipe)
			cur_pipe->bottom_pipe = &new_ctx->res_ctx.pipe_ctx[cur_pipe->bottom_pipe->pipe_idx];

		if (cur_pipe->prev_odm_pipe)
			cur_pipe->prev_odm_pipe = &new_ctx->res_ctx.pipe_ctx[cur_pipe->prev_odm_pipe->pipe_idx];

		if (cur_pipe->next_odm_pipe)
			cur_pipe->next_odm_pipe = &new_ctx->res_ctx.pipe_ctx[cur_pipe->next_odm_pipe->pipe_idx];
	}

	for (i = 0; i < new_ctx->stream_count; i++) {
		dc_stream_retain(new_ctx->streams[i]);
		for (j = 0; j < new_ctx->stream_status[i].plane_count; j++)
			dc_plane_state_retain(
				new_ctx->stream_status[i].plane_states[j]);
	}

	kref_init(&new_ctx->refcount);

	return new_ctx;
}

void dc_retain_state(struct dc_state *context)
{
	kref_get(&context->refcount);
}

static void dc_state_free(struct kref *kref)
{
	struct dc_state *context = container_of(kref, struct dc_state, refcount);

	dc_resource_state_destruct(context);
	kvfree(context);
}

void dc_release_state(struct dc_state *context)
{
	kref_put(&context->refcount, dc_state_free);
}

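/*
 * Route (or stop routing) the stereo sync signal through the generic GPIO
 * mux. The temporary config and the mux are always cleaned up; returns
 * true only if pin lookup, mux creation, open and configuration all
 * succeed.
 */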
bool dc_set_generic_gpio_for_stereo(bool enable,
		struct gpio_service *gpio_service)
{
	enum gpio_result gpio_result = GPIO_RESULT_NON_SPECIFIC_ERROR;
	struct gpio_pin_info pin_info;
	struct gpio *generic;
	struct gpio_generic_mux_config *config = kzalloc(sizeof(struct gpio_generic_mux_config),
			   GFP_KERNEL);

	if (!config)
		return false;
	pin_info = dal_gpio_get_generic_pin_info(gpio_service, GPIO_ID_GENERIC, 0);

	if (pin_info.mask == 0xFFFFFFFF || pin_info.offset == 0xFFFFFFFF) {
		kfree(config);
		return false;
	}

	generic = dal_gpio_service_create_generic_mux(
		gpio_service,
		pin_info.offset,
		pin_info.mask);

	if (!generic) {
		kfree(config);
		return false;
	}

	gpio_result = dal_gpio_open(generic, GPIO_MODE_OUTPUT);

	config->enable_output_from_mux = enable;
	config->mux_select = GPIO_SIGNAL_SOURCE_PASS_THROUGH_STEREO_SYNC;

	if (gpio_result == GPIO_RESULT_OK)
		gpio_result = dal_mux_setup_config(generic, config);

	dal_gpio_close(generic);
	dal_gpio_destroy_generic_mux(&generic);
	kfree(config);

	return gpio_result == GPIO_RESULT_OK;
}

static bool is_surface_in_context(
		const struct dc_state *context,
		const struct dc_plane_state *plane_state)
{
	int j;

	for (j = 0; j < MAX_PIPES; j++) {
		const struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[j];

		if (plane_state == pipe_ctx->plane_state)
			return true;
	}

	return false;
}

static enum surface_update_type get_plane_info_update_type(const struct dc_surface_update *u)
{
	union surface_update_flags *update_flags = &u->surface->update_flags;
	enum surface_update_type update_type = UPDATE_TYPE_FAST;

	if (!u->plane_info)
		return UPDATE_TYPE_FAST;

	if (u->plane_info->color_space != u->surface->color_space) {
		update_flags->bits.color_space_change = 1;
		elevate_update_type(&update_type, UPDATE_TYPE_MED);
	}

	if (u->plane_info->horizontal_mirror != u->surface->horizontal_mirror) {
		update_flags->bits.horizontal_mirror_change = 1;
		elevate_update_type(&update_type, UPDATE_TYPE_MED);
	}

	if (u->plane_info->rotation != u->surface->rotation) {
		update_flags->bits.rotation_change = 1;
		elevate_update_type(&update_type, UPDATE_TYPE_FULL);
	}

	if (u->plane_info->format != u->surface->format) {
		update_flags->bits.pixel_format_change = 1;
		elevate_update_type(&update_type, UPDATE_TYPE_FULL);
	}

	if (u->plane_info->stereo_format != u->surface->stereo_format) {
		update_flags->bits.stereo_format_change = 1;
		elevate_update_type(&update_type, UPDATE_TYPE_FULL);
	}

	if (u->plane_info->per_pixel_alpha != u->surface->per_pixel_alpha) {
		update_flags->bits.per_pixel_alpha_change = 1;
		elevate_update_type(&update_type, UPDATE_TYPE_MED);
	}

	if (u->plane_info->global_alpha_value != u->surface->global_alpha_value) {
		update_flags->bits.global_alpha_change = 1;
		elevate_update_type(&update_type, UPDATE_TYPE_MED);
	}

	if (u->plane_info->dcc.enable != u->surface->dcc.enable
			|| u->plane_info->dcc.dcc_ind_blk != u->surface->dcc.dcc_ind_blk
			|| u->plane_info->dcc.meta_pitch != u->surface->dcc.meta_pitch) {
		/* During DCC on/off, the stutter period is calculated before
		 * DCC has fully transitioned. This results in an incorrect
		 * stutter period calculation. Triggering a full update will
		 * recalculate the stutter period.
		 */
		update_flags->bits.dcc_change = 1;
		elevate_update_type(&update_type, UPDATE_TYPE_FULL);
	}

	if (resource_pixel_format_to_bpp(u->plane_info->format) !=
			resource_pixel_format_to_bpp(u->surface->format)) {
		/* different bytes per element will require full bandwidth
		 * and DML calculation
		 */
		update_flags->bits.bpp_change = 1;
		elevate_update_type(&update_type, UPDATE_TYPE_FULL);
	}

	if (u->plane_info->plane_size.surface_pitch != u->surface->plane_size.surface_pitch
			|| u->plane_info->plane_size.chroma_pitch != u->surface->plane_size.chroma_pitch) {
		update_flags->bits.plane_size_change = 1;
		elevate_update_type(&update_type, UPDATE_TYPE_MED);
	}

	if (memcmp(&u->plane_info->tiling_info, &u->surface->tiling_info,
			sizeof(union dc_tiling_info)) != 0) {
		update_flags->bits.swizzle_change = 1;
		elevate_update_type(&update_type, UPDATE_TYPE_MED);

		/* todo: the checks below are HW dependent; we should add a hook
		 * to DCE/N resource and validate it there.
		 */
		if (u->plane_info->tiling_info.gfx9.swizzle != DC_SW_LINEAR) {
			/* swizzled mode requires RQ to be set up properly,
			 * thus we need to run DML to calculate RQ settings
			 */
			update_flags->bits.bandwidth_change = 1;
			elevate_update_type(&update_type, UPDATE_TYPE_FULL);
		}
	}

	/* This should be UPDATE_TYPE_FAST if nothing has changed. */
	return update_type;
}

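/*
 * Classify a scaling update: anything with clock or bandwidth implications
 * is FULL, a pure position change is MED, otherwise FAST. The relevant
 * update flags are raised on the surface as a side effect.
 */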
static enum surface_update_type get_scaling_info_update_type(
		const struct dc_surface_update *u)
{
	union surface_update_flags *update_flags = &u->surface->update_flags;

	if (!u->scaling_info)
		return UPDATE_TYPE_FAST;

	if (u->scaling_info->clip_rect.width != u->surface->clip_rect.width
			|| u->scaling_info->clip_rect.height != u->surface->clip_rect.height
			|| u->scaling_info->dst_rect.width != u->surface->dst_rect.width
			|| u->scaling_info->dst_rect.height != u->surface->dst_rect.height
			|| u->scaling_info->scaling_quality.integer_scaling !=
				u->surface->scaling_quality.integer_scaling) {
		update_flags->bits.scaling_change = 1;

		if ((u->scaling_info->dst_rect.width < u->surface->dst_rect.width
			|| u->scaling_info->dst_rect.height < u->surface->dst_rect.height)
				&& (u->scaling_info->dst_rect.width < u->surface->src_rect.width
					|| u->scaling_info->dst_rect.height < u->surface->src_rect.height))
			/* Making the dst rect smaller requires a bandwidth change */
			update_flags->bits.bandwidth_change = 1;
	}

	if (u->scaling_info->src_rect.width != u->surface->src_rect.width
		|| u->scaling_info->src_rect.height != u->surface->src_rect.height) {

		update_flags->bits.scaling_change = 1;
		if (u->scaling_info->src_rect.width > u->surface->src_rect.width
				|| u->scaling_info->src_rect.height > u->surface->src_rect.height)
			/* Making the src rect bigger requires a clock (bandwidth) change */
			update_flags->bits.clock_change = 1;
	}

	if (u->scaling_info->src_rect.x != u->surface->src_rect.x
			|| u->scaling_info->src_rect.y != u->surface->src_rect.y
			|| u->scaling_info->clip_rect.x != u->surface->clip_rect.x
			|| u->scaling_info->clip_rect.y != u->surface->clip_rect.y
			|| u->scaling_info->dst_rect.x != u->surface->dst_rect.x
			|| u->scaling_info->dst_rect.y != u->surface->dst_rect.y)
		update_flags->bits.position_change = 1;

	if (update_flags->bits.clock_change
			|| update_flags->bits.bandwidth_change
			|| update_flags->bits.scaling_change)
		return UPDATE_TYPE_FULL;

	if (update_flags->bits.position_change)
		return UPDATE_TYPE_MED;

	return UPDATE_TYPE_FAST;
}

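/*
 * Determine the update type for a single surface update. A surface that is
 * not in the current context (or has force_full_update set) is always FULL
 * with every flag raised; otherwise the plane-info and scaling-info
 * classifications are combined with the per-field checks below.
 */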
static enum surface_update_type det_surface_update(const struct dc *dc,
		const struct dc_surface_update *u)
{
	const struct dc_state *context = dc->current_state;
	enum surface_update_type type;
	enum surface_update_type overall_type = UPDATE_TYPE_FAST;
	union surface_update_flags *update_flags = &u->surface->update_flags;

	if (u->flip_addr)
		update_flags->bits.addr_update = 1;

	if (!is_surface_in_context(context, u->surface) || u->surface->force_full_update) {
		update_flags->raw = 0xFFFFFFFF;
		return UPDATE_TYPE_FULL;
	}

	update_flags->raw = 0; // Reset all flags

	type = get_plane_info_update_type(u);
	elevate_update_type(&overall_type, type);

	type = get_scaling_info_update_type(u);
	elevate_update_type(&overall_type, type);

	if (u->flip_addr)
		update_flags->bits.addr_update = 1;

	if (u->in_transfer_func)
		update_flags->bits.in_transfer_func_change = 1;

	if (u->input_csc_color_matrix)
		update_flags->bits.input_csc_change = 1;

	if (u->coeff_reduction_factor)
		update_flags->bits.coeff_reduction_change = 1;

	if (u->gamut_remap_matrix)
		update_flags->bits.gamut_remap_change = 1;

	if (u->gamma) {
		enum surface_pixel_format format = SURFACE_PIXEL_FORMAT_GRPH_BEGIN;

		if (u->plane_info)
			format = u->plane_info->format;
		else if (u->surface)
			format = u->surface->format;

		if (dce_use_lut(format))
			update_flags->bits.gamma_change = 1;
	}

	if (u->lut3d_func || u->func_shaper)
		update_flags->bits.lut_3d = 1;

	if (u->hdr_mult.value && u->hdr_mult.value != u->surface->hdr_mult.value) {
		update_flags->bits.hdr_mult = 1;
		elevate_update_type(&overall_type, UPDATE_TYPE_MED);
	}

	if (update_flags->bits.in_transfer_func_change) {
		type = UPDATE_TYPE_MED;
		elevate_update_type(&overall_type, type);
	}

	if (update_flags->bits.input_csc_change
			|| update_flags->bits.coeff_reduction_change
			|| update_flags->bits.lut_3d
			|| update_flags->bits.gamma_change
			|| update_flags->bits.gamut_remap_change) {
		type = UPDATE_TYPE_FULL;
		elevate_update_type(&overall_type, type);
	}

	return overall_type;
}

static enum surface_update_type check_update_surfaces_for_stream(
		struct dc *dc,
		struct dc_surface_update *updates,
		int surface_count,
		struct dc_stream_update *stream_update,
		const struct dc_stream_status *stream_status)
{
	int i;
	enum surface_update_type overall_type = UPDATE_TYPE_FAST;

	if (dc->idle_optimizations_allowed)
		overall_type = UPDATE_TYPE_FULL;

	if (stream_status == NULL || stream_status->plane_count != surface_count)
		overall_type = UPDATE_TYPE_FULL;

	if (stream_update && stream_update->pending_test_pattern)
		overall_type = UPDATE_TYPE_FULL;

	/* some stream updates require a passive update */
	if (stream_update) {
		union stream_update_flags *su_flags = &stream_update->stream->update_flags;

		if ((stream_update->src.height != 0 && stream_update->src.width != 0) ||
			(stream_update->dst.height != 0 && stream_update->dst.width != 0) ||
			stream_update->integer_scaling_update)
			su_flags->bits.scaling = 1;

		if (stream_update->out_transfer_func)
			su_flags->bits.out_tf = 1;

		if (stream_update->abm_level)
			su_flags->bits.abm_level = 1;

		if (stream_update->dpms_off)
			su_flags->bits.dpms_off = 1;

		if (stream_update->gamut_remap)
			su_flags->bits.gamut_remap = 1;

		if (stream_update->wb_update)
			su_flags->bits.wb_update = 1;

		if (stream_update->dsc_config)
			su_flags->bits.dsc_changed = 1;

		if (stream_update->mst_bw_update)
			su_flags->bits.mst_bw = 1;

		if (stream_update->crtc_timing_adjust && dc_extended_blank_supported(dc))
			su_flags->bits.crtc_timing_adjust = 1;

		if (su_flags->raw != 0)
			overall_type = UPDATE_TYPE_FULL;

		/* Note: out_csc is flagged after the raw check above, so it
		 * does not force a full update on its own.
		 */
		if (stream_update->output_csc_transform || stream_update->output_color_space)
			su_flags->bits.out_csc = 1;
	}

	for (i = 0; i < surface_count; i++) {
		enum surface_update_type type =
				det_surface_update(dc, &updates[i]);

		elevate_update_type(&overall_type, type);
	}

	return overall_type;
}

static bool dc_check_is_fullscreen_video(struct rect src, struct rect clip_rect)
{
	int view_height, view_width, clip_x, clip_y, clip_width, clip_height;

	view_height = src.height;
	view_width = src.width;

	clip_x = clip_rect.x;
	clip_y = clip_rect.y;

	clip_width = clip_rect.width;
	clip_height = clip_rect.height;

	/* check for centered video accounting for off-by-1 scaling truncation */
	if ((view_height - clip_y - clip_height <= clip_y + 1) &&
			(view_width - clip_x - clip_width <= clip_x + 1) &&
			(view_height - clip_y - clip_height >= clip_y - 1) &&
			(view_width - clip_x - clip_width >= clip_x - 1)) {

		/* when the OS scales up/down to letterbox, it may end up
		 * with a few blank pixels on the border due to truncation.
		 * Add an offset margin to account for this.
		 */
		if (clip_x <= 4 || clip_y <= 4)
			return true;
	}

	return false;
}

static enum surface_update_type check_boundary_crossing_for_windowed_mpo_with_odm(struct dc *dc,
		struct dc_surface_update *srf_updates, int surface_count,
		enum surface_update_type update_type)
{
	enum surface_update_type new_update_type = update_type;
	int i, j;
	struct pipe_ctx *pipe = NULL;
	struct dc_stream_state *stream = NULL;

	/* Check that we are in windowed MPO with ODM
	 * - look for the MPO pipe by scanning pipes for the first pipe matching
	 *   a surface that has moved (position change)
	 * - the MPO pipe will have a top pipe
	 * - check that the top pipe has an ODM pointer
	 */
	if ((surface_count > 1) && dc->config.enable_windowed_mpo_odm) {
		for (i = 0; i < surface_count; i++) {
			if (srf_updates[i].surface && srf_updates[i].scaling_info
					&& srf_updates[i].surface->update_flags.bits.position_change) {

				for (j = 0; j < dc->res_pool->pipe_count; j++) {
					if (srf_updates[i].surface == dc->current_state->res_ctx.pipe_ctx[j].plane_state) {
						pipe = &dc->current_state->res_ctx.pipe_ctx[j];
						stream = pipe->stream;
						break;
					}
				}

				if (pipe && pipe->top_pipe && (get_num_odm_splits(pipe->top_pipe) > 0) && stream
						&& !dc_check_is_fullscreen_video(stream->src, srf_updates[i].scaling_info->clip_rect)) {
					struct rect old_clip_rect, new_clip_rect;
					bool old_clip_rect_left, old_clip_rect_right, old_clip_rect_middle;
					bool new_clip_rect_left, new_clip_rect_right, new_clip_rect_middle;

					old_clip_rect = srf_updates[i].surface->clip_rect;
					new_clip_rect = srf_updates[i].scaling_info->clip_rect;

					old_clip_rect_left = ((old_clip_rect.x + old_clip_rect.width) <= (stream->src.x + (stream->src.width/2)));
					old_clip_rect_right = (old_clip_rect.x >= (stream->src.x + (stream->src.width/2)));
					old_clip_rect_middle = !old_clip_rect_left && !old_clip_rect_right;

					new_clip_rect_left = ((new_clip_rect.x + new_clip_rect.width) <= (stream->src.x + (stream->src.width/2)));
					new_clip_rect_right = (new_clip_rect.x >= (stream->src.x + (stream->src.width/2)));
					new_clip_rect_middle = !new_clip_rect_left && !new_clip_rect_right;

					if (old_clip_rect_left && new_clip_rect_middle)
						new_update_type = UPDATE_TYPE_FULL;
					else if (old_clip_rect_middle && new_clip_rect_right)
						new_update_type = UPDATE_TYPE_FULL;
					else if (old_clip_rect_right && new_clip_rect_middle)
						new_update_type = UPDATE_TYPE_FULL;
					else if (old_clip_rect_middle && new_clip_rect_left)
						new_update_type = UPDATE_TYPE_FULL;
				}
			}
		}
	}
	return new_update_type;
}

/*
 * dc_check_update_surfaces_for_stream() - Determine update type (fast, med, or full)
 *
 * See :c:type:`enum surface_update_type <surface_update_type>` for an explanation of update types
 */
enum surface_update_type dc_check_update_surfaces_for_stream(
		struct dc *dc,
		struct dc_surface_update *updates,
		int surface_count,
		struct dc_stream_update *stream_update,
		const struct dc_stream_status *stream_status)
{
	int i;
	enum surface_update_type type;

	if (stream_update)
		stream_update->stream->update_flags.raw = 0;
	for (i = 0; i < surface_count; i++)
		updates[i].surface->update_flags.raw = 0;

	type = check_update_surfaces_for_stream(dc, updates, surface_count, stream_update, stream_status);
	if (type == UPDATE_TYPE_FULL) {
		if (stream_update) {
			uint32_t dsc_changed = stream_update->stream->update_flags.bits.dsc_changed;

			stream_update->stream->update_flags.raw = 0xFFFFFFFF;
			stream_update->stream->update_flags.bits.dsc_changed = dsc_changed;
		}
		for (i = 0; i < surface_count; i++)
			updates[i].surface->update_flags.raw = 0xFFFFFFFF;
	}

	if (type == UPDATE_TYPE_MED)
		type = check_boundary_crossing_for_windowed_mpo_with_odm(dc,
				updates, surface_count, type);

	if (type == UPDATE_TYPE_FAST) {
		// If there's an available clock comparator, we use that.
		if (dc->clk_mgr->funcs->are_clock_states_equal) {
			if (!dc->clk_mgr->funcs->are_clock_states_equal(&dc->clk_mgr->clks, &dc->current_state->bw_ctx.bw.dcn.clk))
				dc->optimized_required = true;
		// Else we fall back to a memory compare.
		} else if (memcmp(&dc->current_state->bw_ctx.bw.dcn.clk, &dc->clk_mgr->clks, offsetof(struct dc_clocks, prev_p_state_change_support)) != 0) {
			dc->optimized_required = true;
		}

		dc->optimized_required |= dc->wm_optimized_required;
	}

	return type;
}

static struct dc_stream_status *stream_get_status(
	struct dc_state *ctx,
	struct dc_stream_state *stream)
{
	uint8_t i;

	for (i = 0; i < ctx->stream_count; i++) {
		if (stream == ctx->streams[i])
			return &ctx->stream_status[i];
	}

	return NULL;
}

static const enum surface_update_type update_surface_trace_level = UPDATE_TYPE_FULL;

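/*
 * Fold a dc_surface_update into the plane state, group by group; only
 * non-NULL members of the update are applied. Flip timestamps also feed
 * the surface's update-time ring buffer.
 */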
2591 static void copy_surface_update_to_plane(
2592                 struct dc_plane_state *surface,
2593                 struct dc_surface_update *srf_update)
2594 {
2595         if (srf_update->flip_addr) {
2596                 surface->address = srf_update->flip_addr->address;
2597                 surface->flip_immediate =
2598                         srf_update->flip_addr->flip_immediate;
2599                 surface->time.time_elapsed_in_us[surface->time.index] =
2600                         srf_update->flip_addr->flip_timestamp_in_us -
2601                                 surface->time.prev_update_time_in_us;
2602                 surface->time.prev_update_time_in_us =
2603                         srf_update->flip_addr->flip_timestamp_in_us;
2604                 surface->time.index++;
2605                 if (surface->time.index >= DC_PLANE_UPDATE_TIMES_MAX)
2606                         surface->time.index = 0;
2607
2608                 surface->triplebuffer_flips = srf_update->flip_addr->triplebuffer_flips;
2609         }
2610
2611         if (srf_update->scaling_info) {
2612                 surface->scaling_quality =
2613                                 srf_update->scaling_info->scaling_quality;
2614                 surface->dst_rect =
2615                                 srf_update->scaling_info->dst_rect;
2616                 surface->src_rect =
2617                                 srf_update->scaling_info->src_rect;
2618                 surface->clip_rect =
2619                                 srf_update->scaling_info->clip_rect;
2620         }
2621
2622         if (srf_update->plane_info) {
2623                 surface->color_space =
2624                                 srf_update->plane_info->color_space;
2625                 surface->format =
2626                                 srf_update->plane_info->format;
2627                 surface->plane_size =
2628                                 srf_update->plane_info->plane_size;
2629                 surface->rotation =
2630                                 srf_update->plane_info->rotation;
2631                 surface->horizontal_mirror =
2632                                 srf_update->plane_info->horizontal_mirror;
2633                 surface->stereo_format =
2634                                 srf_update->plane_info->stereo_format;
2635                 surface->tiling_info =
2636                                 srf_update->plane_info->tiling_info;
2637                 surface->visible =
2638                                 srf_update->plane_info->visible;
2639                 surface->per_pixel_alpha =
2640                                 srf_update->plane_info->per_pixel_alpha;
2641                 surface->global_alpha =
2642                                 srf_update->plane_info->global_alpha;
2643                 surface->global_alpha_value =
2644                                 srf_update->plane_info->global_alpha_value;
2645                 surface->dcc =
2646                                 srf_update->plane_info->dcc;
2647                 surface->layer_index =
2648                                 srf_update->plane_info->layer_index;
2649         }
2650
2651         if (srf_update->gamma &&
2652                         (surface->gamma_correction !=
2653                                         srf_update->gamma)) {
2654                 memcpy(&surface->gamma_correction->entries,
2655                         &srf_update->gamma->entries,
2656                         sizeof(struct dc_gamma_entries));
2657                 surface->gamma_correction->is_identity =
2658                         srf_update->gamma->is_identity;
2659                 surface->gamma_correction->num_entries =
2660                         srf_update->gamma->num_entries;
2661                 surface->gamma_correction->type =
2662                         srf_update->gamma->type;
2663         }
2664
2665         if (srf_update->in_transfer_func &&
2666                         (surface->in_transfer_func !=
2667                                 srf_update->in_transfer_func)) {
2668                 surface->in_transfer_func->sdr_ref_white_level =
2669                         srf_update->in_transfer_func->sdr_ref_white_level;
2670                 surface->in_transfer_func->tf =
2671                         srf_update->in_transfer_func->tf;
2672                 surface->in_transfer_func->type =
2673                         srf_update->in_transfer_func->type;
2674                 memcpy(&surface->in_transfer_func->tf_pts,
2675                         &srf_update->in_transfer_func->tf_pts,
2676                         sizeof(struct dc_transfer_func_distributed_points));
2677         }
2678
2679         if (srf_update->func_shaper &&
2680                         (surface->in_shaper_func !=
2681                         srf_update->func_shaper))
2682                 memcpy(surface->in_shaper_func, srf_update->func_shaper,
2683                 sizeof(*surface->in_shaper_func));
2684
2685         if (srf_update->lut3d_func &&
2686                         (surface->lut3d_func !=
2687                         srf_update->lut3d_func))
2688                 memcpy(surface->lut3d_func, srf_update->lut3d_func,
2689                 sizeof(*surface->lut3d_func));
2690
2691         if (srf_update->hdr_mult.value)
2692                 surface->hdr_mult =
2693                                 srf_update->hdr_mult;
2694
2695         if (srf_update->blend_tf &&
2696                         (surface->blend_tf !=
2697                         srf_update->blend_tf))
2698                 memcpy(surface->blend_tf, srf_update->blend_tf,
2699                 sizeof(*surface->blend_tf));
2700
2701         if (srf_update->input_csc_color_matrix)
2702                 surface->input_csc_color_matrix =
2703                         *srf_update->input_csc_color_matrix;
2704
2705         if (srf_update->coeff_reduction_factor)
2706                 surface->coeff_reduction_factor =
2707                         *srf_update->coeff_reduction_factor;
2708
2709         if (srf_update->gamut_remap_matrix)
2710                 surface->gamut_remap_matrix =
2711                         *srf_update->gamut_remap_matrix;
2712 }
2713
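/*
 * Illustrative sketch (editor's note, not part of the driver): the
 * copy-if-present pattern in copy_surface_update_to_plane() means a
 * caller fills in only the sub-structs it wants to change.  A
 * hypothetical caller updating just the global alpha of a plane might
 * do something like:
 *
 *	struct dc_plane_info plane_info = { 0 };	// would be pre-filled from the plane
 *	struct dc_surface_update upd = { 0 };
 *
 *	plane_info.global_alpha = true;
 *	plane_info.global_alpha_value = 0x80;
 *	upd.surface = plane_state;
 *	upd.plane_info = &plane_info;
 *	dc_update_planes_and_stream(dc, &upd, 1, stream, NULL);
 *
 * Since plane_info is copied field-by-field once it is non-NULL, the
 * other members (rotation, tiling_info, dcc, ...) would normally be
 * seeded from the current plane state first.
 */
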
2714 static void copy_stream_update_to_stream(struct dc *dc,
2715                                          struct dc_state *context,
2716                                          struct dc_stream_state *stream,
2717                                          struct dc_stream_update *update)
2718 {
2719         struct dc_context *dc_ctx = dc->ctx;
2720
2721         if (update == NULL || stream == NULL)
2722                 return;
2723
2724         if (update->src.height && update->src.width)
2725                 stream->src = update->src;
2726
2727         if (update->dst.height && update->dst.width)
2728                 stream->dst = update->dst;
2729
2730         if (update->out_transfer_func &&
2731             stream->out_transfer_func != update->out_transfer_func) {
2732                 stream->out_transfer_func->sdr_ref_white_level =
2733                         update->out_transfer_func->sdr_ref_white_level;
2734                 stream->out_transfer_func->tf = update->out_transfer_func->tf;
2735                 stream->out_transfer_func->type =
2736                         update->out_transfer_func->type;
2737                 memcpy(&stream->out_transfer_func->tf_pts,
2738                        &update->out_transfer_func->tf_pts,
2739                        sizeof(struct dc_transfer_func_distributed_points));
2740         }
2741
2742         if (update->hdr_static_metadata)
2743                 stream->hdr_static_metadata = *update->hdr_static_metadata;
2744
2745         if (update->abm_level)
2746                 stream->abm_level = *update->abm_level;
2747
2748         if (update->periodic_interrupt0)
2749                 stream->periodic_interrupt0 = *update->periodic_interrupt0;
2750
2751         if (update->periodic_interrupt1)
2752                 stream->periodic_interrupt1 = *update->periodic_interrupt1;
2753
2754         if (update->gamut_remap)
2755                 stream->gamut_remap_matrix = *update->gamut_remap;
2756
2757         /* Note: this being updated after mode set is currently not a use
2758          * case; however, if it arises, the OCSC would need to be
2759          * reprogrammed at a minimum
2760          */
2761         if (update->output_color_space)
2762                 stream->output_color_space = *update->output_color_space;
2763
2764         if (update->output_csc_transform)
2765                 stream->csc_color_matrix = *update->output_csc_transform;
2766
2767         if (update->vrr_infopacket)
2768                 stream->vrr_infopacket = *update->vrr_infopacket;
2769
2770         if (update->allow_freesync)
2771                 stream->allow_freesync = *update->allow_freesync;
2772
2773         if (update->vrr_active_variable)
2774                 stream->vrr_active_variable = *update->vrr_active_variable;
2775
2776         if (update->crtc_timing_adjust)
2777                 stream->adjust = *update->crtc_timing_adjust;
2778
2779         if (update->dpms_off)
2780                 stream->dpms_off = *update->dpms_off;
2781
2782         if (update->hfvsif_infopacket)
2783                 stream->hfvsif_infopacket = *update->hfvsif_infopacket;
2784
2785         if (update->vtem_infopacket)
2786                 stream->vtem_infopacket = *update->vtem_infopacket;
2787
2788         if (update->vsc_infopacket)
2789                 stream->vsc_infopacket = *update->vsc_infopacket;
2790
2791         if (update->vsp_infopacket)
2792                 stream->vsp_infopacket = *update->vsp_infopacket;
2793
2794         if (update->dither_option)
2795                 stream->dither_option = *update->dither_option;
2796
2797         if (update->pending_test_pattern)
2798                 stream->test_pattern = *update->pending_test_pattern;
2799         /* update current stream with writeback info */
2800         if (update->wb_update) {
2801                 int i;
2802
2803                 stream->num_wb_info = update->wb_update->num_wb_info;
2804                 ASSERT(stream->num_wb_info <= MAX_DWB_PIPES);
2805                 for (i = 0; i < stream->num_wb_info; i++)
2806                         stream->writeback_info[i] =
2807                                 update->wb_update->writeback_info[i];
2808         }
2809         if (update->dsc_config) {
2810                 struct dc_dsc_config old_dsc_cfg = stream->timing.dsc_cfg;
2811                 uint32_t old_dsc_enabled = stream->timing.flags.DSC;
2812                 uint32_t enable_dsc = (update->dsc_config->num_slices_h != 0 &&
2813                                        update->dsc_config->num_slices_v != 0);
2814
2815                 /* Use a temporary context for validating the new DSC config */
2816                 struct dc_state *dsc_validate_context = dc_create_state(dc);
2817
2818                 if (dsc_validate_context) {
2819                         dc_resource_state_copy_construct(dc->current_state, dsc_validate_context);
2820
2821                         stream->timing.dsc_cfg = *update->dsc_config;
2822                         stream->timing.flags.DSC = enable_dsc;
2823                         if (!dc->res_pool->funcs->validate_bandwidth(dc, dsc_validate_context, true)) {
2824                                 stream->timing.dsc_cfg = old_dsc_cfg;
2825                                 stream->timing.flags.DSC = old_dsc_enabled;
2826                                 update->dsc_config = NULL;
2827                         }
2828
2829                         dc_release_state(dsc_validate_context);
2830                 } else {
2831                         DC_ERROR("Failed to allocate new validate context for DSC change\n");
2832                         update->dsc_config = NULL;
2833                 }
2834         }
2835 }
2836
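/*
 * Illustrative sketch (editor's note, not part of the driver): stream
 * updates follow the same sparse convention as surface updates, with
 * scalar fields passed by pointer so NULL means "leave unchanged".
 * A DPMS-off request could look roughly like:
 *
 *	struct dc_stream_update supd = { 0 };
 *	bool dpms_off = true;
 *
 *	supd.dpms_off = &dpms_off;
 *	dc_update_planes_and_stream(dc, NULL, 0, stream, &supd);
 *
 * The dsc_config branch above is the one exception to plain copying:
 * it validates the new config against a throwaway context and clears
 * update->dsc_config on failure, so later stages treat the DSC change
 * as rejected.
 */
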
2837 void dc_reset_state(struct dc *dc, struct dc_state *context)
2838 {
2839         dc_resource_state_destruct(context);
2840
2841         /* clear the structure, but don't reset the reference count */
2842         memset(context, 0, offsetof(struct dc_state, refcount));
2843
2844         init_state(dc, context);
2845 }
2846
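/*
 * Editor's note on dc_reset_state() above: memset()ing only up to
 * offsetof(struct dc_state, refcount) wipes every member laid out
 * before the kref while leaving refcount (and anything after it)
 * intact, e.g. with a hypothetical struct:
 *
 *	struct obj { int payload; struct kref refcount; };
 *	memset(o, 0, offsetof(struct obj, refcount));	// payload cleared, kref kept
 */
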
2847 static bool update_planes_and_stream_state(struct dc *dc,
2848                 struct dc_surface_update *srf_updates, int surface_count,
2849                 struct dc_stream_state *stream,
2850                 struct dc_stream_update *stream_update,
2851                 enum surface_update_type *new_update_type,
2852                 struct dc_state **new_context)
2853 {
2854         struct dc_state *context;
2855         int i, j;
2856         enum surface_update_type update_type;
2857         const struct dc_stream_status *stream_status;
2858         struct dc_context *dc_ctx = dc->ctx;
2859
2860         stream_status = dc_stream_get_status(stream);
2861
2862         if (!stream_status) {
2863                 if (surface_count) /* Only an error condition if surface_count is non-zero */
2864                         ASSERT(false);
2865
2866                 return false; /* Cannot commit surface to stream that is not committed */
2867         }
2868
2869         context = dc->current_state;
2870
2871         update_type = dc_check_update_surfaces_for_stream(
2872                         dc, srf_updates, surface_count, stream_update, stream_status);
2873
2874         /* update current stream with the new updates */
2875         copy_stream_update_to_stream(dc, context, stream, stream_update);
2876
2877         /* do not perform surface update if surface has invalid dimensions
2878          * (all zero) and no scaling_info is provided
2879          */
2880         if (surface_count > 0) {
2881                 for (i = 0; i < surface_count; i++) {
2882                         if ((srf_updates[i].surface->src_rect.width == 0 ||
2883                                  srf_updates[i].surface->src_rect.height == 0 ||
2884                                  srf_updates[i].surface->dst_rect.width == 0 ||
2885                                  srf_updates[i].surface->dst_rect.height == 0) &&
2886                                 (!srf_updates[i].scaling_info ||
2887                                   srf_updates[i].scaling_info->src_rect.width == 0 ||
2888                                   srf_updates[i].scaling_info->src_rect.height == 0 ||
2889                                   srf_updates[i].scaling_info->dst_rect.width == 0 ||
2890                                   srf_updates[i].scaling_info->dst_rect.height == 0)) {
2891                                 DC_ERROR("Invalid src/dst rects in surface update!\n");
2892                                 return false;
2893                         }
2894                 }
2895         }
2896
2897         if (update_type >= update_surface_trace_level)
2898                 update_surface_trace(dc, srf_updates, surface_count);
2899
2900         if (update_type >= UPDATE_TYPE_FULL) {
2901                 struct dc_plane_state *new_planes[MAX_SURFACES] = {0};
2902
2903                 for (i = 0; i < surface_count; i++)
2904                         new_planes[i] = srf_updates[i].surface;
2905
2906                 /* initialize scratch memory for building context */
2907                 context = dc_create_state(dc);
2908                 if (context == NULL) {
2909                         DC_ERROR("Failed to allocate new validate context!\n");
2910                         return false;
2911                 }
2912
2913                 dc_resource_state_copy_construct(
2914                                 dc->current_state, context);
2915
2916                 /* remove the old surfaces from the context */
2917                 if (!dc_rem_all_planes_for_stream(dc, stream, context)) {
2919                         BREAK_TO_DEBUGGER();
2920                         goto fail;
2921                 }
2922
2923                 /* add the new surfaces to the context */
2924                 if (!dc_add_all_planes_for_stream(dc, stream, new_planes, surface_count, context)) {
2926                         BREAK_TO_DEBUGGER();
2927                         goto fail;
2928                 }
2929         }
2930
2931         /* save update parameters into surface */
2932         for (i = 0; i < surface_count; i++) {
2933                 struct dc_plane_state *surface = srf_updates[i].surface;
2934
2935                 copy_surface_update_to_plane(surface, &srf_updates[i]);
2936
2937                 if (update_type >= UPDATE_TYPE_MED) {
2938                         for (j = 0; j < dc->res_pool->pipe_count; j++) {
2939                                 struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[j];
2940
2941                                 if (pipe_ctx->plane_state != surface)
2942                                         continue;
2943
2944                                 resource_build_scaling_params(pipe_ctx);
2945                         }
2946                 }
2947         }
2948
2949         if (update_type == UPDATE_TYPE_FULL) {
2950                 if (!dc->res_pool->funcs->validate_bandwidth(dc, context, false)) {
2951                         BREAK_TO_DEBUGGER();
2952                         goto fail;
2953                 }
2954         }
2955
2956         *new_context = context;
2957         *new_update_type = update_type;
2958
2959         return true;
2960
2961 fail:
2962         dc_release_state(context);
2963
2964         return false;
2965
2966 }
2967
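/*
 * Editor's summary of update_planes_and_stream_state() above, derived
 * from the code: stream updates are folded into the stream first,
 * surface rects are sanity-checked, and only a FULL update builds a
 * fresh context (copy the current state, remove the old planes, add
 * the new ones).  Surface updates are then copied into the planes,
 * scaling params are rebuilt for MED and above, and bandwidth is
 * re-validated for FULL.  On success, *new_context is either
 * dc->current_state (fast path) or the newly built context.
 */
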
2968 static void commit_planes_do_stream_update(struct dc *dc,
2969                 struct dc_stream_state *stream,
2970                 struct dc_stream_update *stream_update,
2971                 enum surface_update_type update_type,
2972                 struct dc_state *context)
2973 {
2974         int j;
2975
2976         // Stream updates
2977         for (j = 0; j < dc->res_pool->pipe_count; j++) {
2978                 struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[j];
2979
2980                 if (!pipe_ctx->top_pipe && !pipe_ctx->prev_odm_pipe && pipe_ctx->stream == stream) {
2981
2982                         if (stream_update->periodic_interrupt0 &&
2983                                         dc->hwss.setup_periodic_interrupt)
2984                                 dc->hwss.setup_periodic_interrupt(dc, pipe_ctx, VLINE0);
2985
2986                         if (stream_update->periodic_interrupt1 &&
2987                                         dc->hwss.setup_periodic_interrupt)
2988                                 dc->hwss.setup_periodic_interrupt(dc, pipe_ctx, VLINE1);
2989
2990                         if ((stream_update->hdr_static_metadata && !stream->use_dynamic_meta) ||
2991                                         stream_update->vrr_infopacket ||
2992                                         stream_update->vsc_infopacket ||
2993                                         stream_update->vsp_infopacket ||
2994                                         stream_update->hfvsif_infopacket ||
2995                                         stream_update->vtem_infopacket) {
2996                                 resource_build_info_frame(pipe_ctx);
2997                                 dc->hwss.update_info_frame(pipe_ctx);
2998
2999                                 if (dc_is_dp_signal(pipe_ctx->stream->signal))
3000                                         dp_source_sequence_trace(pipe_ctx->stream->link, DPCD_SOURCE_SEQ_AFTER_UPDATE_INFO_FRAME);
3001                         }
3002
3003                         if (stream_update->hdr_static_metadata &&
3004                                         stream->use_dynamic_meta &&
3005                                         dc->hwss.set_dmdata_attributes &&
3006                                         pipe_ctx->stream->dmdata_address.quad_part != 0)
3007                                 dc->hwss.set_dmdata_attributes(pipe_ctx);
3008
3009                         if (stream_update->gamut_remap)
3010                                 dc_stream_set_gamut_remap(dc, stream);
3011
3012                         if (stream_update->output_csc_transform)
3013                                 dc_stream_program_csc_matrix(dc, stream);
3014
3015                         if (stream_update->dither_option) {
3016                                 struct pipe_ctx *odm_pipe = pipe_ctx->next_odm_pipe;
3017                                 resource_build_bit_depth_reduction_params(pipe_ctx->stream,
3018                                                                         &pipe_ctx->stream->bit_depth_params);
3019                                 pipe_ctx->stream_res.opp->funcs->opp_program_fmt(pipe_ctx->stream_res.opp,
3020                                                 &stream->bit_depth_params,
3021                                                 &stream->clamping);
3022                                 while (odm_pipe) {
3023                                         odm_pipe->stream_res.opp->funcs->opp_program_fmt(odm_pipe->stream_res.opp,
3024                                                         &stream->bit_depth_params,
3025                                                         &stream->clamping);
3026                                         odm_pipe = odm_pipe->next_odm_pipe;
3027                                 }
3028                         }
3029
3030                         /* Full front end update; everything below is skipped for fast updates */
3032                         if (update_type == UPDATE_TYPE_FAST)
3033                                 continue;
3034
3035                         if (stream_update->dsc_config)
3036                                 dp_update_dsc_config(pipe_ctx);
3037
3038                         if (stream_update->mst_bw_update) {
3039                                 if (stream_update->mst_bw_update->is_increase)
3040                                         dc_link_increase_mst_payload(pipe_ctx, stream_update->mst_bw_update->mst_stream_bw);
3041                                 else
3042                                         dc_link_reduce_mst_payload(pipe_ctx, stream_update->mst_bw_update->mst_stream_bw);
3043                         }
3044
3045                         if (stream_update->pending_test_pattern) {
3046                                 dc_link_dp_set_test_pattern(stream->link,
3047                                         stream->test_pattern.type,
3048                                         stream->test_pattern.color_space,
3049                                         stream->test_pattern.p_link_settings,
3050                                         stream->test_pattern.p_custom_pattern,
3051                                         stream->test_pattern.cust_pattern_size);
3052                         }
3053
3054                         if (stream_update->dpms_off) {
3055                                 if (*stream_update->dpms_off) {
3056                                         core_link_disable_stream(pipe_ctx);
3057                                         /* for DPMS, keep the acquired resources */
3058                                         if (pipe_ctx->stream_res.audio && !dc->debug.az_endpoint_mute_only)
3059                                                 pipe_ctx->stream_res.audio->funcs->az_disable(pipe_ctx->stream_res.audio);
3060
3061                                         dc->optimized_required = true;
3062
3063                                 } else {
3064                                         if (get_seamless_boot_stream_count(context) == 0)
3065                                                 dc->hwss.prepare_bandwidth(dc, dc->current_state);
3066
3067                                         core_link_enable_stream(dc->current_state, pipe_ctx);
3068                                 }
3069                         }
3070
3071                         if (stream_update->abm_level && pipe_ctx->stream_res.abm) {
3072                                 bool should_program_abm = true;
3073
3074                                 // if OTG funcs are defined, check if blanked before programming
3075                                 if (pipe_ctx->stream_res.tg->funcs->is_blanked)
3076                                         if (pipe_ctx->stream_res.tg->funcs->is_blanked(pipe_ctx->stream_res.tg))
3077                                                 should_program_abm = false;
3078
3079                                 if (should_program_abm) {
3080                                         if (*stream_update->abm_level == ABM_LEVEL_IMMEDIATE_DISABLE) {
3081                                                 dc->hwss.set_abm_immediate_disable(pipe_ctx);
3082                                         } else {
3083                                                 pipe_ctx->stream_res.abm->funcs->set_abm_level(
3084                                                         pipe_ctx->stream_res.abm, stream->abm_level);
3085                                         }
3086                                 }
3087                         }
3088                 }
3089         }
3090 }
3091
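/*
 * Editor's note on commit_planes_do_stream_update() above: the updates
 * handled before the "full front end update" check (periodic
 * interrupts, infoframes, gamut remap, output CSC, dither) are safe to
 * apply during a fast update; everything after it (DSC config, MST
 * payload bandwidth, test patterns, DPMS, ABM level) is applied only
 * for non-fast updates, since those paths can reprogram the front end
 * or the link.
 */
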
3092 static bool dc_dmub_should_send_dirty_rect_cmd(struct dc *dc, struct dc_stream_state *stream)
3093 {
3094         if (stream->link->psr_settings.psr_version == DC_PSR_VERSION_SU_1)
3095                 return true;
3096
3097         if (stream->link->psr_settings.psr_version == DC_PSR_VERSION_1 &&
3098             dc->debug.enable_sw_cntl_psr)
3099                 return true;
3100
3101         return false;
3102 }
3103
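/*
 * Editor's note: dirty rectangles are sent to DMUB only for PSR-SU
 * panels, or for PSR1 with sw-controlled PSR enabled in debug options
 * (see dc_dmub_should_send_dirty_rect_cmd() above), and are skipped
 * for immediate flips.  One command is queued and executed per pipe
 * driving the updated plane.
 */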
3104 void dc_dmub_update_dirty_rect(struct dc *dc,
3105                                int surface_count,
3106                                struct dc_stream_state *stream,
3107                                struct dc_surface_update *srf_updates,
3108                                struct dc_state *context)
3109 {
3110         union dmub_rb_cmd cmd;
3111         struct dc_context *dc_ctx = dc->ctx;
3112         struct dmub_cmd_update_dirty_rect_data *update_dirty_rect;
3113         unsigned int i, j;
3114         unsigned int panel_inst = 0;
3115
3116         if (!dc_dmub_should_send_dirty_rect_cmd(dc, stream))
3117                 return;
3118
3119         if (!dc_get_edp_link_panel_inst(dc, stream->link, &panel_inst))
3120                 return;
3121
3122         memset(&cmd, 0x0, sizeof(cmd));
3123         cmd.update_dirty_rect.header.type = DMUB_CMD__UPDATE_DIRTY_RECT;
3124         cmd.update_dirty_rect.header.sub_type = 0;
3125         cmd.update_dirty_rect.header.payload_bytes =
3126                 sizeof(cmd.update_dirty_rect) -
3127                 sizeof(cmd.update_dirty_rect.header);
3128         update_dirty_rect = &cmd.update_dirty_rect.update_dirty_rect_data;
3129         for (i = 0; i < surface_count; i++) {
3130                 struct dc_plane_state *plane_state = srf_updates[i].surface;
3131                 const struct dc_flip_addrs *flip_addr = srf_updates[i].flip_addr;
3132
3133                 if (!srf_updates[i].surface || !flip_addr)
3134                         continue;
3135                 /* Do not send in immediate flip mode */
3136                 if (srf_updates[i].surface->flip_immediate)
3137                         continue;
3138
3139                 update_dirty_rect->dirty_rect_count = flip_addr->dirty_rect_count;
3140                 memcpy(update_dirty_rect->src_dirty_rects, flip_addr->dirty_rects,
3141                                 sizeof(flip_addr->dirty_rects));
3142                 for (j = 0; j < dc->res_pool->pipe_count; j++) {
3143                         struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[j];
3144
3145                         if (pipe_ctx->stream != stream)
3146                                 continue;
3147                         if (pipe_ctx->plane_state != plane_state)
3148                                 continue;
3149
3150                         update_dirty_rect->panel_inst = panel_inst;
3151                         update_dirty_rect->pipe_idx = j;
3152                         dc_dmub_srv_cmd_queue(dc_ctx->dmub_srv, &cmd);
3153                         dc_dmub_srv_cmd_execute(dc_ctx->dmub_srv);
3154                 }
3155         }
3156 }
3157
3158 static void commit_planes_for_stream(struct dc *dc,
3159                 struct dc_surface_update *srf_updates,
3160                 int surface_count,
3161                 struct dc_stream_state *stream,
3162                 struct dc_stream_update *stream_update,
3163                 enum surface_update_type update_type,
3164                 struct dc_state *context)
3165 {
3166         int i, j;
3167         struct pipe_ctx *top_pipe_to_program = NULL;
3168         bool should_lock_all_pipes = (update_type != UPDATE_TYPE_FAST);
3169         bool subvp_prev_use = false;
3170
3171         // Once we apply the new SubVP context to hardware it won't be in
3172         // dc->current_state anymore, so we have to cache the old SubVP usage
3173         // before we apply the new context; subvp_prev_use is filled in from
3174         // the old context in the pipe scan below.
3175
3177         dc_z10_restore(dc);
3178
3179         if (get_seamless_boot_stream_count(context) > 0 && surface_count > 0) {
3180                 /* The seamless boot optimization flag keeps clocks and watermarks
3181                  * high until the first flip; after that, optimization is required
3182                  * to lower bandwidth. Note that UEFI is expected to light up only
3183                  * a single display on POST, therefore we only expect one stream
3184                  * with the seamless boot flag set.
3185                  */
3186                 if (stream->apply_seamless_boot_optimization) {
3187                         stream->apply_seamless_boot_optimization = false;
3188
3189                         if (get_seamless_boot_stream_count(context) == 0)
3190                                 dc->optimized_required = true;
3191                 }
3192         }
3193
3194         if (update_type == UPDATE_TYPE_FULL) {
3195                 dc_allow_idle_optimizations(dc, false);
3196
3197                 if (get_seamless_boot_stream_count(context) == 0)
3198                         dc->hwss.prepare_bandwidth(dc, context);
3199
3200                 context_clock_trace(dc, context);
3201         }
3202
3203         for (j = 0; j < dc->res_pool->pipe_count; j++) {
3204                 struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[j];
3205
3206                 if (!pipe_ctx->top_pipe &&
3207                         !pipe_ctx->prev_odm_pipe &&
3208                         pipe_ctx->stream &&
3209                         pipe_ctx->stream == stream) {
3210                         top_pipe_to_program = pipe_ctx;
3211                 }
3212         }
3213
3214         for (i = 0; i < dc->res_pool->pipe_count; i++) {
3215                 struct pipe_ctx *old_pipe = &dc->current_state->res_ctx.pipe_ctx[i];
3216
3217                 // Check old context for SubVP
3218                 subvp_prev_use |= (old_pipe->stream && old_pipe->stream->mall_stream_config.type == SUBVP_PHANTOM);
3219                 if (subvp_prev_use)
3220                         break;
3221         }
3222
3223         if (stream->test_pattern.type != DP_TEST_PATTERN_VIDEO_MODE) {
3224                 struct pipe_ctx *mpcc_pipe;
3225                 struct pipe_ctx *odm_pipe;
3226
3227                 for (mpcc_pipe = top_pipe_to_program; mpcc_pipe; mpcc_pipe = mpcc_pipe->bottom_pipe)
3228                         for (odm_pipe = mpcc_pipe; odm_pipe; odm_pipe = odm_pipe->next_odm_pipe)
3229                                 odm_pipe->ttu_regs.min_ttu_vblank = MAX_TTU;
3230         }
3231
3232         if ((update_type != UPDATE_TYPE_FAST) && stream->update_flags.bits.dsc_changed) {
3233                 if (top_pipe_to_program &&
3234                         top_pipe_to_program->stream_res.tg->funcs->lock_doublebuffer_enable) {
3235                         if (should_use_dmub_lock(stream->link)) {
3236                                 union dmub_hw_lock_flags hw_locks = { 0 };
3237                                 struct dmub_hw_lock_inst_flags inst_flags = { 0 };
3238
3239                                 hw_locks.bits.lock_dig = 1;
3240                                 inst_flags.dig_inst = top_pipe_to_program->stream_res.tg->inst;
3241
3242                                 dmub_hw_lock_mgr_cmd(dc->ctx->dmub_srv,
3243                                                         true,
3244                                                         &hw_locks,
3245                                                         &inst_flags);
3246                         } else
3247                                 top_pipe_to_program->stream_res.tg->funcs->lock_doublebuffer_enable(
3248                                                 top_pipe_to_program->stream_res.tg);
3249                 }
3250         }
3251
3252         if (should_lock_all_pipes && dc->hwss.interdependent_update_lock) {
3253                 if (dc->hwss.subvp_pipe_control_lock)
3254                         dc->hwss.subvp_pipe_control_lock(dc, context, true, should_lock_all_pipes, NULL, subvp_prev_use);
3255                 dc->hwss.interdependent_update_lock(dc, context, true);
3256
3257         } else {
3258                 if (dc->hwss.subvp_pipe_control_lock)
3259                         dc->hwss.subvp_pipe_control_lock(dc, context, true, should_lock_all_pipes, top_pipe_to_program, subvp_prev_use);
3260                 /* Lock the top pipe while updating plane addrs, since freesync requires
3261                  *  plane addr update event triggers to be synchronized.
3262                  *  top_pipe_to_program is expected to never be NULL
3263                  */
3264                 dc->hwss.pipe_control_lock(dc, top_pipe_to_program, true);
3265         }
3266
3267         if (update_type != UPDATE_TYPE_FAST) {
3268                 for (i = 0; i < dc->res_pool->pipe_count; i++) {
3269                         struct pipe_ctx *new_pipe = &context->res_ctx.pipe_ctx[i];
3270
3271                         if ((new_pipe->stream && new_pipe->stream->mall_stream_config.type == SUBVP_PHANTOM) ||
3272                                         subvp_prev_use) {
3273                                 // If old context or new context has phantom pipes, apply
3274                                 // the phantom timings now. We can't change the phantom
3275                                 // pipe configuration safely without driver acquiring
3276                                 // the DMCUB lock first.
3277                                 dc->hwss.apply_ctx_to_hw(dc, context);
3278                                 break;
3279                         }
3280                 }
3281         }
3282
3283         dc_dmub_update_dirty_rect(dc, surface_count, stream, srf_updates, context);
3284
3301         // Stream updates
3302         if (stream_update)
3303                 commit_planes_do_stream_update(dc, stream, stream_update, update_type, context);
3304
3305         if (surface_count == 0) {
3306                 /*
3307                  * In the case of turning off the screen, there is no need to
3308                  * program the front end a second time; just return after
3309                  * programming blank.
3310                  */
3310                 if (dc->hwss.apply_ctx_for_surface)
3311                         dc->hwss.apply_ctx_for_surface(dc, stream, 0, context);
3312                 if (dc->hwss.program_front_end_for_ctx)
3313                         dc->hwss.program_front_end_for_ctx(dc, context);
3314
3315                 if (update_type != UPDATE_TYPE_FAST)
3316                         if (dc->hwss.commit_subvp_config)
3317                                 dc->hwss.commit_subvp_config(dc, context);
3318
3319                 if (should_lock_all_pipes && dc->hwss.interdependent_update_lock) {
3320                         dc->hwss.interdependent_update_lock(dc, context, false);
3321                 } else {
3322                         dc->hwss.pipe_control_lock(dc, top_pipe_to_program, false);
3323                 }
3324                 dc->hwss.post_unlock_program_front_end(dc, context);
3325
3326                 /* Since phantom pipe programming is moved to post_unlock_program_front_end,
3327                  * move the SubVP lock to after the phantom pipes have been setup
3328                  */
3329                 if (dc->hwss.subvp_pipe_control_lock)
3330                         dc->hwss.subvp_pipe_control_lock(dc, context, false, should_lock_all_pipes, NULL, subvp_prev_use);
3336                 return;
3337         }
3338
3339         if (!IS_DIAG_DC(dc->ctx->dce_environment)) {
3340                 for (i = 0; i < surface_count; i++) {
3341                         struct dc_plane_state *plane_state = srf_updates[i].surface;
3342                         /* set logical flag for lock/unlock use */
3343                         for (j = 0; j < dc->res_pool->pipe_count; j++) {
3344                                 struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[j];
3345                                 if (!pipe_ctx->plane_state)
3346                                         continue;
3347                                 if (!should_update_pipe_for_plane(context, pipe_ctx, plane_state))
3348                                         continue;
3349                                 pipe_ctx->plane_state->triplebuffer_flips = false;
3350                                 if (update_type == UPDATE_TYPE_FAST &&
3351                                         dc->hwss.program_triplebuffer != NULL &&
3352                                         !pipe_ctx->plane_state->flip_immediate && dc->debug.enable_tri_buf) {
3353                                                 /* triple buffer for VUpdate only */
3354                                                 pipe_ctx->plane_state->triplebuffer_flips = true;
3355                                 }
3356                         }
3357                         if (update_type == UPDATE_TYPE_FULL) {
3358                                 /* force vsync flip when reconfiguring pipes to prevent underflow */
3359                                 plane_state->flip_immediate = false;
3360                         }
3361                 }
3362         }
3363
3364         // Update Type FULL, Surface updates
3365         for (j = 0; j < dc->res_pool->pipe_count; j++) {
3366                 struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[j];
3367
3368                 if (!pipe_ctx->top_pipe &&
3369                         !pipe_ctx->prev_odm_pipe &&
3370                         should_update_pipe_for_stream(context, pipe_ctx, stream)) {
3371                         struct dc_stream_status *stream_status = NULL;
3372
3373                         if (!pipe_ctx->plane_state)
3374                                 continue;
3375
3376                         /* Full front end update; skipped for fast updates */
3377                         if (update_type == UPDATE_TYPE_FAST)
3378                                 continue;
3379
3380                         ASSERT(!pipe_ctx->plane_state->triplebuffer_flips);
3381
3382                         if (dc->hwss.program_triplebuffer != NULL && dc->debug.enable_tri_buf) {
3383                                 /* turn off triple buffering for the full update */
3384                                 dc->hwss.program_triplebuffer(
3385                                         dc, pipe_ctx, pipe_ctx->plane_state->triplebuffer_flips);
3386                         }
3387                         stream_status =
3388                                 stream_get_status(context, pipe_ctx->stream);
3389
3390                         if (dc->hwss.apply_ctx_for_surface)
3391                                 dc->hwss.apply_ctx_for_surface(
3392                                         dc, pipe_ctx->stream, stream_status->plane_count, context);
3393                 }
3394         }
3395         if (dc->hwss.program_front_end_for_ctx && update_type != UPDATE_TYPE_FAST) {
3396                 dc->hwss.program_front_end_for_ctx(dc, context);
3397                 if (dc->debug.validate_dml_output) {
3398                         for (i = 0; i < dc->res_pool->pipe_count; i++) {
3399                                 struct pipe_ctx *cur_pipe = &context->res_ctx.pipe_ctx[i];
3400                                 if (cur_pipe->stream == NULL)
3401                                         continue;
3402
3403                                 cur_pipe->plane_res.hubp->funcs->validate_dml_output(
3404                                                 cur_pipe->plane_res.hubp, dc->ctx,
3405                                                 &context->res_ctx.pipe_ctx[i].rq_regs,
3406                                                 &context->res_ctx.pipe_ctx[i].dlg_regs,
3407                                                 &context->res_ctx.pipe_ctx[i].ttu_regs);
3408                         }
3409                 }
3410         }
3411
3412         // Update Type FAST, Surface updates
3413         if (update_type == UPDATE_TYPE_FAST) {
3414                 if (dc->hwss.set_flip_control_gsl)
3415                         for (i = 0; i < surface_count; i++) {
3416                                 struct dc_plane_state *plane_state = srf_updates[i].surface;
3417
3418                                 for (j = 0; j < dc->res_pool->pipe_count; j++) {
3419                                         struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[j];
3420
3421                                         if (!should_update_pipe_for_stream(context, pipe_ctx, stream))
3422                                                 continue;
3423
3424                                         if (!should_update_pipe_for_plane(context, pipe_ctx, plane_state))
3425                                                 continue;
3426
3427                                         // GSL has to be used for flip immediate
3428                                         dc->hwss.set_flip_control_gsl(pipe_ctx,
3429                                                         pipe_ctx->plane_state->flip_immediate);
3430                                 }
3431                         }
3432
3433                 /* Perform the requested updates */
3434                 for (i = 0; i < surface_count; i++) {
3435                         struct dc_plane_state *plane_state = srf_updates[i].surface;
3436
3437                         for (j = 0; j < dc->res_pool->pipe_count; j++) {
3438                                 struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[j];
3439
3440                                 if (!should_update_pipe_for_stream(context, pipe_ctx, stream))
3441                                         continue;
3442
3443                                 if (!should_update_pipe_for_plane(context, pipe_ctx, plane_state))
3444                                         continue;
3445
3446                                 /* program triple buffer after lock, based on flip type */
3447                                 if (dc->hwss.program_triplebuffer != NULL && dc->debug.enable_tri_buf) {
3448                                         /* only enable triple buffering for fast updates */
3449                                         dc->hwss.program_triplebuffer(
3450                                                 dc, pipe_ctx, pipe_ctx->plane_state->triplebuffer_flips);
3451                                 }
3452                                 if (pipe_ctx->plane_state->update_flags.bits.addr_update)
3453                                         dc->hwss.update_plane_addr(dc, pipe_ctx);
3454                         }
3455                 }
3456
3457         }
3458
3459         if (update_type != UPDATE_TYPE_FAST)
3460                 if (dc->hwss.commit_subvp_config)
3461                         dc->hwss.commit_subvp_config(dc, context);
3462
3463         if (should_lock_all_pipes && dc->hwss.interdependent_update_lock) {
3464                 dc->hwss.interdependent_update_lock(dc, context, false);
3465         } else {
3466                 dc->hwss.pipe_control_lock(dc, top_pipe_to_program, false);
3467         }
3468
3469         if ((update_type != UPDATE_TYPE_FAST) && stream->update_flags.bits.dsc_changed) {
3470                 if (top_pipe_to_program->stream_res.tg->funcs->lock_doublebuffer_enable) {
3471                         top_pipe_to_program->stream_res.tg->funcs->wait_for_state(
3472                                 top_pipe_to_program->stream_res.tg,
3473                                 CRTC_STATE_VACTIVE);
3474                         top_pipe_to_program->stream_res.tg->funcs->wait_for_state(
3475                                 top_pipe_to_program->stream_res.tg,
3476                                 CRTC_STATE_VBLANK);
3477                         top_pipe_to_program->stream_res.tg->funcs->wait_for_state(
3478                                 top_pipe_to_program->stream_res.tg,
3479                                 CRTC_STATE_VACTIVE);
3480
3481                         if (should_use_dmub_lock(stream->link)) {
3482                                 union dmub_hw_lock_flags hw_locks = { 0 };
3483                                 struct dmub_hw_lock_inst_flags inst_flags = { 0 };
3484
3485                                 hw_locks.bits.lock_dig = 1;
3486                                 inst_flags.dig_inst = top_pipe_to_program->stream_res.tg->inst;
3487
3488                                 dmub_hw_lock_mgr_cmd(dc->ctx->dmub_srv,
3489                                                         false,
3490                                                         &hw_locks,
3491                                                         &inst_flags);
3492                         } else
3493                                 top_pipe_to_program->stream_res.tg->funcs->lock_doublebuffer_disable(
3494                                         top_pipe_to_program->stream_res.tg);
3495                 }
3496         }
3497
3498         if (update_type != UPDATE_TYPE_FAST) {
3499                 dc->hwss.post_unlock_program_front_end(dc, context);
3500
3501                 /* Since phantom pipe programming is moved to post_unlock_program_front_end,
3502                  * move the SubVP lock to after the phantom pipes have been setup
3503                  */
3504                 if (should_lock_all_pipes && dc->hwss.interdependent_update_lock) {
3505                         if (dc->hwss.subvp_pipe_control_lock)
3506                                 dc->hwss.subvp_pipe_control_lock(dc, context, false, should_lock_all_pipes, NULL, subvp_prev_use);
3507                 } else {
3508                         if (dc->hwss.subvp_pipe_control_lock)
3509                                 dc->hwss.subvp_pipe_control_lock(dc, context, false, should_lock_all_pipes, top_pipe_to_program, subvp_prev_use);
3510                 }
3511         }
3512
3513         // Fire manual trigger only when bottom plane is flipped
3514         for (j = 0; j < dc->res_pool->pipe_count; j++) {
3515                 struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[j];
3516
3517                 if (!pipe_ctx->plane_state)
3518                         continue;
3519
3520                 if (pipe_ctx->bottom_pipe || pipe_ctx->next_odm_pipe ||
3521                                 !pipe_ctx->stream || !should_update_pipe_for_stream(context, pipe_ctx, stream) ||
3522                                 !pipe_ctx->plane_state->update_flags.bits.addr_update ||
3523                                 pipe_ctx->plane_state->skip_manual_trigger)
3524                         continue;
3525
3526                 if (pipe_ctx->stream_res.tg->funcs->program_manual_trigger)
3527                         pipe_ctx->stream_res.tg->funcs->program_manual_trigger(pipe_ctx->stream_res.tg);
3528         }
3529 }
3530
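/*
 * Editor's summary of commit_planes_for_stream() above, as a rough
 * sequence derived from the code:
 *
 *	prepare_bandwidth (FULL updates only)
 *	lock pipes (all pipes for non-fast updates, else the top pipe)
 *	apply phantom/SubVP timings when phantom pipes are involved
 *	stream updates, then per-pipe surface programming
 *	unlock, then post_unlock_program_front_end (non-fast updates)
 *	fire the manual trigger for flipped bottom planes
 */
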
3531 static bool commit_minimal_transition_state(struct dc *dc,
3532                 struct dc_state *transition_base_context)
3533 {
3534         struct dc_state *transition_context = dc_create_state(dc);
3535         enum pipe_split_policy tmp_policy;
3536         enum dc_status ret = DC_ERROR_UNEXPECTED;
3537         unsigned int i, j;
3538
3539         if (!transition_context)
3540                 return false;
3541
3542         tmp_policy = dc->debug.pipe_split_policy;
3543         dc->debug.pipe_split_policy = MPC_SPLIT_AVOID;
3544
3545         dc_resource_state_copy_construct(transition_base_context, transition_context);
3546
3547         // commit the minimal state
3548         if (dc->res_pool->funcs->validate_bandwidth(dc, transition_context, false)) {
3549                 for (i = 0; i < transition_context->stream_count; i++) {
3550                         struct dc_stream_status *stream_status = &transition_context->stream_status[i];
3551
3552                         for (j = 0; j < stream_status->plane_count; j++) {
3553                                 struct dc_plane_state *plane_state = stream_status->plane_states[j];
3554
3555                                 /* force vsync flip when reconfiguring pipes to prevent underflow
3556                                  * and corruption
3557                                  */
3558                                 plane_state->flip_immediate = false;
3559                         }
3560                 }
3561
3562                 ret = dc_commit_state_no_check(dc, transition_context);
3563         }
3564
3565         // always release, since dc_commit_state_no_check retains the context on success
3566         dc_release_state(transition_context);
3567
3568         // restore the previous pipe split policy
3569         dc->debug.pipe_split_policy = tmp_policy;
3570
3571         if (ret != DC_OK) {
3572                 //this should never happen
3573                 BREAK_TO_DEBUGGER();
3574                 return false;
3575         }
3576
3577         // force a full surface update
3578         for (i = 0; i < dc->current_state->stream_count; i++) {
3579                 for (j = 0; j < dc->current_state->stream_status[i].plane_count; j++) {
3580                         dc->current_state->stream_status[i].plane_states[j]->update_flags.raw = 0xFFFFFFFF;
3581                 }
3582         }
3583
3584         return true;
3585 }
3586
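/*
 * Editor's note on commit_minimal_transition_state() above: it
 * temporarily forces MPC_SPLIT_AVOID, commits the resulting minimal
 * context, then restores the caller's policy.  The save/override/
 * restore pattern in miniature:
 *
 *	tmp_policy = dc->debug.pipe_split_policy;
 *	dc->debug.pipe_split_policy = MPC_SPLIT_AVOID;
 *	// ... validate and commit ...
 *	dc->debug.pipe_split_policy = tmp_policy;
 *
 * Setting update_flags.raw to 0xFFFFFFFF afterwards guarantees the
 * next commit treats every plane as fully dirty.
 */
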
3587 bool dc_update_planes_and_stream(struct dc *dc,
3588                 struct dc_surface_update *srf_updates, int surface_count,
3589                 struct dc_stream_state *stream,
3590                 struct dc_stream_update *stream_update)
3591 {
3592         struct dc_state *context;
3593         enum surface_update_type update_type;
3594         int i;
3595
3596         /* In cases where MPO together with pipe split or ODM is used,
3597          * transitions can cause underflow. Apply the stream configuration with
3598          * minimal pipe split first to avoid unsupported transitions for active pipes.
3599          */
3600         bool force_minimal_pipe_splitting = false;
3601         bool is_plane_addition = false;
3602
3603         struct dc_stream_status *cur_stream_status = stream_get_status(dc->current_state, stream);
3604
3605         if (cur_stream_status &&
3606                         dc->current_state->stream_count > 0 &&
3607                         dc->debug.pipe_split_policy != MPC_SPLIT_AVOID) {
3608                 /* determine if minimal transition is required */
3609                 if (cur_stream_status->plane_count > surface_count) {
3610                         force_minimal_pipe_splitting = true;
3611                 } else if (cur_stream_status->plane_count < surface_count) {
3612                         force_minimal_pipe_splitting = true;
3613                         is_plane_addition = true;
3614                 }
3615         }
3616
3617         /* on plane addition, minimal state is the current one */
3618         if (force_minimal_pipe_splitting && is_plane_addition &&
3619                 !commit_minimal_transition_state(dc, dc->current_state))
3620                 return false;
3621
3622         if (!update_planes_and_stream_state(
3623                         dc,
3624                         srf_updates,
3625                         surface_count,
3626                         stream,
3627                         stream_update,
3628                         &update_type,
3629                         &context))
3630                 return false;
3631
3632         /* on plane addition, minimal state is the new one */
3633         if (force_minimal_pipe_splitting && !is_plane_addition) {
3634                 if (!commit_minimal_transition_state(dc, context)) {
3635                         dc_release_state(context);
3636                         return false;
3637                 }
3638
3639                 update_type = UPDATE_TYPE_FULL;
3640         }
3641
3642         commit_planes_for_stream(
3643                         dc,
3644                         srf_updates,
3645                         surface_count,
3646                         stream,
3647                         stream_update,
3648                         update_type,
3649                         context);
3650
3651         if (dc->current_state != context) {
3652
3653                 /* Since memory free requires elevated IRQL, an interrupt
3654                  * request is generated by mem free. If this happens
3655                  * between freeing and reassigning the context, our vsync
3656                  * interrupt will call into dc and cause a memory
3657                  * corruption BSOD. Hence, we first reassign the context,
3658                  * then free the old context.
3659                  */
3660
3661                 struct dc_state *old = dc->current_state;
3662
3663                 dc->current_state = context;
3664                 dc_release_state(old);
3665
3666                 // clear any forced full updates
3667                 for (i = 0; i < dc->res_pool->pipe_count; i++) {
3668                         struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[i];
3669
3670                         if (pipe_ctx->plane_state && pipe_ctx->stream == stream)
3671                                 pipe_ctx->plane_state->force_full_update = false;
3672                 }
3673         }
3674         return true;
3675 }
3676
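/*
 * Editor's summary of the minimal-transition handling in
 * dc_update_planes_and_stream() above:
 *
 *	plane count vs current	minimal state used	committed
 *	----------------------	------------------	---------------------
 *	increasing		current state		before the new state
 *	decreasing		new state		after it is built
 *	unchanged		none			n/a
 */
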
3677 void dc_commit_updates_for_stream(struct dc *dc,
3678                 struct dc_surface_update *srf_updates,
3679                 int surface_count,
3680                 struct dc_stream_state *stream,
3681                 struct dc_stream_update *stream_update,
3682                 struct dc_state *state)
3683 {
3684         const struct dc_stream_status *stream_status;
3685         enum surface_update_type update_type;
3686         struct dc_state *context;
3687         struct dc_context *dc_ctx = dc->ctx;
3688         int i, j;
3689
3690         stream_status = dc_stream_get_status(stream);
3691         context = dc->current_state;
3692
3693         update_type = dc_check_update_surfaces_for_stream(
3694                                 dc, srf_updates, surface_count, stream_update, stream_status);
3695
3696         if (update_type >= update_surface_trace_level)
3697                 update_surface_trace(dc, srf_updates, surface_count);
3698
3700         if (update_type >= UPDATE_TYPE_FULL) {
3702                 /* initialize scratch memory for building context */
3703                 context = dc_create_state(dc);
3704                 if (context == NULL) {
3705                         DC_ERROR("Failed to allocate new validate context!\n");
3706                         return;
3707                 }
3708
3709                 dc_resource_state_copy_construct(state, context);
3710
3711                 for (i = 0; i < dc->res_pool->pipe_count; i++) {
3712                         struct pipe_ctx *new_pipe = &context->res_ctx.pipe_ctx[i];
3713                         struct pipe_ctx *old_pipe = &dc->current_state->res_ctx.pipe_ctx[i];
3714
3715                         if (new_pipe->plane_state && new_pipe->plane_state != old_pipe->plane_state)
3716                                 new_pipe->plane_state->force_full_update = true;
3717                 }
3718         } else if (update_type == UPDATE_TYPE_FAST && dc_ctx->dce_version >= DCE_VERSION_MAX) {
3719                 /*
3720                  * Previous frame finished and HW is ready for optimization.
3721                  *
3722                  * Only relevant for DCN behavior where we can guarantee the optimization
3723                  * is safe to apply - retain the legacy behavior for DCE.
3724                  */
3725                 dc_post_update_surfaces_to_stream(dc);
3726         }
3727
3729         for (i = 0; i < surface_count; i++) {
3730                 struct dc_plane_state *surface = srf_updates[i].surface;
3731
3732                 copy_surface_update_to_plane(surface, &srf_updates[i]);
3733
3734                 if (update_type >= UPDATE_TYPE_MED) {
3735                         for (j = 0; j < dc->res_pool->pipe_count; j++) {
3736                                 struct pipe_ctx *pipe_ctx =
3737                                         &context->res_ctx.pipe_ctx[j];
3738
3739                                 if (pipe_ctx->plane_state != surface)
3740                                         continue;
3741
3742                                 resource_build_scaling_params(pipe_ctx);
3743                         }
3744                 }
3745         }
3746
3747         copy_stream_update_to_stream(dc, context, stream, stream_update);
3748
3749         if (update_type >= UPDATE_TYPE_FULL) {
3750                 if (!dc->res_pool->funcs->validate_bandwidth(dc, context, false)) {
3751                         DC_ERROR("Mode validation failed for stream update!\n");
3752                         dc_release_state(context);
3753                         return;
3754                 }
3755         }
3756
3757         TRACE_DC_PIPE_STATE(pipe_ctx, i, MAX_PIPES);
3758
3759         commit_planes_for_stream(
3760                                 dc,
3761                                 srf_updates,
3762                                 surface_count,
3763                                 stream,
3764                                 stream_update,
3765                                 update_type,
3766                                 context);
3767         /* update the current state */
3768         if (dc->current_state != context) {
3769
3770                 struct dc_state *old = dc->current_state;
3771
3772                 dc->current_state = context;
3773                 dc_release_state(old);
3774
3775                 for (i = 0; i < dc->res_pool->pipe_count; i++) {
3776                         struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[i];
3777
3778                         if (pipe_ctx->plane_state && pipe_ctx->stream == stream)
3779                                 pipe_ctx->plane_state->force_full_update = false;
3780                 }
3781         }
3782
3783         /* Legacy optimization path for DCE. */
3784         if (update_type >= UPDATE_TYPE_FULL && dc_ctx->dce_version < DCE_VERSION_MAX) {
3785                 dc_post_update_surfaces_to_stream(dc);
3786                 TRACE_DCE_CLOCK_STATE(&context->bw_ctx.bw.dce);
3787         }
3788
3792
3793 uint8_t dc_get_current_stream_count(struct dc *dc)
3794 {
3795         return dc->current_state->stream_count;
3796 }
3797
3798 struct dc_stream_state *dc_get_stream_at_index(struct dc *dc, uint8_t i)
3799 {
3800         if (i < dc->current_state->stream_count)
3801                 return dc->current_state->streams[i];
3802         return NULL;
3803 }
3804
3805 enum dc_irq_source dc_interrupt_to_irq_source(
3806                 struct dc *dc,
3807                 uint32_t src_id,
3808                 uint32_t ext_id)
3809 {
3810         return dal_irq_service_to_irq_source(dc->res_pool->irqs, src_id, ext_id);
3811 }
3812
3813 /*
3814  * dc_interrupt_set() - Enable/disable an AMD hw interrupt source
3815  */
3816 bool dc_interrupt_set(struct dc *dc, enum dc_irq_source src, bool enable)
3817 {
3819         if (dc == NULL)
3820                 return false;
3821
3822         return dal_irq_service_set(dc->res_pool->irqs, src, enable);
3823 }
3824
3825 void dc_interrupt_ack(struct dc *dc, enum dc_irq_source src)
3826 {
3827         dal_irq_service_ack(dc->res_pool->irqs, src);
3828 }
3829
3830 void dc_power_down_on_boot(struct dc *dc)
3831 {
3832         if (dc->ctx->dce_environment != DCE_ENV_VIRTUAL_HW &&
3833                         dc->hwss.power_down_on_boot)
3834                 dc->hwss.power_down_on_boot(dc);
3835 }
3836
void dc_set_power_state(
	struct dc *dc,
	enum dc_acpi_cm_power_state power_state)
{
	struct kref refcount;
	struct display_mode_lib *dml;

	if (!dc->current_state)
		return;

	switch (power_state) {
	case DC_ACPI_CM_POWER_STATE_D0:
		dc_resource_state_construct(dc, dc->current_state);

		dc_z10_restore(dc);

		if (dc->ctx->dmub_srv)
			dc_dmub_srv_wait_phy_init(dc->ctx->dmub_srv);

		dc->hwss.init_hw(dc);

		if (dc->hwss.init_sys_ctx != NULL &&
			dc->vm_pa_config.valid) {
			dc->hwss.init_sys_ctx(dc->hwseq, dc, &dc->vm_pa_config);
		}

		break;
	default:
		ASSERT(dc->current_state->stream_count == 0);
		/* Zero out the current context so that on resume we start with
		 * clean state, and dc hw programming optimizations will not
		 * cause any trouble.
		 */
		dml = kzalloc(sizeof(struct display_mode_lib),
				GFP_KERNEL);

		ASSERT(dml);
		if (!dml)
			return;

		/* Preserve refcount */
		refcount = dc->current_state->refcount;
		/* Preserve display mode lib */
		memcpy(dml, &dc->current_state->bw_ctx.dml, sizeof(struct display_mode_lib));

		dc_resource_state_destruct(dc->current_state);
		memset(dc->current_state, 0,
				sizeof(*dc->current_state));

		dc->current_state->refcount = refcount;
		dc->current_state->bw_ctx.dml = *dml;

		kfree(dml);

		break;
	}
}

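/*
 * Example (illustrative sketch only): the typical DM suspend/resume
 * ordering around dc_set_power_state. The helper names are hypothetical;
 * DC_ACPI_CM_POWER_STATE_D3 is assumed to be the low-power enum value.
 *
 *	static void dm_example_suspend(struct dc *dc)
 *	{
 *		// streams are expected to be released before entering D3
 *		dc_set_power_state(dc, DC_ACPI_CM_POWER_STATE_D3);
 *	}
 *
 *	static void dm_example_resume(struct dc *dc)
 *	{
 *		dc_set_power_state(dc, DC_ACPI_CM_POWER_STATE_D0);
 *		dc_resume(dc);
 *	}
 */
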
void dc_resume(struct dc *dc)
{
	uint32_t i;

	for (i = 0; i < dc->link_count; i++)
		core_link_resume(dc->links[i]);
}

bool dc_is_dmcu_initialized(struct dc *dc)
{
	struct dmcu *dmcu = dc->res_pool->dmcu;

	if (dmcu)
		return dmcu->funcs->is_dmcu_initialized(dmcu);
	return false;
}

bool dc_is_oem_i2c_device_present(
	struct dc *dc,
	size_t slave_address)
{
	if (dc->res_pool->oem_device)
		return dce_i2c_oem_device_present(
			dc->res_pool,
			dc->res_pool->oem_device,
			slave_address);

	return false;
}

bool dc_submit_i2c(
		struct dc *dc,
		uint32_t link_index,
		struct i2c_command *cmd)
{
	struct dc_link *link = dc->links[link_index];
	struct ddc_service *ddc = link->ddc;

	return dce_i2c_submit_command(
		dc->res_pool,
		ddc->ddc_pin,
		cmd);
}

bool dc_submit_i2c_oem(
		struct dc *dc,
		struct i2c_command *cmd)
{
	struct ddc_service *ddc = dc->res_pool->oem_device;

	if (ddc)
		return dce_i2c_submit_command(
			dc->res_pool,
			ddc->ddc_pin,
			cmd);

	return false;
}

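/*
 * Example (illustrative sketch only): submitting a one-payload write
 * through dc_submit_i2c. Field names follow the i2c_command/i2c_payload
 * definitions in dc's DDC types as this author understands them; treat
 * them as assumptions rather than a guaranteed API.
 *
 *	static bool dm_example_i2c_write(struct dc *dc, uint32_t link_index,
 *					 uint8_t address, uint8_t *buf,
 *					 uint32_t len)
 *	{
 *		struct i2c_payload payload = {
 *			.write = true,
 *			.address = address,
 *			.length = len,
 *			.data = buf,
 *		};
 *		struct i2c_command cmd = {
 *			.payloads = &payload,
 *			.number_of_payloads = 1,
 *			.engine = I2C_COMMAND_ENGINE_DEFAULT,
 *			.speed = 100,
 *		};
 *
 *		return dc_submit_i2c(dc, link_index, &cmd);
 *	}
 */
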
static bool link_add_remote_sink_helper(struct dc_link *dc_link, struct dc_sink *sink)
{
	if (dc_link->sink_count >= MAX_SINKS_PER_LINK) {
		BREAK_TO_DEBUGGER();
		return false;
	}

	dc_sink_retain(sink);

	dc_link->remote_sinks[dc_link->sink_count] = sink;
	dc_link->sink_count++;

	return true;
}

/*
 * dc_link_add_remote_sink() - Create a sink and attach it to an existing link
 *
 * EDID length is in bytes
 */
struct dc_sink *dc_link_add_remote_sink(
		struct dc_link *link,
		const uint8_t *edid,
		int len,
		struct dc_sink_init_data *init_data)
{
	struct dc_sink *dc_sink;
	enum dc_edid_status edid_status;

	if (len > DC_MAX_EDID_BUFFER_SIZE) {
		dm_error("Max EDID buffer size breached!\n");
		return NULL;
	}

	if (!init_data) {
		BREAK_TO_DEBUGGER();
		return NULL;
	}

	if (!init_data->link) {
		BREAK_TO_DEBUGGER();
		return NULL;
	}

	dc_sink = dc_sink_create(init_data);

	if (!dc_sink)
		return NULL;

	memmove(dc_sink->dc_edid.raw_edid, edid, len);
	dc_sink->dc_edid.length = len;

	if (!link_add_remote_sink_helper(
			link,
			dc_sink))
		goto fail_add_sink;

	edid_status = dm_helpers_parse_edid_caps(
			link,
			&dc_sink->dc_edid,
			&dc_sink->edid_caps);

	/*
	 * Treat device as no EDID device if EDID
	 * parsing fails
	 */
	if (edid_status != EDID_OK) {
		dc_sink->dc_edid.length = 0;
		dm_error("Bad EDID, status %d!\n", edid_status);
	}

	return dc_sink;

fail_add_sink:
	dc_sink_release(dc_sink);
	return NULL;
}

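/*
 * Example (illustrative sketch only): adding a remote (MST) sink for a
 * freshly parsed EDID. The init fields shown are assumptions based on
 * struct dc_sink_init_data; dm_example_add_mst_sink is hypothetical.
 *
 *	static struct dc_sink *dm_example_add_mst_sink(struct dc_link *link,
 *						       const uint8_t *edid,
 *						       int len)
 *	{
 *		struct dc_sink_init_data init_data = {
 *			.link = link,
 *			.sink_signal = SIGNAL_TYPE_DISPLAY_PORT_MST,
 *		};
 *
 *		return dc_link_add_remote_sink(link, edid, len, &init_data);
 *	}
 */
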
/*
 * dc_link_remove_remote_sink() - Remove a remote sink from a dc_link
 *
 * Note that this just removes the struct dc_sink - it doesn't
 * program hardware or alter other members of dc_link
 */
void dc_link_remove_remote_sink(struct dc_link *link, struct dc_sink *sink)
{
	int i;

	if (!link->sink_count) {
		BREAK_TO_DEBUGGER();
		return;
	}

	for (i = 0; i < link->sink_count; i++) {
		if (link->remote_sinks[i] == sink) {
			dc_sink_release(sink);
			link->remote_sinks[i] = NULL;

			/* shrink array to remove the empty slot */
			while (i < link->sink_count - 1) {
				link->remote_sinks[i] = link->remote_sinks[i+1];
				i++;
			}
			link->remote_sinks[i] = NULL;
			link->sink_count--;
			return;
		}
	}
}

void get_clock_requirements_for_state(struct dc_state *state, struct AsicStateEx *info)
{
	info->displayClock		= (unsigned int)state->bw_ctx.bw.dcn.clk.dispclk_khz;
	info->engineClock		= (unsigned int)state->bw_ctx.bw.dcn.clk.dcfclk_khz;
	info->memoryClock		= (unsigned int)state->bw_ctx.bw.dcn.clk.dramclk_khz;
	info->maxSupportedDppClock	= (unsigned int)state->bw_ctx.bw.dcn.clk.max_supported_dppclk_khz;
	info->dppClock			= (unsigned int)state->bw_ctx.bw.dcn.clk.dppclk_khz;
	info->socClock			= (unsigned int)state->bw_ctx.bw.dcn.clk.socclk_khz;
	info->dcfClockDeepSleep		= (unsigned int)state->bw_ctx.bw.dcn.clk.dcfclk_deep_sleep_khz;
	info->fClock			= (unsigned int)state->bw_ctx.bw.dcn.clk.fclk_khz;
	info->phyClock			= (unsigned int)state->bw_ctx.bw.dcn.clk.phyclk_khz;
}

enum dc_status dc_set_clock(struct dc *dc, enum dc_clock_type clock_type, uint32_t clk_khz, uint32_t stepping)
{
	if (dc->hwss.set_clock)
		return dc->hwss.set_clock(dc, clock_type, clk_khz, stepping);
	return DC_ERROR_UNEXPECTED;
}

void dc_get_clock(struct dc *dc, enum dc_clock_type clock_type, struct dc_clock_config *clock_cfg)
{
	if (dc->hwss.get_clock)
		dc->hwss.get_clock(dc, clock_type, clock_cfg);
}

/* enable/disable eDP PSR without specifying a stream for eDP */
bool dc_set_psr_allow_active(struct dc *dc, bool enable)
{
	int i;
	bool allow_active;

	for (i = 0; i < dc->current_state->stream_count; i++) {
		struct dc_link *link;
		struct dc_stream_state *stream = dc->current_state->streams[i];

		link = stream->link;
		if (!link)
			continue;

		if (link->psr_settings.psr_feature_enabled) {
			if (enable && !link->psr_settings.psr_allow_active) {
				allow_active = true;
				if (!dc_link_set_psr_allow_active(link, &allow_active, false, false, NULL))
					return false;
			} else if (!enable && link->psr_settings.psr_allow_active) {
				allow_active = false;
				if (!dc_link_set_psr_allow_active(link, &allow_active, true, false, NULL))
					return false;
			}
		}
	}

	return true;
}

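/*
 * Example (illustrative sketch only): temporarily disabling PSR on all
 * eDP links around an operation that must not race with the panel's
 * self-refresh, then restoring it. dm_example_do_work is hypothetical.
 *
 *	static bool dm_example_without_psr(struct dc *dc)
 *	{
 *		bool ret;
 *
 *		if (!dc_set_psr_allow_active(dc, false))
 *			return false;
 *
 *		ret = dm_example_do_work(dc);
 *
 *		return dc_set_psr_allow_active(dc, true) && ret;
 *	}
 */
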
void dc_allow_idle_optimizations(struct dc *dc, bool allow)
{
	if (dc->debug.disable_idle_power_optimizations)
		return;

	if (dc->clk_mgr != NULL && dc->clk_mgr->funcs->is_smu_present)
		if (!dc->clk_mgr->funcs->is_smu_present(dc->clk_mgr))
			return;

	if (allow == dc->idle_optimizations_allowed)
		return;

	if (dc->hwss.apply_idle_power_optimizations && dc->hwss.apply_idle_power_optimizations(dc, allow))
		dc->idle_optimizations_allowed = allow;
}

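/*
 * Example (illustrative sketch only): the usual bracket a DM puts around
 * direct hardware access - leave idle optimizations before touching
 * registers, re-enter afterwards. dm_example_hw_access is hypothetical.
 *
 *	static void dm_example_hw_access(struct dc *dc)
 *	{
 *		dc_allow_idle_optimizations(dc, false);
 *
 *		// ... program registers safely here ...
 *
 *		dc_allow_idle_optimizations(dc, true);
 *	}
 */
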
/* set min and max memory clock to lowest and highest DPM level, respectively */
void dc_unlock_memory_clock_frequency(struct dc *dc)
{
	if (dc->clk_mgr->funcs->set_hard_min_memclk)
		dc->clk_mgr->funcs->set_hard_min_memclk(dc->clk_mgr, false);

	if (dc->clk_mgr->funcs->set_hard_max_memclk)
		dc->clk_mgr->funcs->set_hard_max_memclk(dc->clk_mgr);
}

/* set min memory clock to the min required for current mode, max to maxDPM */
void dc_lock_memory_clock_frequency(struct dc *dc)
{
	if (dc->clk_mgr->funcs->get_memclk_states_from_smu)
		dc->clk_mgr->funcs->get_memclk_states_from_smu(dc->clk_mgr);

	if (dc->clk_mgr->funcs->set_hard_min_memclk)
		dc->clk_mgr->funcs->set_hard_min_memclk(dc->clk_mgr, true);

	if (dc->clk_mgr->funcs->set_hard_max_memclk)
		dc->clk_mgr->funcs->set_hard_max_memclk(dc->clk_mgr);
}

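/*
 * Example (illustrative sketch only): pairing the two calls above so the
 * memory clock floats freely while displays are reconfigured and is pinned
 * again once the new mode is committed. dm_example_commit is hypothetical.
 *
 *	static void dm_example_modeset(struct dc *dc)
 *	{
 *		dc_unlock_memory_clock_frequency(dc);
 *		dm_example_commit(dc);
 *		dc_lock_memory_clock_frequency(dc);
 *	}
 */
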
static void blank_and_force_memclk(struct dc *dc, bool apply, unsigned int memclk_mhz)
{
	struct dc_state *context = dc->current_state;
	struct hubp *hubp;
	struct pipe_ctx *pipe;
	int i;

	for (i = 0; i < dc->res_pool->pipe_count; i++) {
		pipe = &context->res_ctx.pipe_ctx[i];

		if (pipe->stream != NULL) {
			dc->hwss.disable_pixel_data(dc, pipe, true);

			// wait for double buffer
			pipe->stream_res.tg->funcs->wait_for_state(pipe->stream_res.tg, CRTC_STATE_VACTIVE);
			pipe->stream_res.tg->funcs->wait_for_state(pipe->stream_res.tg, CRTC_STATE_VBLANK);
			pipe->stream_res.tg->funcs->wait_for_state(pipe->stream_res.tg, CRTC_STATE_VACTIVE);

			hubp = pipe->plane_res.hubp;
			hubp->funcs->set_blank_regs(hubp, true);
		}
	}

	dc->clk_mgr->funcs->set_max_memclk(dc->clk_mgr, memclk_mhz);
	dc->clk_mgr->funcs->set_min_memclk(dc->clk_mgr, memclk_mhz);

	for (i = 0; i < dc->res_pool->pipe_count; i++) {
		pipe = &context->res_ctx.pipe_ctx[i];

		if (pipe->stream != NULL) {
			dc->hwss.disable_pixel_data(dc, pipe, false);

			hubp = pipe->plane_res.hubp;
			hubp->funcs->set_blank_regs(hubp, false);
		}
	}
}

/**
 * dc_enable_dcmode_clk_limit() - lower clocks in dc (battery) mode
 * @dc: pointer to dc of the dm calling this
 * @enable: True = transition to DC mode, false = transition back to AC mode
 *
 * Some SoCs define additional clock limits when in DC mode; DM should
 * invoke this function when the platform undergoes a power source transition
 * so DC can apply/unapply the limit. This interface may be disruptive to
 * the onscreen content.
 *
 * Context: Triggered by OS through DM interface, or manually by escape calls.
 * Need to hold a dc lock when doing so.
 *
 * Return: none (void function)
 */
void dc_enable_dcmode_clk_limit(struct dc *dc, bool enable)
{
	uint32_t hw_internal_rev = dc->ctx->asic_id.hw_internal_rev;
	unsigned int softMax, maxDPM, funcMin;
	bool p_state_change_support;

	if (!ASICREV_IS_BEIGE_GOBY_P(hw_internal_rev))
		return;

	softMax = dc->clk_mgr->bw_params->dc_mode_softmax_memclk;
	maxDPM = dc->clk_mgr->bw_params->clk_table.entries[dc->clk_mgr->bw_params->clk_table.num_entries - 1].memclk_mhz;
	funcMin = (dc->clk_mgr->clks.dramclk_khz + 999) / 1000;
	p_state_change_support = dc->clk_mgr->clks.p_state_change_support;

	if (enable && !dc->clk_mgr->dc_mode_softmax_enabled) {
		if (p_state_change_support) {
			if (funcMin <= softMax)
				dc->clk_mgr->funcs->set_max_memclk(dc->clk_mgr, softMax);
			// else: No-Op
		} else {
			if (funcMin <= softMax)
				blank_and_force_memclk(dc, true, softMax);
			// else: No-Op
		}
	} else if (!enable && dc->clk_mgr->dc_mode_softmax_enabled) {
		if (p_state_change_support) {
			if (funcMin <= softMax)
				dc->clk_mgr->funcs->set_max_memclk(dc->clk_mgr, maxDPM);
			// else: No-Op
		} else {
			if (funcMin <= softMax)
				blank_and_force_memclk(dc, true, maxDPM);
			// else: No-Op
		}
	}
	dc->clk_mgr->dc_mode_softmax_enabled = enable;
}
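
/*
 * Example (illustrative sketch only): wiring the limit into a power-source
 * notification. dm_example_on_power_source_change and the on_battery flag
 * are hypothetical; locking is elided.
 *
 *	static void dm_example_on_power_source_change(struct dc *dc,
 *						      bool on_battery)
 *	{
 *		// apply the DC-mode soft max on battery, lift it on AC
 *		dc_enable_dcmode_clk_limit(dc, on_battery);
 *	}
 */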

bool dc_is_plane_eligible_for_idle_optimizations(struct dc *dc, struct dc_plane_state *plane,
		struct dc_cursor_attributes *cursor_attr)
{
	if (dc->hwss.does_plane_fit_in_mall && dc->hwss.does_plane_fit_in_mall(dc, plane, cursor_attr))
		return true;
	return false;
}

/* cleanup on driver unload */
void dc_hardware_release(struct dc *dc)
{
	dc_mclk_switch_using_fw_based_vblank_stretch_shut_down(dc);

	if (dc->hwss.hardware_release)
		dc->hwss.hardware_release(dc);
}

void dc_mclk_switch_using_fw_based_vblank_stretch_shut_down(struct dc *dc)
{
	if (dc->current_state)
		dc->current_state->bw_ctx.bw.dcn.clk.fw_based_mclk_switching_shut_down = true;
}

/**
 * dc_is_dmub_outbox_supported - Check if DMUB FW supports outbox notifications
 * @dc: dc structure
 *
 * Checks whether DMUB FW supports outbox notifications. If supported, DM
 * should register the outbox interrupt prior to actually enabling interrupts
 * via dc_enable_dmub_outbox.
 *
 * Return: True if DMUB FW supports outbox notifications, False otherwise
 */
bool dc_is_dmub_outbox_supported(struct dc *dc)
{
	/* DCN31 B0 USB4 DPIA needs dmub notifications for interrupts */
	if (dc->ctx->asic_id.chip_family == FAMILY_YELLOW_CARP &&
	    dc->ctx->asic_id.hw_internal_rev == YELLOW_CARP_B0 &&
	    !dc->debug.dpia_debug.bits.disable_dpia)
		return true;

	if (dc->ctx->asic_id.chip_family == AMDGPU_FAMILY_GC_11_0_2 &&
	    !dc->debug.dpia_debug.bits.disable_dpia)
		return true;

	/* dmub aux needs dmub notifications to be enabled */
	return dc->debug.enable_dmub_aux_for_legacy_ddc;
}

/**
 * dc_enable_dmub_notifications - Check if dmub fw supports outbox notifications
 * @dc: dc structure
 *
 * Calls dc_is_dmub_outbox_supported to check if dmub fw supports outbox
 * notifications. All DMs shall switch to dc_is_dmub_outbox_supported.
 * This API shall be removed after switching.
 *
 * Return: True if DMUB FW supports outbox notifications, False otherwise
 */
bool dc_enable_dmub_notifications(struct dc *dc)
{
	return dc_is_dmub_outbox_supported(dc);
}

/**
 * dc_enable_dmub_outbox - Enable DMUB unsolicited notifications
 * @dc: dc structure
 *
 * Enables DMUB unsolicited notifications to x86 via outbox.
 */
void dc_enable_dmub_outbox(struct dc *dc)
{
	struct dc_context *dc_ctx = dc->ctx;

	dmub_enable_outbox_notification(dc_ctx->dmub_srv);
}

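/*
 * Example (illustrative sketch only): the intended bring-up order on the
 * DM side - probe support, hook up the outbox interrupt, then enable the
 * outbox. dm_example_register_outbox_irq is hypothetical.
 *
 *	static void dm_example_outbox_init(struct dc *dc)
 *	{
 *		if (!dc_is_dmub_outbox_supported(dc))
 *			return;
 *
 *		dm_example_register_outbox_irq(dc);
 *		dc_enable_dmub_outbox(dc);
 *	}
 */
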
/**
 * dc_process_dmub_aux_transfer_async - Submits aux command to dmub via inbox message
 *                                      Sets port index appropriately for legacy DDC
 * @dc: dc structure
 * @link_index: link index
 * @payload: aux payload
 *
 * Returns: True if successful, False if failure
 */
bool dc_process_dmub_aux_transfer_async(struct dc *dc,
				uint32_t link_index,
				struct aux_payload *payload)
{
	uint8_t action;
	union dmub_rb_cmd cmd = {0};
	struct dc_dmub_srv *dmub_srv = dc->ctx->dmub_srv;

	ASSERT(payload->length <= 16);

	cmd.dp_aux_access.header.type = DMUB_CMD__DP_AUX_ACCESS;
	cmd.dp_aux_access.header.payload_bytes = 0;
	/* For dpia, ddc_pin is set to NULL */
	if (!dc->links[link_index]->ddc->ddc_pin)
		cmd.dp_aux_access.aux_control.type = AUX_CHANNEL_DPIA;
	else
		cmd.dp_aux_access.aux_control.type = AUX_CHANNEL_LEGACY_DDC;

	cmd.dp_aux_access.aux_control.instance = dc->links[link_index]->ddc_hw_inst;
	cmd.dp_aux_access.aux_control.sw_crc_enabled = 0;
	cmd.dp_aux_access.aux_control.timeout = 0;
	cmd.dp_aux_access.aux_control.dpaux.address = payload->address;
	cmd.dp_aux_access.aux_control.dpaux.is_i2c_over_aux = payload->i2c_over_aux;
	cmd.dp_aux_access.aux_control.dpaux.length = payload->length;

	/* set aux action */
	if (payload->i2c_over_aux) {
		if (payload->write) {
			if (payload->mot)
				action = DP_AUX_REQ_ACTION_I2C_WRITE_MOT;
			else
				action = DP_AUX_REQ_ACTION_I2C_WRITE;
		} else {
			if (payload->mot)
				action = DP_AUX_REQ_ACTION_I2C_READ_MOT;
			else
				action = DP_AUX_REQ_ACTION_I2C_READ;
		}
	} else {
		if (payload->write)
			action = DP_AUX_REQ_ACTION_DPCD_WRITE;
		else
			action = DP_AUX_REQ_ACTION_DPCD_READ;
	}

	cmd.dp_aux_access.aux_control.dpaux.action = action;

	if (payload->length && payload->write) {
		memcpy(cmd.dp_aux_access.aux_control.dpaux.data,
			payload->data,
			payload->length
			);
	}

	dc_dmub_srv_cmd_queue(dmub_srv, &cmd);
	dc_dmub_srv_cmd_execute(dmub_srv);
	dc_dmub_srv_wait_idle(dmub_srv);

	return true;
}

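/*
 * Example (illustrative sketch only): issuing an async 1-byte DPCD read
 * through the inbox path above; the reply arrives later via an outbox
 * notification. Field names follow struct aux_payload as this author
 * understands it and should be treated as assumptions.
 *
 *	static bool dm_example_dpcd_read_async(struct dc *dc,
 *					       uint32_t link_index,
 *					       uint32_t address, uint8_t *byte)
 *	{
 *		struct aux_payload payload = {
 *			.i2c_over_aux = false,
 *			.write = false,
 *			.mot = false,
 *			.address = address,
 *			.length = 1,
 *			.data = byte,
 *		};
 *
 *		return dc_process_dmub_aux_transfer_async(dc, link_index,
 *							  &payload);
 *	}
 */
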
uint8_t get_link_index_from_dpia_port_index(const struct dc *dc,
					    uint8_t dpia_port_index)
{
	uint8_t index, link_index = 0xFF;

	for (index = 0; index < dc->link_count; index++) {
		/* ddc_hw_inst has dpia port index for dpia links
		 * and ddc instance for legacy links
		 */
		if (!dc->links[index]->ddc->ddc_pin) {
			if (dc->links[index]->ddc_hw_inst == dpia_port_index) {
				link_index = index;
				break;
			}
		}
	}
	ASSERT(link_index != 0xFF);
	return link_index;
}

/**
 * dc_process_dmub_set_config_async - Submits set_config command
 * @dc: dc structure
 * @link_index: link index
 * @payload: set_config command payload
 * @notify: set_config immediate reply
 *
 * Submits set_config command to dmub via inbox message.
 *
 * Return: True if successful, False if failure
 */
bool dc_process_dmub_set_config_async(struct dc *dc,
				uint32_t link_index,
				struct set_config_cmd_payload *payload,
				struct dmub_notification *notify)
{
	union dmub_rb_cmd cmd = {0};
	struct dc_dmub_srv *dmub_srv = dc->ctx->dmub_srv;
	bool is_cmd_complete = true;

	/* prepare SET_CONFIG command */
	cmd.set_config_access.header.type = DMUB_CMD__DPIA;
	cmd.set_config_access.header.sub_type = DMUB_CMD__DPIA_SET_CONFIG_ACCESS;

	cmd.set_config_access.set_config_control.instance = dc->links[link_index]->ddc_hw_inst;
	cmd.set_config_access.set_config_control.cmd_pkt.msg_type = payload->msg_type;
	cmd.set_config_access.set_config_control.cmd_pkt.msg_data = payload->msg_data;

	if (!dc_dmub_srv_cmd_with_reply_data(dmub_srv, &cmd)) {
		/* command is not processed by dmub */
		notify->sc_status = SET_CONFIG_UNKNOWN_ERROR;
		return is_cmd_complete;
	}

	/* command processed by dmub, if ret_status is 1, it is completed instantly */
	if (cmd.set_config_access.header.ret_status == 1)
		notify->sc_status = cmd.set_config_access.set_config_control.immed_status;
	else
		/* cmd pending, will receive notification via outbox */
		is_cmd_complete = false;

	return is_cmd_complete;
}

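/*
 * Example (illustrative sketch only): sending a set_config and telling
 * whether it completed inline or will finish via an outbox notification.
 * dm_example_set_config and dm_example_handle_status are hypothetical.
 *
 *	static void dm_example_set_config(struct dc *dc, uint32_t link_index,
 *					  struct set_config_cmd_payload *payload)
 *	{
 *		struct dmub_notification notify = {0};
 *
 *		if (dc_process_dmub_set_config_async(dc, link_index, payload,
 *						     &notify)) {
 *			// completed inline; immediate status is valid
 *			dm_example_handle_status(notify.sc_status);
 *		}
 *		// otherwise wait for the SET_CONFIG outbox notification
 *	}
 */
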
/**
 * dc_process_dmub_set_mst_slots - Submits MST slot allocation
 * @dc: dc structure
 * @link_index: link index
 * @mst_alloc_slots: mst slots to be allotted
 * @mst_slots_in_use: mst slots in use returned in failure case
 *
 * Submits mst slot allocation command to dmub via inbox message.
 *
 * Return: DC_OK if successful, DC_ERROR if failure
 */
enum dc_status dc_process_dmub_set_mst_slots(const struct dc *dc,
				uint32_t link_index,
				uint8_t mst_alloc_slots,
				uint8_t *mst_slots_in_use)
{
	union dmub_rb_cmd cmd = {0};
	struct dc_dmub_srv *dmub_srv = dc->ctx->dmub_srv;

	/* prepare MST_ALLOC_SLOTS command */
	cmd.set_mst_alloc_slots.header.type = DMUB_CMD__DPIA;
	cmd.set_mst_alloc_slots.header.sub_type = DMUB_CMD__DPIA_MST_ALLOC_SLOTS;

	cmd.set_mst_alloc_slots.mst_slots_control.instance = dc->links[link_index]->ddc_hw_inst;
	cmd.set_mst_alloc_slots.mst_slots_control.mst_alloc_slots = mst_alloc_slots;

	if (!dc_dmub_srv_cmd_with_reply_data(dmub_srv, &cmd))
		/* command is not processed by dmub */
		return DC_ERROR_UNEXPECTED;

	/* command processed by dmub, but a ret_status other than 1 means an error */
	if (cmd.set_mst_alloc_slots.header.ret_status != 1)
		/* command processing error */
		return DC_ERROR_UNEXPECTED;

	/* command processed and we have a status of 2, mst not enabled in dpia */
	if (cmd.set_mst_alloc_slots.mst_slots_control.immed_status == 2)
		return DC_FAIL_UNSUPPORTED_1;

	/* previously configured mst alloc and used slots did not match */
	if (cmd.set_mst_alloc_slots.mst_slots_control.immed_status == 3) {
		*mst_slots_in_use = cmd.set_mst_alloc_slots.mst_slots_control.mst_slots_in_use;
		return DC_NOT_SUPPORTED;
	}

	return DC_OK;
}

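/*
 * Example (illustrative sketch only): interpreting the tri-state result
 * of the MST slot allocation call. dm_example_alloc_mst_slots is
 * hypothetical.
 *
 *	static bool dm_example_alloc_mst_slots(const struct dc *dc,
 *					       uint32_t link_index,
 *					       uint8_t slots)
 *	{
 *		uint8_t in_use = 0;
 *		enum dc_status status;
 *
 *		status = dc_process_dmub_set_mst_slots(dc, link_index, slots,
 *						       &in_use);
 *		if (status == DC_NOT_SUPPORTED)
 *			DC_LOG_WARNING("slot mismatch, %d slots in use\n",
 *				       in_use);
 *
 *		return status == DC_OK;
 *	}
 */
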
/**
 * dc_disable_accelerated_mode - disable accelerated mode
 * @dc: dc structure
 */
void dc_disable_accelerated_mode(struct dc *dc)
{
	bios_set_scratch_acc_mode_change(dc->ctx->dc_bios, 0);
}

/**
 * dc_notify_vsync_int_state - notifies vsync enable/disable state
 * @dc: dc structure
 * @stream: stream where vsync int state changed
 * @enable: whether vsync is enabled or disabled
 *
 * Called when vsync is enabled/disabled. Will notify DMUB to start/stop ABM
 * interrupts after steady state is reached.
 */
void dc_notify_vsync_int_state(struct dc *dc, struct dc_stream_state *stream, bool enable)
{
	int i;
	int edp_num;
	struct pipe_ctx *pipe = NULL;
	struct dc_link *link = stream->sink->link;
	struct dc_link *edp_links[MAX_NUM_EDP];

	if (link->psr_settings.psr_feature_enabled)
		return;

	/* find primary pipe associated with stream */
	for (i = 0; i < MAX_PIPES; i++) {
		pipe = &dc->current_state->res_ctx.pipe_ctx[i];

		if (pipe->stream == stream && pipe->stream_res.tg)
			break;
	}

	if (i == MAX_PIPES) {
		ASSERT(0);
		return;
	}

	get_edp_links(dc, edp_links, &edp_num);

	/* Determine panel inst */
	for (i = 0; i < edp_num; i++) {
		if (edp_links[i] == link)
			break;
	}

	if (i == edp_num)
		return;

	if (pipe->stream_res.abm && pipe->stream_res.abm->funcs->set_abm_pause)
		pipe->stream_res.abm->funcs->set_abm_pause(pipe->stream_res.abm, !enable, i, pipe->stream_res.tg->inst);
}

/**
 * dc_extended_blank_supported - Decide whether extended blank is supported
 * @dc: current DC state
 *
 * Extended blank is a freesync optimization feature to be enabled in the
 * future. During the extra vblank period gained from freesync, we have the
 * ability to enter z9/z10.
 *
 * Return: Indicate whether extended blank is supported (true or false)
 */
bool dc_extended_blank_supported(struct dc *dc)
{
	return dc->debug.extended_blank_optimization && !dc->debug.disable_z10
		&& dc->caps.zstate_support && dc->caps.is_apu;
}