drivers/gpu/drm/amd/display/dc/core/dc.c
/*
 * Copyright 2015 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: AMD
 */

#include "dm_services.h"

#include "dc.h"

#include "core_status.h"
#include "core_types.h"
#include "hw_sequencer.h"
#include "dce/dce_hwseq.h"

#include "resource.h"

#include "gpio_service_interface.h"
#include "clk_mgr.h"
#include "clock_source.h"
#include "dc_bios_types.h"

#include "bios_parser_interface.h"
#include "bios/bios_parser_helper.h"
#include "include/irq_service_interface.h"
#include "transform.h"
#include "dmcu.h"
#include "dpp.h"
#include "timing_generator.h"
#include "abm.h"
#include "virtual/virtual_link_encoder.h"
#include "hubp.h"

#include "link_hwss.h"
#include "link_encoder.h"
#include "link_enc_cfg.h"

#include "link.h"
#include "dm_helpers.h"
#include "mem_input.h"

#include "dc_dmub_srv.h"

#include "dsc.h"

#include "vm_helper.h"

#include "dce/dce_i2c.h"

#include "dmub/dmub_srv.h"

#include "dce/dmub_psr.h"

#include "dce/dmub_hw_lock_mgr.h"

#include "dc_trace.h"

#include "hw_sequencer_private.h"

#include "dce/dmub_outbox.h"

#define CTX \
        dc->ctx

#define DC_LOGGER \
        dc->ctx->logger

static const char DC_BUILD_ID[] = "production-build";

/**
 * DOC: Overview
 *
 * DC is the OS-agnostic component of the amdgpu DC driver.
 *
 * DC maintains and validates a set of structs representing the state of the
 * driver and writes that state to AMD hardware.
 *
 * Main DC HW structs:
 *
 * struct dc - The central struct.  One per driver.  Created on driver load,
 * destroyed on driver unload.
 *
 * struct dc_context - One per driver.
 * Used as a backpointer by most other structs in dc.
 *
 * struct dc_link - One per connector (the physical DP, HDMI, miniDP, or eDP
 * plugpoints).  Created on driver load, destroyed on driver unload.
 *
 * struct dc_sink - One per display.  Created on boot or hotplug.
 * Destroyed on shutdown or hotunplug.  A dc_link can have a local sink
 * (the display directly attached).  It may also have one or more remote
 * sinks (in the Multi-Stream Transport case).
 *
 * struct resource_pool - One per driver.  Represents the hw blocks not in the
 * main pipeline.  Not directly accessible by dm.
 *
 * Main dc state structs:
 *
 * These structs can be created and destroyed as needed.  There is a full set of
 * these structs in dc->current_state representing the currently programmed state.
 *
 * struct dc_state - The global DC state to track global state information,
 * such as bandwidth values.
 *
 * struct dc_stream_state - Represents the hw configuration for the pipeline from
 * a framebuffer to a display.  Maps one-to-one with dc_sink.
 *
 * struct dc_plane_state - Represents a framebuffer.  Each stream has at least one,
 * and may have more in the Multi-Plane Overlay case.
 *
 * struct resource_context - Represents the programmable state of everything in
 * the resource_pool.  Not directly accessible by dm.
 *
 * struct pipe_ctx - A member of struct resource_context.  Represents the
 * internal hardware pipeline components.  Each dc_plane_state has either
 * one or two (in the pipe-split case).
 */
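
/*
 * Illustrative sketch (not part of the driver): helpers throughout this file
 * resolve a dc_stream_state to its head pipe_ctx by scanning the pipes in
 * dc->current_state. Assuming only the structs described above, the recurring
 * lookup pattern looks like this; example_find_head_pipe is a hypothetical name.
 */
#if 0   /* example only, not compiled */
static struct pipe_ctx *example_find_head_pipe(struct dc *dc,
                struct dc_stream_state *stream)
{
        int i;

        for (i = 0; i < MAX_PIPES; i++) {
                struct pipe_ctx *pipe = &dc->current_state->res_ctx.pipe_ctx[i];

                /* skip secondary pipes so only the head pipe is returned */
                if (pipe->stream == stream && !pipe->top_pipe && !pipe->prev_odm_pipe)
                        return pipe;
        }

        return NULL;
}
#endif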

/* Private functions */

static inline void elevate_update_type(enum surface_update_type *original, enum surface_update_type new)
{
        if (new > *original)
                *original = new;
}

static void destroy_links(struct dc *dc)
{
        uint32_t i;

        for (i = 0; i < dc->link_count; i++) {
                if (dc->links[i])
                        link_destroy(&dc->links[i]);
        }
}

static uint32_t get_num_of_internal_disp(struct dc_link **links, uint32_t num_links)
{
        int i;
        uint32_t count = 0;

        for (i = 0; i < num_links; i++) {
                if (links[i]->connector_signal == SIGNAL_TYPE_EDP ||
                                links[i]->is_internal_display)
                        count++;
        }

        return count;
}

static int get_seamless_boot_stream_count(struct dc_state *ctx)
{
        uint8_t i;
        uint8_t seamless_boot_stream_count = 0;

        for (i = 0; i < ctx->stream_count; i++)
                if (ctx->streams[i]->apply_seamless_boot_optimization)
                        seamless_boot_stream_count++;

        return seamless_boot_stream_count;
}

static bool create_links(
                struct dc *dc,
                uint32_t num_virtual_links)
{
        int i;
        int connectors_num;
        struct dc_bios *bios = dc->ctx->dc_bios;

        dc->link_count = 0;

        connectors_num = bios->funcs->get_connectors_number(bios);

        DC_LOG_DC("BIOS object table - number of connectors: %d", connectors_num);

        if (connectors_num > ENUM_ID_COUNT) {
                dm_error(
                        "DC: Number of connectors %d exceeds maximum of %d!\n",
                        connectors_num,
                        ENUM_ID_COUNT);
                return false;
        }

        dm_output_to_console(
                "DC: %s: connectors_num: physical:%d, virtual:%d\n",
                __func__,
                connectors_num,
                num_virtual_links);

        for (i = 0; i < connectors_num; i++) {
                struct link_init_data link_init_params = {0};
                struct dc_link *link;

                DC_LOG_DC("BIOS object table - printing link object info for connector number: %d, link_index: %d", i, dc->link_count);

                link_init_params.ctx = dc->ctx;
                /* next BIOS object table connector */
                link_init_params.connector_index = i;
                link_init_params.link_index = dc->link_count;
                link_init_params.dc = dc;
                link = link_create(&link_init_params);

                if (link) {
                        dc->links[dc->link_count] = link;
                        link->dc = dc;
                        ++dc->link_count;
                }
        }

        DC_LOG_DC("BIOS object table - end");

        /* Create a link for each usb4 dpia port */
        for (i = 0; i < dc->res_pool->usb4_dpia_count; i++) {
                struct link_init_data link_init_params = {0};
                struct dc_link *link;

                link_init_params.ctx = dc->ctx;
                link_init_params.connector_index = i;
                link_init_params.link_index = dc->link_count;
                link_init_params.dc = dc;
                link_init_params.is_dpia_link = true;

                link = link_create(&link_init_params);
                if (link) {
                        dc->links[dc->link_count] = link;
                        link->dc = dc;
                        ++dc->link_count;
                }
        }

        for (i = 0; i < num_virtual_links; i++) {
                struct dc_link *link = kzalloc(sizeof(*link), GFP_KERNEL);
                struct encoder_init_data enc_init = {0};

                if (link == NULL) {
                        BREAK_TO_DEBUGGER();
                        goto failed_alloc;
                }

                link->link_index = dc->link_count;
                dc->links[dc->link_count] = link;
                dc->link_count++;

                link->ctx = dc->ctx;
                link->dc = dc;
                link->connector_signal = SIGNAL_TYPE_VIRTUAL;
                link->link_id.type = OBJECT_TYPE_CONNECTOR;
                link->link_id.id = CONNECTOR_ID_VIRTUAL;
                link->link_id.enum_id = ENUM_ID_1;
                link->link_enc = kzalloc(sizeof(*link->link_enc), GFP_KERNEL);

                if (!link->link_enc) {
                        BREAK_TO_DEBUGGER();
                        goto failed_alloc;
                }

                link->link_status.dpcd_caps = &link->dpcd_caps;

                enc_init.ctx = dc->ctx;
                enc_init.channel = CHANNEL_ID_UNKNOWN;
                enc_init.hpd_source = HPD_SOURCEID_UNKNOWN;
                enc_init.transmitter = TRANSMITTER_UNKNOWN;
                enc_init.connector = link->link_id;
                enc_init.encoder.type = OBJECT_TYPE_ENCODER;
                enc_init.encoder.id = ENCODER_ID_INTERNAL_VIRTUAL;
                enc_init.encoder.enum_id = ENUM_ID_1;
                virtual_link_encoder_construct(link->link_enc, &enc_init);
        }

        dc->caps.num_of_internal_disp = get_num_of_internal_disp(dc->links, dc->link_count);

        return true;

failed_alloc:
        return false;
}

/* Create additional DIG link encoder objects if fewer than the platform
 * supports were created during link construction. This can happen if the
 * number of physical connectors is less than the number of DIGs.
 */
static bool create_link_encoders(struct dc *dc)
{
        bool res = true;
        unsigned int num_usb4_dpia = dc->res_pool->res_cap->num_usb4_dpia;
        unsigned int num_dig_link_enc = dc->res_pool->res_cap->num_dig_link_enc;
        int i;

        /* A platform without USB4 DPIA endpoints has a fixed mapping between DIG
         * link encoders and physical display endpoints and does not require
         * additional link encoder objects.
         */
        if (num_usb4_dpia == 0)
                return res;

        /* Create as many link encoder objects as the platform supports. DPIA
         * endpoints can be programmably mapped to any DIG.
         */
        if (num_dig_link_enc > dc->res_pool->dig_link_enc_count) {
                for (i = 0; i < num_dig_link_enc; i++) {
                        struct link_encoder *link_enc = dc->res_pool->link_encoders[i];

                        if (!link_enc && dc->res_pool->funcs->link_enc_create_minimal) {
                                link_enc = dc->res_pool->funcs->link_enc_create_minimal(dc->ctx,
                                                (enum engine_id)(ENGINE_ID_DIGA + i));
                                if (link_enc) {
                                        dc->res_pool->link_encoders[i] = link_enc;
                                        dc->res_pool->dig_link_enc_count++;
                                } else {
                                        res = false;
                                }
                        }
                }
        }

        return res;
}

/* Destroy any additional DIG link encoder objects created by
 * create_link_encoders().
 * NB: Must only be called after destroy_links().
 */
static void destroy_link_encoders(struct dc *dc)
{
        unsigned int num_usb4_dpia;
        unsigned int num_dig_link_enc;
        int i;

        if (!dc->res_pool)
                return;

        num_usb4_dpia = dc->res_pool->res_cap->num_usb4_dpia;
        num_dig_link_enc = dc->res_pool->res_cap->num_dig_link_enc;

        /* A platform without USB4 DPIA endpoints has a fixed mapping between DIG
         * link encoders and physical display endpoints and does not require
         * additional link encoder objects.
         */
        if (num_usb4_dpia == 0)
                return;

        for (i = 0; i < num_dig_link_enc; i++) {
                struct link_encoder *link_enc = dc->res_pool->link_encoders[i];

                if (link_enc) {
                        link_enc->funcs->destroy(&link_enc);
                        dc->res_pool->link_encoders[i] = NULL;
                        dc->res_pool->dig_link_enc_count--;
                }
        }
}

static struct dc_perf_trace *dc_perf_trace_create(void)
{
        return kzalloc(sizeof(struct dc_perf_trace), GFP_KERNEL);
}

static void dc_perf_trace_destroy(struct dc_perf_trace **perf_trace)
{
        kfree(*perf_trace);
        *perf_trace = NULL;
}

/**
 *  dc_stream_adjust_vmin_vmax - look up pipe context & update parts of DRR
 *  @dc:     dc reference
 *  @stream: Initial dc stream state
 *  @adjust: Updated parameters for vertical_total_min and vertical_total_max
 *
 *  Looks up the pipe context of dc_stream_state and updates the
 *  vertical_total_min and vertical_total_max of DRR (Dynamic Refresh
 *  Rate), a power-saving feature that reduces the panel refresh rate
 *  while the screen is static.
 *
 *  Return: %true if the pipe context is found and adjusted;
 *          %false if the pipe context is not found.
 */
bool dc_stream_adjust_vmin_vmax(struct dc *dc,
                struct dc_stream_state *stream,
                struct dc_crtc_timing_adjust *adjust)
{
        int i;

        /*
         * Don't adjust DRR while there are bandwidth optimizations pending to
         * avoid conflicting with firmware updates.
         */
        if (dc->optimized_required || dc->wm_optimized_required)
                return false;

        stream->adjust.v_total_max = adjust->v_total_max;
        stream->adjust.v_total_mid = adjust->v_total_mid;
        stream->adjust.v_total_mid_frame_num = adjust->v_total_mid_frame_num;
        stream->adjust.v_total_min = adjust->v_total_min;

        for (i = 0; i < MAX_PIPES; i++) {
                struct pipe_ctx *pipe = &dc->current_state->res_ctx.pipe_ctx[i];

                if (pipe->stream == stream && pipe->stream_res.tg) {
                        dc->hwss.set_drr(&pipe,
                                        1,
                                        *adjust);

                        return true;
                }
        }
        return false;
}
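
/*
 * Usage sketch (illustrative only, not part of the driver): how a DM layer
 * might request a DRR range and read back the last VTOTAL that DRR used.
 * example_apply_drr_range is a hypothetical name; the range values are
 * arbitrary and assume a valid dc/stream pair.
 */
#if 0   /* example only, not compiled */
static void example_apply_drr_range(struct dc *dc, struct dc_stream_state *stream)
{
        struct dc_crtc_timing_adjust adjust = {0};
        uint32_t last_vtotal;

        adjust.v_total_min = stream->timing.v_total;            /* fastest refresh */
        adjust.v_total_max = stream->timing.v_total * 2;        /* slowest refresh */

        /* returns false while bandwidth optimizations are still pending */
        if (!dc_stream_adjust_vmin_vmax(dc, stream, &adjust))
                return;

        /* read back the frame length DRR last used, if the TG supports it */
        dc_stream_get_last_used_drr_vtotal(dc, stream, &last_vtotal);
}
#endif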

/**
 * dc_stream_get_last_used_drr_vtotal - Looks up the pipe context of
 * dc_stream_state and gets the last VTOTAL used by DRR (Dynamic Refresh Rate)
 *
 * @dc: [in] dc reference
 * @stream: [in] Initial dc stream state
 * @refresh_rate: [out] last VTOTAL used by DRR
 *
 * Return: %true if the pipe context is found and there is an associated
 *         timing_generator for the DC;
 *         %false if the pipe context is not found or there is no
 *         timing_generator for the DC.
 */
bool dc_stream_get_last_used_drr_vtotal(struct dc *dc,
                struct dc_stream_state *stream,
                uint32_t *refresh_rate)
{
        bool status = false;

        int i = 0;

        for (i = 0; i < MAX_PIPES; i++) {
                struct pipe_ctx *pipe = &dc->current_state->res_ctx.pipe_ctx[i];

                if (pipe->stream == stream && pipe->stream_res.tg) {
                        /* Only execute if a function pointer has been defined for
                         * the DC version in question
                         */
                        if (pipe->stream_res.tg->funcs->get_last_used_drr_vtotal) {
                                pipe->stream_res.tg->funcs->get_last_used_drr_vtotal(pipe->stream_res.tg, refresh_rate);

                                status = true;

                                break;
                        }
                }
        }

        return status;
}

bool dc_stream_get_crtc_position(struct dc *dc,
                struct dc_stream_state **streams, int num_streams,
                unsigned int *v_pos, unsigned int *nom_v_pos)
{
        /* TODO: Support multiple streams */
        const struct dc_stream_state *stream = streams[0];
        int i;
        bool ret = false;
        struct crtc_position position;

        for (i = 0; i < MAX_PIPES; i++) {
                struct pipe_ctx *pipe =
                                &dc->current_state->res_ctx.pipe_ctx[i];

                if (pipe->stream == stream && pipe->stream_res.stream_enc) {
                        dc->hwss.get_position(&pipe, 1, &position);

                        *v_pos = position.vertical_count;
                        *nom_v_pos = position.nominal_vcount;
                        ret = true;
                }
        }
        return ret;
}

#if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
static inline void
dc_stream_forward_dmub_crc_window(struct dc_dmub_srv *dmub_srv,
                struct rect *rect, struct otg_phy_mux *mux_mapping, bool is_stop)
{
        union dmub_rb_cmd cmd = {0};

        cmd.secure_display.roi_info.phy_id = mux_mapping->phy_output_num;
        cmd.secure_display.roi_info.otg_id = mux_mapping->otg_output_num;

        if (is_stop) {
                cmd.secure_display.header.type = DMUB_CMD__SECURE_DISPLAY;
                cmd.secure_display.header.sub_type = DMUB_CMD__SECURE_DISPLAY_CRC_STOP_UPDATE;
        } else {
                cmd.secure_display.header.type = DMUB_CMD__SECURE_DISPLAY;
                cmd.secure_display.header.sub_type = DMUB_CMD__SECURE_DISPLAY_CRC_WIN_NOTIFY;
                cmd.secure_display.roi_info.x_start = rect->x;
                cmd.secure_display.roi_info.y_start = rect->y;
                cmd.secure_display.roi_info.x_end = rect->x + rect->width;
                cmd.secure_display.roi_info.y_end = rect->y + rect->height;
        }

        dc_dmub_srv_cmd_queue(dmub_srv, &cmd);
        dc_dmub_srv_cmd_execute(dmub_srv);
}

static inline void
dc_stream_forward_dmcu_crc_window(struct dmcu *dmcu,
                struct rect *rect, struct otg_phy_mux *mux_mapping, bool is_stop)
{
        if (is_stop)
                dmcu->funcs->stop_crc_win_update(dmcu, mux_mapping);
        else
                dmcu->funcs->forward_crc_window(dmcu, rect, mux_mapping);
}

bool
dc_stream_forward_crc_window(struct dc_stream_state *stream,
                struct rect *rect, bool is_stop)
{
        struct dmcu *dmcu;
        struct dc_dmub_srv *dmub_srv;
        struct otg_phy_mux mux_mapping;
        struct pipe_ctx *pipe;
        int i;
        struct dc *dc = stream->ctx->dc;

        for (i = 0; i < MAX_PIPES; i++) {
                pipe = &dc->current_state->res_ctx.pipe_ctx[i];
                if (pipe->stream == stream && !pipe->top_pipe && !pipe->prev_odm_pipe)
                        break;
        }

        /* Stream not found */
        if (i == MAX_PIPES)
                return false;

        mux_mapping.phy_output_num = stream->link->link_enc_hw_inst;
        mux_mapping.otg_output_num = pipe->stream_res.tg->inst;

        dmcu = dc->res_pool->dmcu;
        dmub_srv = dc->ctx->dmub_srv;

        /* forward to dmub */
        if (dmub_srv)
                dc_stream_forward_dmub_crc_window(dmub_srv, rect, &mux_mapping, is_stop);
        /* forward to dmcu */
        else if (dmcu && dmcu->funcs->is_dmcu_initialized(dmcu))
                dc_stream_forward_dmcu_crc_window(dmcu, rect, &mux_mapping, is_stop);
        else
                return false;

        return true;
}
#endif /* CONFIG_DRM_AMD_SECURE_DISPLAY */

/**
 * dc_stream_configure_crc() - Configure CRC capture for the given stream.
 * @dc: DC Object
 * @stream: The stream to configure CRC on.
 * @enable: Enable CRC if true, disable otherwise.
 * @crc_window: CRC window (x/y start/end) information
 * @continuous: Capture CRC on every frame if true. Otherwise, only capture
 *              once.
 *
 * By default, only CRC0 is configured, and the entire frame is used to
 * calculate the CRC.
 *
 * Return: %false if the stream is not found or CRC capture is not supported;
 *         %true if the stream has been configured.
 */
bool dc_stream_configure_crc(struct dc *dc, struct dc_stream_state *stream,
                             struct crc_params *crc_window, bool enable, bool continuous)
{
        int i;
        struct pipe_ctx *pipe;
        struct crc_params param;
        struct timing_generator *tg;

        for (i = 0; i < MAX_PIPES; i++) {
                pipe = &dc->current_state->res_ctx.pipe_ctx[i];
                if (pipe->stream == stream && !pipe->top_pipe && !pipe->prev_odm_pipe)
                        break;
        }
        /* Stream not found */
        if (i == MAX_PIPES)
                return false;

        /* By default, capture the full frame */
        param.windowa_x_start = 0;
        param.windowa_y_start = 0;
        param.windowa_x_end = pipe->stream->timing.h_addressable;
        param.windowa_y_end = pipe->stream->timing.v_addressable;
        param.windowb_x_start = 0;
        param.windowb_y_start = 0;
        param.windowb_x_end = pipe->stream->timing.h_addressable;
        param.windowb_y_end = pipe->stream->timing.v_addressable;

        if (crc_window) {
                param.windowa_x_start = crc_window->windowa_x_start;
                param.windowa_y_start = crc_window->windowa_y_start;
                param.windowa_x_end = crc_window->windowa_x_end;
                param.windowa_y_end = crc_window->windowa_y_end;
                param.windowb_x_start = crc_window->windowb_x_start;
                param.windowb_y_start = crc_window->windowb_y_start;
                param.windowb_x_end = crc_window->windowb_x_end;
                param.windowb_y_end = crc_window->windowb_y_end;
        }

        param.dsc_mode = pipe->stream->timing.flags.DSC ? 1 : 0;
        param.odm_mode = pipe->next_odm_pipe ? 1 : 0;

        /* Default to the union of both windows */
        param.selection = UNION_WINDOW_A_B;
        param.continuous_mode = continuous;
        param.enable = enable;

        tg = pipe->stream_res.tg;

        /* Only call if supported */
        if (tg->funcs->configure_crc)
                return tg->funcs->configure_crc(tg, &param);
        DC_LOG_WARNING("CRC capture not supported.");
        return false;
}

/**
 * dc_stream_get_crc() - Get CRC values for the given stream.
 *
 * @dc: DC object.
 * @stream: The DC stream state of the stream to get CRCs from.
 * @r_cr: CRC value for the red component.
 * @g_y:  CRC value for the green component.
 * @b_cb: CRC value for the blue component.
 *
 * dc_stream_configure_crc needs to be called beforehand to enable CRCs.
 *
 * Return:
 * %false if stream is not found, or if CRCs are not enabled.
 */
bool dc_stream_get_crc(struct dc *dc, struct dc_stream_state *stream,
                       uint32_t *r_cr, uint32_t *g_y, uint32_t *b_cb)
{
        int i;
        struct pipe_ctx *pipe;
        struct timing_generator *tg;

        for (i = 0; i < MAX_PIPES; i++) {
                pipe = &dc->current_state->res_ctx.pipe_ctx[i];
                if (pipe->stream == stream)
                        break;
        }
        /* Stream not found */
        if (i == MAX_PIPES)
                return false;

        tg = pipe->stream_res.tg;

        if (tg->funcs->get_crc)
                return tg->funcs->get_crc(tg, r_cr, g_y, b_cb);
        DC_LOG_WARNING("CRC capture not supported.");
        return false;
}
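
/*
 * Usage sketch (illustrative only, not part of the driver): pairing
 * dc_stream_configure_crc() with dc_stream_get_crc(). example_read_crc is a
 * hypothetical name; a real caller would wait for at least one vblank
 * between configuring and reading so the CRC has latched.
 */
#if 0   /* example only, not compiled */
static bool example_read_crc(struct dc *dc, struct dc_stream_state *stream)
{
        uint32_t r_cr, g_y, b_cb;

        /* NULL crc_window selects the default full-frame windows */
        if (!dc_stream_configure_crc(dc, stream, NULL, true, true))
                return false;

        /* wait for a frame here in real code, then read per-component CRCs */
        return dc_stream_get_crc(dc, stream, &r_cr, &g_y, &b_cb);
}
#endif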

void dc_stream_set_dyn_expansion(struct dc *dc, struct dc_stream_state *stream,
                enum dc_dynamic_expansion option)
{
        /* OPP FMT dyn expansion updates */
        int i;
        struct pipe_ctx *pipe_ctx;

        for (i = 0; i < MAX_PIPES; i++) {
                if (dc->current_state->res_ctx.pipe_ctx[i].stream
                                == stream) {
                        pipe_ctx = &dc->current_state->res_ctx.pipe_ctx[i];
                        pipe_ctx->stream_res.opp->dyn_expansion = option;
                        pipe_ctx->stream_res.opp->funcs->opp_set_dyn_expansion(
                                        pipe_ctx->stream_res.opp,
                                        COLOR_SPACE_YCBCR601,
                                        stream->timing.display_color_depth,
                                        stream->signal);
                }
        }
}

void dc_stream_set_dither_option(struct dc_stream_state *stream,
                enum dc_dither_option option)
{
        struct bit_depth_reduction_params params;
        struct dc_link *link = stream->link;
        struct pipe_ctx *pipes = NULL;
        int i;

        for (i = 0; i < MAX_PIPES; i++) {
                if (link->dc->current_state->res_ctx.pipe_ctx[i].stream ==
                                stream) {
                        pipes = &link->dc->current_state->res_ctx.pipe_ctx[i];
                        break;
                }
        }

        if (!pipes)
                return;
        if (option > DITHER_OPTION_MAX)
                return;

        stream->dither_option = option;

        memset(&params, 0, sizeof(params));
        resource_build_bit_depth_reduction_params(stream, &params);
        stream->bit_depth_params = params;

        if (pipes->plane_res.xfm &&
            pipes->plane_res.xfm->funcs->transform_set_pixel_storage_depth) {
                pipes->plane_res.xfm->funcs->transform_set_pixel_storage_depth(
                        pipes->plane_res.xfm,
                        pipes->plane_res.scl_data.lb_params.depth,
                        &stream->bit_depth_params);
        }

        pipes->stream_res.opp->funcs->
                opp_program_bit_depth_reduction(pipes->stream_res.opp, &params);
}

bool dc_stream_set_gamut_remap(struct dc *dc, const struct dc_stream_state *stream)
{
        int i;
        bool ret = false;
        struct pipe_ctx *pipes;

        for (i = 0; i < MAX_PIPES; i++) {
                if (dc->current_state->res_ctx.pipe_ctx[i].stream == stream) {
                        pipes = &dc->current_state->res_ctx.pipe_ctx[i];
                        dc->hwss.program_gamut_remap(pipes);
                        ret = true;
                }
        }

        return ret;
}

bool dc_stream_program_csc_matrix(struct dc *dc, struct dc_stream_state *stream)
{
        int i;
        bool ret = false;
        struct pipe_ctx *pipes;

        for (i = 0; i < MAX_PIPES; i++) {
                if (dc->current_state->res_ctx.pipe_ctx[i].stream
                                == stream) {

                        pipes = &dc->current_state->res_ctx.pipe_ctx[i];
                        dc->hwss.program_output_csc(dc,
                                        pipes,
                                        stream->output_color_space,
                                        stream->csc_color_matrix.matrix,
                                        pipes->stream_res.opp->inst);
                        ret = true;
                }
        }

        return ret;
}

void dc_stream_set_static_screen_params(struct dc *dc,
                struct dc_stream_state **streams,
                int num_streams,
                const struct dc_static_screen_params *params)
{
        int i, j;
        struct pipe_ctx *pipes_affected[MAX_PIPES];
        int num_pipes_affected = 0;

        for (i = 0; i < num_streams; i++) {
                struct dc_stream_state *stream = streams[i];

                for (j = 0; j < MAX_PIPES; j++) {
                        if (dc->current_state->res_ctx.pipe_ctx[j].stream
                                        == stream) {
                                pipes_affected[num_pipes_affected++] =
                                                &dc->current_state->res_ctx.pipe_ctx[j];
                        }
                }
        }

        dc->hwss.set_static_screen_control(pipes_affected, num_pipes_affected, params);
}

static void dc_destruct(struct dc *dc)
{
        // reset link encoder assignment table on destruct
        if (dc->res_pool && dc->res_pool->funcs->link_encs_assign)
                link_enc_cfg_init(dc, dc->current_state);

        if (dc->current_state) {
                dc_release_state(dc->current_state);
                dc->current_state = NULL;
        }

        destroy_links(dc);

        destroy_link_encoders(dc);

        if (dc->clk_mgr) {
                dc_destroy_clk_mgr(dc->clk_mgr);
                dc->clk_mgr = NULL;
        }

        dc_destroy_resource_pool(dc);

        if (dc->ctx->gpio_service)
                dal_gpio_service_destroy(&dc->ctx->gpio_service);

        if (dc->ctx->created_bios)
                dal_bios_parser_destroy(&dc->ctx->dc_bios);

        dc_perf_trace_destroy(&dc->ctx->perf_trace);

        kfree(dc->ctx);
        dc->ctx = NULL;

        kfree(dc->bw_vbios);
        dc->bw_vbios = NULL;

        kfree(dc->bw_dceip);
        dc->bw_dceip = NULL;

        kfree(dc->dcn_soc);
        dc->dcn_soc = NULL;

        kfree(dc->dcn_ip);
        dc->dcn_ip = NULL;

        kfree(dc->vm_helper);
        dc->vm_helper = NULL;
}

static bool dc_construct_ctx(struct dc *dc,
                const struct dc_init_data *init_params)
{
        struct dc_context *dc_ctx;
        enum dce_version dc_version = DCE_VERSION_UNKNOWN;

        dc_ctx = kzalloc(sizeof(*dc_ctx), GFP_KERNEL);
        if (!dc_ctx)
                return false;

        dc_ctx->cgs_device = init_params->cgs_device;
        dc_ctx->driver_context = init_params->driver;
        dc_ctx->dc = dc;
        dc_ctx->asic_id = init_params->asic_id;
        dc_ctx->dc_sink_id_count = 0;
        dc_ctx->dc_stream_id_count = 0;
        dc_ctx->dce_environment = init_params->dce_environment;
        dc_ctx->dcn_reg_offsets = init_params->dcn_reg_offsets;
        dc_ctx->nbio_reg_offsets = init_params->nbio_reg_offsets;

        /* Create logger */

        dc_version = resource_parse_asic_id(init_params->asic_id);
        dc_ctx->dce_version = dc_version;

        dc_ctx->perf_trace = dc_perf_trace_create();
        if (!dc_ctx->perf_trace) {
                kfree(dc_ctx);
                ASSERT_CRITICAL(false);
                return false;
        }

        dc->ctx = dc_ctx;

        return true;
}

static bool dc_construct(struct dc *dc,
                const struct dc_init_data *init_params)
{
        struct dc_context *dc_ctx;
        struct bw_calcs_dceip *dc_dceip;
        struct bw_calcs_vbios *dc_vbios;
        struct dcn_soc_bounding_box *dcn_soc;
        struct dcn_ip_params *dcn_ip;

        dc->config = init_params->flags;

        // Allocate memory for the vm_helper
        dc->vm_helper = kzalloc(sizeof(struct vm_helper), GFP_KERNEL);
        if (!dc->vm_helper) {
                dm_error("%s: failed to create dc->vm_helper\n", __func__);
                goto fail;
        }

        memcpy(&dc->bb_overrides, &init_params->bb_overrides, sizeof(dc->bb_overrides));

        dc_dceip = kzalloc(sizeof(*dc_dceip), GFP_KERNEL);
        if (!dc_dceip) {
                dm_error("%s: failed to create dceip\n", __func__);
                goto fail;
        }

        dc->bw_dceip = dc_dceip;

        dc_vbios = kzalloc(sizeof(*dc_vbios), GFP_KERNEL);
        if (!dc_vbios) {
                dm_error("%s: failed to create vbios\n", __func__);
                goto fail;
        }

        dc->bw_vbios = dc_vbios;
        dcn_soc = kzalloc(sizeof(*dcn_soc), GFP_KERNEL);
        if (!dcn_soc) {
                dm_error("%s: failed to create dcn_soc\n", __func__);
                goto fail;
        }

        dc->dcn_soc = dcn_soc;

        dcn_ip = kzalloc(sizeof(*dcn_ip), GFP_KERNEL);
        if (!dcn_ip) {
                dm_error("%s: failed to create dcn_ip\n", __func__);
                goto fail;
        }

        dc->dcn_ip = dcn_ip;

        if (!dc_construct_ctx(dc, init_params)) {
                dm_error("%s: failed to create ctx\n", __func__);
                goto fail;
        }

        dc_ctx = dc->ctx;

        /* Resource should construct all asic specific resources.
         * This should be the only place where we need to parse the asic id
         */
        if (init_params->vbios_override)
                dc_ctx->dc_bios = init_params->vbios_override;
        else {
                /* Create BIOS parser */
                struct bp_init_data bp_init_data;

                bp_init_data.ctx = dc_ctx;
                bp_init_data.bios = init_params->asic_id.atombios_base_address;

                dc_ctx->dc_bios = dal_bios_parser_create(
                                &bp_init_data, dc_ctx->dce_version);

                if (!dc_ctx->dc_bios) {
                        ASSERT_CRITICAL(false);
                        goto fail;
                }

                dc_ctx->created_bios = true;
        }

        dc->vendor_signature = init_params->vendor_signature;

        /* Create GPIO service */
        dc_ctx->gpio_service = dal_gpio_service_create(
                        dc_ctx->dce_version,
                        dc_ctx->dce_environment,
                        dc_ctx);

        if (!dc_ctx->gpio_service) {
                ASSERT_CRITICAL(false);
                goto fail;
        }

        dc->link_srv = link_get_link_service();

        dc->res_pool = dc_create_resource_pool(dc, init_params, dc_ctx->dce_version);
        if (!dc->res_pool)
                goto fail;

        /* set i2c speed if not done by the respective dcnxxx_resource.c */
        if (dc->caps.i2c_speed_in_khz_hdcp == 0)
                dc->caps.i2c_speed_in_khz_hdcp = dc->caps.i2c_speed_in_khz;

        dc->clk_mgr = dc_clk_mgr_create(dc->ctx, dc->res_pool->pp_smu, dc->res_pool->dccg);
        if (!dc->clk_mgr)
                goto fail;
#ifdef CONFIG_DRM_AMD_DC_FP
        dc->clk_mgr->force_smu_not_present = init_params->force_smu_not_present;

        if (dc->res_pool->funcs->update_bw_bounding_box) {
                DC_FP_START();
                dc->res_pool->funcs->update_bw_bounding_box(dc, dc->clk_mgr->bw_params);
                DC_FP_END();
        }
#endif

        /* Creation of current_state must occur after dc->dml
         * is initialized in dc_create_resource_pool because
         * on creation it copies the contents of dc->dml
         */

        dc->current_state = dc_create_state(dc);

        if (!dc->current_state) {
                dm_error("%s: failed to create validate ctx\n", __func__);
                goto fail;
        }

        if (!create_links(dc, init_params->num_virtual_links))
                goto fail;

        /* Create additional DIG link encoder objects if fewer than the platform
         * supports were created during link construction.
         */
        if (!create_link_encoders(dc))
                goto fail;

        dc_resource_state_construct(dc, dc->current_state);

        return true;

fail:
        return false;
}

static void disable_all_writeback_pipes_for_stream(
                const struct dc *dc,
                struct dc_stream_state *stream,
                struct dc_state *context)
{
        int i;

        for (i = 0; i < stream->num_wb_info; i++)
                stream->writeback_info[i].wb_enabled = false;
}

static void apply_ctx_interdependent_lock(struct dc *dc, struct dc_state *context,
                                          struct dc_stream_state *stream, bool lock)
{
        int i;

        /* Checks if interdependent update function pointer is NULL or not, takes care of DCE110 case */
        if (dc->hwss.interdependent_update_lock)
                dc->hwss.interdependent_update_lock(dc, context, lock);
        else {
                for (i = 0; i < dc->res_pool->pipe_count; i++) {
                        struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[i];
                        struct pipe_ctx *old_pipe_ctx = &dc->current_state->res_ctx.pipe_ctx[i];

                        // Copied conditions that were previously in dce110_apply_ctx_for_surface
                        if (stream == pipe_ctx->stream) {
                                if (!pipe_ctx->top_pipe &&
                                        (pipe_ctx->plane_state || old_pipe_ctx->plane_state))
                                        dc->hwss.pipe_control_lock(dc, pipe_ctx, lock);
                        }
                }
        }
}

static void phantom_pipe_blank(
                struct dc *dc,
                struct timing_generator *tg,
                int width,
                int height)
{
        struct dce_hwseq *hws = dc->hwseq;
        enum dc_color_space color_space;
        struct tg_color black_color = {0};
        struct output_pixel_processor *opp = NULL;
        uint32_t num_opps, opp_id_src0, opp_id_src1;
        uint32_t otg_active_width, otg_active_height;

        /* program opp dpg blank color */
        color_space = COLOR_SPACE_SRGB;
        color_space_to_black_color(dc, color_space, &black_color);

        otg_active_width = width;
        otg_active_height = height;

        /* get the OPTC source */
        tg->funcs->get_optc_source(tg, &num_opps, &opp_id_src0, &opp_id_src1);
        ASSERT(opp_id_src0 < dc->res_pool->res_cap->num_opp);
        opp = dc->res_pool->opps[opp_id_src0];

        opp->funcs->opp_set_disp_pattern_generator(
                        opp,
                        CONTROLLER_DP_TEST_PATTERN_SOLID_COLOR,
                        CONTROLLER_DP_COLOR_SPACE_UDEFINED,
                        COLOR_DEPTH_UNDEFINED,
                        &black_color,
                        otg_active_width,
                        otg_active_height,
                        0);

        if (tg->funcs->is_tg_enabled(tg))
                hws->funcs.wait_for_blank_complete(opp);
}

static void disable_dangling_plane(struct dc *dc, struct dc_state *context)
{
        int i, j;
        struct dc_state *dangling_context = dc_create_state(dc);
        struct dc_state *current_ctx;
        struct pipe_ctx *pipe;
        struct timing_generator *tg;

        if (dangling_context == NULL)
                return;

        dc_resource_state_copy_construct(dc->current_state, dangling_context);

        for (i = 0; i < dc->res_pool->pipe_count; i++) {
                struct dc_stream_state *old_stream =
                                dc->current_state->res_ctx.pipe_ctx[i].stream;
                bool should_disable = true;
                bool pipe_split_change = false;

                if ((context->res_ctx.pipe_ctx[i].top_pipe) &&
                        (dc->current_state->res_ctx.pipe_ctx[i].top_pipe))
                        pipe_split_change = context->res_ctx.pipe_ctx[i].top_pipe->pipe_idx !=
                                dc->current_state->res_ctx.pipe_ctx[i].top_pipe->pipe_idx;
                else
                        pipe_split_change = context->res_ctx.pipe_ctx[i].top_pipe !=
                                dc->current_state->res_ctx.pipe_ctx[i].top_pipe;

                for (j = 0; j < context->stream_count; j++) {
                        if (old_stream == context->streams[j]) {
                                should_disable = false;
                                break;
                        }
                }
                if (!should_disable && pipe_split_change &&
                                dc->current_state->stream_count != context->stream_count)
                        should_disable = true;

                if (old_stream && !dc->current_state->res_ctx.pipe_ctx[i].top_pipe &&
                                !dc->current_state->res_ctx.pipe_ctx[i].prev_odm_pipe) {
                        struct pipe_ctx *old_pipe, *new_pipe;

                        old_pipe = &dc->current_state->res_ctx.pipe_ctx[i];
                        new_pipe = &context->res_ctx.pipe_ctx[i];

                        if (old_pipe->plane_state && !new_pipe->plane_state)
                                should_disable = true;
                }

                if (should_disable && old_stream) {
                        pipe = &dc->current_state->res_ctx.pipe_ctx[i];
                        tg = pipe->stream_res.tg;
                        /* When disabling plane for a phantom pipe, we must turn on the
                         * phantom OTG so the disable programming gets the double buffer
                         * update. Otherwise the pipe will be left in a partially disabled
                         * state that can result in underflow or hang when enabling it
                         * again for different use.
                         */
                        if (old_stream->mall_stream_config.type == SUBVP_PHANTOM) {
                                if (tg->funcs->enable_crtc) {
                                        int main_pipe_width, main_pipe_height;

                                        main_pipe_width = old_stream->mall_stream_config.paired_stream->dst.width;
                                        main_pipe_height = old_stream->mall_stream_config.paired_stream->dst.height;
                                        phantom_pipe_blank(dc, tg, main_pipe_width, main_pipe_height);
                                        tg->funcs->enable_crtc(tg);
                                }
                        }
                        dc_rem_all_planes_for_stream(dc, old_stream, dangling_context);
                        disable_all_writeback_pipes_for_stream(dc, old_stream, dangling_context);

                        if (dc->hwss.apply_ctx_for_surface) {
                                apply_ctx_interdependent_lock(dc, dc->current_state, old_stream, true);
                                dc->hwss.apply_ctx_for_surface(dc, old_stream, 0, dangling_context);
                                apply_ctx_interdependent_lock(dc, dc->current_state, old_stream, false);
                                dc->hwss.post_unlock_program_front_end(dc, dangling_context);
                        }
                        if (dc->hwss.program_front_end_for_ctx) {
                                dc->hwss.interdependent_update_lock(dc, dc->current_state, true);
                                dc->hwss.program_front_end_for_ctx(dc, dangling_context);
                                dc->hwss.interdependent_update_lock(dc, dc->current_state, false);
                                dc->hwss.post_unlock_program_front_end(dc, dangling_context);
                        }
                        /* We need to put the phantom OTG back into its default (disabled) state or we
                         * can get corruption when transitioning from one SubVP config to a different one.
                         * The OTG is set to disable on falling edge of VUPDATE so the plane disable
                         * will still get its double buffer update.
                         */
                        if (old_stream->mall_stream_config.type == SUBVP_PHANTOM) {
                                if (tg->funcs->disable_phantom_crtc)
                                        tg->funcs->disable_phantom_crtc(tg);
                        }
                }
        }

        current_ctx = dc->current_state;
        dc->current_state = dangling_context;
        dc_release_state(current_ctx);
}

static void disable_vbios_mode_if_required(
                struct dc *dc,
                struct dc_state *context)
{
        unsigned int i, j;

        /* If the timing changed, disable the stream */
        for (i = 0; i < dc->res_pool->pipe_count; i++) {
                struct dc_stream_state *stream = NULL;
                struct dc_link *link = NULL;
                struct pipe_ctx *pipe = NULL;

                pipe = &context->res_ctx.pipe_ctx[i];
                stream = pipe->stream;
                if (stream == NULL)
                        continue;

                // only looking for first odm pipe
                if (pipe->prev_odm_pipe)
                        continue;

                if (stream->link->local_sink &&
                        stream->link->local_sink->sink_signal == SIGNAL_TYPE_EDP) {
                        link = stream->link;
                }

                if (link != NULL && link->link_enc->funcs->is_dig_enabled(link->link_enc)) {
                        unsigned int enc_inst, tg_inst = 0;
                        unsigned int pix_clk_100hz;

                        enc_inst = link->link_enc->funcs->get_dig_frontend(link->link_enc);
                        if (enc_inst != ENGINE_ID_UNKNOWN) {
                                for (j = 0; j < dc->res_pool->stream_enc_count; j++) {
                                        if (dc->res_pool->stream_enc[j]->id == enc_inst) {
                                                tg_inst = dc->res_pool->stream_enc[j]->funcs->dig_source_otg(
                                                        dc->res_pool->stream_enc[j]);
                                                break;
                                        }
                                }

                                dc->res_pool->dp_clock_source->funcs->get_pixel_clk_frequency_100hz(
                                        dc->res_pool->dp_clock_source,
                                        tg_inst, &pix_clk_100hz);

                                if (link->link_status.link_active) {
                                        uint32_t requested_pix_clk_100hz =
                                                pipe->stream_res.pix_clk_params.requested_pix_clk_100hz;

                                        if (pix_clk_100hz != requested_pix_clk_100hz) {
                                                link_set_dpms_off(pipe);
                                                pipe->stream->dpms_off = false;
                                        }
                                }
                        }
                }
        }
}

static void wait_for_no_pipes_pending(struct dc *dc, struct dc_state *context)
{
        int i;
        PERF_TRACE();
        for (i = 0; i < MAX_PIPES; i++) {
                int count = 0;
                struct pipe_ctx *pipe = &context->res_ctx.pipe_ctx[i];

                if (!pipe->plane_state || pipe->stream->mall_stream_config.type == SUBVP_PHANTOM)
                        continue;

                /* Timeout 100 ms */
                while (count < 100000) {
                        /* Must set to false to start with, due to OR in update function */
                        pipe->plane_state->status.is_flip_pending = false;
                        dc->hwss.update_pending_status(pipe);
                        if (!pipe->plane_state->status.is_flip_pending)
                                break;
                        udelay(1);
                        count++;
                }
                ASSERT(!pipe->plane_state->status.is_flip_pending);
        }
        PERF_TRACE();
}

/* Public functions */

struct dc *dc_create(const struct dc_init_data *init_params)
{
        struct dc *dc = kzalloc(sizeof(*dc), GFP_KERNEL);
        unsigned int full_pipe_count;

        if (!dc)
                return NULL;

        if (init_params->dce_environment == DCE_ENV_VIRTUAL_HW) {
                if (!dc_construct_ctx(dc, init_params))
                        goto destruct_dc;
        } else {
                if (!dc_construct(dc, init_params))
                        goto destruct_dc;

                full_pipe_count = dc->res_pool->pipe_count;
                if (dc->res_pool->underlay_pipe_index != NO_UNDERLAY_PIPE)
                        full_pipe_count--;
                dc->caps.max_streams = min(
                                full_pipe_count,
                                dc->res_pool->stream_enc_count);

                dc->caps.max_links = dc->link_count;
                dc->caps.max_audios = dc->res_pool->audio_count;
                dc->caps.linear_pitch_alignment = 64;

                dc->caps.max_dp_protocol_version = DP_VERSION_1_4;

                dc->caps.max_otg_num = dc->res_pool->res_cap->num_timing_generator;

                if (dc->res_pool->dmcu != NULL)
                        dc->versions.dmcu_version = dc->res_pool->dmcu->dmcu_version;
        }

        dc->dcn_reg_offsets = init_params->dcn_reg_offsets;
        dc->nbio_reg_offsets = init_params->nbio_reg_offsets;

        /* Populate versioning information */
        dc->versions.dc_ver = DC_VER;

        dc->build_id = DC_BUILD_ID;

        DC_LOG_DC("Display Core initialized\n");

        return dc;

destruct_dc:
        dc_destruct(dc);
        kfree(dc);
        return NULL;
}

static void detect_edp_presence(struct dc *dc)
{
        struct dc_link *edp_links[MAX_NUM_EDP];
        struct dc_link *edp_link = NULL;
        enum dc_connection_type type;
        int i;
        int edp_num;

        dc_get_edp_links(dc, edp_links, &edp_num);
        if (!edp_num)
                return;

        for (i = 0; i < edp_num; i++) {
                edp_link = edp_links[i];
                if (dc->config.edp_not_connected) {
                        edp_link->edp_sink_present = false;
                } else {
                        dc_link_detect_connection_type(edp_link, &type);
                        edp_link->edp_sink_present = (type != dc_connection_none);
                }
        }
}

void dc_hardware_init(struct dc *dc)
{
        detect_edp_presence(dc);
        if (dc->ctx->dce_environment != DCE_ENV_VIRTUAL_HW)
                dc->hwss.init_hw(dc);
}

void dc_init_callbacks(struct dc *dc,
                const struct dc_callback_init *init_params)
{
        dc->ctx->cp_psp = init_params->cp_psp;
}

void dc_deinit_callbacks(struct dc *dc)
{
        memset(&dc->ctx->cp_psp, 0, sizeof(dc->ctx->cp_psp));
}

void dc_destroy(struct dc **dc)
{
        dc_destruct(*dc);
        kfree(*dc);
        *dc = NULL;
}
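
/*
 * Lifecycle sketch (illustrative only, not part of the driver): the typical
 * create/init/destroy ordering a display manager follows. example_bring_up
 * is a hypothetical name and assumes init_params was filled in by the caller.
 */
#if 0   /* example only, not compiled */
static struct dc *example_bring_up(const struct dc_init_data *init_params)
{
        struct dc *dc = dc_create(init_params);

        if (!dc)
                return NULL;

        /* programs the hardware unless running on virtual HW */
        dc_hardware_init(dc);

        return dc;
        /* ...and on unload: dc_destroy(&dc); tears down and NULLs the pointer */
}
#endif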

static void enable_timing_multisync(
                struct dc *dc,
                struct dc_state *ctx)
{
        int i, multisync_count = 0;
        int pipe_count = dc->res_pool->pipe_count;
        struct pipe_ctx *multisync_pipes[MAX_PIPES] = { NULL };

        for (i = 0; i < pipe_count; i++) {
                if (!ctx->res_ctx.pipe_ctx[i].stream ||
                                !ctx->res_ctx.pipe_ctx[i].stream->triggered_crtc_reset.enabled)
                        continue;
                if (ctx->res_ctx.pipe_ctx[i].stream == ctx->res_ctx.pipe_ctx[i].stream->triggered_crtc_reset.event_source)
                        continue;
                multisync_pipes[multisync_count] = &ctx->res_ctx.pipe_ctx[i];
                multisync_count++;
        }

        if (multisync_count > 0) {
                dc->hwss.enable_per_frame_crtc_position_reset(
                        dc, multisync_count, multisync_pipes);
        }
}
1420
1421 static void program_timing_sync(
1422                 struct dc *dc,
1423                 struct dc_state *ctx)
1424 {
1425         int i, j, k;
1426         int group_index = 0;
1427         int num_group = 0;
1428         int pipe_count = dc->res_pool->pipe_count;
1429         struct pipe_ctx *unsynced_pipes[MAX_PIPES] = { NULL };
1430
1431         for (i = 0; i < pipe_count; i++) {
1432                 if (!ctx->res_ctx.pipe_ctx[i].stream
1433                                 || ctx->res_ctx.pipe_ctx[i].top_pipe
1434                                 || ctx->res_ctx.pipe_ctx[i].prev_odm_pipe)
1435                         continue;
1436
1437                 unsynced_pipes[i] = &ctx->res_ctx.pipe_ctx[i];
1438         }
1439
1440         for (i = 0; i < pipe_count; i++) {
1441                 int group_size = 1;
1442                 enum timing_synchronization_type sync_type = NOT_SYNCHRONIZABLE;
1443                 struct pipe_ctx *pipe_set[MAX_PIPES];
1444
1445                 if (!unsynced_pipes[i])
1446                         continue;
1447
1448                 pipe_set[0] = unsynced_pipes[i];
1449                 unsynced_pipes[i] = NULL;
1450
1451                 /* Add this tg to the set, then search the remaining tgs for
1452                  * ones with the same timing and add them to the group.
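                  *
                  * Illustration (hypothetical case): three streams driving
                  * identical 1080p60 timings end up in one group with
                  * group_size == 3; the master pipe is picked below.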
1453                  */
1454                 for (j = i + 1; j < pipe_count; j++) {
1455                         if (!unsynced_pipes[j])
1456                                 continue;
1457                         if (sync_type != TIMING_SYNCHRONIZABLE &&
1458                                 dc->hwss.enable_vblanks_synchronization &&
1459                                 unsynced_pipes[j]->stream_res.tg->funcs->align_vblanks &&
1460                                 resource_are_vblanks_synchronizable(
1461                                         unsynced_pipes[j]->stream,
1462                                         pipe_set[0]->stream)) {
1463                                 sync_type = VBLANK_SYNCHRONIZABLE;
1464                                 pipe_set[group_size] = unsynced_pipes[j];
1465                                 unsynced_pipes[j] = NULL;
1466                                 group_size++;
1467                         } else if (sync_type != VBLANK_SYNCHRONIZABLE &&
1469                                 resource_are_streams_timing_synchronizable(
1470                                         unsynced_pipes[j]->stream,
1471                                         pipe_set[0]->stream)) {
1472                                 sync_type = TIMING_SYNCHRONIZABLE;
1473                                 pipe_set[group_size] = unsynced_pipes[j];
1474                                 unsynced_pipes[j] = NULL;
1475                                 group_size++;
1476                         }
1477                 }
1478
1479                 /* set first unblanked pipe as master */
1480                 for (j = 0; j < group_size; j++) {
1481                         bool is_blanked;
1482
1483                         if (pipe_set[j]->stream_res.opp->funcs->dpg_is_blanked)
1484                                 is_blanked =
1485                                         pipe_set[j]->stream_res.opp->funcs->dpg_is_blanked(pipe_set[j]->stream_res.opp);
1486                         else
1487                                 is_blanked =
1488                                         pipe_set[j]->stream_res.tg->funcs->is_blanked(pipe_set[j]->stream_res.tg);
1489                         if (!is_blanked) {
1490                                 if (j == 0)
1491                                         break;
1492
1493                                 swap(pipe_set[0], pipe_set[j]);
1494                                 break;
1495                         }
1496                 }
1497
1498                 for (k = 0; k < group_size; k++) {
1499                         struct dc_stream_status *status = dc_stream_get_status_from_state(ctx, pipe_set[k]->stream);
1500
1501                         status->timing_sync_info.group_id = num_group;
1502                         status->timing_sync_info.group_size = group_size;
1503                         status->timing_sync_info.master = (k == 0);
1507
1508                 }
1509
1510                 /* remove any other pipes that have already been synced */
1511                 if (dc->config.use_pipe_ctx_sync_logic) {
1512                         /* check each pipe's syncd to decide which pipe to remove */
1513                         for (j = 1; j < group_size; j++) {
1514                                 if (pipe_set[j]->pipe_idx_syncd == pipe_set[0]->pipe_idx_syncd) {
1515                                         group_size--;
1516                                         pipe_set[j] = pipe_set[group_size];
1517                                         j--;
1518                                 } else
1519                                 /* link the slave pipe's syncd to the master pipe */
1520                                         pipe_set[j]->pipe_idx_syncd = pipe_set[0]->pipe_idx_syncd;
1521                         }
1522                 } else {
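                             /* j carries over from the master-selection loop
                              * above, so only pipes after the chosen master
                              * are checked here.
                              */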
1523                         for (j = j + 1; j < group_size; j++) {
1524                                 bool is_blanked;
1525
1526                                 if (pipe_set[j]->stream_res.opp->funcs->dpg_is_blanked)
1527                                         is_blanked =
1528                                                 pipe_set[j]->stream_res.opp->funcs->dpg_is_blanked(pipe_set[j]->stream_res.opp);
1529                                 else
1530                                         is_blanked =
1531                                                 pipe_set[j]->stream_res.tg->funcs->is_blanked(pipe_set[j]->stream_res.tg);
1532                                 if (!is_blanked) {
1533                                         group_size--;
1534                                         pipe_set[j] = pipe_set[group_size];
1535                                         j--;
1536                                 }
1537                         }
1538                 }
1539
1540                 if (group_size > 1) {
1541                         if (sync_type == TIMING_SYNCHRONIZABLE) {
1542                                 dc->hwss.enable_timing_synchronization(
1543                                         dc, group_index, group_size, pipe_set);
1544                         } else if (sync_type == VBLANK_SYNCHRONIZABLE) {
1545                                 dc->hwss.enable_vblanks_synchronization(
1546                                         dc, group_index, group_size, pipe_set);
1547                         }
1549                         group_index++;
1550                 }
1551                 num_group++;
1552         }
1553 }
1554
1555 static bool streams_changed(struct dc *dc,
1556                             struct dc_stream_state *streams[],
1557                             uint8_t stream_count)
1558 {
1559         uint8_t i;
1560
1561         if (stream_count != dc->current_state->stream_count)
1562                 return true;
1563
1564         for (i = 0; i < dc->current_state->stream_count; i++) {
1565                 if (dc->current_state->streams[i] != streams[i])
1566                         return true;
1567                 if (!streams[i]->link->link_state_valid)
1568                         return true;
1569         }
1570
1571         return false;
1572 }
1573
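/*
 * Check whether the timing currently programmed in hardware matches the
 * requested @crtc_timing closely enough to keep the boot-time image up
 * (seamless boot). Returns true only when every compared parameter matches.
 */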
1574 bool dc_validate_boot_timing(const struct dc *dc,
1575                                 const struct dc_sink *sink,
1576                                 struct dc_crtc_timing *crtc_timing)
1577 {
1578         struct timing_generator *tg;
1579         struct stream_encoder *se = NULL;
1580
1581         struct dc_crtc_timing hw_crtc_timing = {0};
1582
1583         struct dc_link *link = sink->link;
1584         unsigned int i, enc_inst, tg_inst = 0;
1585
1586         /* Support seamless boot on eDP displays only */
1587         if (sink->sink_signal != SIGNAL_TYPE_EDP)
1588                 return false;
1590
1591         /* Check for enabled DIG to identify enabled display */
1592         if (!link->link_enc->funcs->is_dig_enabled(link->link_enc))
1593                 return false;
1594
1595         enc_inst = link->link_enc->funcs->get_dig_frontend(link->link_enc);
1596
1597         if (enc_inst == ENGINE_ID_UNKNOWN)
1598                 return false;
1599
1600         for (i = 0; i < dc->res_pool->stream_enc_count; i++) {
1601                 if (dc->res_pool->stream_enc[i]->id == enc_inst) {
1602
1603                         se = dc->res_pool->stream_enc[i];
1604
1605                         tg_inst = dc->res_pool->stream_enc[i]->funcs->dig_source_otg(
1606                                 dc->res_pool->stream_enc[i]);
1607                         break;
1608                 }
1609         }
1610
1611         /* no stream encoder matched, so no tg_inst was found */
1612         if (i == dc->res_pool->stream_enc_count)
1613                 return false;
1614
1615         if (tg_inst >= dc->res_pool->timing_generator_count)
1616                 return false;
1617
1618         if (tg_inst != link->link_enc->preferred_engine)
1619                 return false;
1620
1621         tg = dc->res_pool->timing_generators[tg_inst];
1622
1623         if (!tg->funcs->get_hw_timing)
1624                 return false;
1625
1626         if (!tg->funcs->get_hw_timing(tg, &hw_crtc_timing))
1627                 return false;
1628
1629         if (crtc_timing->h_total != hw_crtc_timing.h_total)
1630                 return false;
1631
1632         if (crtc_timing->h_border_left != hw_crtc_timing.h_border_left)
1633                 return false;
1634
1635         if (crtc_timing->h_addressable != hw_crtc_timing.h_addressable)
1636                 return false;
1637
1638         if (crtc_timing->h_border_right != hw_crtc_timing.h_border_right)
1639                 return false;
1640
1641         if (crtc_timing->h_front_porch != hw_crtc_timing.h_front_porch)
1642                 return false;
1643
1644         if (crtc_timing->h_sync_width != hw_crtc_timing.h_sync_width)
1645                 return false;
1646
1647         if (crtc_timing->v_total != hw_crtc_timing.v_total)
1648                 return false;
1649
1650         if (crtc_timing->v_border_top != hw_crtc_timing.v_border_top)
1651                 return false;
1652
1653         if (crtc_timing->v_addressable != hw_crtc_timing.v_addressable)
1654                 return false;
1655
1656         if (crtc_timing->v_border_bottom != hw_crtc_timing.v_border_bottom)
1657                 return false;
1658
1659         if (crtc_timing->v_front_porch != hw_crtc_timing.v_front_porch)
1660                 return false;
1661
1662         if (crtc_timing->v_sync_width != hw_crtc_timing.v_sync_width)
1663                 return false;
1664
1665         /* block DSC for now, as VBIOS does not currently support DSC timings */
1666         if (crtc_timing->flags.DSC)
1667                 return false;
1668
1669         if (dc_is_dp_signal(link->connector_signal)) {
1670                 unsigned int pix_clk_100hz;
1671                 uint32_t numOdmPipes = 1;
1672                 uint32_t id_src[4] = {0};
1673
1674                 dc->res_pool->dp_clock_source->funcs->get_pixel_clk_frequency_100hz(
1675                         dc->res_pool->dp_clock_source,
1676                         tg_inst, &pix_clk_100hz);
1677
1678                 if (tg->funcs->get_optc_source)
1679                         tg->funcs->get_optc_source(tg,
1680                                                 &numOdmPipes, &id_src[0], &id_src[1]);
1681
1682                 if (numOdmPipes == 2)
1683                         pix_clk_100hz *= 2;
1684                 if (numOdmPipes == 4)
1685                         pix_clk_100hz *= 4;
1686
1687                 // Note: In rare cases, HW pixclk may differ from crtc's pixclk
1688                 // slightly due to rounding issues in 10 kHz units.
1689                 if (crtc_timing->pix_clk_100hz != pix_clk_100hz)
1690                         return false;
1691
1692                 if (!se->funcs->dp_get_pixel_format)
1693                         return false;
1694
1695                 if (!se->funcs->dp_get_pixel_format(
1696                         se,
1697                         &hw_crtc_timing.pixel_encoding,
1698                         &hw_crtc_timing.display_color_depth))
1699                         return false;
1700
1701                 if (hw_crtc_timing.display_color_depth != crtc_timing->display_color_depth)
1702                         return false;
1703
1704                 if (hw_crtc_timing.pixel_encoding != crtc_timing->pixel_encoding)
1705                         return false;
1706         }
1707
1708         if (link->dpcd_caps.dprx_feature.bits.VSC_SDP_COLORIMETRY_SUPPORTED)
1709                 return false;
1711
1712         if (link_is_edp_ilr_optimization_required(link, crtc_timing)) {
1713                 DC_LOG_EVENT_LINK_TRAINING("Seamless boot disabled to optimize eDP link rate\n");
1714                 return false;
1715         }
1716
1717         return true;
1718 }
1719
1720 static inline bool should_update_pipe_for_stream(
1721                 struct dc_state *context,
1722                 struct pipe_ctx *pipe_ctx,
1723                 struct dc_stream_state *stream)
1724 {
1725         return (pipe_ctx->stream && pipe_ctx->stream == stream);
1726 }
1727
1728 static inline bool should_update_pipe_for_plane(
1729                 struct dc_state *context,
1730                 struct pipe_ctx *pipe_ctx,
1731                 struct dc_plane_state *plane_state)
1732 {
1733         return (pipe_ctx->plane_state == plane_state);
1734 }
1735
1736 void dc_enable_stereo(
1737         struct dc *dc,
1738         struct dc_state *context,
1739         struct dc_stream_state *streams[],
1740         uint8_t stream_count)
1741 {
1742         int i, j;
1743         struct pipe_ctx *pipe;
1744
1745         if (context == NULL)
1746                 context = dc->current_state;
1747
1748         for (i = 0; i < MAX_PIPES; i++) {
1749                 pipe = &context->res_ctx.pipe_ctx[i];
1752
1753                 for (j = 0; pipe && j < stream_count; j++) {
1754                         if (should_update_pipe_for_stream(context, pipe, streams[j]) &&
1755                                 dc->hwss.setup_stereo)
1756                                 dc->hwss.setup_stereo(pipe, dc);
1757                 }
1758         }
1759 }
1760
1761 void dc_trigger_sync(struct dc *dc, struct dc_state *context)
1762 {
1763         if (context->stream_count > 1 && !dc->debug.disable_timing_sync) {
1764                 enable_timing_multisync(dc, context);
1765                 program_timing_sync(dc, context);
1766         }
1767 }
1768
1769 static uint8_t get_stream_mask(struct dc *dc, struct dc_state *context)
1770 {
1771         int i;
1772         unsigned int stream_mask = 0;
1773
1774         for (i = 0; i < dc->res_pool->pipe_count; i++) {
1775                 if (context->res_ctx.pipe_ctx[i].stream)
1776                         stream_mask |= 1 << i;
1777         }
1778
1779         return stream_mask;
1780 }
1781
1782 void dc_z10_restore(const struct dc *dc)
1783 {
1784         if (dc->hwss.z10_restore)
1785                 dc->hwss.z10_restore(dc);
1786 }
1787
1788 void dc_z10_save_init(struct dc *dc)
1789 {
1790         if (dc->hwss.z10_save_init)
1791                 dc->hwss.z10_save_init(dc);
1792 }
1793
1794 /**
1795  * dc_commit_state_no_check - Apply context to the hardware
1796  *
1797  * @dc: DC object with the current status to be updated
1798  * @context: New state that will become the current status at the end of this function
1799  *
1800  * Applies the given context to the hardware and copies it into the current context.
1801  * It's up to the user to release the src context afterwards.
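  *
  * A minimal caller sketch (hypothetical, for illustration only; real callers
  * validate the context first, as dc_commit_streams() does):
  *
  *     struct dc_state *ctx = dc_create_state(dc);
  *
  *     dc_resource_state_copy_construct_current(dc, ctx);
  *     if (dc_commit_state_no_check(dc, ctx) != DC_OK)
  *             DC_LOG_ERROR("commit failed\n");
  *     dc_release_state(ctx);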
1802  *
1803  * Return: an enum dc_status result code for the operation
1804  */
1805 static enum dc_status dc_commit_state_no_check(struct dc *dc, struct dc_state *context)
1806 {
1807         struct dc_bios *dcb = dc->ctx->dc_bios;
1808         enum dc_status result = DC_ERROR_UNEXPECTED;
1809         struct pipe_ctx *pipe;
1810         int i, k, l;
1811         struct dc_stream_state *dc_streams[MAX_STREAMS] = {0};
1812         struct dc_state *old_state;
1813         bool subvp_prev_use = false;
1814
1815         dc_z10_restore(dc);
1816         dc_allow_idle_optimizations(dc, false);
1817
1818         for (i = 0; i < dc->res_pool->pipe_count; i++) {
1819                 struct pipe_ctx *old_pipe = &dc->current_state->res_ctx.pipe_ctx[i];
1820
1821                 /* Check old context for SubVP */
1822                 subvp_prev_use |= (old_pipe->stream && old_pipe->stream->mall_stream_config.type == SUBVP_PHANTOM);
1823                 if (subvp_prev_use)
1824                         break;
1825         }
1826
1827         for (i = 0; i < context->stream_count; i++)
1828                 dc_streams[i] = context->streams[i];
1829
1830         if (!dcb->funcs->is_accelerated_mode(dcb)) {
1831                 disable_vbios_mode_if_required(dc, context);
1832                 dc->hwss.enable_accelerated_mode(dc, context);
1833         }
1834
1835         if (context->stream_count > get_seamless_boot_stream_count(context) ||
1836                 context->stream_count == 0)
1837                 dc->hwss.prepare_bandwidth(dc, context);
1838
1839         /* When SubVP is active, all HW programming must be done while
1840          * SubVP lock is acquired
1841          */
1842         if (dc->hwss.subvp_pipe_control_lock)
1843                 dc->hwss.subvp_pipe_control_lock(dc, context, true, true, NULL, subvp_prev_use);
1844
1845         if (dc->debug.enable_double_buffered_dsc_pg_support)
1846                 dc->hwss.update_dsc_pg(dc, context, false);
1847
1848         disable_dangling_plane(dc, context);
1849         /* re-program planes for existing stream, in case we need to
1850          * free up plane resource for later use
1851          */
1852         if (dc->hwss.apply_ctx_for_surface) {
1853                 for (i = 0; i < context->stream_count; i++) {
1854                         if (context->streams[i]->mode_changed)
1855                                 continue;
1856                         apply_ctx_interdependent_lock(dc, context, context->streams[i], true);
1857                         dc->hwss.apply_ctx_for_surface(
1858                                 dc, context->streams[i],
1859                                 context->stream_status[i].plane_count,
1860                                 context); /* use new pipe config in new context */
1861                         apply_ctx_interdependent_lock(dc, context, context->streams[i], false);
1862                         dc->hwss.post_unlock_program_front_end(dc, context);
1863                 }
1864         }
1865
1866         /* Program hardware */
1867         for (i = 0; i < dc->res_pool->pipe_count; i++) {
1868                 pipe = &context->res_ctx.pipe_ctx[i];
1869                 dc->hwss.wait_for_mpcc_disconnect(dc, dc->res_pool, pipe);
1870         }
1871
1872         result = dc->hwss.apply_ctx_to_hw(dc, context);
1873
1874         if (result != DC_OK) {
1875                 /* Application of dc_state to hardware stopped. */
1876                 dc->current_state->res_ctx.link_enc_cfg_ctx.mode = LINK_ENC_CFG_STEADY;
1877                 return result;
1878         }
1879
1880         dc_trigger_sync(dc, context);
1881
1882         /* Program all planes within new context*/
1883         if (dc->hwss.program_front_end_for_ctx) {
1884                 dc->hwss.interdependent_update_lock(dc, context, true);
1885                 dc->hwss.program_front_end_for_ctx(dc, context);
1886                 dc->hwss.interdependent_update_lock(dc, context, false);
1887                 dc->hwss.post_unlock_program_front_end(dc, context);
1888         }
1889
1890         if (dc->hwss.commit_subvp_config)
1891                 dc->hwss.commit_subvp_config(dc, context);
1892         if (dc->hwss.subvp_pipe_control_lock)
1893                 dc->hwss.subvp_pipe_control_lock(dc, context, false, true, NULL, subvp_prev_use);
1894
1895         for (i = 0; i < context->stream_count; i++) {
1896                 const struct dc_link *link = context->streams[i]->link;
1897
1898                 if (!context->streams[i]->mode_changed)
1899                         continue;
1900
1901                 if (dc->hwss.apply_ctx_for_surface) {
1902                         apply_ctx_interdependent_lock(dc, context, context->streams[i], true);
1903                         dc->hwss.apply_ctx_for_surface(
1904                                         dc, context->streams[i],
1905                                         context->stream_status[i].plane_count,
1906                                         context);
1907                         apply_ctx_interdependent_lock(dc, context, context->streams[i], false);
1908                         dc->hwss.post_unlock_program_front_end(dc, context);
1909                 }
1910
1911                 /*
1912                  * enable stereo
1913                  * TODO rework dc_enable_stereo call to work with validation sets?
1914                  */
1915                 for (k = 0; k < MAX_PIPES; k++) {
1916                         pipe = &context->res_ctx.pipe_ctx[k];
1917
1918                         for (l = 0; pipe && l < context->stream_count; l++) {
1919                                 if (context->streams[l] &&
1920                                         context->streams[l] == pipe->stream &&
1921                                         dc->hwss.setup_stereo)
1922                                         dc->hwss.setup_stereo(pipe, dc);
1923                         }
1924                 }
1925
1926                 CONN_MSG_MODE(link, "{%dx%d, %dx%d@%dKhz}",
1927                                 context->streams[i]->timing.h_addressable,
1928                                 context->streams[i]->timing.v_addressable,
1929                                 context->streams[i]->timing.h_total,
1930                                 context->streams[i]->timing.v_total,
1931                                 context->streams[i]->timing.pix_clk_100hz / 10);
1932         }
1933
1934         dc_enable_stereo(dc, context, dc_streams, context->stream_count);
1935
1936         if (context->stream_count > get_seamless_boot_stream_count(context) ||
1937                 context->stream_count == 0) {
1938                 /* Must wait for no flips to be pending before doing optimize bw */
1939                 wait_for_no_pipes_pending(dc, context);
1940                 /* pplib is notified if disp_num changed */
1941                 dc->hwss.optimize_bandwidth(dc, context);
1942         }
1943
1944         if (dc->debug.enable_double_buffered_dsc_pg_support)
1945                 dc->hwss.update_dsc_pg(dc, context, true);
1946
1947         if (dc->ctx->dce_version >= DCE_VERSION_MAX)
1948                 TRACE_DCN_CLOCK_STATE(&context->bw_ctx.bw.dcn.clk);
1949         else
1950                 TRACE_DCE_CLOCK_STATE(&context->bw_ctx.bw.dce);
1951
1952         context->stream_mask = get_stream_mask(dc, context);
1953
1954         if (context->stream_mask != dc->current_state->stream_mask)
1955                 dc_dmub_srv_notify_stream_mask(dc->ctx->dmub_srv, context->stream_mask);
1956
1957         for (i = 0; i < context->stream_count; i++)
1958                 context->streams[i]->mode_changed = false;
1959
1960         old_state = dc->current_state;
1961         dc->current_state = context;
1962
1963         dc_release_state(old_state);
1964
1965         dc_retain_state(dc->current_state);
1966
1967         return result;
1968 }
1969
1970 /**
1971  * dc_commit_streams - Commit current stream state
1972  *
1973  * @dc: DC object with the commit state to be configured in the hardware
1974  * @streams: Array of stream states
1975  * @stream_count: Total number of streams
1976  *
1977  * Function responsible for committing stream changes to the hardware.
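  *
  * Hypothetical usage sketch (illustrative only; stream0 and stream1 are
  * assumed to be valid dc_stream_state pointers):
  *
  *     struct dc_stream_state *streams[] = { stream0, stream1 };
  *
  *     if (dc_commit_streams(dc, streams, 2) != DC_OK)
  *             DC_LOG_ERROR("stream commit failed\n");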
1978  *
1979  * Return:
1980  * DC_OK if everything works as expected; otherwise, a dc_status error
1981  * code.
1982  */
1983 enum dc_status dc_commit_streams(struct dc *dc,
1984                                  struct dc_stream_state *streams[],
1985                                  uint8_t stream_count)
1986 {
1987         int i, j;
1988         struct dc_state *context;
1989         enum dc_status res = DC_OK;
1990         struct dc_validation_set set[MAX_STREAMS] = {0};
1991
1992         if (dc->ctx->dce_environment == DCE_ENV_VIRTUAL_HW)
1993                 return res;
1994
1995         if (!streams_changed(dc, streams, stream_count))
1996                 return res;
1997
1998         DC_LOG_DC("%s: %d streams\n", __func__, stream_count);
1999
2000         for (i = 0; i < stream_count; i++) {
2001                 struct dc_stream_state *stream = streams[i];
2002                 struct dc_stream_status *status = dc_stream_get_status(stream);
2003
2004                 dc_stream_log(dc, stream);
2005
2006                 set[i].stream = stream;
2007
2008                 if (status) {
2009                         set[i].plane_count = status->plane_count;
2010                         for (j = 0; j < status->plane_count; j++)
2011                                 set[i].plane_states[j] = status->plane_states[j];
2012                 }
2013         }
2014
2015         context = dc_create_state(dc);
2016         if (!context) {
2017                 res = DC_ERROR_UNEXPECTED;
                     goto context_alloc_fail;
             }
2018
2019         dc_resource_state_copy_construct_current(dc, context);
2020
2021         res = dc_validate_with_context(dc, set, stream_count, context, false);
2022         if (res != DC_OK) {
2023                 BREAK_TO_DEBUGGER();
2024                 goto fail;
2025         }
2026
2027         res = dc_commit_state_no_check(dc, context);
2028
2029         for (i = 0; i < stream_count; i++) {
2030                 for (j = 0; j < context->stream_count; j++) {
2031                         if (streams[i]->stream_id == context->streams[j]->stream_id)
2032                                 streams[i]->out.otg_offset = context->stream_status[j].primary_otg_inst;
2033
2034                         if (dc_is_embedded_signal(streams[i]->signal)) {
2035                                 struct dc_stream_status *status = dc_stream_get_status_from_state(context, streams[i]);
2036
2037                                 if (dc->hwss.is_abm_supported)
2038                                         status->is_abm_supported = dc->hwss.is_abm_supported(dc, context, streams[i]);
2039                                 else
2040                                         status->is_abm_supported = true;
2041                         }
2042                 }
2043         }
2044
2045 fail:
2046         dc_release_state(context);
2047
2048 context_alloc_fail:
2049
2050         DC_LOG_DC("%s Finished.\n", __func__);
2051
2052         return res;
2053 }
2054
2055 /* TODO: When the transition to the new commit sequence is done, remove this
2056  * function in favor of dc_commit_streams.
      */
2057 bool dc_commit_state(struct dc *dc, struct dc_state *context)
2058 {
2059         enum dc_status result = DC_ERROR_UNEXPECTED;
2060         int i;
2061
2062         /* TODO: Since changing the commit sequence can have a huge impact,
2063          * we decided to enable it only for DCN3x. However, as soon as
2064          * we are more confident about this change, we'll need to enable
2065          * the new sequence for all ASICs.
              */
2066         if (dc->ctx->dce_version >= DCN_VERSION_3_2) {
2067                 result = dc_commit_streams(dc, context->streams, context->stream_count);
2068                 return result == DC_OK;
2069         }
2070
2071         if (!streams_changed(dc, context->streams, context->stream_count))
2072                 return true;
2074
2075         DC_LOG_DC("%s: %d streams\n",
2076                                 __func__, context->stream_count);
2077
2078         for (i = 0; i < context->stream_count; i++) {
2079                 struct dc_stream_state *stream = context->streams[i];
2080
2081                 dc_stream_log(dc, stream);
2082         }
2083
2084         /*
2085          * Previous validation was performed with fast_validation = true and
2086          * the full DML state required for hardware programming was skipped.
2087          *
2088          * Re-validate here to calculate these parameters / watermarks.
2089          */
2090         result = dc_validate_global_state(dc, context, false);
2091         if (result != DC_OK) {
2092                 DC_LOG_ERROR("DC commit global validation failure: %s (%d)",
2093                              dc_status_to_str(result), result);
2094                 return false;
2095         }
2096
2097         result = dc_commit_state_no_check(dc, context);
2098
2099         return (result == DC_OK);
2100 }
2101
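/*
 * Hypothetical usage sketch (illustrative only; not an actual caller in this
 * file). Acquire a post-blend 3D LUT and shaper for a stream, then release
 * them when done:
 *
 *     struct dc_3dlut *lut = NULL;
 *     struct dc_transfer_func *shaper = NULL;
 *
 *     if (dc_acquire_release_mpc_3dlut(dc, true, stream, &lut, &shaper)) {
 *             ... program lut and shaper ...
 *             dc_acquire_release_mpc_3dlut(dc, false, stream, &lut, &shaper);
 *     }
 */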
2102 bool dc_acquire_release_mpc_3dlut(
2103                 struct dc *dc, bool acquire,
2104                 struct dc_stream_state *stream,
2105                 struct dc_3dlut **lut,
2106                 struct dc_transfer_func **shaper)
2107 {
2108         int pipe_idx;
2109         bool ret = false;
2110         bool found_pipe_idx = false;
2111         const struct resource_pool *pool = dc->res_pool;
2112         struct resource_context *res_ctx = &dc->current_state->res_ctx;
2113         int mpcc_id = 0;
2114
2115         if (pool && res_ctx) {
2116                 if (acquire) {
2117                         /* find pipe idx for the given stream */
2118                         for (pipe_idx = 0; pipe_idx < pool->pipe_count; pipe_idx++) {
2119                                 if (res_ctx->pipe_ctx[pipe_idx].stream == stream) {
2120                                         found_pipe_idx = true;
2121                                         mpcc_id = res_ctx->pipe_ctx[pipe_idx].plane_res.hubp->inst;
2122                                         break;
2123                                 }
2124                         }
2125                 } else {
2126                         /* for release, pipe_idx is not required */
                             found_pipe_idx = true;
                     }
2127
2128                 if (found_pipe_idx) {
2129                         if (acquire && pool->funcs->acquire_post_bldn_3dlut)
2130                                 ret = pool->funcs->acquire_post_bldn_3dlut(res_ctx, pool, mpcc_id, lut, shaper);
2131                         else if (!acquire && pool->funcs->release_post_bldn_3dlut)
2132                                 ret = pool->funcs->release_post_bldn_3dlut(res_ctx, pool, lut, shaper);
2133                 }
2134         }
2135         return ret;
2136 }
2137
2138 static bool is_flip_pending_in_pipes(struct dc *dc, struct dc_state *context)
2139 {
2140         int i;
2141         struct pipe_ctx *pipe;
2142
2143         for (i = 0; i < MAX_PIPES; i++) {
2144                 pipe = &context->res_ctx.pipe_ctx[i];
2145
2146                 // Don't check flip pending on phantom pipes
2147                 if (!pipe->plane_state || (pipe->stream && pipe->stream->mall_stream_config.type == SUBVP_PHANTOM))
2148                         continue;
2149
2150                 /* Must set to false to start with, due to OR in update function */
2151                 pipe->plane_state->status.is_flip_pending = false;
2152                 dc->hwss.update_pending_status(pipe);
2153                 if (pipe->plane_state->status.is_flip_pending)
2154                         return true;
2155         }
2156         return false;
2157 }
2158
2159 /* Perform updates here which need to be deferred until the next vupdate.
2160  *
2161  * e.g. the blend lut, 3dlut, and shaper lut bypass regs are double buffered,
2162  * but forcing lut memory to the shutdown state is immediate. This causes
2163  * single-frame corruption as the lut gets disabled mid-frame, unless the
2164  * shutdown is deferred until after entering bypass.
2165  */
2166 static void process_deferred_updates(struct dc *dc)
2167 {
2168         int i = 0;
2169
2170         if (dc->debug.enable_mem_low_power.bits.cm) {
2171                 ASSERT(dc->dcn_ip->max_num_dpp);
2172                 for (i = 0; i < dc->dcn_ip->max_num_dpp; i++)
2173                         if (dc->res_pool->dpps[i]->funcs->dpp_deferred_update)
2174                                 dc->res_pool->dpps[i]->funcs->dpp_deferred_update(dc->res_pool->dpps[i]);
2175         }
2176 }
2177
2178 void dc_post_update_surfaces_to_stream(struct dc *dc)
2179 {
2180         int i;
2181         struct dc_state *context = dc->current_state;
2182
2183         if ((!dc->optimized_required) || get_seamless_boot_stream_count(context) > 0)
2184                 return;
2185
2186         post_surface_trace(dc);
2187
2188         /*
2189          * Only relevant for DCN behavior where we can guarantee the optimization
2190          * is safe to apply - retain the legacy behavior for DCE.
2191          */
2192
2193         if (dc->ctx->dce_version < DCE_VERSION_MAX)
2194                 TRACE_DCE_CLOCK_STATE(&context->bw_ctx.bw.dce);
2195         else {
2196                 TRACE_DCN_CLOCK_STATE(&context->bw_ctx.bw.dcn.clk);
2197
2198                 if (is_flip_pending_in_pipes(dc, context))
2199                         return;
2200
2201                 for (i = 0; i < dc->res_pool->pipe_count; i++)
2202                         if (context->res_ctx.pipe_ctx[i].stream == NULL ||
2203                                         context->res_ctx.pipe_ctx[i].plane_state == NULL) {
2204                                 context->res_ctx.pipe_ctx[i].pipe_idx = i;
2205                                 dc->hwss.disable_plane(dc, &context->res_ctx.pipe_ctx[i]);
2206                         }
2207
2208                 process_deferred_updates(dc);
2209
2210                 dc->hwss.optimize_bandwidth(dc, context);
2211
2212                 if (dc->debug.enable_double_buffered_dsc_pg_support)
2213                         dc->hwss.update_dsc_pg(dc, context, true);
2214         }
2215
2216         dc->optimized_required = false;
2217         dc->wm_optimized_required = false;
2218 }
2219
2220 static void init_state(struct dc *dc, struct dc_state *context)
2221 {
2222         /* Each context must have its own instance of VBA, and in order to
2223          * initialize and obtain IP and SOC parameters, the base DML instance
2224          * from DC is initially copied into every context.
2225          */
2226         memcpy(&context->bw_ctx.dml, &dc->dml, sizeof(struct display_mode_lib));
2227 }
2228
2229 struct dc_state *dc_create_state(struct dc *dc)
2230 {
2231         struct dc_state *context = kvzalloc(sizeof(struct dc_state),
2232                                             GFP_KERNEL);
2233
2234         if (!context)
2235                 return NULL;
2236
2237         init_state(dc, context);
2238
2239         kref_init(&context->refcount);
2240
2241         return context;
2242 }
2243
2244 struct dc_state *dc_copy_state(struct dc_state *src_ctx)
2245 {
2246         int i, j;
2247         struct dc_state *new_ctx = kvmalloc(sizeof(struct dc_state), GFP_KERNEL);
2248
2249         if (!new_ctx)
2250                 return NULL;
2251         memcpy(new_ctx, src_ctx, sizeof(struct dc_state));
2252
2253         for (i = 0; i < MAX_PIPES; i++) {
2254                 struct pipe_ctx *cur_pipe = &new_ctx->res_ctx.pipe_ctx[i];
2255
2256                 if (cur_pipe->top_pipe)
2257                         cur_pipe->top_pipe = &new_ctx->res_ctx.pipe_ctx[cur_pipe->top_pipe->pipe_idx];
2258
2259                 if (cur_pipe->bottom_pipe)
2260                         cur_pipe->bottom_pipe = &new_ctx->res_ctx.pipe_ctx[cur_pipe->bottom_pipe->pipe_idx];
2261
2262                 if (cur_pipe->prev_odm_pipe)
2263                         cur_pipe->prev_odm_pipe = &new_ctx->res_ctx.pipe_ctx[cur_pipe->prev_odm_pipe->pipe_idx];
2264
2265                 if (cur_pipe->next_odm_pipe)
2266                         cur_pipe->next_odm_pipe = &new_ctx->res_ctx.pipe_ctx[cur_pipe->next_odm_pipe->pipe_idx];
2267
2268         }
2269
2270         for (i = 0; i < new_ctx->stream_count; i++) {
2271                 dc_stream_retain(new_ctx->streams[i]);
2272                 for (j = 0; j < new_ctx->stream_status[i].plane_count; j++)
2273                         dc_plane_state_retain(
2274                                 new_ctx->stream_status[i].plane_states[j]);
2275         }
2276
2277         kref_init(&new_ctx->refcount);
2278
2279         return new_ctx;
2280 }
2281
2282 void dc_retain_state(struct dc_state *context)
2283 {
2284         kref_get(&context->refcount);
2285 }
2286
2287 static void dc_state_free(struct kref *kref)
2288 {
2289         struct dc_state *context = container_of(kref, struct dc_state, refcount);
2290         dc_resource_state_destruct(context);
2291         kvfree(context);
2292 }
2293
2294 void dc_release_state(struct dc_state *context)
2295 {
2296         kref_put(&context->refcount, dc_state_free);
2297 }
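/*
 * dc_state lifecycle sketch (hypothetical caller, for illustration only):
 *
 *     struct dc_state *ctx = dc_create_state(dc);    refcount == 1
 *
 *     dc_retain_state(ctx);                          refcount == 2
 *     dc_release_state(ctx);                         refcount == 1
 *     dc_release_state(ctx);                         freed via dc_state_free()
 */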
2298
2299 bool dc_set_generic_gpio_for_stereo(bool enable,
2300                 struct gpio_service *gpio_service)
2301 {
2302         enum gpio_result gpio_result = GPIO_RESULT_NON_SPECIFIC_ERROR;
2303         struct gpio_pin_info pin_info;
2304         struct gpio *generic;
2305         struct gpio_generic_mux_config *config = kzalloc(sizeof(struct gpio_generic_mux_config),
2306                            GFP_KERNEL);
2307
2308         if (!config)
2309                 return false;
2310         pin_info = dal_gpio_get_generic_pin_info(gpio_service, GPIO_ID_GENERIC, 0);
2311
2312         if (pin_info.mask == 0xFFFFFFFF || pin_info.offset == 0xFFFFFFFF) {
2313                 kfree(config);
2314                 return false;
2315         } else {
2316                 generic = dal_gpio_service_create_generic_mux(
2317                         gpio_service,
2318                         pin_info.offset,
2319                         pin_info.mask);
2320         }
2321
2322         if (!generic) {
2323                 kfree(config);
2324                 return false;
2325         }
2326
2327         gpio_result = dal_gpio_open(generic, GPIO_MODE_OUTPUT);
2328
2329         config->enable_output_from_mux = enable;
2330         config->mux_select = GPIO_SIGNAL_SOURCE_PASS_THROUGH_STEREO_SYNC;
2331
2332         if (gpio_result == GPIO_RESULT_OK)
2333                 gpio_result = dal_mux_setup_config(generic, config);
2334
2335         if (gpio_result == GPIO_RESULT_OK) {
2336                 dal_gpio_close(generic);
2337                 dal_gpio_destroy_generic_mux(&generic);
2338                 kfree(config);
2339                 return true;
2340         } else {
2341                 dal_gpio_close(generic);
2342                 dal_gpio_destroy_generic_mux(&generic);
2343                 kfree(config);
2344                 return false;
2345         }
2346 }
2347
2348 static bool is_surface_in_context(
2349                 const struct dc_state *context,
2350                 const struct dc_plane_state *plane_state)
2351 {
2352         int j;
2353
2354         for (j = 0; j < MAX_PIPES; j++) {
2355                 const struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[j];
2356
2357                 if (plane_state == pipe_ctx->plane_state) {
2358                         return true;
2359                 }
2360         }
2361
2362         return false;
2363 }
2364
2365 static enum surface_update_type get_plane_info_update_type(const struct dc_surface_update *u)
2366 {
2367         union surface_update_flags *update_flags = &u->surface->update_flags;
2368         enum surface_update_type update_type = UPDATE_TYPE_FAST;
2369
2370         if (!u->plane_info)
2371                 return UPDATE_TYPE_FAST;
2372
2373         if (u->plane_info->color_space != u->surface->color_space) {
2374                 update_flags->bits.color_space_change = 1;
2375                 elevate_update_type(&update_type, UPDATE_TYPE_MED);
2376         }
2377
2378         if (u->plane_info->horizontal_mirror != u->surface->horizontal_mirror) {
2379                 update_flags->bits.horizontal_mirror_change = 1;
2380                 elevate_update_type(&update_type, UPDATE_TYPE_MED);
2381         }
2382
2383         if (u->plane_info->rotation != u->surface->rotation) {
2384                 update_flags->bits.rotation_change = 1;
2385                 elevate_update_type(&update_type, UPDATE_TYPE_FULL);
2386         }
2387
2388         if (u->plane_info->format != u->surface->format) {
2389                 update_flags->bits.pixel_format_change = 1;
2390                 elevate_update_type(&update_type, UPDATE_TYPE_FULL);
2391         }
2392
2393         if (u->plane_info->stereo_format != u->surface->stereo_format) {
2394                 update_flags->bits.stereo_format_change = 1;
2395                 elevate_update_type(&update_type, UPDATE_TYPE_FULL);
2396         }
2397
2398         if (u->plane_info->per_pixel_alpha != u->surface->per_pixel_alpha) {
2399                 update_flags->bits.per_pixel_alpha_change = 1;
2400                 elevate_update_type(&update_type, UPDATE_TYPE_MED);
2401         }
2402
2403         if (u->plane_info->global_alpha_value != u->surface->global_alpha_value) {
2404                 update_flags->bits.global_alpha_change = 1;
2405                 elevate_update_type(&update_type, UPDATE_TYPE_MED);
2406         }
2407
2408         if (u->plane_info->dcc.enable != u->surface->dcc.enable
2409                         || u->plane_info->dcc.dcc_ind_blk != u->surface->dcc.dcc_ind_blk
2410                         || u->plane_info->dcc.meta_pitch != u->surface->dcc.meta_pitch) {
2411                 /* During DCC on/off, stutter period is calculated before
2412                  * DCC has fully transitioned. This results in incorrect
2413                  * stutter period calculation. Triggering a full update will
2414                  * recalculate stutter period.
2415                  */
2416                 update_flags->bits.dcc_change = 1;
2417                 elevate_update_type(&update_type, UPDATE_TYPE_FULL);
2418         }
2419
2420         if (resource_pixel_format_to_bpp(u->plane_info->format) !=
2421                         resource_pixel_format_to_bpp(u->surface->format)) {
2422                 /* different bytes per element will require full bandwidth
2423                  * and DML calculation
2424                  */
2425                 update_flags->bits.bpp_change = 1;
2426                 elevate_update_type(&update_type, UPDATE_TYPE_FULL);
2427         }
2428
2429         if (u->plane_info->plane_size.surface_pitch != u->surface->plane_size.surface_pitch
2430                         || u->plane_info->plane_size.chroma_pitch != u->surface->plane_size.chroma_pitch) {
2431                 update_flags->bits.plane_size_change = 1;
2432                 elevate_update_type(&update_type, UPDATE_TYPE_MED);
2433         }
2434
2436         if (memcmp(&u->plane_info->tiling_info, &u->surface->tiling_info,
2437                         sizeof(union dc_tiling_info)) != 0) {
2438                 update_flags->bits.swizzle_change = 1;
2439                 elevate_update_type(&update_type, UPDATE_TYPE_MED);
2440
2441                 /* TODO: the checks below are HW dependent; we should add a
2442                  * hook to DCE/N resource and validate there.
2443                  */
2444                 if (u->plane_info->tiling_info.gfx9.swizzle != DC_SW_LINEAR) {
2445                         /* swizzled mode requires RQ to be setup properly,
2446                          * thus need to run DML to calculate RQ settings
2447                          */
2448                         update_flags->bits.bandwidth_change = 1;
2449                         elevate_update_type(&update_type, UPDATE_TYPE_FULL);
2450                 }
2451         }
2452
2453         /* This should be UPDATE_TYPE_FAST if nothing has changed. */
2454         return update_type;
2455 }
2456
2457 static enum surface_update_type get_scaling_info_update_type(
2458                 const struct dc_surface_update *u)
2459 {
2460         union surface_update_flags *update_flags = &u->surface->update_flags;
2461
2462         if (!u->scaling_info)
2463                 return UPDATE_TYPE_FAST;
2464
2465         if (u->scaling_info->clip_rect.width != u->surface->clip_rect.width
2466                         || u->scaling_info->clip_rect.height != u->surface->clip_rect.height
2467                         || u->scaling_info->dst_rect.width != u->surface->dst_rect.width
2468                         || u->scaling_info->dst_rect.height != u->surface->dst_rect.height
2469                         || u->scaling_info->scaling_quality.integer_scaling !=
2470                                 u->surface->scaling_quality.integer_scaling
2471                         ) {
2472                 update_flags->bits.scaling_change = 1;
2473
2474                 if ((u->scaling_info->dst_rect.width < u->surface->dst_rect.width
2475                         || u->scaling_info->dst_rect.height < u->surface->dst_rect.height)
2476                                 && (u->scaling_info->dst_rect.width < u->surface->src_rect.width
2477                                         || u->scaling_info->dst_rect.height < u->surface->src_rect.height))
2478                         /* Making dst rect smaller requires a bandwidth change */
2479                         update_flags->bits.bandwidth_change = 1;
2480         }
2481
2482         if (u->scaling_info->src_rect.width != u->surface->src_rect.width
2483                 || u->scaling_info->src_rect.height != u->surface->src_rect.height) {
2484
2485                 update_flags->bits.scaling_change = 1;
2486                 if (u->scaling_info->src_rect.width > u->surface->src_rect.width
2487                                 || u->scaling_info->src_rect.height > u->surface->src_rect.height)
2488                         /* Making src rect bigger requires a bandwidth change */
2489                         update_flags->bits.clock_change = 1;
2490         }
2491
2492         if (u->scaling_info->src_rect.x != u->surface->src_rect.x
2493                         || u->scaling_info->src_rect.y != u->surface->src_rect.y
2494                         || u->scaling_info->clip_rect.x != u->surface->clip_rect.x
2495                         || u->scaling_info->clip_rect.y != u->surface->clip_rect.y
2496                         || u->scaling_info->dst_rect.x != u->surface->dst_rect.x
2497                         || u->scaling_info->dst_rect.y != u->surface->dst_rect.y)
2498                 update_flags->bits.position_change = 1;
2499
2500         if (update_flags->bits.clock_change
2501                         || update_flags->bits.bandwidth_change
2502                         || update_flags->bits.scaling_change)
2503                 return UPDATE_TYPE_FULL;
2504
2505         if (update_flags->bits.position_change)
2506                 return UPDATE_TYPE_MED;
2507
2508         return UPDATE_TYPE_FAST;
2509 }
2510
2511 static enum surface_update_type det_surface_update(const struct dc *dc,
2512                 const struct dc_surface_update *u)
2513 {
2514         const struct dc_state *context = dc->current_state;
2515         enum surface_update_type type;
2516         enum surface_update_type overall_type = UPDATE_TYPE_FAST;
2517         union surface_update_flags *update_flags = &u->surface->update_flags;
2518
2522         if (!is_surface_in_context(context, u->surface) || u->surface->force_full_update) {
2523                 update_flags->raw = 0xFFFFFFFF;
2524                 return UPDATE_TYPE_FULL;
2525         }
2526
2527         update_flags->raw = 0; // Reset all flags
2528
2529         type = get_plane_info_update_type(u);
2530         elevate_update_type(&overall_type, type);
2531
2532         type = get_scaling_info_update_type(u);
2533         elevate_update_type(&overall_type, type);
2534
2535         if (u->flip_addr) {
2536                 update_flags->bits.addr_update = 1;
2537                 if (u->flip_addr->address.tmz_surface != u->surface->address.tmz_surface) {
2538                         update_flags->bits.tmz_changed = 1;
2539                         elevate_update_type(&overall_type, UPDATE_TYPE_FULL);
2540                 }
2541         }
2542         if (u->in_transfer_func)
2543                 update_flags->bits.in_transfer_func_change = 1;
2544
2545         if (u->input_csc_color_matrix)
2546                 update_flags->bits.input_csc_change = 1;
2547
2548         if (u->coeff_reduction_factor)
2549                 update_flags->bits.coeff_reduction_change = 1;
2550
2551         if (u->gamut_remap_matrix)
2552                 update_flags->bits.gamut_remap_change = 1;
2553
2554         if (u->gamma) {
2555                 enum surface_pixel_format format = SURFACE_PIXEL_FORMAT_GRPH_BEGIN;
2556
2557                 if (u->plane_info)
2558                         format = u->plane_info->format;
2559                 else if (u->surface)
2560                         format = u->surface->format;
2561
2562                 if (dce_use_lut(format))
2563                         update_flags->bits.gamma_change = 1;
2564         }
2565
2566         if (u->lut3d_func || u->func_shaper)
2567                 update_flags->bits.lut_3d = 1;
2568
2569         if (u->hdr_mult.value && u->hdr_mult.value != u->surface->hdr_mult.value) {
2570                 update_flags->bits.hdr_mult = 1;
2571                 elevate_update_type(&overall_type, UPDATE_TYPE_MED);
2572         }
2574
2575         if (update_flags->bits.in_transfer_func_change) {
2576                 type = UPDATE_TYPE_MED;
2577                 elevate_update_type(&overall_type, type);
2578         }
2579
2580         if (update_flags->bits.input_csc_change
2581                         || update_flags->bits.coeff_reduction_change
2582                         || update_flags->bits.lut_3d
2583                         || update_flags->bits.gamma_change
2584                         || update_flags->bits.gamut_remap_change) {
2585                 type = UPDATE_TYPE_FULL;
2586                 elevate_update_type(&overall_type, type);
2587         }
2588
2589         return overall_type;
2590 }
2591
2592 static enum surface_update_type check_update_surfaces_for_stream(
2593                 struct dc *dc,
2594                 struct dc_surface_update *updates,
2595                 int surface_count,
2596                 struct dc_stream_update *stream_update,
2597                 const struct dc_stream_status *stream_status)
2598 {
2599         int i;
2600         enum surface_update_type overall_type = UPDATE_TYPE_FAST;
2601
2602         if (dc->idle_optimizations_allowed)
2603                 overall_type = UPDATE_TYPE_FULL;
2604
2605         if (stream_status == NULL || stream_status->plane_count != surface_count)
2606                 overall_type = UPDATE_TYPE_FULL;
2607
2608         if (stream_update && stream_update->pending_test_pattern) {
2609                 overall_type = UPDATE_TYPE_FULL;
2610         }
2611
2612         /* some stream updates require passive update */
2613         if (stream_update) {
2614                 union stream_update_flags *su_flags = &stream_update->stream->update_flags;
2615
2616                 if ((stream_update->src.height != 0 && stream_update->src.width != 0) ||
2617                         (stream_update->dst.height != 0 && stream_update->dst.width != 0) ||
2618                         stream_update->integer_scaling_update)
2619                         su_flags->bits.scaling = 1;
2620
2621                 if (stream_update->out_transfer_func)
2622                         su_flags->bits.out_tf = 1;
2623
2624                 if (stream_update->abm_level)
2625                         su_flags->bits.abm_level = 1;
2626
2627                 if (stream_update->dpms_off)
2628                         su_flags->bits.dpms_off = 1;
2629
2630                 if (stream_update->gamut_remap)
2631                         su_flags->bits.gamut_remap = 1;
2632
2633                 if (stream_update->wb_update)
2634                         su_flags->bits.wb_update = 1;
2635
2636                 if (stream_update->dsc_config)
2637                         su_flags->bits.dsc_changed = 1;
2638
2639                 if (stream_update->mst_bw_update)
2640                         su_flags->bits.mst_bw = 1;
2641                 if (stream_update->crtc_timing_adjust && dc_extended_blank_supported(dc))
2642                         su_flags->bits.crtc_timing_adjust = 1;
2643
2644                 if (su_flags->raw != 0)
2645                         overall_type = UPDATE_TYPE_FULL;
2646
2647                 if (stream_update->output_csc_transform || stream_update->output_color_space)
2648                         su_flags->bits.out_csc = 1;
2649         }
2650
2651         for (i = 0; i < surface_count; i++) {
2652                 enum surface_update_type type =
2653                                 det_surface_update(dc, &updates[i]);
2654
2655                 elevate_update_type(&overall_type, type);
2656         }
2657
2658         return overall_type;
2659 }
2660
2661 static bool dc_check_is_fullscreen_video(struct rect src, struct rect clip_rect)
2662 {
2663         int view_height, view_width, clip_x, clip_y, clip_width, clip_height;
2664
2665         view_height = src.height;
2666         view_width = src.width;
2667
2668         clip_x = clip_rect.x;
2669         clip_y = clip_rect.y;
2670
2671         clip_width = clip_rect.width;
2672         clip_height = clip_rect.height;
2673
2674         /* check for centered video, accounting for off-by-1 scaling truncation */
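             /*
              * Worked example (hypothetical numbers): a 1920x1080 src with a
              * clip rect of (x=0, y=2, w=1920, h=1076) leaves 2-pixel top and
              * bottom margins, passes all four range checks below, and the
              * clip_x/clip_y <= 4 margin test then reports it as fullscreen.
              */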
2675         if ((view_height - clip_y - clip_height <= clip_y + 1) &&
2676                         (view_width - clip_x - clip_width <= clip_x + 1) &&
2677                         (view_height - clip_y - clip_height >= clip_y - 1) &&
2678                         (view_width - clip_x - clip_width >= clip_x - 1)) {
2679
2680                 /* when the OS scales up/down to letterbox, it may end up
2681                  * with a few blank pixels on the border due to truncation.
2682                  * Add an offset margin to account for this.
2683                  */
2684                 if (clip_x <= 4 || clip_y <= 4)
2685                         return true;
2686         }
2687
2688         return false;
2689 }
2690
2691 static enum surface_update_type check_boundary_crossing_for_windowed_mpo_with_odm(struct dc *dc,
2692                 struct dc_surface_update *srf_updates, int surface_count,
2693                 enum surface_update_type update_type)
2694 {
2695         enum surface_update_type new_update_type = update_type;
2696         int i, j;
2697         struct pipe_ctx *pipe = NULL;
2698         struct dc_stream_state *stream;
2699
2700         /* Check that we are in windowed MPO with ODM
2701          * - look for MPO pipe by scanning pipes for first pipe matching
2702          *   surface that has moved (position change)
2703          * - MPO pipe will have top pipe
2704          * - check that top pipe has ODM pointer
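          *
          * e.g. with a 2:1 ODM split, the boundary sits at stream->src.x +
          * stream->src.width / 2; a clip rect that moves from one side of
          * that boundary to straddling it (or vice versa) forces
          * UPDATE_TYPE_FULL below.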
2705          */
2706         if ((surface_count > 1) && dc->config.enable_windowed_mpo_odm) {
2707                 for (i = 0; i < surface_count; i++) {
2708                         if (srf_updates[i].surface && srf_updates[i].scaling_info
2709                                         && srf_updates[i].surface->update_flags.bits.position_change) {
2710
2711                                 for (j = 0; j < dc->res_pool->pipe_count; j++) {
2712                                         if (srf_updates[i].surface == dc->current_state->res_ctx.pipe_ctx[j].plane_state) {
2713                                                 pipe = &dc->current_state->res_ctx.pipe_ctx[j];
2714                                                 stream = pipe->stream;
2715                                                 break;
2716                                         }
2717                                 }
2718
2719                                 if (pipe && pipe->top_pipe && (get_num_odm_splits(pipe->top_pipe) > 0) && stream
2720                                                 && !dc_check_is_fullscreen_video(stream->src, srf_updates[i].scaling_info->clip_rect)) {
2721                                         struct rect old_clip_rect, new_clip_rect;
2722                                         bool old_clip_rect_left, old_clip_rect_right, old_clip_rect_middle;
2723                                         bool new_clip_rect_left, new_clip_rect_right, new_clip_rect_middle;
2724
2725                                         old_clip_rect = srf_updates[i].surface->clip_rect;
2726                                         new_clip_rect = srf_updates[i].scaling_info->clip_rect;
2727
2728                                         old_clip_rect_left = ((old_clip_rect.x + old_clip_rect.width) <= (stream->src.x + (stream->src.width/2)));
2729                                         old_clip_rect_right = (old_clip_rect.x >= (stream->src.x + (stream->src.width/2)));
2730                                         old_clip_rect_middle = !old_clip_rect_left && !old_clip_rect_right;
2731
2732                                         new_clip_rect_left = ((new_clip_rect.x + new_clip_rect.width) <= (stream->src.x + (stream->src.width/2)));
2733                                         new_clip_rect_right = (new_clip_rect.x >= (stream->src.x + (stream->src.width/2)));
2734                                         new_clip_rect_middle = !new_clip_rect_left && !new_clip_rect_right;
2735
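                                             /* Each clip rect is classified as entirely left of,
                                              * entirely right of, or straddling ("middle") the ODM
                                              * split at stream->src.x + stream->src.width / 2.  For
                                              * example, on a 3840-wide source the split sits at x
                                              * offset 1920: a rect spanning 1800..2000 is "middle",
                                              * while one spanning 0..1800 is "left".
                                              */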
2736                                         if (old_clip_rect_left && new_clip_rect_middle)
2737                                                 new_update_type = UPDATE_TYPE_FULL;
2738                                         else if (old_clip_rect_middle && new_clip_rect_right)
2739                                                 new_update_type = UPDATE_TYPE_FULL;
2740                                         else if (old_clip_rect_right && new_clip_rect_middle)
2741                                                 new_update_type = UPDATE_TYPE_FULL;
2742                                         else if (old_clip_rect_middle && new_clip_rect_left)
2743                                                 new_update_type = UPDATE_TYPE_FULL;
2744                                 }
2745                         }
2746                 }
2747         }
2748         return new_update_type;
2749 }
2750
2751 /**
2752  * dc_check_update_surfaces_for_stream() - Determine update type (fast, med, or full)
2753  *
2754  * See :c:type:`enum surface_update_type <surface_update_type>` for explanation of update types
2755  */
2756 enum surface_update_type dc_check_update_surfaces_for_stream(
2757                 struct dc *dc,
2758                 struct dc_surface_update *updates,
2759                 int surface_count,
2760                 struct dc_stream_update *stream_update,
2761                 const struct dc_stream_status *stream_status)
2762 {
2763         int i;
2764         enum surface_update_type type;
2765
2766         if (stream_update)
2767                 stream_update->stream->update_flags.raw = 0;
2768         for (i = 0; i < surface_count; i++)
2769                 updates[i].surface->update_flags.raw = 0;
2770
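             /* Determine the raw update type; a full update forces on every
              * surface and stream update flag so all programming paths run,
              * preserving only the dsc_changed bit so that DSC is reprogrammed
              * only when the DSC config actually changed.
              */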
2771         type = check_update_surfaces_for_stream(dc, updates, surface_count, stream_update, stream_status);
2772         if (type == UPDATE_TYPE_FULL) {
2773                 if (stream_update) {
2774                         uint32_t dsc_changed = stream_update->stream->update_flags.bits.dsc_changed;
2775                         stream_update->stream->update_flags.raw = 0xFFFFFFFF;
2776                         stream_update->stream->update_flags.bits.dsc_changed = dsc_changed;
2777                 }
2778                 for (i = 0; i < surface_count; i++)
2779                         updates[i].surface->update_flags.raw = 0xFFFFFFFF;
2780         }
2781
2782         if (type == UPDATE_TYPE_MED)
2783                 type = check_boundary_crossing_for_windowed_mpo_with_odm(dc,
2784                                 updates, surface_count, type);
2785
2786         if (type == UPDATE_TYPE_FAST) {
2787                 // If there's an available clock comparator, we use that.
2788                 if (dc->clk_mgr->funcs->are_clock_states_equal) {
2789                         if (!dc->clk_mgr->funcs->are_clock_states_equal(&dc->clk_mgr->clks, &dc->current_state->bw_ctx.bw.dcn.clk))
2790                                 dc->optimized_required = true;
2791                 // Else we fall back to a memcmp of the clock states, up to prev_p_state_change_support.
2792                 } else if (memcmp(&dc->current_state->bw_ctx.bw.dcn.clk, &dc->clk_mgr->clks, offsetof(struct dc_clocks, prev_p_state_change_support)) != 0) {
2793                         dc->optimized_required = true;
2794                 }
2795
2796                 dc->optimized_required |= dc->wm_optimized_required;
2797         }
2798
2799         return type;
2800 }
2801
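/*
 * Find the dc_stream_status tracked for @stream in the given state;
 * returns NULL if the stream is not part of that state.
 */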
2802 static struct dc_stream_status *stream_get_status(
2803         struct dc_state *ctx,
2804         struct dc_stream_state *stream)
2805 {
2806         uint8_t i;
2807
2808         for (i = 0; i < ctx->stream_count; i++) {
2809                 if (stream == ctx->streams[i]) {
2810                         return &ctx->stream_status[i];
2811                 }
2812         }
2813
2814         return NULL;
2815 }
2816
2817 static const enum surface_update_type update_surface_trace_level = UPDATE_TYPE_FULL;
2818
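/*
 * Copy each populated field of a surface update into the plane state,
 * leaving fields the update does not carry untouched.  As a side effect,
 * flip timestamps are folded into the plane's rolling flip-time history.
 */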
2819 static void copy_surface_update_to_plane(
2820                 struct dc_plane_state *surface,
2821                 struct dc_surface_update *srf_update)
2822 {
2823         if (srf_update->flip_addr) {
2824                 surface->address = srf_update->flip_addr->address;
2825                 surface->flip_immediate =
2826                         srf_update->flip_addr->flip_immediate;
2827                 surface->time.time_elapsed_in_us[surface->time.index] =
2828                         srf_update->flip_addr->flip_timestamp_in_us -
2829                                 surface->time.prev_update_time_in_us;
2830                 surface->time.prev_update_time_in_us =
2831                         srf_update->flip_addr->flip_timestamp_in_us;
2832                 surface->time.index++;
2833                 if (surface->time.index >= DC_PLANE_UPDATE_TIMES_MAX)
2834                         surface->time.index = 0;
2835
2836                 surface->triplebuffer_flips = srf_update->flip_addr->triplebuffer_flips;
2837         }
2838
2839         if (srf_update->scaling_info) {
2840                 surface->scaling_quality =
2841                                 srf_update->scaling_info->scaling_quality;
2842                 surface->dst_rect =
2843                                 srf_update->scaling_info->dst_rect;
2844                 surface->src_rect =
2845                                 srf_update->scaling_info->src_rect;
2846                 surface->clip_rect =
2847                                 srf_update->scaling_info->clip_rect;
2848         }
2849
2850         if (srf_update->plane_info) {
2851                 surface->color_space =
2852                                 srf_update->plane_info->color_space;
2853                 surface->format =
2854                                 srf_update->plane_info->format;
2855                 surface->plane_size =
2856                                 srf_update->plane_info->plane_size;
2857                 surface->rotation =
2858                                 srf_update->plane_info->rotation;
2859                 surface->horizontal_mirror =
2860                                 srf_update->plane_info->horizontal_mirror;
2861                 surface->stereo_format =
2862                                 srf_update->plane_info->stereo_format;
2863                 surface->tiling_info =
2864                                 srf_update->plane_info->tiling_info;
2865                 surface->visible =
2866                                 srf_update->plane_info->visible;
2867                 surface->per_pixel_alpha =
2868                                 srf_update->plane_info->per_pixel_alpha;
2869                 surface->global_alpha =
2870                                 srf_update->plane_info->global_alpha;
2871                 surface->global_alpha_value =
2872                                 srf_update->plane_info->global_alpha_value;
2873                 surface->dcc =
2874                                 srf_update->plane_info->dcc;
2875                 surface->layer_index =
2876                                 srf_update->plane_info->layer_index;
2877         }
2878
2879         if (srf_update->gamma &&
2880                         (surface->gamma_correction !=
2881                                         srf_update->gamma)) {
2882                 memcpy(&surface->gamma_correction->entries,
2883                         &srf_update->gamma->entries,
2884                         sizeof(struct dc_gamma_entries));
2885                 surface->gamma_correction->is_identity =
2886                         srf_update->gamma->is_identity;
2887                 surface->gamma_correction->num_entries =
2888                         srf_update->gamma->num_entries;
2889                 surface->gamma_correction->type =
2890                         srf_update->gamma->type;
2891         }
2892
2893         if (srf_update->in_transfer_func &&
2894                         (surface->in_transfer_func !=
2895                                 srf_update->in_transfer_func)) {
2896                 surface->in_transfer_func->sdr_ref_white_level =
2897                         srf_update->in_transfer_func->sdr_ref_white_level;
2898                 surface->in_transfer_func->tf =
2899                         srf_update->in_transfer_func->tf;
2900                 surface->in_transfer_func->type =
2901                         srf_update->in_transfer_func->type;
2902                 memcpy(&surface->in_transfer_func->tf_pts,
2903                         &srf_update->in_transfer_func->tf_pts,
2904                         sizeof(struct dc_transfer_func_distributed_points));
2905         }
2906
2907         if (srf_update->func_shaper &&
2908                         (surface->in_shaper_func !=
2909                         srf_update->func_shaper))
2910                 memcpy(surface->in_shaper_func, srf_update->func_shaper,
2911                 sizeof(*surface->in_shaper_func));
2912
2913         if (srf_update->lut3d_func &&
2914                         (surface->lut3d_func !=
2915                         srf_update->lut3d_func))
2916                 memcpy(surface->lut3d_func, srf_update->lut3d_func,
2917                 sizeof(*surface->lut3d_func));
2918
2919         if (srf_update->hdr_mult.value)
2920                 surface->hdr_mult =
2921                                 srf_update->hdr_mult;
2922
2923         if (srf_update->blend_tf &&
2924                         (surface->blend_tf !=
2925                         srf_update->blend_tf))
2926                 memcpy(surface->blend_tf, srf_update->blend_tf,
2927                 sizeof(*surface->blend_tf));
2928
2929         if (srf_update->input_csc_color_matrix)
2930                 surface->input_csc_color_matrix =
2931                         *srf_update->input_csc_color_matrix;
2932
2933         if (srf_update->coeff_reduction_factor)
2934                 surface->coeff_reduction_factor =
2935                         *srf_update->coeff_reduction_factor;
2936
2937         if (srf_update->gamut_remap_matrix)
2938                 surface->gamut_remap_matrix =
2939                         *srf_update->gamut_remap_matrix;
2940 }
2941
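/*
 * Fold a stream update into the stream state.  Most fields are optional
 * pointers and are copied only when set.  A DSC config change is special:
 * it is validated against a temporary copy of the current state first,
 * and dropped (update->dsc_config cleared) if bandwidth validation fails.
 */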
2942 static void copy_stream_update_to_stream(struct dc *dc,
2943                                          struct dc_state *context,
2944                                          struct dc_stream_state *stream,
2945                                          struct dc_stream_update *update)
2946 {
2947         struct dc_context *dc_ctx = dc->ctx;
2948
2949         if (update == NULL || stream == NULL)
2950                 return;
2951
2952         if (update->src.height && update->src.width)
2953                 stream->src = update->src;
2954
2955         if (update->dst.height && update->dst.width)
2956                 stream->dst = update->dst;
2957
2958         if (update->out_transfer_func &&
2959             stream->out_transfer_func != update->out_transfer_func) {
2960                 stream->out_transfer_func->sdr_ref_white_level =
2961                         update->out_transfer_func->sdr_ref_white_level;
2962                 stream->out_transfer_func->tf = update->out_transfer_func->tf;
2963                 stream->out_transfer_func->type =
2964                         update->out_transfer_func->type;
2965                 memcpy(&stream->out_transfer_func->tf_pts,
2966                        &update->out_transfer_func->tf_pts,
2967                        sizeof(struct dc_transfer_func_distributed_points));
2968         }
2969
2970         if (update->hdr_static_metadata)
2971                 stream->hdr_static_metadata = *update->hdr_static_metadata;
2972
2973         if (update->abm_level)
2974                 stream->abm_level = *update->abm_level;
2975
2976         if (update->periodic_interrupt)
2977                 stream->periodic_interrupt = *update->periodic_interrupt;
2978
2979         if (update->gamut_remap)
2980                 stream->gamut_remap_matrix = *update->gamut_remap;
2981
2982         /* Note: updating this after mode set is currently not a use case;
2983          * however, if it arises, OCSC would need to be reprogrammed at a
2984          * minimum
2985          */
2986         if (update->output_color_space)
2987                 stream->output_color_space = *update->output_color_space;
2988
2989         if (update->output_csc_transform)
2990                 stream->csc_color_matrix = *update->output_csc_transform;
2991
2992         if (update->vrr_infopacket)
2993                 stream->vrr_infopacket = *update->vrr_infopacket;
2994
2995         if (update->allow_freesync)
2996                 stream->allow_freesync = *update->allow_freesync;
2997
2998         if (update->vrr_active_variable)
2999                 stream->vrr_active_variable = *update->vrr_active_variable;
3000
3001         if (update->crtc_timing_adjust)
3002                 stream->adjust = *update->crtc_timing_adjust;
3003
3004         if (update->dpms_off)
3005                 stream->dpms_off = *update->dpms_off;
3006
3007         if (update->hfvsif_infopacket)
3008                 stream->hfvsif_infopacket = *update->hfvsif_infopacket;
3009
3010         if (update->vtem_infopacket)
3011                 stream->vtem_infopacket = *update->vtem_infopacket;
3012
3013         if (update->vsc_infopacket)
3014                 stream->vsc_infopacket = *update->vsc_infopacket;
3015
3016         if (update->vsp_infopacket)
3017                 stream->vsp_infopacket = *update->vsp_infopacket;
3018
3019         if (update->adaptive_sync_infopacket)
3020                 stream->adaptive_sync_infopacket = *update->adaptive_sync_infopacket;
3021
3022         if (update->dither_option)
3023                 stream->dither_option = *update->dither_option;
3024
3025         if (update->pending_test_pattern)
3026                 stream->test_pattern = *update->pending_test_pattern;
3027         /* update current stream with writeback info */
3028         if (update->wb_update) {
3029                 int i;
3030
3031                 stream->num_wb_info = update->wb_update->num_wb_info;
3032                 ASSERT(stream->num_wb_info <= MAX_DWB_PIPES);
3033                 for (i = 0; i < stream->num_wb_info; i++)
3034                         stream->writeback_info[i] =
3035                                 update->wb_update->writeback_info[i];
3036         }
3037         if (update->dsc_config) {
3038                 struct dc_dsc_config old_dsc_cfg = stream->timing.dsc_cfg;
3039                 uint32_t old_dsc_enabled = stream->timing.flags.DSC;
3040                 uint32_t enable_dsc = (update->dsc_config->num_slices_h != 0 &&
3041                                        update->dsc_config->num_slices_v != 0);
3042
3043                 /* Use a temporary context for validating the new DSC config */
3044                 struct dc_state *dsc_validate_context = dc_create_state(dc);
3045
3046                 if (dsc_validate_context) {
3047                         dc_resource_state_copy_construct(dc->current_state, dsc_validate_context);
3048
3049                         stream->timing.dsc_cfg = *update->dsc_config;
3050                         stream->timing.flags.DSC = enable_dsc;
3051                         if (!dc->res_pool->funcs->validate_bandwidth(dc, dsc_validate_context, true)) {
3052                                 stream->timing.dsc_cfg = old_dsc_cfg;
3053                                 stream->timing.flags.DSC = old_dsc_enabled;
3054                                 update->dsc_config = NULL;
3055                         }
3056
3057                         dc_release_state(dsc_validate_context);
3058                 } else {
3059                         DC_ERROR("Failed to allocate new validate context for DSC change\n");
3060                         update->dsc_config = NULL;
3061                 }
3062         }
3063 }
3064
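/*
 * Build the dc_state for this update.  Fast and medium updates patch the
 * current state in place; a full update constructs a new context, removes
 * existing phantom pipes, swaps in the new set of planes and re-validates
 * bandwidth.  On success the chosen context and the computed update type
 * are returned through the out parameters.
 */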
3065 static bool update_planes_and_stream_state(struct dc *dc,
3066                 struct dc_surface_update *srf_updates, int surface_count,
3067                 struct dc_stream_state *stream,
3068                 struct dc_stream_update *stream_update,
3069                 enum surface_update_type *new_update_type,
3070                 struct dc_state **new_context)
3071 {
3072         struct dc_state *context;
3073         int i, j;
3074         enum surface_update_type update_type;
3075         const struct dc_stream_status *stream_status;
3076         struct dc_context *dc_ctx = dc->ctx;
3077
3078         stream_status = dc_stream_get_status(stream);
3079
3080         if (!stream_status) {
3081                 if (surface_count) /* Only an error condition if surf_count non-zero*/
3082                         ASSERT(false);
3083
3084                 return false; /* Cannot commit surface to stream that is not committed */
3085         }
3086
3087         context = dc->current_state;
3088
3089         update_type = dc_check_update_surfaces_for_stream(
3090                         dc, srf_updates, surface_count, stream_update, stream_status);
3091
3092         /* update current stream with the new updates */
3093         copy_stream_update_to_stream(dc, context, stream, stream_update);
3094
3095         /* do not perform surface update if surface has invalid dimensions
3096          * (all zero) and no scaling_info is provided
3097          */
3098         if (surface_count > 0) {
3099                 for (i = 0; i < surface_count; i++) {
3100                         if ((srf_updates[i].surface->src_rect.width == 0 ||
3101                                  srf_updates[i].surface->src_rect.height == 0 ||
3102                                  srf_updates[i].surface->dst_rect.width == 0 ||
3103                                  srf_updates[i].surface->dst_rect.height == 0) &&
3104                                 (!srf_updates[i].scaling_info ||
3105                                   srf_updates[i].scaling_info->src_rect.width == 0 ||
3106                                   srf_updates[i].scaling_info->src_rect.height == 0 ||
3107                                   srf_updates[i].scaling_info->dst_rect.width == 0 ||
3108                                   srf_updates[i].scaling_info->dst_rect.height == 0)) {
3109                                 DC_ERROR("Invalid src/dst rects in surface update!\n");
3110                                 return false;
3111                         }
3112                 }
3113         }
3114
3115         if (update_type >= update_surface_trace_level)
3116                 update_surface_trace(dc, srf_updates, surface_count);
3117
3118         if (update_type >= UPDATE_TYPE_FULL) {
3119                 struct dc_plane_state *new_planes[MAX_SURFACES] = {0};
3120
3121                 for (i = 0; i < surface_count; i++)
3122                         new_planes[i] = srf_updates[i].surface;
3123
3124                 /* initialize scratch memory for building context */
3125                 context = dc_create_state(dc);
3126                 if (context == NULL) {
3127                         DC_ERROR("Failed to allocate new validate context!\n");
3128                         return false;
3129                 }
3130
3131                 dc_resource_state_copy_construct(
3132                                 dc->current_state, context);
3133
3134                 /* For each full update, remove all existing phantom pipes first.
3135                  * This ensures that we have enough pipes for newly added MPO planes.
3136                  */
3137                 if (dc->res_pool->funcs->remove_phantom_pipes)
3138                         dc->res_pool->funcs->remove_phantom_pipes(dc, context, false);
3139
3140                 /* remove old surfaces from context */
3141                 if (!dc_rem_all_planes_for_stream(dc, stream, context)) {
3142
3143                         BREAK_TO_DEBUGGER();
3144                         goto fail;
3145                 }
3146
3147                 /* add surface to context */
3148                 if (!dc_add_all_planes_for_stream(dc, stream, new_planes, surface_count, context)) {
3149
3150                         BREAK_TO_DEBUGGER();
3151                         goto fail;
3152                 }
3153         }
3154
3155         /* save update parameters into surface */
3156         for (i = 0; i < surface_count; i++) {
3157                 struct dc_plane_state *surface = srf_updates[i].surface;
3158
3159                 copy_surface_update_to_plane(surface, &srf_updates[i]);
3160
3161                 if (update_type >= UPDATE_TYPE_MED) {
3162                         for (j = 0; j < dc->res_pool->pipe_count; j++) {
3163                                 struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[j];
3164
3165                                 if (pipe_ctx->plane_state != surface)
3166                                         continue;
3167
3168                                 resource_build_scaling_params(pipe_ctx);
3169                         }
3170                 }
3171         }
3172
3173         if (update_type == UPDATE_TYPE_FULL) {
3174                 if (!dc->res_pool->funcs->validate_bandwidth(dc, context, false)) {
3175                         /* For phantom pipes we remove and create a new set of phantom pipes
3176                          * for each full update (because we don't know if we'll need phantom
3177                          * pipes until after the first round of validation). However, if validation
3178                          * fails we need to keep the existing phantom pipes (because we don't update
3179                          * the dc->current_state).
3180                          *
3181                          * The phantom stream/plane refcount is decremented for validation because
3182                          * we assume it'll be removed (the free comes when the dc_state is freed),
3183                          * but if validation fails we have to increment back the refcount so it's
3184                          * consistent.
3185                          */
3186                         if (dc->res_pool->funcs->retain_phantom_pipes)
3187                                 dc->res_pool->funcs->retain_phantom_pipes(dc, dc->current_state);
3188                         BREAK_TO_DEBUGGER();
3189                         goto fail;
3190                 }
3191         }
3192
3193         *new_context = context;
3194         *new_update_type = update_type;
3195
3196         return true;
3197
3198 fail:
3199         dc_release_state(context);
3200
3201         return false;
3202
3203 }
3204
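/*
 * Program stream-level updates on the OTG-master pipe of @stream.  Info
 * packets, dynamic metadata, gamut remap, output CSC and dither changes
 * are applied for any update type; DSC, MST payload, test pattern, DPMS
 * and ABM changes are backend reprogramming and are skipped for
 * UPDATE_TYPE_FAST.
 */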
3205 static void commit_planes_do_stream_update(struct dc *dc,
3206                 struct dc_stream_state *stream,
3207                 struct dc_stream_update *stream_update,
3208                 enum surface_update_type update_type,
3209                 struct dc_state *context)
3210 {
3211         int j;
3212
3213         // Stream updates
3214         for (j = 0; j < dc->res_pool->pipe_count; j++) {
3215                 struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[j];
3216
3217                 if (!pipe_ctx->top_pipe && !pipe_ctx->prev_odm_pipe && pipe_ctx->stream == stream) {
3218
3219                         if (stream_update->periodic_interrupt && dc->hwss.setup_periodic_interrupt)
3220                                 dc->hwss.setup_periodic_interrupt(dc, pipe_ctx);
3221
3222                         if ((stream_update->hdr_static_metadata && !stream->use_dynamic_meta) ||
3223                                         stream_update->vrr_infopacket ||
3224                                         stream_update->vsc_infopacket ||
3225                                         stream_update->vsp_infopacket ||
3226                                         stream_update->hfvsif_infopacket ||
3227                                         stream_update->adaptive_sync_infopacket ||
3228                                         stream_update->vtem_infopacket) {
3229                                 resource_build_info_frame(pipe_ctx);
3230                                 dc->hwss.update_info_frame(pipe_ctx);
3231
3232                                 if (dc_is_dp_signal(pipe_ctx->stream->signal))
3233                                         link_dp_source_sequence_trace(pipe_ctx->stream->link, DPCD_SOURCE_SEQ_AFTER_UPDATE_INFO_FRAME);
3234                         }
3235
3236                         if (stream_update->hdr_static_metadata &&
3237                                         stream->use_dynamic_meta &&
3238                                         dc->hwss.set_dmdata_attributes &&
3239                                         pipe_ctx->stream->dmdata_address.quad_part != 0)
3240                                 dc->hwss.set_dmdata_attributes(pipe_ctx);
3241
3242                         if (stream_update->gamut_remap)
3243                                 dc_stream_set_gamut_remap(dc, stream);
3244
3245                         if (stream_update->output_csc_transform)
3246                                 dc_stream_program_csc_matrix(dc, stream);
3247
3248                         if (stream_update->dither_option) {
3249                                 struct pipe_ctx *odm_pipe = pipe_ctx->next_odm_pipe;
3250                                 resource_build_bit_depth_reduction_params(pipe_ctx->stream,
3251                                                                         &pipe_ctx->stream->bit_depth_params);
3252                                 pipe_ctx->stream_res.opp->funcs->opp_program_fmt(pipe_ctx->stream_res.opp,
3253                                                 &stream->bit_depth_params,
3254                                                 &stream->clamping);
3255                                 while (odm_pipe) {
3256                                         odm_pipe->stream_res.opp->funcs->opp_program_fmt(odm_pipe->stream_res.opp,
3257                                                         &stream->bit_depth_params,
3258                                                         &stream->clamping);
3259                                         odm_pipe = odm_pipe->next_odm_pipe;
3260                                 }
3261                         }
3262
3263
3264                         /* Full FE update */
3265                         if (update_type == UPDATE_TYPE_FAST)
3266                                 continue;
3267
3268                         if (stream_update->dsc_config)
3269                                 link_update_dsc_config(pipe_ctx);
3270
3271                         if (stream_update->mst_bw_update) {
3272                                 if (stream_update->mst_bw_update->is_increase)
3273                                         link_increase_mst_payload(pipe_ctx, stream_update->mst_bw_update->mst_stream_bw);
3274                                 else
3275                                         link_reduce_mst_payload(pipe_ctx, stream_update->mst_bw_update->mst_stream_bw);
3276                         }
3277
3278                         if (stream_update->pending_test_pattern) {
3279                                 dc_link_dp_set_test_pattern(stream->link,
3280                                         stream->test_pattern.type,
3281                                         stream->test_pattern.color_space,
3282                                         stream->test_pattern.p_link_settings,
3283                                         stream->test_pattern.p_custom_pattern,
3284                                         stream->test_pattern.cust_pattern_size);
3285                         }
3286
3287                         if (stream_update->dpms_off) {
3288                                 if (*stream_update->dpms_off) {
3289                                         link_set_dpms_off(pipe_ctx);
3290                                         /* for dpms, keep acquired resources*/
3291                                         if (pipe_ctx->stream_res.audio && !dc->debug.az_endpoint_mute_only)
3292                                                 pipe_ctx->stream_res.audio->funcs->az_disable(pipe_ctx->stream_res.audio);
3293
3294                                         dc->optimized_required = true;
3295
3296                                 } else {
3297                                         if (get_seamless_boot_stream_count(context) == 0)
3298                                                 dc->hwss.prepare_bandwidth(dc, dc->current_state);
3299                                         link_set_dpms_on(dc->current_state, pipe_ctx);
3300                                 }
3301                         }
3302
3303                         if (stream_update->abm_level && pipe_ctx->stream_res.abm) {
3304                                 bool should_program_abm = true;
3305
3306                                 // If OTG funcs are defined, check whether the OTG is blanked before programming.
3307                                 if (pipe_ctx->stream_res.tg->funcs->is_blanked)
3308                                         if (pipe_ctx->stream_res.tg->funcs->is_blanked(pipe_ctx->stream_res.tg))
3309                                                 should_program_abm = false;
3310
3311                                 if (should_program_abm) {
3312                                         if (*stream_update->abm_level == ABM_LEVEL_IMMEDIATE_DISABLE) {
3313                                                 dc->hwss.set_abm_immediate_disable(pipe_ctx);
3314                                         } else {
3315                                                 pipe_ctx->stream_res.abm->funcs->set_abm_level(
3316                                                         pipe_ctx->stream_res.abm, stream->abm_level);
3317                                         }
3318                                 }
3319                         }
3320                 }
3321         }
3322 }
3323
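/*
 * Dirty-rect commands are sent only for links with PSR1 or PSR-SU
 * enabled, on DCN 3.1 and newer.
 */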
3324 static bool dc_dmub_should_send_dirty_rect_cmd(struct dc *dc, struct dc_stream_state *stream)
3325 {
3326         if ((stream->link->psr_settings.psr_version == DC_PSR_VERSION_SU_1
3327                         || stream->link->psr_settings.psr_version == DC_PSR_VERSION_1)
3328                         && stream->ctx->dce_version >= DCN_VERSION_3_1)
3329                 return true;
3330
3331         return false;
3332 }
3333
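/*
 * Send the per-flip dirty rectangles to DMUB so that PSR-SU can restrict
 * the panel refresh to the damaged region.  Immediate flips are skipped,
 * and one command is issued for every pipe carrying the updated plane.
 */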
3334 void dc_dmub_update_dirty_rect(struct dc *dc,
3335                                int surface_count,
3336                                struct dc_stream_state *stream,
3337                                struct dc_surface_update *srf_updates,
3338                                struct dc_state *context)
3339 {
3340         union dmub_rb_cmd cmd;
3341         struct dc_context *dc_ctx = dc->ctx;
3342         struct dmub_cmd_update_dirty_rect_data *update_dirty_rect;
3343         unsigned int i, j;
3344         unsigned int panel_inst = 0;
3345
3346         if (!dc_dmub_should_send_dirty_rect_cmd(dc, stream))
3347                 return;
3348
3349         if (!dc_get_edp_link_panel_inst(dc, stream->link, &panel_inst))
3350                 return;
3351
3352         memset(&cmd, 0x0, sizeof(cmd));
3353         cmd.update_dirty_rect.header.type = DMUB_CMD__UPDATE_DIRTY_RECT;
3354         cmd.update_dirty_rect.header.sub_type = 0;
3355         cmd.update_dirty_rect.header.payload_bytes =
3356                 sizeof(cmd.update_dirty_rect) -
3357                 sizeof(cmd.update_dirty_rect.header);
3358         update_dirty_rect = &cmd.update_dirty_rect.update_dirty_rect_data;
3359         for (i = 0; i < surface_count; i++) {
3360                 struct dc_plane_state *plane_state = srf_updates[i].surface;
3361                 const struct dc_flip_addrs *flip_addr = srf_updates[i].flip_addr;
3362
3363                 if (!srf_updates[i].surface || !flip_addr)
3364                         continue;
3365                 /* Do not send in immediate flip mode */
3366                 if (srf_updates[i].surface->flip_immediate)
3367                         continue;
3368
3369                 update_dirty_rect->dirty_rect_count = flip_addr->dirty_rect_count;
3370                 memcpy(update_dirty_rect->src_dirty_rects, flip_addr->dirty_rects,
3371                                 sizeof(flip_addr->dirty_rects));
3372                 for (j = 0; j < dc->res_pool->pipe_count; j++) {
3373                         struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[j];
3374
3375                         if (pipe_ctx->stream != stream)
3376                                 continue;
3377                         if (pipe_ctx->plane_state != plane_state)
3378                                 continue;
3379
3380                         update_dirty_rect->panel_inst = panel_inst;
3381                         update_dirty_rect->pipe_idx = j;
3382                         dc_dmub_srv_cmd_queue(dc_ctx->dmub_srv, &cmd);
3383                         dc_dmub_srv_cmd_execute(dc_ctx->dmub_srv);
3384                 }
3385         }
3386 }
3387
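/*
 * Apply the validated surface and stream updates to hardware.  The rough
 * sequence is: wait out pending double-buffer activity, take the pipe
 * (and, when needed, DMUB/SubVP) locks, program stream-level changes,
 * program front ends (full updates) or flip addresses (fast updates),
 * release the locks, and finally arm the manual trigger on flipped pipes.
 */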
3388 static void commit_planes_for_stream(struct dc *dc,
3389                 struct dc_surface_update *srf_updates,
3390                 int surface_count,
3391                 struct dc_stream_state *stream,
3392                 struct dc_stream_update *stream_update,
3393                 enum surface_update_type update_type,
3394                 struct dc_state *context)
3395 {
3396         int i, j;
3397         struct pipe_ctx *top_pipe_to_program = NULL;
3398         bool should_lock_all_pipes = (update_type != UPDATE_TYPE_FAST);
3399         bool subvp_prev_use = false;
3400         bool subvp_curr_use = false;
3401
3402         // Once we apply the new SubVP context to hardware it won't be in
3403         // dc->current_state anymore, so cache whether SubVP was in use
3404         // before we apply the new SubVP context
3405         subvp_prev_use = false;
3406
3407
3408         dc_z10_restore(dc);
3409
3410         if (update_type == UPDATE_TYPE_FULL) {
3411                 /* wait for all double-buffer activity to clear on all pipes */
3412                 int pipe_idx;
3413
3414                 for (pipe_idx = 0; pipe_idx < dc->res_pool->pipe_count; pipe_idx++) {
3415                         struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[pipe_idx];
3416
3417                         if (!pipe_ctx->stream)
3418                                 continue;
3419
3420                         if (pipe_ctx->stream_res.tg->funcs->wait_drr_doublebuffer_pending_clear)
3421                                 pipe_ctx->stream_res.tg->funcs->wait_drr_doublebuffer_pending_clear(pipe_ctx->stream_res.tg);
3422                 }
3423         }
3424
3425         if (get_seamless_boot_stream_count(context) > 0 && surface_count > 0) {
3426                 /* The seamless boot optimization flag keeps clocks and watermarks
3427                  * high until the first flip.  After the first flip, optimization is
3428                  * required to lower bandwidth.  Note that UEFI is expected to light
3429                  * up only a single display on POST, so we only expect one stream
3430                  * with the seamless boot flag set.
3431                  */
3432                 if (stream->apply_seamless_boot_optimization) {
3433                         stream->apply_seamless_boot_optimization = false;
3434
3435                         if (get_seamless_boot_stream_count(context) == 0)
3436                                 dc->optimized_required = true;
3437                 }
3438         }
3439
3440         if (update_type == UPDATE_TYPE_FULL) {
3441                 dc_allow_idle_optimizations(dc, false);
3442
3443                 if (get_seamless_boot_stream_count(context) == 0)
3444                         dc->hwss.prepare_bandwidth(dc, context);
3445
3446                 if (dc->debug.enable_double_buffered_dsc_pg_support)
3447                         dc->hwss.update_dsc_pg(dc, context, false);
3448
3449                 context_clock_trace(dc, context);
3450         }
3451
3452         for (j = 0; j < dc->res_pool->pipe_count; j++) {
3453                 struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[j];
3454
3455                 if (!pipe_ctx->top_pipe &&
3456                         !pipe_ctx->prev_odm_pipe &&
3457                         pipe_ctx->stream &&
3458                         pipe_ctx->stream == stream) {
3459                         top_pipe_to_program = pipe_ctx;
3460                 }
3461         }
3462
3463         for (i = 0; i < dc->res_pool->pipe_count; i++) {
3464                 struct pipe_ctx *old_pipe = &dc->current_state->res_ctx.pipe_ctx[i];
3465
3466                 // Check old context for SubVP
3467                 subvp_prev_use |= (old_pipe->stream && old_pipe->stream->mall_stream_config.type == SUBVP_PHANTOM);
3468                 if (subvp_prev_use)
3469                         break;
3470         }
3471
3472         for (i = 0; i < dc->res_pool->pipe_count; i++) {
3473                 struct pipe_ctx *pipe = &context->res_ctx.pipe_ctx[i];
3474
3475                 if (pipe->stream && pipe->stream->mall_stream_config.type == SUBVP_PHANTOM) {
3476                         subvp_curr_use = true;
3477                         break;
3478                 }
3479         }
3480
3481         if (stream->test_pattern.type != DP_TEST_PATTERN_VIDEO_MODE) {
3482                 struct pipe_ctx *mpcc_pipe;
3483                 struct pipe_ctx *odm_pipe;
3484
3485                 for (mpcc_pipe = top_pipe_to_program; mpcc_pipe; mpcc_pipe = mpcc_pipe->bottom_pipe)
3486                         for (odm_pipe = mpcc_pipe; odm_pipe; odm_pipe = odm_pipe->next_odm_pipe)
3487                                 odm_pipe->ttu_regs.min_ttu_vblank = MAX_TTU;
3488         }
3489
3490         if ((update_type != UPDATE_TYPE_FAST) && stream->update_flags.bits.dsc_changed)
3491                 if (top_pipe_to_program &&
3492                         top_pipe_to_program->stream_res.tg->funcs->lock_doublebuffer_enable) {
3493                         if (should_use_dmub_lock(stream->link)) {
3494                                 union dmub_hw_lock_flags hw_locks = { 0 };
3495                                 struct dmub_hw_lock_inst_flags inst_flags = { 0 };
3496
3497                                 hw_locks.bits.lock_dig = 1;
3498                                 inst_flags.dig_inst = top_pipe_to_program->stream_res.tg->inst;
3499
3500                                 dmub_hw_lock_mgr_cmd(dc->ctx->dmub_srv,
3501                                                         true,
3502                                                         &hw_locks,
3503                                                         &inst_flags);
3504                         } else
3505                                 top_pipe_to_program->stream_res.tg->funcs->lock_doublebuffer_enable(
3506                                                 top_pipe_to_program->stream_res.tg);
3507                 }
3508
3509         if (should_lock_all_pipes && dc->hwss.interdependent_update_lock) {
3510                 if (dc->hwss.subvp_pipe_control_lock)
3511                                 dc->hwss.subvp_pipe_control_lock(dc, context, true, should_lock_all_pipes, NULL, subvp_prev_use);
3512                 dc->hwss.interdependent_update_lock(dc, context, true);
3513
3514         } else {
3515                 if (dc->hwss.subvp_pipe_control_lock)
3516                         dc->hwss.subvp_pipe_control_lock(dc, context, true, should_lock_all_pipes, top_pipe_to_program, subvp_prev_use);
3517                 /* Lock the top pipe while updating plane addrs, since freesync requires
3518                  *  plane addr update event triggers to be synchronized.
3519                  *  top_pipe_to_program is expected to never be NULL
3520                  */
3521                 dc->hwss.pipe_control_lock(dc, top_pipe_to_program, true);
3522         }
3523
3524         dc_dmub_update_dirty_rect(dc, surface_count, stream, srf_updates, context);
3525
3526         if (update_type != UPDATE_TYPE_FAST) {
3527                 for (i = 0; i < dc->res_pool->pipe_count; i++) {
3528                         struct pipe_ctx *new_pipe = &context->res_ctx.pipe_ctx[i];
3529
3530                         if ((new_pipe->stream && new_pipe->stream->mall_stream_config.type == SUBVP_PHANTOM) ||
3531                                         subvp_prev_use) {
3532                                 // If old context or new context has phantom pipes, apply
3533                                 // the phantom timings now. We can't change the phantom
3534                                 // pipe configuration safely without the driver acquiring
3535                                 // the DMCUB lock first.
3536                                 dc->hwss.apply_ctx_to_hw(dc, context);
3537                                 break;
3538                         }
3539                 }
3540         }
3541
3542         // Stream updates
3543         if (stream_update)
3544                 commit_planes_do_stream_update(dc, stream, stream_update, update_type, context);
3545
3546         if (surface_count == 0) {
3547                 /*
3548                  * In case of turning off the screen, there is no need to program the
3549                  * front end a second time; just return after programming blank.
3550                  */
3551                 if (dc->hwss.apply_ctx_for_surface)
3552                         dc->hwss.apply_ctx_for_surface(dc, stream, 0, context);
3553                 if (dc->hwss.program_front_end_for_ctx)
3554                         dc->hwss.program_front_end_for_ctx(dc, context);
3555
3556                 if (should_lock_all_pipes && dc->hwss.interdependent_update_lock) {
3557                         dc->hwss.interdependent_update_lock(dc, context, false);
3558                 } else {
3559                         dc->hwss.pipe_control_lock(dc, top_pipe_to_program, false);
3560                 }
3561                 dc->hwss.post_unlock_program_front_end(dc, context);
3562
3563                 if (update_type != UPDATE_TYPE_FAST)
3564                         if (dc->hwss.commit_subvp_config)
3565                                 dc->hwss.commit_subvp_config(dc, context);
3566
3567                 /* Since phantom pipe programming is moved to post_unlock_program_front_end,
3568                  * move the SubVP lock to after the phantom pipes have been set up
3569                  */
3570                 if (dc->hwss.subvp_pipe_control_lock)
3571                         dc->hwss.subvp_pipe_control_lock(dc, context, false, should_lock_all_pipes,
3572                                                          NULL, subvp_prev_use);
3573                 return;
3574         }
3575
3576         if (update_type != UPDATE_TYPE_FAST) {
3577                 for (j = 0; j < dc->res_pool->pipe_count; j++) {
3578                         struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[j];
3579
3580                         if (dc->debug.visual_confirm == VISUAL_CONFIRM_SUBVP &&
3581                                 pipe_ctx->stream && pipe_ctx->plane_state) {
3582                                 /* Only update visual confirm for SUBVP here.
3583                                  * The bar appears on all pipes, so we need to update it on all
3584                                  * displays so that the information doesn't get stale.
3585                                  */
3586                                 struct mpcc_blnd_cfg blnd_cfg = { 0 };
3587
3588                                 dc->hwss.update_visual_confirm_color(dc, pipe_ctx, &blnd_cfg.black_color,
3589                                                 pipe_ctx->plane_res.hubp->inst);
3590                         }
3591                 }
3592         }
3593
3594         if (!IS_DIAG_DC(dc->ctx->dce_environment)) {
3595                 for (i = 0; i < surface_count; i++) {
3596                         struct dc_plane_state *plane_state = srf_updates[i].surface;
3597                         /* set logical flag for lock/unlock use */
3598                         for (j = 0; j < dc->res_pool->pipe_count; j++) {
3599                                 struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[j];
3600                                 if (!pipe_ctx->plane_state)
3601                                         continue;
3602                                 if (!should_update_pipe_for_plane(context, pipe_ctx, plane_state))
3603                                         continue;
3604                                 pipe_ctx->plane_state->triplebuffer_flips = false;
3605                                 if (update_type == UPDATE_TYPE_FAST &&
3606                                         dc->hwss.program_triplebuffer != NULL &&
3607                                         !pipe_ctx->plane_state->flip_immediate && dc->debug.enable_tri_buf) {
3608                                                 /* triple buffer for VUpdate only */
3609                                                 pipe_ctx->plane_state->triplebuffer_flips = true;
3610                                 }
3611                         }
3612                         if (update_type == UPDATE_TYPE_FULL) {
3613                                 /* force vsync flip when reconfiguring pipes to prevent underflow */
3614                                 plane_state->flip_immediate = false;
3615                         }
3616                 }
3617         }
3618
3619         // Update Type FULL, Surface updates
3620         for (j = 0; j < dc->res_pool->pipe_count; j++) {
3621                 struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[j];
3622
3623                 if (!pipe_ctx->top_pipe &&
3624                         !pipe_ctx->prev_odm_pipe &&
3625                         should_update_pipe_for_stream(context, pipe_ctx, stream)) {
3626                         struct dc_stream_status *stream_status = NULL;
3627
3628                         if (!pipe_ctx->plane_state)
3629                                 continue;
3630
3631                         /* Full FE update */
3632                         if (update_type == UPDATE_TYPE_FAST)
3633                                 continue;
3634
3635                         ASSERT(!pipe_ctx->plane_state->triplebuffer_flips);
3636
3637                         if (dc->hwss.program_triplebuffer != NULL && dc->debug.enable_tri_buf) {
3638                                 /* turn off triple buffer for full update */
3639                                 dc->hwss.program_triplebuffer(
3640                                         dc, pipe_ctx, pipe_ctx->plane_state->triplebuffer_flips);
3641                         }
3642                         stream_status =
3643                                 stream_get_status(context, pipe_ctx->stream);
3644
3645                         if (dc->hwss.apply_ctx_for_surface)
3646                                 dc->hwss.apply_ctx_for_surface(
3647                                         dc, pipe_ctx->stream, stream_status->plane_count, context);
3648                 }
3649         }
3650         if (dc->hwss.program_front_end_for_ctx && update_type != UPDATE_TYPE_FAST) {
3651                 dc->hwss.program_front_end_for_ctx(dc, context);
3652                 if (dc->debug.validate_dml_output) {
3653                         for (i = 0; i < dc->res_pool->pipe_count; i++) {
3654                                 struct pipe_ctx *cur_pipe = &context->res_ctx.pipe_ctx[i];
3655                                 if (cur_pipe->stream == NULL)
3656                                         continue;
3657
3658                                 cur_pipe->plane_res.hubp->funcs->validate_dml_output(
3659                                                 cur_pipe->plane_res.hubp, dc->ctx,
3660                                                 &context->res_ctx.pipe_ctx[i].rq_regs,
3661                                                 &context->res_ctx.pipe_ctx[i].dlg_regs,
3662                                                 &context->res_ctx.pipe_ctx[i].ttu_regs);
3663                         }
3664                 }
3665         }
3666
3667         // Update Type FAST, Surface updates
3668         if (update_type == UPDATE_TYPE_FAST) {
3669                 if (dc->hwss.set_flip_control_gsl)
3670                         for (i = 0; i < surface_count; i++) {
3671                                 struct dc_plane_state *plane_state = srf_updates[i].surface;
3672
3673                                 for (j = 0; j < dc->res_pool->pipe_count; j++) {
3674                                         struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[j];
3675
3676                                         if (!should_update_pipe_for_stream(context, pipe_ctx, stream))
3677                                                 continue;
3678
3679                                         if (!should_update_pipe_for_plane(context, pipe_ctx, plane_state))
3680                                                 continue;
3681
3682                                         // GSL has to be used for flip immediate
3683                                         dc->hwss.set_flip_control_gsl(pipe_ctx,
3684                                                         pipe_ctx->plane_state->flip_immediate);
3685                                 }
3686                         }
3687
3688                 /* Perform requested Updates */
3689                 for (i = 0; i < surface_count; i++) {
3690                         struct dc_plane_state *plane_state = srf_updates[i].surface;
3691
3692                         for (j = 0; j < dc->res_pool->pipe_count; j++) {
3693                                 struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[j];
3694
3695                                 if (!should_update_pipe_for_stream(context, pipe_ctx, stream))
3696                                         continue;
3697
3698                                 if (!should_update_pipe_for_plane(context, pipe_ctx, plane_state))
3699                                         continue;
3700
3701                                 /* program triple buffer after lock, based on flip type */
3702                                 if (dc->hwss.program_triplebuffer != NULL && dc->debug.enable_tri_buf) {
3703                                         /* only enable triplebuffer for fast_update */
3704                                         dc->hwss.program_triplebuffer(
3705                                                 dc, pipe_ctx, pipe_ctx->plane_state->triplebuffer_flips);
3706                                 }
3707                                 if (pipe_ctx->plane_state->update_flags.bits.addr_update)
3708                                         dc->hwss.update_plane_addr(dc, pipe_ctx);
3709                         }
3710                 }
3711         }
3712
3713         if (should_lock_all_pipes && dc->hwss.interdependent_update_lock) {
3714                 dc->hwss.interdependent_update_lock(dc, context, false);
3715         } else {
3716                 dc->hwss.pipe_control_lock(dc, top_pipe_to_program, false);
3717         }
3718
3719         if ((update_type != UPDATE_TYPE_FAST) && stream->update_flags.bits.dsc_changed)
3720                 if (top_pipe_to_program->stream_res.tg->funcs->lock_doublebuffer_enable) {
3721                         top_pipe_to_program->stream_res.tg->funcs->wait_for_state(
3722                                 top_pipe_to_program->stream_res.tg,
3723                                 CRTC_STATE_VACTIVE);
3724                         top_pipe_to_program->stream_res.tg->funcs->wait_for_state(
3725                                 top_pipe_to_program->stream_res.tg,
3726                                 CRTC_STATE_VBLANK);
3727                         top_pipe_to_program->stream_res.tg->funcs->wait_for_state(
3728                                 top_pipe_to_program->stream_res.tg,
3729                                 CRTC_STATE_VACTIVE);
3730
3731                         if (should_use_dmub_lock(stream->link)) {
3732                                 union dmub_hw_lock_flags hw_locks = { 0 };
3733                                 struct dmub_hw_lock_inst_flags inst_flags = { 0 };
3734
3735                                 hw_locks.bits.lock_dig = 1;
3736                                 inst_flags.dig_inst = top_pipe_to_program->stream_res.tg->inst;
3737
3738                                 dmub_hw_lock_mgr_cmd(dc->ctx->dmub_srv,
3739                                                         false,
3740                                                         &hw_locks,
3741                                                         &inst_flags);
3742                         } else
3743                                 top_pipe_to_program->stream_res.tg->funcs->lock_doublebuffer_disable(
3744                                         top_pipe_to_program->stream_res.tg);
3745                 }
3746
3747         if (subvp_curr_use) {
3748                 /* If enabling subvp or transitioning from subvp->subvp, enable the
3749                  * phantom streams before we program front end for the phantom pipes.
3750                  */
3751                 if (update_type != UPDATE_TYPE_FAST) {
3752                         if (dc->hwss.enable_phantom_streams)
3753                                 dc->hwss.enable_phantom_streams(dc, context);
3754                 }
3755         }
3756
3757         if (subvp_prev_use && !subvp_curr_use) {
3758                 /* If disabling subvp, disable phantom streams after front end
3759                  * programming has completed (we turn on phantom OTG in order
3760                  * to complete the plane disable for phantom pipes).
3761                  */
3762                 dc->hwss.apply_ctx_to_hw(dc, context);
3763         }
3764
3765         if (update_type != UPDATE_TYPE_FAST)
3766                 dc->hwss.post_unlock_program_front_end(dc, context);
3767         if (update_type != UPDATE_TYPE_FAST)
3768                 if (dc->hwss.commit_subvp_config)
3769                         dc->hwss.commit_subvp_config(dc, context);
3770
3775         /* Since phantom pipe programming is moved to post_unlock_program_front_end,
3776          * move the SubVP lock to after the phantom pipes have been set up
3777          */
3778         if (should_lock_all_pipes && dc->hwss.interdependent_update_lock) {
3779                 if (dc->hwss.subvp_pipe_control_lock)
3780                         dc->hwss.subvp_pipe_control_lock(dc, context, false, should_lock_all_pipes, NULL, subvp_prev_use);
3781         } else {
3782                 if (dc->hwss.subvp_pipe_control_lock)
3783                         dc->hwss.subvp_pipe_control_lock(dc, context, false, should_lock_all_pipes, top_pipe_to_program, subvp_prev_use);
3784         }
3785
3786         // Fire manual trigger only when bottom plane is flipped
3787         for (j = 0; j < dc->res_pool->pipe_count; j++) {
3788                 struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[j];
3789
3790                 if (!pipe_ctx->plane_state)
3791                         continue;
3792
3793                 if (pipe_ctx->bottom_pipe || pipe_ctx->next_odm_pipe ||
3794                                 !pipe_ctx->stream || !should_update_pipe_for_stream(context, pipe_ctx, stream) ||
3795                                 !pipe_ctx->plane_state->update_flags.bits.addr_update ||
3796                                 pipe_ctx->plane_state->skip_manual_trigger)
3797                         continue;
3798
3799                 if (pipe_ctx->stream_res.tg->funcs->program_manual_trigger)
3800                         pipe_ctx->stream_res.tg->funcs->program_manual_trigger(pipe_ctx->stream_res.tg);
3801         }
3802 }
3803
/**
 * could_mpcc_tree_change_for_active_pipes - Check if the OPP associated with an MPCC might change
 *
 * @dc: Used to get the current state status
 * @stream: Target stream whose attached planes are being updated
 * @surface_count: Number of surface updates
 * @is_plane_addition: [out] Set to true if this is a plane addition case
 *
 * DCN32x and newer support a feature named Dynamic ODM which can conflict
 * with MPO if both are used simultaneously in some specific configurations
 * (e.g., 4k@144). To work around this hardware limitation, this function
 * checks whether the incoming context requires a transition state in which
 * pipe splitting is forced and ODM is disabled.
 *
 * Return:
 * True if the OPP associated with an MPCC might change, false otherwise.
 */
3823 static bool could_mpcc_tree_change_for_active_pipes(struct dc *dc,
3824                 struct dc_stream_state *stream,
3825                 int surface_count,
3826                 bool *is_plane_addition)
3827 {
3829         struct dc_stream_status *cur_stream_status = stream_get_status(dc->current_state, stream);
3830         bool force_minimal_pipe_splitting = false;
3831         bool subvp_active = false;
3832         uint32_t i;
3833
3834         *is_plane_addition = false;
3835
3836         if (cur_stream_status &&
3837                         dc->current_state->stream_count > 0 &&
3838                         dc->debug.pipe_split_policy != MPC_SPLIT_AVOID) {
		/* determine if minimal transition is required due to MPC */
3840                 if (surface_count > 0) {
3841                         if (cur_stream_status->plane_count > surface_count) {
3842                                 force_minimal_pipe_splitting = true;
3843                         } else if (cur_stream_status->plane_count < surface_count) {
3844                                 force_minimal_pipe_splitting = true;
3845                                 *is_plane_addition = true;
3846                         }
3847                 }
3848         }
3849
3850         if (cur_stream_status &&
3851                         dc->current_state->stream_count == 1 &&
3852                         dc->debug.enable_single_display_2to1_odm_policy) {
		/* determine if minimal transition is required due to dynamic ODM */
3854                 if (surface_count > 0) {
3855                         if (cur_stream_status->plane_count > 2 && cur_stream_status->plane_count > surface_count) {
3856                                 force_minimal_pipe_splitting = true;
3857                         } else if (surface_count > 2 && cur_stream_status->plane_count < surface_count) {
3858                                 force_minimal_pipe_splitting = true;
3859                                 *is_plane_addition = true;
3860                         }
3861                 }
3862         }
3863
3864         for (i = 0; i < dc->res_pool->pipe_count; i++) {
3865                 struct pipe_ctx *pipe = &dc->current_state->res_ctx.pipe_ctx[i];
3866
3867                 if (pipe->stream && pipe->stream->mall_stream_config.type != SUBVP_NONE) {
3868                         subvp_active = true;
3869                         break;
3870                 }
3871         }
3872
	/* For SubVP, when adding or removing planes we need to use a minimal
	 * transition (even when disabling all planes). Whenever disabling a
	 * phantom pipe, we must use the minimal transition path to disable
	 * the pipe correctly.
	 *
	 * We want the minimal transition whenever SubVP is active, not only
	 * when a plane is being added to or removed from a SubVP stream (an
	 * MPO plane can be added to the DRR pipe of a SubVP + DRR config, in
	 * which case we still want to run through a minimal transition to
	 * disable SubVP).
	 */
3882         if (cur_stream_status && subvp_active) {
		/* determine if minimal transition is required due to SubVP */
3884                 if (cur_stream_status->plane_count > surface_count) {
3885                         force_minimal_pipe_splitting = true;
3886                 } else if (cur_stream_status->plane_count < surface_count) {
3887                         force_minimal_pipe_splitting = true;
3888                         *is_plane_addition = true;
3889                 }
3890         }
3891
3892         return force_minimal_pipe_splitting;
3893 }
3894
3895 /**
3896  * commit_minimal_transition_state - Create a transition pipe split state
3897  *
3898  * @dc: Used to get the current state status
3899  * @transition_base_context: New transition state
3900  *
3901  * In some specific configurations, such as pipe split on multi-display with
3902  * MPO and/or Dynamic ODM, removing a plane may cause unsupported pipe
3903  * programming when moving to new planes. To mitigate those types of problems,
3904  * this function adds a transition state that minimizes pipe usage before
3905  * programming the new configuration. When adding a new plane, the current
3906  * state requires the least pipes, so it is applied without splitting. When
3907  * removing a plane, the new state requires the least pipes, so it is applied
3908  * without splitting.
3909  *
3910  * Return:
3911  * Return false if something is wrong in the transition state.
3912  */
3913 static bool commit_minimal_transition_state(struct dc *dc,
3914                 struct dc_state *transition_base_context)
3915 {
3916         struct dc_state *transition_context = dc_create_state(dc);
	enum pipe_split_policy tmp_mpc_policy = 0;
3918         bool temp_dynamic_odm_policy;
3919         bool temp_subvp_policy;
3920         enum dc_status ret = DC_ERROR_UNEXPECTED;
3921         unsigned int i, j;
3922         unsigned int pipe_in_use = 0;
3923         bool subvp_in_use = false;
3924
3925         if (!transition_context)
3926                 return false;
3927         /* Setup:
3928          * Store the current ODM and MPC config in some temp variables to be
3929          * restored after we commit the transition state.
3930          */
3931
	/* check current pipes in use */
3933         for (i = 0; i < dc->res_pool->pipe_count; i++) {
3934                 struct pipe_ctx *pipe = &transition_base_context->res_ctx.pipe_ctx[i];
3935
3936                 if (pipe->plane_state)
3937                         pipe_in_use++;
3938         }
3939
3940         /* If SubVP is enabled and we are adding or removing planes from any main subvp
3941          * pipe, we must use the minimal transition.
3942          */
3943         for (i = 0; i < dc->res_pool->pipe_count; i++) {
3944                 struct pipe_ctx *pipe = &dc->current_state->res_ctx.pipe_ctx[i];
3945
3946                 if (pipe->stream && pipe->stream->mall_stream_config.type == SUBVP_PHANTOM) {
3947                         subvp_in_use = true;
3948                         break;
3949                 }
3950         }
3951
	/* When the OS adds a new surface while all pipes are already in use by
	 * ODM combine or MPC split, commit_minimal_transition_state must be
	 * used to transition safely. After the OS exits MPO, ODM and MPC split
	 * go back to using all of the pipes, so we must call it again.
	 * Otherwise, return true to skip.
	 *
	 * This reduces the scenarios in which dc_commit_state_no_check is used
	 * during flips, especially when entering or exiting MPO while DCN
	 * still has enough resources.
	 */
3960         if (pipe_in_use != dc->res_pool->pipe_count && !subvp_in_use) {
3961                 dc_release_state(transition_context);
3962                 return true;
3963         }
3964
3965         if (!dc->config.is_vmin_only_asic) {
3966                 tmp_mpc_policy = dc->debug.pipe_split_policy;
3967                 dc->debug.pipe_split_policy = MPC_SPLIT_AVOID;
3968         }
3969
3970         temp_dynamic_odm_policy = dc->debug.enable_single_display_2to1_odm_policy;
3971         dc->debug.enable_single_display_2to1_odm_policy = false;
3972
3973         temp_subvp_policy = dc->debug.force_disable_subvp;
3974         dc->debug.force_disable_subvp = true;
3975
3976         dc_resource_state_copy_construct(transition_base_context, transition_context);
3977
3978         /* commit minimal state */
3979         if (dc->res_pool->funcs->validate_bandwidth(dc, transition_context, false)) {
3980                 for (i = 0; i < transition_context->stream_count; i++) {
3981                         struct dc_stream_status *stream_status = &transition_context->stream_status[i];
3982
3983                         for (j = 0; j < stream_status->plane_count; j++) {
3984                                 struct dc_plane_state *plane_state = stream_status->plane_states[j];
3985
3986                                 /* force vsync flip when reconfiguring pipes to prevent underflow
3987                                  * and corruption
3988                                  */
3989                                 plane_state->flip_immediate = false;
3990                         }
3991                 }
3992
3993                 ret = dc_commit_state_no_check(dc, transition_context);
3994         }
3995
	/* always release; dc_commit_state_no_check retains the state on success */
3997         dc_release_state(transition_context);
3998
	/* TearDown:
	 * Restore the original ODM, MPC, and SubVP configuration.
	 */
4002         if (!dc->config.is_vmin_only_asic)
4003                 dc->debug.pipe_split_policy = tmp_mpc_policy;
4004
4005         dc->debug.enable_single_display_2to1_odm_policy = temp_dynamic_odm_policy;
4006         dc->debug.force_disable_subvp = temp_subvp_policy;
4007
4008         if (ret != DC_OK) {
4009                 /* this should never happen */
4010                 BREAK_TO_DEBUGGER();
4011                 return false;
4012         }
4013
4014         /* force full surface update */
4015         for (i = 0; i < dc->current_state->stream_count; i++) {
4016                 for (j = 0; j < dc->current_state->stream_status[i].plane_count; j++) {
4017                         dc->current_state->stream_status[i].plane_states[j]->update_flags.raw = 0xFFFFFFFF;
4018                 }
4019         }
4020
4021         return true;
4022 }
4023
4024 bool dc_update_planes_and_stream(struct dc *dc,
4025                 struct dc_surface_update *srf_updates, int surface_count,
4026                 struct dc_stream_state *stream,
4027                 struct dc_stream_update *stream_update)
4028 {
4029         struct dc_state *context;
4030         enum surface_update_type update_type;
4031         int i;
4032         struct mall_temp_config mall_temp_config;
4033
4034         /* In cases where MPO and split or ODM are used transitions can
4035          * cause underflow. Apply stream configuration with minimal pipe
4036          * split first to avoid unsupported transitions for active pipes.
4037          */
4038         bool force_minimal_pipe_splitting;
4039         bool is_plane_addition;
4040
4041         force_minimal_pipe_splitting = could_mpcc_tree_change_for_active_pipes(
4042                         dc,
4043                         stream,
4044                         surface_count,
4045                         &is_plane_addition);
4046
4047         /* on plane addition, minimal state is the current one */
	if (force_minimal_pipe_splitting && is_plane_addition &&
			!commit_minimal_transition_state(dc, dc->current_state))
		return false;
4051
4052         if (!update_planes_and_stream_state(
4053                         dc,
4054                         srf_updates,
4055                         surface_count,
4056                         stream,
4057                         stream_update,
4058                         &update_type,
4059                         &context))
4060                 return false;
4061
4062         /* on plane removal, minimal state is the new one */
4063         if (force_minimal_pipe_splitting && !is_plane_addition) {
4064                 /* Since all phantom pipes are removed in full validation,
4065                  * we have to save and restore the subvp/mall config when
4066                  * we do a minimal transition since the flags marking the
4067                  * pipe as subvp/phantom will be cleared (dc copy constructor
4068                  * creates a shallow copy).
4069                  */
4070                 if (dc->res_pool->funcs->save_mall_state)
4071                         dc->res_pool->funcs->save_mall_state(dc, context, &mall_temp_config);
4072                 if (!commit_minimal_transition_state(dc, context)) {
4073                         dc_release_state(context);
4074                         return false;
4075                 }
4076                 if (dc->res_pool->funcs->restore_mall_state)
4077                         dc->res_pool->funcs->restore_mall_state(dc, context, &mall_temp_config);
4078
4079                 /* If we do a minimal transition with plane removal and the context
4080                  * has subvp we also have to retain back the phantom stream / planes
4081                  * since the refcount is decremented as part of the min transition
4082                  * (we commit a state with no subvp, so the phantom streams / planes
4083                  * had to be removed).
4084                  */
4085                 if (dc->res_pool->funcs->retain_phantom_pipes)
4086                         dc->res_pool->funcs->retain_phantom_pipes(dc, context);
4087                 update_type = UPDATE_TYPE_FULL;
4088         }
4089
4090         commit_planes_for_stream(
4091                         dc,
4092                         srf_updates,
4093                         surface_count,
4094                         stream,
4095                         stream_update,
4096                         update_type,
4097                         context);
4098
4099         if (dc->current_state != context) {
4100
4101                 /* Since memory free requires elevated IRQL, an interrupt
4102                  * request is generated by mem free. If this happens
4103                  * between freeing and reassigning the context, our vsync
4104                  * interrupt will call into dc and cause a memory
4105                  * corruption BSOD. Hence, we first reassign the context,
4106                  * then free the old context.
4107                  */
4108
4109                 struct dc_state *old = dc->current_state;
4110
4111                 dc->current_state = context;
4112                 dc_release_state(old);
4113
4114                 // clear any forced full updates
4115                 for (i = 0; i < dc->res_pool->pipe_count; i++) {
4116                         struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[i];
4117
4118                         if (pipe_ctx->plane_state && pipe_ctx->stream == stream)
4119                                 pipe_ctx->plane_state->force_full_update = false;
4120                 }
4121         }
4122         return true;
4123 }
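
/*
 * Example: a minimal, hypothetical DM-side sketch of driving
 * dc_update_planes_and_stream() for a single-plane update. The caller's
 * locking and the plane/stream objects are assumed to already exist; only
 * the structures and the call itself come from this file.
 *
 *	static bool dm_commit_one_plane(struct dc *dc,
 *					struct dc_stream_state *stream,
 *					struct dc_plane_state *plane)
 *	{
 *		struct dc_surface_update srf_update = {0};
 *		struct dc_stream_update stream_update = {0};
 *
 *		srf_update.surface = plane;
 *		// address/scaling fields of the update are omitted here
 *
 *		return dc_update_planes_and_stream(dc, &srf_update, 1,
 *						   stream, &stream_update);
 *	}
 */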
4124
4125 void dc_commit_updates_for_stream(struct dc *dc,
4126                 struct dc_surface_update *srf_updates,
4127                 int surface_count,
4128                 struct dc_stream_state *stream,
4129                 struct dc_stream_update *stream_update,
4130                 struct dc_state *state)
4131 {
4132         const struct dc_stream_status *stream_status;
4133         enum surface_update_type update_type;
4134         struct dc_state *context;
4135         struct dc_context *dc_ctx = dc->ctx;
4136         int i, j;
4137
4138         stream_status = dc_stream_get_status(stream);
4139         context = dc->current_state;
4140
4141         update_type = dc_check_update_surfaces_for_stream(
4142                                 dc, srf_updates, surface_count, stream_update, stream_status);
4143
4144         /* TODO: Since change commit sequence can have a huge impact,
4145          * we decided to only enable it for DCN3x. However, as soon as
4146          * we get more confident about this change we'll need to enable
4147          * the new sequence for all ASICs.
4148          */
4149         if (dc->ctx->dce_version >= DCN_VERSION_3_2) {
4150                 /*
4151                  * Previous frame finished and HW is ready for optimization.
4152                  */
4153                 if (update_type == UPDATE_TYPE_FAST)
4154                         dc_post_update_surfaces_to_stream(dc);
4155
4156                 dc_update_planes_and_stream(dc, srf_updates,
4157                                             surface_count, stream,
4158                                             stream_update);
4159                 return;
4160         }
4161
4162         if (update_type >= update_surface_trace_level)
4163                 update_surface_trace(dc, srf_updates, surface_count);
4164
4167
4168                 /* initialize scratch memory for building context */
4169                 context = dc_create_state(dc);
4170                 if (context == NULL) {
4171                         DC_ERROR("Failed to allocate new validate context!\n");
4172                         return;
4173                 }
4174
4175                 dc_resource_state_copy_construct(state, context);
4176
4177                 for (i = 0; i < dc->res_pool->pipe_count; i++) {
4178                         struct pipe_ctx *new_pipe = &context->res_ctx.pipe_ctx[i];
4179                         struct pipe_ctx *old_pipe = &dc->current_state->res_ctx.pipe_ctx[i];
4180
4181                         if (new_pipe->plane_state && new_pipe->plane_state != old_pipe->plane_state)
4182                                 new_pipe->plane_state->force_full_update = true;
4183                 }
4184         } else if (update_type == UPDATE_TYPE_FAST) {
4185                 /*
4186                  * Previous frame finished and HW is ready for optimization.
4187                  */
4188                 dc_post_update_surfaces_to_stream(dc);
4189         }
4192         for (i = 0; i < surface_count; i++) {
4193                 struct dc_plane_state *surface = srf_updates[i].surface;
4194
4195                 copy_surface_update_to_plane(surface, &srf_updates[i]);
4196
4197                 if (update_type >= UPDATE_TYPE_MED) {
4198                         for (j = 0; j < dc->res_pool->pipe_count; j++) {
4199                                 struct pipe_ctx *pipe_ctx =
4200                                         &context->res_ctx.pipe_ctx[j];
4201
4202                                 if (pipe_ctx->plane_state != surface)
4203                                         continue;
4204
4205                                 resource_build_scaling_params(pipe_ctx);
4206                         }
4207                 }
4208         }
4209
4210         copy_stream_update_to_stream(dc, context, stream, stream_update);
4211
4212         if (update_type >= UPDATE_TYPE_FULL) {
4213                 if (!dc->res_pool->funcs->validate_bandwidth(dc, context, false)) {
4214                         DC_ERROR("Mode validation failed for stream update!\n");
4215                         dc_release_state(context);
4216                         return;
4217                 }
4218         }
4219
4220         TRACE_DC_PIPE_STATE(pipe_ctx, i, MAX_PIPES);
4221
4222         commit_planes_for_stream(
4223                                 dc,
4224                                 srf_updates,
4225                                 surface_count,
4226                                 stream,
4227                                 stream_update,
4228                                 update_type,
4229                                 context);
	/* update current_state */
4231         if (dc->current_state != context) {
4232
4233                 struct dc_state *old = dc->current_state;
4234
4235                 dc->current_state = context;
4236                 dc_release_state(old);
4237
4238                 for (i = 0; i < dc->res_pool->pipe_count; i++) {
4239                         struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[i];
4240
4241                         if (pipe_ctx->plane_state && pipe_ctx->stream == stream)
4242                                 pipe_ctx->plane_state->force_full_update = false;
4243                 }
4244         }
4245
4246         /* Legacy optimization path for DCE. */
4247         if (update_type >= UPDATE_TYPE_FULL && dc_ctx->dce_version < DCE_VERSION_MAX) {
4248                 dc_post_update_surfaces_to_stream(dc);
4249                 TRACE_DCE_CLOCK_STATE(&context->bw_ctx.bw.dce);
4250         }
4254 }
4255
4256 uint8_t dc_get_current_stream_count(struct dc *dc)
4257 {
4258         return dc->current_state->stream_count;
4259 }
4260
4261 struct dc_stream_state *dc_get_stream_at_index(struct dc *dc, uint8_t i)
4262 {
4263         if (i < dc->current_state->stream_count)
4264                 return dc->current_state->streams[i];
4265         return NULL;
4266 }
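
/*
 * Example: an illustrative (non-driver) walk over the active streams using
 * the two accessors above; do_something() is a hypothetical consumer.
 *
 *	uint8_t i, count = dc_get_current_stream_count(dc);
 *
 *	for (i = 0; i < count; i++) {
 *		struct dc_stream_state *stream = dc_get_stream_at_index(dc, i);
 *
 *		if (stream)
 *			do_something(stream);
 *	}
 */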
4267
4268 enum dc_irq_source dc_interrupt_to_irq_source(
4269                 struct dc *dc,
4270                 uint32_t src_id,
4271                 uint32_t ext_id)
4272 {
4273         return dal_irq_service_to_irq_source(dc->res_pool->irqs, src_id, ext_id);
4274 }
4275
4276 /*
4277  * dc_interrupt_set() - Enable/disable an AMD hw interrupt source
4278  */
4279 bool dc_interrupt_set(struct dc *dc, enum dc_irq_source src, bool enable)
4280 {
4282         if (dc == NULL)
4283                 return false;
4284
4285         return dal_irq_service_set(dc->res_pool->irqs, src, enable);
4286 }
4287
4288 void dc_interrupt_ack(struct dc *dc, enum dc_irq_source src)
4289 {
4290         dal_irq_service_ack(dc->res_pool->irqs, src);
4291 }
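
/*
 * Example: a hedged sketch of the usual DM interrupt flow built from the
 * three helpers above; src_id and ext_id come from the DM's IRQ
 * registration and are assumptions here.
 *
 *	enum dc_irq_source src = dc_interrupt_to_irq_source(dc, src_id, ext_id);
 *
 *	if (!dc_interrupt_set(dc, src, true))
 *		return;			// enabling the source failed
 *
 *	// ... later, from the interrupt handler ...
 *	dc_interrupt_ack(dc, src);
 */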
4292
4293 void dc_power_down_on_boot(struct dc *dc)
4294 {
4295         if (dc->ctx->dce_environment != DCE_ENV_VIRTUAL_HW &&
4296                         dc->hwss.power_down_on_boot)
4297                 dc->hwss.power_down_on_boot(dc);
4298 }
4299
4300 void dc_set_power_state(
4301         struct dc *dc,
4302         enum dc_acpi_cm_power_state power_state)
4303 {
4304         struct kref refcount;
4305         struct display_mode_lib *dml;
4306
4307         if (!dc->current_state)
4308                 return;
4309
4310         switch (power_state) {
4311         case DC_ACPI_CM_POWER_STATE_D0:
4312                 dc_resource_state_construct(dc, dc->current_state);
4313
4314                 dc_z10_restore(dc);
4315
4316                 if (dc->ctx->dmub_srv)
4317                         dc_dmub_srv_wait_phy_init(dc->ctx->dmub_srv);
4318
4319                 dc->hwss.init_hw(dc);
4320
4321                 if (dc->hwss.init_sys_ctx != NULL &&
4322                         dc->vm_pa_config.valid) {
4323                         dc->hwss.init_sys_ctx(dc->hwseq, dc, &dc->vm_pa_config);
4324                 }
4325
4326                 break;
4327         default:
4328                 ASSERT(dc->current_state->stream_count == 0);
4329                 /* Zero out the current context so that on resume we start with
4330                  * clean state, and dc hw programming optimizations will not
4331                  * cause any trouble.
4332                  */
4333                 dml = kzalloc(sizeof(struct display_mode_lib),
4334                                 GFP_KERNEL);
4335
4336                 ASSERT(dml);
4337                 if (!dml)
4338                         return;
4339
4340                 /* Preserve refcount */
4341                 refcount = dc->current_state->refcount;
4342                 /* Preserve display mode lib */
4343                 memcpy(dml, &dc->current_state->bw_ctx.dml, sizeof(struct display_mode_lib));
4344
4345                 dc_resource_state_destruct(dc->current_state);
4346                 memset(dc->current_state, 0,
4347                                 sizeof(*dc->current_state));
4348
4349                 dc->current_state->refcount = refcount;
4350                 dc->current_state->bw_ctx.dml = *dml;
4351
4352                 kfree(dml);
4353
4354                 break;
4355         }
4356 }
4357
4358 void dc_resume(struct dc *dc)
4359 {
4360         uint32_t i;
4361
4362         for (i = 0; i < dc->link_count; i++)
4363                 link_resume(dc->links[i]);
4364 }
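
/*
 * Example: an illustrative suspend/resume pairing. The D3 enumerator name
 * is an assumption inferred from the D0 case handled above; any non-D0
 * state takes the "zero out current state" branch.
 *
 *	// suspend path
 *	dc_set_power_state(dc, DC_ACPI_CM_POWER_STATE_D3);
 *
 *	// resume path: re-init HW, then bring links back up
 *	dc_set_power_state(dc, DC_ACPI_CM_POWER_STATE_D0);
 *	dc_resume(dc);
 */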
4365
4366 bool dc_is_dmcu_initialized(struct dc *dc)
4367 {
4368         struct dmcu *dmcu = dc->res_pool->dmcu;
4369
4370         if (dmcu)
4371                 return dmcu->funcs->is_dmcu_initialized(dmcu);
4372         return false;
4373 }
4374
4375 void get_clock_requirements_for_state(struct dc_state *state, struct AsicStateEx *info)
4376 {
4377         info->displayClock                              = (unsigned int)state->bw_ctx.bw.dcn.clk.dispclk_khz;
4378         info->engineClock                               = (unsigned int)state->bw_ctx.bw.dcn.clk.dcfclk_khz;
4379         info->memoryClock                               = (unsigned int)state->bw_ctx.bw.dcn.clk.dramclk_khz;
4380         info->maxSupportedDppClock              = (unsigned int)state->bw_ctx.bw.dcn.clk.max_supported_dppclk_khz;
4381         info->dppClock                                  = (unsigned int)state->bw_ctx.bw.dcn.clk.dppclk_khz;
4382         info->socClock                                  = (unsigned int)state->bw_ctx.bw.dcn.clk.socclk_khz;
4383         info->dcfClockDeepSleep                 = (unsigned int)state->bw_ctx.bw.dcn.clk.dcfclk_deep_sleep_khz;
4384         info->fClock                                    = (unsigned int)state->bw_ctx.bw.dcn.clk.fclk_khz;
4385         info->phyClock                                  = (unsigned int)state->bw_ctx.bw.dcn.clk.phyclk_khz;
}

4387 enum dc_status dc_set_clock(struct dc *dc, enum dc_clock_type clock_type, uint32_t clk_khz, uint32_t stepping)
4388 {
4389         if (dc->hwss.set_clock)
4390                 return dc->hwss.set_clock(dc, clock_type, clk_khz, stepping);
4391         return DC_ERROR_UNEXPECTED;
}

4393 void dc_get_clock(struct dc *dc, enum dc_clock_type clock_type, struct dc_clock_config *clock_cfg)
4394 {
4395         if (dc->hwss.get_clock)
4396                 dc->hwss.get_clock(dc, clock_type, clock_cfg);
4397 }
4398
4399 /* enable/disable eDP PSR without specify stream for eDP */
4400 bool dc_set_psr_allow_active(struct dc *dc, bool enable)
4401 {
4402         int i;
4403         bool allow_active;
4404
4405         for (i = 0; i < dc->current_state->stream_count ; i++) {
4406                 struct dc_link *link;
4407                 struct dc_stream_state *stream = dc->current_state->streams[i];
4408
4409                 link = stream->link;
4410                 if (!link)
4411                         continue;
4412
4413                 if (link->psr_settings.psr_feature_enabled) {
4414                         if (enable && !link->psr_settings.psr_allow_active) {
4415                                 allow_active = true;
4416                                 if (!dc_link_set_psr_allow_active(link, &allow_active, false, false, NULL))
4417                                         return false;
4418                         } else if (!enable && link->psr_settings.psr_allow_active) {
4419                                 allow_active = false;
4420                                 if (!dc_link_set_psr_allow_active(link, &allow_active, true, false, NULL))
4421                                         return false;
4422                         }
4423                 }
4424         }
4425
4426         return true;
4427 }
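
/*
 * Example: because the helper above walks every stream itself, a DM needs
 * only one call per transition; display_is_idle and handle_psr_failure()
 * are hypothetical DM-side names.
 *
 *	if (!dc_set_psr_allow_active(dc, display_is_idle))
 *		handle_psr_failure();	// a link rejected the state change
 */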
4428
4429 void dc_allow_idle_optimizations(struct dc *dc, bool allow)
4430 {
4431         if (dc->debug.disable_idle_power_optimizations)
4432                 return;
4433
4434         if (dc->clk_mgr != NULL && dc->clk_mgr->funcs->is_smu_present)
4435                 if (!dc->clk_mgr->funcs->is_smu_present(dc->clk_mgr))
4436                         return;
4437
4438         if (allow == dc->idle_optimizations_allowed)
4439                 return;
4440
4441         if (dc->hwss.apply_idle_power_optimizations && dc->hwss.apply_idle_power_optimizations(dc, allow))
4442                 dc->idle_optimizations_allowed = allow;
4443 }
4444
4445 /* set min and max memory clock to lowest and highest DPM level, respectively */
4446 void dc_unlock_memory_clock_frequency(struct dc *dc)
4447 {
4448         if (dc->clk_mgr->funcs->set_hard_min_memclk)
4449                 dc->clk_mgr->funcs->set_hard_min_memclk(dc->clk_mgr, false);
4450
4451         if (dc->clk_mgr->funcs->set_hard_max_memclk)
4452                 dc->clk_mgr->funcs->set_hard_max_memclk(dc->clk_mgr);
4453 }
4454
4455 /* set min memory clock to the min required for current mode, max to maxDPM */
4456 void dc_lock_memory_clock_frequency(struct dc *dc)
4457 {
4458         if (dc->clk_mgr->funcs->get_memclk_states_from_smu)
4459                 dc->clk_mgr->funcs->get_memclk_states_from_smu(dc->clk_mgr);
4460
4461         if (dc->clk_mgr->funcs->set_hard_min_memclk)
4462                 dc->clk_mgr->funcs->set_hard_min_memclk(dc->clk_mgr, true);
4463
4464         if (dc->clk_mgr->funcs->set_hard_max_memclk)
4465                 dc->clk_mgr->funcs->set_hard_max_memclk(dc->clk_mgr);
4466 }
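
/*
 * Example: a hedged sketch of bracketing a memclk-switch-sensitive window
 * with the pair above; when a DM actually does this is platform policy and
 * an assumption here.
 *
 *	dc_lock_memory_clock_frequency(dc);	// pin min memclk for the mode
 *	// ... work that must not see a memory clock switch ...
 *	dc_unlock_memory_clock_frequency(dc);	// restore the full DPM range
 */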
4467
4468 static void blank_and_force_memclk(struct dc *dc, bool apply, unsigned int memclk_mhz)
4469 {
4470         struct dc_state *context = dc->current_state;
4471         struct hubp *hubp;
4472         struct pipe_ctx *pipe;
4473         int i;
4474
4475         for (i = 0; i < dc->res_pool->pipe_count; i++) {
4476                 pipe = &context->res_ctx.pipe_ctx[i];
4477
4478                 if (pipe->stream != NULL) {
4479                         dc->hwss.disable_pixel_data(dc, pipe, true);
4480
4481                         // wait for double buffer
4482                         pipe->stream_res.tg->funcs->wait_for_state(pipe->stream_res.tg, CRTC_STATE_VACTIVE);
4483                         pipe->stream_res.tg->funcs->wait_for_state(pipe->stream_res.tg, CRTC_STATE_VBLANK);
4484                         pipe->stream_res.tg->funcs->wait_for_state(pipe->stream_res.tg, CRTC_STATE_VACTIVE);
4485
4486                         hubp = pipe->plane_res.hubp;
4487                         hubp->funcs->set_blank_regs(hubp, true);
4488                 }
4489         }
4490
4491         dc->clk_mgr->funcs->set_max_memclk(dc->clk_mgr, memclk_mhz);
4492         dc->clk_mgr->funcs->set_min_memclk(dc->clk_mgr, memclk_mhz);
4493
4494         for (i = 0; i < dc->res_pool->pipe_count; i++) {
4495                 pipe = &context->res_ctx.pipe_ctx[i];
4496
4497                 if (pipe->stream != NULL) {
4498                         dc->hwss.disable_pixel_data(dc, pipe, false);
4499
4500                         hubp = pipe->plane_res.hubp;
4501                         hubp->funcs->set_blank_regs(hubp, false);
4502                 }
4503         }
4504 }
4505
4506
/**
 * dc_enable_dcmode_clk_limit() - lower clocks in DC (battery) mode
 * @dc: pointer to the dc of the DM calling this
 * @enable: true = transition to DC mode, false = transition back to AC mode
 *
 * Some SoCs define additional clock limits when in DC mode. The DM should
 * invoke this function when the platform undergoes a power source transition
 * so DC can apply or remove the limit. This interface may be disruptive to
 * the onscreen content.
 *
 * Context: Triggered by OS through DM interface, or manually by escape calls.
 * Need to hold the dc lock when doing so.
 *
 * Return: none (void function)
 */
4523 void dc_enable_dcmode_clk_limit(struct dc *dc, bool enable)
4524 {
4525         uint32_t hw_internal_rev = dc->ctx->asic_id.hw_internal_rev;
4526         unsigned int softMax, maxDPM, funcMin;
4527         bool p_state_change_support;
4528
4529         if (!ASICREV_IS_BEIGE_GOBY_P(hw_internal_rev))
4530                 return;
4531
4532         softMax = dc->clk_mgr->bw_params->dc_mode_softmax_memclk;
4533         maxDPM = dc->clk_mgr->bw_params->clk_table.entries[dc->clk_mgr->bw_params->clk_table.num_entries - 1].memclk_mhz;
4534         funcMin = (dc->clk_mgr->clks.dramclk_khz + 999) / 1000;
4535         p_state_change_support = dc->clk_mgr->clks.p_state_change_support;
4536
4537         if (enable && !dc->clk_mgr->dc_mode_softmax_enabled) {
4538                 if (p_state_change_support) {
4539                         if (funcMin <= softMax)
4540                                 dc->clk_mgr->funcs->set_max_memclk(dc->clk_mgr, softMax);
4541                         // else: No-Op
4542                 } else {
4543                         if (funcMin <= softMax)
4544                                 blank_and_force_memclk(dc, true, softMax);
4545                         // else: No-Op
4546                 }
4547         } else if (!enable && dc->clk_mgr->dc_mode_softmax_enabled) {
4548                 if (p_state_change_support) {
4549                         if (funcMin <= softMax)
4550                                 dc->clk_mgr->funcs->set_max_memclk(dc->clk_mgr, maxDPM);
4551                         // else: No-Op
4552                 } else {
4553                         if (funcMin <= softMax)
4554                                 blank_and_force_memclk(dc, true, maxDPM);
4555                         // else: No-Op
4556                 }
4557         }
4558         dc->clk_mgr->dc_mode_softmax_enabled = enable;
4559 }
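
/*
 * Example: an illustrative power-source notifier; dm_power_source_changed()
 * and on_battery are hypothetical DM-side names.
 *
 *	static void dm_power_source_changed(struct dc *dc, bool on_battery)
 *	{
 *		// apply the DC-mode softmax on battery, lift it on AC
 *		dc_enable_dcmode_clk_limit(dc, on_battery);
 *	}
 */
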
4560 bool dc_is_plane_eligible_for_idle_optimizations(struct dc *dc, struct dc_plane_state *plane,
4561                 struct dc_cursor_attributes *cursor_attr)
4562 {
4563         if (dc->hwss.does_plane_fit_in_mall && dc->hwss.does_plane_fit_in_mall(dc, plane, cursor_attr))
4564                 return true;
4565         return false;
4566 }
4567
4568 /* cleanup on driver unload */
4569 void dc_hardware_release(struct dc *dc)
4570 {
4571         dc_mclk_switch_using_fw_based_vblank_stretch_shut_down(dc);
4572
4573         if (dc->hwss.hardware_release)
4574                 dc->hwss.hardware_release(dc);
4575 }
4576
4577 void dc_mclk_switch_using_fw_based_vblank_stretch_shut_down(struct dc *dc)
4578 {
4579         if (dc->current_state)
4580                 dc->current_state->bw_ctx.bw.dcn.clk.fw_based_mclk_switching_shut_down = true;
4581 }
4582
/**
 * dc_is_dmub_outbox_supported - Check if DMUB firmware supports outbox notifications
 *
 * @dc: [in] dc structure
 *
 * Checks whether the DMUB FW supports outbox notifications. If supported,
 * the DM should register the outbox interrupt prior to actually enabling
 * interrupts via dc_enable_dmub_outbox.
 *
 * Return:
 * True if DMUB FW supports outbox notifications, false otherwise
 */
4595 bool dc_is_dmub_outbox_supported(struct dc *dc)
4596 {
4597         /* DCN31 B0 USB4 DPIA needs dmub notifications for interrupts */
4598         if (dc->ctx->asic_id.chip_family == FAMILY_YELLOW_CARP &&
4599             dc->ctx->asic_id.hw_internal_rev == YELLOW_CARP_B0 &&
4600             !dc->debug.dpia_debug.bits.disable_dpia)
4601                 return true;
4602
4603         if (dc->ctx->asic_id.chip_family == AMDGPU_FAMILY_GC_11_0_1 &&
4604             !dc->debug.dpia_debug.bits.disable_dpia)
4605                 return true;
4606
4607         /* dmub aux needs dmub notifications to be enabled */
4608         return dc->debug.enable_dmub_aux_for_legacy_ddc;
4609 }
4610
4611 /**
4612  * dc_enable_dmub_notifications - Check if dmub fw supports outbox
4613  *
4614  * @dc: [in] dc structure
4615  *
4616  * Calls dc_is_dmub_outbox_supported to check if dmub fw supports outbox
4617  * notifications. All DMs shall switch to dc_is_dmub_outbox_supported.  This
4618  * API shall be removed after switching.
4619  *
4620  * Return:
4621  * True if DMUB FW supports outbox notifications, False otherwise
4622  */
4623 bool dc_enable_dmub_notifications(struct dc *dc)
4624 {
4625         return dc_is_dmub_outbox_supported(dc);
4626 }
4627
4628 /**
4629  * dc_enable_dmub_outbox - Enables DMUB unsolicited notification
4630  *
4631  * @dc: [in] dc structure
4632  *
4633  * Enables DMUB unsolicited notifications to x86 via outbox.
4634  */
4635 void dc_enable_dmub_outbox(struct dc *dc)
4636 {
4637         struct dc_context *dc_ctx = dc->ctx;
4638
4639         dmub_enable_outbox_notification(dc_ctx->dmub_srv);
4640         DC_LOG_DC("%s: dmub outbox notifications enabled\n", __func__);
4641 }
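
/*
 * Example: the bring-up order implied by the kernel-doc above is support
 * check -> DM registers its outbox interrupt -> enable;
 * dm_register_outbox_irq() is a hypothetical DM hook.
 *
 *	if (dc_is_dmub_outbox_supported(dc)) {
 *		dm_register_outbox_irq();	// DM-side registration first
 *		dc_enable_dmub_outbox(dc);	// then let DMUB raise them
 *	}
 */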
4642
/**
 * dc_process_dmub_aux_transfer_async - Submits aux command to dmub via inbox message
 *
 * @dc: dc structure
 * @link_index: link index
 * @payload: aux payload
 *
 * Submits the AUX command to DMUB via an inbox message and sets the port
 * index appropriately for legacy DDC.
 *
 * Return:
 * True if successful, false otherwise
 */
4652 bool dc_process_dmub_aux_transfer_async(struct dc *dc,
4653                                 uint32_t link_index,
4654                                 struct aux_payload *payload)
4655 {
4656         uint8_t action;
4657         union dmub_rb_cmd cmd = {0};
4658         struct dc_dmub_srv *dmub_srv = dc->ctx->dmub_srv;
4659
4660         ASSERT(payload->length <= 16);
4661
4662         cmd.dp_aux_access.header.type = DMUB_CMD__DP_AUX_ACCESS;
4663         cmd.dp_aux_access.header.payload_bytes = 0;
4664         /* For dpia, ddc_pin is set to NULL */
4665         if (!dc->links[link_index]->ddc->ddc_pin)
4666                 cmd.dp_aux_access.aux_control.type = AUX_CHANNEL_DPIA;
4667         else
4668                 cmd.dp_aux_access.aux_control.type = AUX_CHANNEL_LEGACY_DDC;
4669
4670         cmd.dp_aux_access.aux_control.instance = dc->links[link_index]->ddc_hw_inst;
4671         cmd.dp_aux_access.aux_control.sw_crc_enabled = 0;
4672         cmd.dp_aux_access.aux_control.timeout = 0;
4673         cmd.dp_aux_access.aux_control.dpaux.address = payload->address;
4674         cmd.dp_aux_access.aux_control.dpaux.is_i2c_over_aux = payload->i2c_over_aux;
4675         cmd.dp_aux_access.aux_control.dpaux.length = payload->length;
4676
4677         /* set aux action */
4678         if (payload->i2c_over_aux) {
4679                 if (payload->write) {
4680                         if (payload->mot)
4681                                 action = DP_AUX_REQ_ACTION_I2C_WRITE_MOT;
4682                         else
4683                                 action = DP_AUX_REQ_ACTION_I2C_WRITE;
4684                 } else {
4685                         if (payload->mot)
4686                                 action = DP_AUX_REQ_ACTION_I2C_READ_MOT;
4687                         else
4688                                 action = DP_AUX_REQ_ACTION_I2C_READ;
		}
4690         } else {
4691                 if (payload->write)
4692                         action = DP_AUX_REQ_ACTION_DPCD_WRITE;
4693                 else
4694                         action = DP_AUX_REQ_ACTION_DPCD_READ;
4695         }
4696
4697         cmd.dp_aux_access.aux_control.dpaux.action = action;
4698
4699         if (payload->length && payload->write) {
4700                 memcpy(cmd.dp_aux_access.aux_control.dpaux.data,
4701                         payload->data,
4702                         payload->length
4703                         );
4704         }
4705
4706         dc_dmub_srv_cmd_queue(dmub_srv, &cmd);
4707         dc_dmub_srv_cmd_execute(dmub_srv);
4708         dc_dmub_srv_wait_idle(dmub_srv);
4709
4710         return true;
4711 }
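
/*
 * Example: an illustrative payload for a 1-byte native-AUX DPCD read at
 * address 0x0 (DPCD_REV), using only the fields this function consumes;
 * the reply arrives asynchronously via an outbox notification.
 *
 *	struct aux_payload payload = {0};
 *
 *	payload.address = 0x0;		// DPCD_REV
 *	payload.length = 1;
 *	payload.write = false;		// read
 *	payload.i2c_over_aux = false;	// native AUX -> DPCD_READ action
 *
 *	if (!dc_process_dmub_aux_transfer_async(dc, link_index, &payload))
 *		handle_submit_failure();	// hypothetical error path
 */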
4712
4713 uint8_t get_link_index_from_dpia_port_index(const struct dc *dc,
4714                                             uint8_t dpia_port_index)
4715 {
4716         uint8_t index, link_index = 0xFF;
4717
4718         for (index = 0; index < dc->link_count; index++) {
4719                 /* ddc_hw_inst has dpia port index for dpia links
4720                  * and ddc instance for legacy links
4721                  */
4722                 if (!dc->links[index]->ddc->ddc_pin) {
4723                         if (dc->links[index]->ddc_hw_inst == dpia_port_index) {
4724                                 link_index = index;
4725                                 break;
4726                         }
4727                 }
4728         }
4729         ASSERT(link_index != 0xFF);
4730         return link_index;
4731 }
4732
4733 /**
4734  * dc_process_dmub_set_config_async - Submits set_config command
4735  *
4736  * @dc: [in] dc structure
 * @link_index: [in] link index
4738  * @payload: [in] aux payload
4739  * @notify: [out] set_config immediate reply
4740  *
4741  * Submits set_config command to dmub via inbox message.
4742  *
4743  * Return:
4744  * True if successful, False if failure
4745  */
4746 bool dc_process_dmub_set_config_async(struct dc *dc,
4747                                 uint32_t link_index,
4748                                 struct set_config_cmd_payload *payload,
4749                                 struct dmub_notification *notify)
4750 {
4751         union dmub_rb_cmd cmd = {0};
4752         struct dc_dmub_srv *dmub_srv = dc->ctx->dmub_srv;
4753         bool is_cmd_complete = true;
4754
4755         /* prepare SET_CONFIG command */
4756         cmd.set_config_access.header.type = DMUB_CMD__DPIA;
4757         cmd.set_config_access.header.sub_type = DMUB_CMD__DPIA_SET_CONFIG_ACCESS;
4758
4759         cmd.set_config_access.set_config_control.instance = dc->links[link_index]->ddc_hw_inst;
4760         cmd.set_config_access.set_config_control.cmd_pkt.msg_type = payload->msg_type;
4761         cmd.set_config_access.set_config_control.cmd_pkt.msg_data = payload->msg_data;
4762
4763         if (!dc_dmub_srv_cmd_with_reply_data(dmub_srv, &cmd)) {
4764                 /* command is not processed by dmub */
4765                 notify->sc_status = SET_CONFIG_UNKNOWN_ERROR;
4766                 return is_cmd_complete;
4767         }
4768
4769         /* command processed by dmub, if ret_status is 1, it is completed instantly */
4770         if (cmd.set_config_access.header.ret_status == 1)
4771                 notify->sc_status = cmd.set_config_access.set_config_control.immed_status;
4772         else
4773                 /* cmd pending, will receive notification via outbox */
4774                 is_cmd_complete = false;
4775
4776         return is_cmd_complete;
4777 }
4778
4779 /**
 * dc_process_dmub_set_mst_slots - Submits MST slot allocation
4781  *
4782  * @dc: [in] dc structure
4783  * @link_index: [in] link index
4784  * @mst_alloc_slots: [in] mst slots to be allotted
4785  * @mst_slots_in_use: [out] mst slots in use returned in failure case
4786  *
4787  * Submits mst slot allocation command to dmub via inbox message
4788  *
4789  * Return:
 * DC_OK if successful, a DC error code otherwise
4791  */
4792 enum dc_status dc_process_dmub_set_mst_slots(const struct dc *dc,
4793                                 uint32_t link_index,
4794                                 uint8_t mst_alloc_slots,
4795                                 uint8_t *mst_slots_in_use)
4796 {
4797         union dmub_rb_cmd cmd = {0};
4798         struct dc_dmub_srv *dmub_srv = dc->ctx->dmub_srv;
4799
4800         /* prepare MST_ALLOC_SLOTS command */
4801         cmd.set_mst_alloc_slots.header.type = DMUB_CMD__DPIA;
4802         cmd.set_mst_alloc_slots.header.sub_type = DMUB_CMD__DPIA_MST_ALLOC_SLOTS;
4803
4804         cmd.set_mst_alloc_slots.mst_slots_control.instance = dc->links[link_index]->ddc_hw_inst;
4805         cmd.set_mst_alloc_slots.mst_slots_control.mst_alloc_slots = mst_alloc_slots;
4806
4807         if (!dc_dmub_srv_cmd_with_reply_data(dmub_srv, &cmd))
4808                 /* command is not processed by dmub */
4809                 return DC_ERROR_UNEXPECTED;
4810
	/* command reached dmub, but a ret_status other than 1 is a processing error */
	if (cmd.set_mst_alloc_slots.header.ret_status != 1)
		return DC_ERROR_UNEXPECTED;
4815
4816         /* command processed and we have a status of 2, mst not enabled in dpia */
4817         if (cmd.set_mst_alloc_slots.mst_slots_control.immed_status == 2)
4818                 return DC_FAIL_UNSUPPORTED_1;
4819
4820         /* previously configured mst alloc and used slots did not match */
4821         if (cmd.set_mst_alloc_slots.mst_slots_control.immed_status == 3) {
4822                 *mst_slots_in_use = cmd.set_mst_alloc_slots.mst_slots_control.mst_slots_in_use;
4823                 return DC_NOT_SUPPORTED;
4824         }
4825
4826         return DC_OK;
4827 }
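
/*
 * Example: a sketch of consuming the status codes above; the responses to
 * each case are assumptions, only the codes themselves come from this file.
 *
 *	uint8_t slots_in_use = 0;
 *
 *	switch (dc_process_dmub_set_mst_slots(dc, link_index, new_slots,
 *					      &slots_in_use)) {
 *	case DC_OK:
 *		break;			// allocation accepted
 *	case DC_FAIL_UNSUPPORTED_1:
 *		break;			// MST not enabled in this dpia
 *	case DC_NOT_SUPPORTED:
 *		break;			// mismatch: slots_in_use is current use
 *	default:
 *		break;			// dmub did not process the command
 *	}
 */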
4828
/**
 * dc_process_dmub_dpia_hpd_int_enable - Submits DPIA HPD interrupt enable
 *
 * @dc: [in] dc structure
 * @hpd_int_enable: [in] 1 to enable hpd interrupts, 0 to disable
 *
 * Submits a dpia hpd int enable command to dmub via inbox message
 */
4837 void dc_process_dmub_dpia_hpd_int_enable(const struct dc *dc,
4838                                 uint32_t hpd_int_enable)
4839 {
4840         union dmub_rb_cmd cmd = {0};
4841         struct dc_dmub_srv *dmub_srv = dc->ctx->dmub_srv;
4842
4843         cmd.dpia_hpd_int_enable.header.type = DMUB_CMD__DPIA_HPD_INT_ENABLE;
4844         cmd.dpia_hpd_int_enable.enable = hpd_int_enable;
4845
4846         dc_dmub_srv_cmd_queue(dmub_srv, &cmd);
4847         dc_dmub_srv_cmd_execute(dmub_srv);
4848         dc_dmub_srv_wait_idle(dmub_srv);
4849
4850         DC_LOG_DEBUG("%s: hpd_int_enable(%d)\n", __func__, hpd_int_enable);
4851 }
4852
4853 /**
4854  * dc_disable_accelerated_mode - disable accelerated mode
4855  * @dc: dc structure
4856  */
4857 void dc_disable_accelerated_mode(struct dc *dc)
4858 {
4859         bios_set_scratch_acc_mode_change(dc->ctx->dc_bios, 0);
4860 }
4861
4862
4863 /**
4864  *  dc_notify_vsync_int_state - notifies vsync enable/disable state
4865  *  @dc: dc structure
4866  *  @stream: stream where vsync int state changed
4867  *  @enable: whether vsync is enabled or disabled
4868  *
 *  Called when vsync is enabled or disabled. Will notify DMUB to start or
 *  stop ABM interrupts after steady state is reached.
4871  */
4872 void dc_notify_vsync_int_state(struct dc *dc, struct dc_stream_state *stream, bool enable)
4873 {
4874         int i;
4875         int edp_num;
4876         struct pipe_ctx *pipe = NULL;
4877         struct dc_link *link = stream->sink->link;
4878         struct dc_link *edp_links[MAX_NUM_EDP];
4881         if (link->psr_settings.psr_feature_enabled)
4882                 return;
4883
4884         /*find primary pipe associated with stream*/
4885         for (i = 0; i < MAX_PIPES; i++) {
4886                 pipe = &dc->current_state->res_ctx.pipe_ctx[i];
4887
4888                 if (pipe->stream == stream && pipe->stream_res.tg)
4889                         break;
4890         }
4891
4892         if (i == MAX_PIPES) {
4893                 ASSERT(0);
4894                 return;
4895         }
4896
4897         dc_get_edp_links(dc, edp_links, &edp_num);
4898
4899         /* Determine panel inst */
4900         for (i = 0; i < edp_num; i++) {
4901                 if (edp_links[i] == link)
4902                         break;
4903         }
4904
	if (i == edp_num)
		return;
4908
4909         if (pipe->stream_res.abm && pipe->stream_res.abm->funcs->set_abm_pause)
4910                 pipe->stream_res.abm->funcs->set_abm_pause(pipe->stream_res.abm, !enable, i, pipe->stream_res.tg->inst);
4911 }
4912
4913 /**
4914  * dc_extended_blank_supported - Decide whether extended blank is supported
4915  *
4916  * @dc: [in] Current DC state
4917  *
4918  * Extended blank is a freesync optimization feature to be enabled in the
4919  * future.  During the extra vblank period gained from freesync, we have the
4920  * ability to enter z9/z10.
4921  *
4922  * Return:
4923  * Indicate whether extended blank is supported (%true or %false)
4924  */
4925 bool dc_extended_blank_supported(struct dc *dc)
4926 {
4927         return dc->debug.extended_blank_optimization && !dc->debug.disable_z10
4928                 && dc->caps.zstate_support && dc->caps.is_apu;
4929 }