// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (c) 2015 MediaTek Inc.
 */

#include <linux/dma-mapping.h>
#include <linux/mailbox_controller.h>
#include <linux/pm_runtime.h>
#include <linux/soc/mediatek/mtk-cmdq.h>
#include <linux/soc/mediatek/mtk-mmsys.h>
#include <linux/soc/mediatek/mtk-mutex.h>

#include <asm/barrier.h>
#include <soc/mediatek/smi.h>

#include <drm/drm_atomic.h>
#include <drm/drm_atomic_helper.h>
#include <drm/drm_plane_helper.h>
#include <drm/drm_probe_helper.h>
#include <drm/drm_vblank.h>

#include "mtk_drm_drv.h"
#include "mtk_drm_crtc.h"
#include "mtk_drm_ddp_comp.h"
#include "mtk_drm_gem.h"
#include "mtk_drm_plane.h"

/*
 * struct mtk_drm_crtc - MediaTek specific crtc structure.
 *
 * @enabled: records whether crtc_enable succeeded
 * @planes: array of drm_plane structures, one for each layer
 * @pending_planes: whether any plane has pending changes to be applied
 * @mmsys_dev: pointer to the mmsys device for configuration registers
 * @mutex: handle to one of the ten disp_mutex streams
 * @ddp_comp_nr: number of components in ddp_comp
 * @ddp_comp: array of pointers to the mtk_ddp_comp structures used by this crtc
 *
 * TODO: Needs update: this header is missing a bunch of member descriptions.
 */
struct mtk_drm_crtc {
	struct drm_crtc			base;
	bool				enabled;

	bool				pending_needs_vblank;
	struct drm_pending_vblank_event	*event;

	struct drm_plane		*planes;
	unsigned int			layer_nr;
	bool				pending_planes;
	bool				pending_async_planes;

#if IS_REACHABLE(CONFIG_MTK_CMDQ)
	struct mbox_client		cmdq_cl;
	struct mbox_chan		*cmdq_chan;
	struct cmdq_pkt			cmdq_handle;
	u32				cmdq_event;
	u32				cmdq_vblank_cnt;
#endif

	struct device			*mmsys_dev;
	struct mtk_mutex		*mutex;
	unsigned int			ddp_comp_nr;
	struct mtk_ddp_comp		**ddp_comp;

	/* lock for display hardware access */
	struct mutex			hw_lock;
	bool				config_updating;
};

struct mtk_crtc_state {
	struct drm_crtc_state		base;

	bool				pending_config;
	unsigned int			pending_width;
	unsigned int			pending_height;
	unsigned int			pending_vrefresh;
};

static inline struct mtk_drm_crtc *to_mtk_crtc(struct drm_crtc *c)
{
	return container_of(c, struct mtk_drm_crtc, base);
}

static inline struct mtk_crtc_state *to_mtk_crtc_state(struct drm_crtc_state *s)
{
	return container_of(s, struct mtk_crtc_state, base);
}

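/*
 * Deliver the pending vblank event for this crtc under the DRM event lock
 * and drop the vblank reference that was taken in .atomic_begin when the
 * event was armed.
 */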
static void mtk_drm_crtc_finish_page_flip(struct mtk_drm_crtc *mtk_crtc)
{
	struct drm_crtc *crtc = &mtk_crtc->base;
	unsigned long flags;

	spin_lock_irqsave(&crtc->dev->event_lock, flags);
	drm_crtc_send_vblank_event(crtc, mtk_crtc->event);
	drm_crtc_vblank_put(crtc);
	mtk_crtc->event = NULL;
	spin_unlock_irqrestore(&crtc->dev->event_lock, flags);
}

static void mtk_drm_finish_page_flip(struct mtk_drm_crtc *mtk_crtc)
{
	drm_crtc_handle_vblank(&mtk_crtc->base);
	if (!mtk_crtc->config_updating && mtk_crtc->pending_needs_vblank) {
		mtk_drm_crtc_finish_page_flip(mtk_crtc);
		mtk_crtc->pending_needs_vblank = false;
	}
}

static void mtk_drm_crtc_destroy(struct drm_crtc *crtc)
{
	struct mtk_drm_crtc *mtk_crtc = to_mtk_crtc(crtc);

	mtk_mutex_put(mtk_crtc->mutex);

	drm_crtc_cleanup(crtc);
}

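/*
 * The .reset hook frees any existing atomic state (guarding against the
 * NULL state present at init time) and installs a fresh zeroed state.
 */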
static void mtk_drm_crtc_reset(struct drm_crtc *crtc)
{
	struct mtk_crtc_state *state;

	if (crtc->state)
		__drm_atomic_helper_crtc_destroy_state(crtc->state);

	kfree(to_mtk_crtc_state(crtc->state));
	crtc->state = NULL;

	state = kzalloc(sizeof(*state), GFP_KERNEL);
	if (state)
		__drm_atomic_helper_crtc_reset(crtc, &state->base);
}

static struct drm_crtc_state *mtk_drm_crtc_duplicate_state(struct drm_crtc *crtc)
{
	struct mtk_crtc_state *state;

	state = kzalloc(sizeof(*state), GFP_KERNEL);
	if (!state)
		return NULL;

	__drm_atomic_helper_crtc_duplicate_state(crtc, &state->base);

	WARN_ON(state->base.crtc != crtc);
	state->base.crtc = crtc;

	return &state->base;
}

static void mtk_drm_crtc_destroy_state(struct drm_crtc *crtc,
				       struct drm_crtc_state *state)
{
	__drm_atomic_helper_crtc_destroy_state(state);
	kfree(to_mtk_crtc_state(state));
}

static bool mtk_drm_crtc_mode_fixup(struct drm_crtc *crtc,
				    const struct drm_display_mode *mode,
				    struct drm_display_mode *adjusted_mode)
{
	/* Nothing to do here, but this callback is mandatory. */
	return true;
}

static void mtk_drm_crtc_mode_set_nofb(struct drm_crtc *crtc)
{
	struct mtk_crtc_state *state = to_mtk_crtc_state(crtc->state);

	state->pending_width = crtc->mode.hdisplay;
	state->pending_height = crtc->mode.vdisplay;
	state->pending_vrefresh = drm_mode_vrefresh(&crtc->mode);
	wmb();	/* Make sure the above parameters are set before update */
	state->pending_config = true;
}

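/*
 * Enable the clocks of every component in the path, in order; on failure,
 * walk back and disable the clocks that were already enabled.
 */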
static int mtk_crtc_ddp_clk_enable(struct mtk_drm_crtc *mtk_crtc)
{
	int ret;
	int i;

	for (i = 0; i < mtk_crtc->ddp_comp_nr; i++) {
		ret = mtk_ddp_comp_clk_enable(mtk_crtc->ddp_comp[i]);
		if (ret) {
			DRM_ERROR("Failed to enable clock %d: %d\n", i, ret);
			goto err;
		}
	}

	return 0;
err:
	while (--i >= 0)
		mtk_ddp_comp_clk_disable(mtk_crtc->ddp_comp[i]);
	return ret;
}

static void mtk_crtc_ddp_clk_disable(struct mtk_drm_crtc *mtk_crtc)
{
	int i;

	for (i = 0; i < mtk_crtc->ddp_comp_nr; i++)
		mtk_ddp_comp_clk_disable(mtk_crtc->ddp_comp[i]);
}

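/*
 * Map a global plane index to the component that owns it and the layer
 * number local to that component, by walking the path and summing each
 * component's layer count. For example, if the first two components expose
 * four layers each, plane index 5 lands on the second component as local
 * layer 1.
 */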
struct mtk_ddp_comp *mtk_drm_ddp_comp_for_plane(struct drm_crtc *crtc,
						struct drm_plane *plane,
						unsigned int *local_layer)
{
	struct mtk_drm_crtc *mtk_crtc = to_mtk_crtc(crtc);
	struct mtk_ddp_comp *comp;
	int i, count = 0;
	unsigned int local_index = plane - mtk_crtc->planes;

	for (i = 0; i < mtk_crtc->ddp_comp_nr; i++) {
		comp = mtk_crtc->ddp_comp[i];
		if (local_index < (count + mtk_ddp_comp_layer_nr(comp))) {
			*local_layer = local_index - count;
			return comp;
		}
		count += mtk_ddp_comp_layer_nr(comp);
	}

	WARN(1, "Failed to find component for plane %d\n", plane->index);

	return NULL;
}

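/*
 * CMDQ (GCE) support: the command packet buffer is allocated per crtc and
 * DMA-mapped towards the GCE mailbox device. The CPU writes commands into
 * va_base, so the buffer has to be synced to the device before every
 * mbox_send_message().
 */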
#if IS_REACHABLE(CONFIG_MTK_CMDQ)
static int mtk_drm_cmdq_pkt_create(struct mbox_chan *chan, struct cmdq_pkt *pkt,
				   size_t size)
{
	struct device *dev;
	dma_addr_t dma_addr;

	pkt->va_base = kzalloc(size, GFP_KERNEL);
	if (!pkt->va_base)
		return -ENOMEM;

	pkt->buf_size = size;

	dev = chan->mbox->dev;
	dma_addr = dma_map_single(dev, pkt->va_base, pkt->buf_size,
				  DMA_TO_DEVICE);
	if (dma_mapping_error(dev, dma_addr)) {
		dev_err(dev, "dma map failed, size=%zu\n", size);
		kfree(pkt->va_base);
		return -ENOMEM;
	}

	pkt->pa_base = dma_addr;

	return 0;
}

static void mtk_drm_cmdq_pkt_destroy(struct mbox_chan *chan, struct cmdq_pkt *pkt)
{
	dma_unmap_single(chan->mbox->dev, pkt->pa_base, pkt->buf_size,
			 DMA_TO_DEVICE);
	kfree(pkt->va_base);
}

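/*
 * Mailbox rx_callback, invoked once the GCE has executed the flushed
 * packet: clear the pending flags that mtk_crtc_ddp_config() recorded,
 * reset the vblank timeout counter and release the packet buffer.
 */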
static void ddp_cmdq_cb(struct mbox_client *cl, void *mssg)
{
	struct mtk_drm_crtc *mtk_crtc = container_of(cl, struct mtk_drm_crtc, cmdq_cl);
	struct cmdq_cb_data *data = mssg;
	struct mtk_crtc_state *state;
	unsigned int i;

	state = to_mtk_crtc_state(mtk_crtc->base.state);

	state->pending_config = false;

	if (mtk_crtc->pending_planes) {
		for (i = 0; i < mtk_crtc->layer_nr; i++) {
			struct drm_plane *plane = &mtk_crtc->planes[i];
			struct mtk_plane_state *plane_state;

			plane_state = to_mtk_plane_state(plane->state);

			plane_state->pending.config = false;
		}
		mtk_crtc->pending_planes = false;
	}

	if (mtk_crtc->pending_async_planes) {
		for (i = 0; i < mtk_crtc->layer_nr; i++) {
			struct drm_plane *plane = &mtk_crtc->planes[i];
			struct mtk_plane_state *plane_state;

			plane_state = to_mtk_plane_state(plane->state);

			plane_state->pending.async_config = false;
		}
		mtk_crtc->pending_async_planes = false;
	}

	mtk_crtc->cmdq_vblank_cnt = 0;
	mtk_drm_cmdq_pkt_destroy(mtk_crtc->cmdq_chan, data->pkt);
}
#endif

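/*
 * Bring up the display path: power domain first, then the mutex clock and
 * the component clocks, then the mmsys routing between consecutive
 * components and the disp_mutex stream, and finally each component itself
 * before the initial plane configuration.
 */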
static int mtk_crtc_ddp_hw_init(struct mtk_drm_crtc *mtk_crtc)
{
	struct drm_crtc *crtc = &mtk_crtc->base;
	struct drm_connector *connector;
	struct drm_encoder *encoder;
	struct drm_connector_list_iter conn_iter;
	unsigned int width, height, vrefresh, bpc = MTK_MAX_BPC;
	int ret;
	int i;

	if (WARN_ON(!crtc->state))
		return -EINVAL;

	width = crtc->state->adjusted_mode.hdisplay;
	height = crtc->state->adjusted_mode.vdisplay;
	vrefresh = drm_mode_vrefresh(&crtc->state->adjusted_mode);

	drm_for_each_encoder(encoder, crtc->dev) {
		if (encoder->crtc != crtc)
			continue;

		drm_connector_list_iter_begin(crtc->dev, &conn_iter);
		drm_for_each_connector_iter(connector, &conn_iter) {
			if (connector->encoder != encoder)
				continue;
			if (connector->display_info.bpc != 0 &&
			    bpc > connector->display_info.bpc)
				bpc = connector->display_info.bpc;
		}
		drm_connector_list_iter_end(&conn_iter);
	}

	ret = pm_runtime_resume_and_get(crtc->dev->dev);
	if (ret < 0) {
		DRM_ERROR("Failed to enable power domain: %d\n", ret);
		return ret;
	}

	ret = mtk_mutex_prepare(mtk_crtc->mutex);
	if (ret < 0) {
		DRM_ERROR("Failed to enable mutex clock: %d\n", ret);
		goto err_pm_runtime_put;
	}

	ret = mtk_crtc_ddp_clk_enable(mtk_crtc);
	if (ret < 0) {
		DRM_ERROR("Failed to enable component clocks: %d\n", ret);
		goto err_mutex_unprepare;
	}

	for (i = 0; i < mtk_crtc->ddp_comp_nr - 1; i++) {
		mtk_mmsys_ddp_connect(mtk_crtc->mmsys_dev,
				      mtk_crtc->ddp_comp[i]->id,
				      mtk_crtc->ddp_comp[i + 1]->id);
		mtk_mutex_add_comp(mtk_crtc->mutex,
				   mtk_crtc->ddp_comp[i]->id);
	}
	mtk_mutex_add_comp(mtk_crtc->mutex, mtk_crtc->ddp_comp[i]->id);
	mtk_mutex_enable(mtk_crtc->mutex);

	for (i = 0; i < mtk_crtc->ddp_comp_nr; i++) {
		struct mtk_ddp_comp *comp = mtk_crtc->ddp_comp[i];

		if (i == 1)
			mtk_ddp_comp_bgclr_in_on(comp);

		mtk_ddp_comp_config(comp, width, height, vrefresh, bpc, NULL);
		mtk_ddp_comp_start(comp);
	}

	/* Initially configure all planes */
	for (i = 0; i < mtk_crtc->layer_nr; i++) {
		struct drm_plane *plane = &mtk_crtc->planes[i];
		struct mtk_plane_state *plane_state;
		struct mtk_ddp_comp *comp;
		unsigned int local_layer;

		plane_state = to_mtk_plane_state(plane->state);
		comp = mtk_drm_ddp_comp_for_plane(crtc, plane, &local_layer);
		if (comp)
			mtk_ddp_comp_layer_config(comp, local_layer,
						  plane_state, NULL);
	}

	return 0;

err_mutex_unprepare:
	mtk_mutex_unprepare(mtk_crtc->mutex);
err_pm_runtime_put:
	pm_runtime_put(crtc->dev->dev);
	return ret;
}

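/*
 * Tear the path down in roughly the reverse order of mtk_crtc_ddp_hw_init()
 * and complete any event left on a crtc that is being switched off.
 */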
static void mtk_crtc_ddp_hw_fini(struct mtk_drm_crtc *mtk_crtc)
{
	struct drm_device *drm = mtk_crtc->base.dev;
	struct drm_crtc *crtc = &mtk_crtc->base;
	int i;

	for (i = 0; i < mtk_crtc->ddp_comp_nr; i++) {
		mtk_ddp_comp_stop(mtk_crtc->ddp_comp[i]);
		if (i == 1)
			mtk_ddp_comp_bgclr_in_off(mtk_crtc->ddp_comp[i]);
	}

	for (i = 0; i < mtk_crtc->ddp_comp_nr; i++)
		mtk_mutex_remove_comp(mtk_crtc->mutex,
				      mtk_crtc->ddp_comp[i]->id);
	mtk_mutex_disable(mtk_crtc->mutex);
	for (i = 0; i < mtk_crtc->ddp_comp_nr - 1; i++) {
		mtk_mmsys_ddp_disconnect(mtk_crtc->mmsys_dev,
					 mtk_crtc->ddp_comp[i]->id,
					 mtk_crtc->ddp_comp[i + 1]->id);
		mtk_mutex_remove_comp(mtk_crtc->mutex,
				      mtk_crtc->ddp_comp[i]->id);
	}
	mtk_mutex_remove_comp(mtk_crtc->mutex, mtk_crtc->ddp_comp[i]->id);
	mtk_crtc_ddp_clk_disable(mtk_crtc);
	mtk_mutex_unprepare(mtk_crtc->mutex);

	pm_runtime_put(drm->dev);

	if (crtc->state->event && !crtc->state->active) {
		spin_lock_irq(&crtc->dev->event_lock);
		drm_crtc_send_vblank_event(crtc, crtc->state->event);
		crtc->state->event = NULL;
		spin_unlock_irq(&crtc->dev->event_lock);
	}
}

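/*
 * Write the pending crtc and plane configuration to the hardware, either
 * immediately by the CPU (cmdq_handle == NULL) or by recording the register
 * writes into the given CMDQ packet for the GCE to replay.
 */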
static void mtk_crtc_ddp_config(struct drm_crtc *crtc,
				struct cmdq_pkt *cmdq_handle)
{
	struct mtk_drm_crtc *mtk_crtc = to_mtk_crtc(crtc);
	struct mtk_crtc_state *state = to_mtk_crtc_state(mtk_crtc->base.state);
	struct mtk_ddp_comp *comp = mtk_crtc->ddp_comp[0];
	unsigned int i;
	unsigned int local_layer;

	/*
	 * TODO: instead of updating the registers here, we should prepare
	 * working registers in atomic_commit and let the hardware command
	 * queue update module registers on vblank.
	 */
	if (state->pending_config) {
		mtk_ddp_comp_config(comp, state->pending_width,
				    state->pending_height,
				    state->pending_vrefresh, 0,
				    cmdq_handle);

		state->pending_config = false;
	}

	if (mtk_crtc->pending_planes) {
		for (i = 0; i < mtk_crtc->layer_nr; i++) {
			struct drm_plane *plane = &mtk_crtc->planes[i];
			struct mtk_plane_state *plane_state;

			plane_state = to_mtk_plane_state(plane->state);

			if (!plane_state->pending.config)
				continue;

			comp = mtk_drm_ddp_comp_for_plane(crtc, plane,
							  &local_layer);

			if (comp)
				mtk_ddp_comp_layer_config(comp, local_layer,
							  plane_state,
							  cmdq_handle);
			plane_state->pending.config = false;
		}

		mtk_crtc->pending_planes = false;
	}

	if (mtk_crtc->pending_async_planes) {
		for (i = 0; i < mtk_crtc->layer_nr; i++) {
			struct drm_plane *plane = &mtk_crtc->planes[i];
			struct mtk_plane_state *plane_state;

			plane_state = to_mtk_plane_state(plane->state);

			if (!plane_state->pending.async_config)
				continue;

			comp = mtk_drm_ddp_comp_for_plane(crtc, plane,
							  &local_layer);

			if (comp)
				mtk_ddp_comp_layer_config(comp, local_layer,
							  plane_state,
							  cmdq_handle);
			plane_state->pending.async_config = false;
		}

		mtk_crtc->pending_async_planes = false;
	}
}

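/*
 * Collect the planes that became dirty since the last flush and push the
 * update through one of three paths: shadow registers applied under the
 * disp_mutex, a CMDQ packet executed by the GCE on the next vblank, or,
 * failing both, direct CPU register writes from the vblank interrupt.
 */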
static void mtk_drm_crtc_update_config(struct mtk_drm_crtc *mtk_crtc,
				       bool needs_vblank)
{
#if IS_REACHABLE(CONFIG_MTK_CMDQ)
	struct cmdq_pkt *cmdq_handle = &mtk_crtc->cmdq_handle;
#endif
	struct drm_crtc *crtc = &mtk_crtc->base;
	struct mtk_drm_private *priv = crtc->dev->dev_private;
	unsigned int pending_planes = 0, pending_async_planes = 0;
	int i;

	mutex_lock(&mtk_crtc->hw_lock);
	mtk_crtc->config_updating = true;
	if (needs_vblank)
		mtk_crtc->pending_needs_vblank = true;

	for (i = 0; i < mtk_crtc->layer_nr; i++) {
		struct drm_plane *plane = &mtk_crtc->planes[i];
		struct mtk_plane_state *plane_state;

		plane_state = to_mtk_plane_state(plane->state);
		if (plane_state->pending.dirty) {
			plane_state->pending.config = true;
			plane_state->pending.dirty = false;
			pending_planes |= BIT(i);
		} else if (plane_state->pending.async_dirty) {
			plane_state->pending.async_config = true;
			plane_state->pending.async_dirty = false;
			pending_async_planes |= BIT(i);
		}
	}
	if (pending_planes)
		mtk_crtc->pending_planes = true;
	if (pending_async_planes)
		mtk_crtc->pending_async_planes = true;

	if (priv->data->shadow_register) {
		mtk_mutex_acquire(mtk_crtc->mutex);
		mtk_crtc_ddp_config(crtc, NULL);
		mtk_mutex_release(mtk_crtc->mutex);
	}
#if IS_REACHABLE(CONFIG_MTK_CMDQ)
	if (mtk_crtc->cmdq_chan) {
		mbox_flush(mtk_crtc->cmdq_chan, 2000);
		cmdq_handle->cmd_buf_size = 0;
		cmdq_pkt_clear_event(cmdq_handle, mtk_crtc->cmdq_event);
		cmdq_pkt_wfe(cmdq_handle, mtk_crtc->cmdq_event, false);
		mtk_crtc_ddp_config(crtc, cmdq_handle);
		cmdq_pkt_finalize(cmdq_handle);
		dma_sync_single_for_device(mtk_crtc->cmdq_chan->mbox->dev,
					   cmdq_handle->pa_base,
					   cmdq_handle->cmd_buf_size,
					   DMA_TO_DEVICE);
		/*
		 * The CMDQ command should execute on the next vblank; if it
		 * has not executed within two vblanks, report a timeout.
		 */
		mtk_crtc->cmdq_vblank_cnt = 2;
		mbox_send_message(mtk_crtc->cmdq_chan, cmdq_handle);
		mbox_client_txdone(mtk_crtc->cmdq_chan, 0);
	}
#endif
	mtk_crtc->config_updating = false;
	mutex_unlock(&mtk_crtc->hw_lock);
}

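/*
 * Per-vblank interrupt from the first component in the path: apply pending
 * configuration by CPU when neither shadow registers nor CMDQ are in use,
 * age the CMDQ timeout counter otherwise, and finish any page flip.
 */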
static void mtk_crtc_ddp_irq(void *data)
{
	struct drm_crtc *crtc = data;
	struct mtk_drm_crtc *mtk_crtc = to_mtk_crtc(crtc);
	struct mtk_drm_private *priv = crtc->dev->dev_private;

#if IS_REACHABLE(CONFIG_MTK_CMDQ)
	if (!priv->data->shadow_register && !mtk_crtc->cmdq_chan)
		mtk_crtc_ddp_config(crtc, NULL);
	else if (mtk_crtc->cmdq_vblank_cnt > 0 && --mtk_crtc->cmdq_vblank_cnt == 0)
		DRM_ERROR("mtk_crtc %d CMDQ execute command timeout!\n",
			  drm_crtc_index(&mtk_crtc->base));
#else
	if (!priv->data->shadow_register)
		mtk_crtc_ddp_config(crtc, NULL);
#endif
	mtk_drm_finish_page_flip(mtk_crtc);
}

static int mtk_drm_crtc_enable_vblank(struct drm_crtc *crtc)
{
	struct mtk_drm_crtc *mtk_crtc = to_mtk_crtc(crtc);
	struct mtk_ddp_comp *comp = mtk_crtc->ddp_comp[0];

	mtk_ddp_comp_enable_vblank(comp, mtk_crtc_ddp_irq, &mtk_crtc->base);

	return 0;
}

static void mtk_drm_crtc_disable_vblank(struct drm_crtc *crtc)
{
	struct mtk_drm_crtc *mtk_crtc = to_mtk_crtc(crtc);
	struct mtk_ddp_comp *comp = mtk_crtc->ddp_comp[0];

	mtk_ddp_comp_disable_vblank(comp);
}

int mtk_drm_crtc_plane_check(struct drm_crtc *crtc, struct drm_plane *plane,
			     struct mtk_plane_state *state)
{
	unsigned int local_layer;
	struct mtk_ddp_comp *comp;

	comp = mtk_drm_ddp_comp_for_plane(crtc, plane, &local_layer);
	if (comp)
		return mtk_ddp_comp_layer_check(comp, local_layer, state);
	return 0;
}

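/*
 * Fast path for asynchronous (e.g. cursor) plane updates: push the new
 * plane state to the hardware without arming a completion event.
 */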
void mtk_drm_crtc_async_update(struct drm_crtc *crtc, struct drm_plane *plane,
			       struct drm_atomic_state *state)
{
	struct mtk_drm_crtc *mtk_crtc = to_mtk_crtc(crtc);

	if (!mtk_crtc->enabled)
		return;

	mtk_drm_crtc_update_config(mtk_crtc, false);
}

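/*
 * Power up the SMI local arbiter (larb) of the first component so display
 * DMA can fetch from memory, then initialize the whole path.
 */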
static void mtk_drm_crtc_atomic_enable(struct drm_crtc *crtc,
				       struct drm_atomic_state *state)
{
	struct mtk_drm_crtc *mtk_crtc = to_mtk_crtc(crtc);
	struct mtk_ddp_comp *comp = mtk_crtc->ddp_comp[0];
	int ret;

	DRM_DEBUG_DRIVER("%s %d\n", __func__, crtc->base.id);

	ret = mtk_smi_larb_get(comp->larb_dev);
	if (ret) {
		DRM_ERROR("Failed to get larb: %d\n", ret);
		return;
	}

	ret = mtk_crtc_ddp_hw_init(mtk_crtc);
	if (ret) {
		mtk_smi_larb_put(comp->larb_dev);
		return;
	}

	drm_crtc_vblank_on(crtc);
	mtk_crtc->enabled = true;
}

static void mtk_drm_crtc_atomic_disable(struct drm_crtc *crtc,
					struct drm_atomic_state *state)
{
	struct mtk_drm_crtc *mtk_crtc = to_mtk_crtc(crtc);
	struct mtk_ddp_comp *comp = mtk_crtc->ddp_comp[0];
	int i;

	DRM_DEBUG_DRIVER("%s %d\n", __func__, crtc->base.id);
	if (!mtk_crtc->enabled)
		return;

	/* Set all pending plane state to disabled */
	for (i = 0; i < mtk_crtc->layer_nr; i++) {
		struct drm_plane *plane = &mtk_crtc->planes[i];
		struct mtk_plane_state *plane_state;

		plane_state = to_mtk_plane_state(plane->state);
		plane_state->pending.enable = false;
		plane_state->pending.config = true;
	}
	mtk_crtc->pending_planes = true;

	mtk_drm_crtc_update_config(mtk_crtc, false);
	/* Wait for planes to be disabled */
	drm_crtc_wait_one_vblank(crtc);

	drm_crtc_vblank_off(crtc);
	mtk_crtc_ddp_hw_fini(mtk_crtc);
	mtk_smi_larb_put(comp->larb_dev);

	mtk_crtc->enabled = false;
}

static void mtk_drm_crtc_atomic_begin(struct drm_crtc *crtc,
				      struct drm_atomic_state *state)
{
	struct drm_crtc_state *crtc_state = drm_atomic_get_new_crtc_state(state,
									  crtc);
	struct mtk_crtc_state *mtk_crtc_state = to_mtk_crtc_state(crtc_state);
	struct mtk_drm_crtc *mtk_crtc = to_mtk_crtc(crtc);

	if (mtk_crtc->event && mtk_crtc_state->base.event)
		DRM_ERROR("new event while there is still a pending event\n");

	if (mtk_crtc_state->base.event) {
		mtk_crtc_state->base.event->pipe = drm_crtc_index(crtc);
		WARN_ON(drm_crtc_vblank_get(crtc) != 0);
		mtk_crtc->event = mtk_crtc_state->base.event;
		mtk_crtc_state->base.event = NULL;
	}
}

static void mtk_drm_crtc_atomic_flush(struct drm_crtc *crtc,
				      struct drm_atomic_state *state)
{
	struct mtk_drm_crtc *mtk_crtc = to_mtk_crtc(crtc);
	int i;

	if (crtc->state->color_mgmt_changed)
		for (i = 0; i < mtk_crtc->ddp_comp_nr; i++) {
			mtk_ddp_gamma_set(mtk_crtc->ddp_comp[i], crtc->state);
			mtk_ddp_ctm_set(mtk_crtc->ddp_comp[i], crtc->state);
		}
	mtk_drm_crtc_update_config(mtk_crtc, !!mtk_crtc->event);
}

static const struct drm_crtc_funcs mtk_crtc_funcs = {
	.set_config = drm_atomic_helper_set_config,
	.page_flip = drm_atomic_helper_page_flip,
	.destroy = mtk_drm_crtc_destroy,
	.reset = mtk_drm_crtc_reset,
	.atomic_duplicate_state = mtk_drm_crtc_duplicate_state,
	.atomic_destroy_state = mtk_drm_crtc_destroy_state,
	.enable_vblank = mtk_drm_crtc_enable_vblank,
	.disable_vblank = mtk_drm_crtc_disable_vblank,
};

static const struct drm_crtc_helper_funcs mtk_crtc_helper_funcs = {
	.mode_fixup = mtk_drm_crtc_mode_fixup,
	.mode_set_nofb = mtk_drm_crtc_mode_set_nofb,
	.atomic_begin = mtk_drm_crtc_atomic_begin,
	.atomic_flush = mtk_drm_crtc_atomic_flush,
	.atomic_enable = mtk_drm_crtc_atomic_enable,
	.atomic_disable = mtk_drm_crtc_atomic_disable,
};

static int mtk_drm_crtc_init(struct drm_device *drm,
			     struct mtk_drm_crtc *mtk_crtc,
			     unsigned int pipe)
{
	struct drm_plane *primary = NULL;
	struct drm_plane *cursor = NULL;
	int i, ret;

	for (i = 0; i < mtk_crtc->layer_nr; i++) {
		if (mtk_crtc->planes[i].type == DRM_PLANE_TYPE_PRIMARY)
			primary = &mtk_crtc->planes[i];
		else if (mtk_crtc->planes[i].type == DRM_PLANE_TYPE_CURSOR)
			cursor = &mtk_crtc->planes[i];
	}

	ret = drm_crtc_init_with_planes(drm, &mtk_crtc->base, primary, cursor,
					&mtk_crtc_funcs, NULL);
	if (ret < 0)
		goto err_cleanup_crtc;

	drm_crtc_helper_add(&mtk_crtc->base, &mtk_crtc_helper_funcs);

	return 0;

err_cleanup_crtc:
	drm_crtc_cleanup(&mtk_crtc->base);
	return ret;
}

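/*
 * Number of planes a component contributes: only the first two components
 * in the path may expose layers, and the second one only when it supports
 * background color input (presumably the cascaded dual-OVL arrangement,
 * where the second OVL blends on top of the first).
 */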
static int mtk_drm_crtc_num_comp_planes(struct mtk_drm_crtc *mtk_crtc,
					int comp_idx)
{
	struct mtk_ddp_comp *comp;

	if (comp_idx > 1)
		return 0;

	comp = mtk_crtc->ddp_comp[comp_idx];
	if (!comp->funcs)
		return 0;

	if (comp_idx == 1 && !comp->funcs->bgclr_in_on)
		return 0;

	return mtk_ddp_comp_layer_nr(comp);
}

static enum drm_plane_type mtk_drm_crtc_plane_type(unsigned int plane_idx,
						   unsigned int num_planes)
{
	if (plane_idx == 0)
		return DRM_PLANE_TYPE_PRIMARY;
	else if (plane_idx == (num_planes - 1))
		return DRM_PLANE_TYPE_CURSOR;
	else
		return DRM_PLANE_TYPE_OVERLAY;
}

static int mtk_drm_crtc_init_comp_planes(struct drm_device *drm_dev,
					 struct mtk_drm_crtc *mtk_crtc,
					 int comp_idx, int pipe)
{
	int num_planes = mtk_drm_crtc_num_comp_planes(mtk_crtc, comp_idx);
	struct mtk_ddp_comp *comp = mtk_crtc->ddp_comp[comp_idx];
	int i, ret;

	for (i = 0; i < num_planes; i++) {
		ret = mtk_plane_init(drm_dev,
				     &mtk_crtc->planes[mtk_crtc->layer_nr],
				     BIT(pipe),
				     mtk_drm_crtc_plane_type(mtk_crtc->layer_nr,
							     num_planes),
				     mtk_ddp_comp_supported_rotations(comp));
		if (ret)
			return ret;

		mtk_crtc->layer_nr++;
	}
	return 0;
}

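/*
 * Create a crtc for one display path: verify every component in the path
 * is present and initialized, allocate the crtc and its planes, wire up
 * the gamma/CTM support advertised by the components, and finally try to
 * set up a CMDQ mailbox channel, falling back to CPU register writes when
 * the GCE is unavailable.
 */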
int mtk_drm_crtc_create(struct drm_device *drm_dev,
			const enum mtk_ddp_comp_id *path, unsigned int path_len)
{
	struct mtk_drm_private *priv = drm_dev->dev_private;
	struct device *dev = drm_dev->dev;
	struct mtk_drm_crtc *mtk_crtc;
	unsigned int num_comp_planes = 0;
	int pipe = priv->num_pipes;
	int ret;
	int i;
	bool has_ctm = false;
	uint gamma_lut_size = 0;

	if (!path)
		return 0;

	for (i = 0; i < path_len; i++) {
		enum mtk_ddp_comp_id comp_id = path[i];
		struct device_node *node;
		struct mtk_ddp_comp *comp;

		node = priv->comp_node[comp_id];
		comp = &priv->ddp_comp[comp_id];

		if (!node) {
			dev_info(dev,
				 "Not creating crtc %d because component %d is disabled or missing\n",
				 pipe, comp_id);
			return 0;
		}

		if (!comp->dev) {
			dev_err(dev, "Component %pOF not initialized\n", node);
			return -ENODEV;
		}
	}

	mtk_crtc = devm_kzalloc(dev, sizeof(*mtk_crtc), GFP_KERNEL);
	if (!mtk_crtc)
		return -ENOMEM;

	mtk_crtc->mmsys_dev = priv->mmsys_dev;
	mtk_crtc->ddp_comp_nr = path_len;
	mtk_crtc->ddp_comp = devm_kmalloc_array(dev, mtk_crtc->ddp_comp_nr,
						sizeof(*mtk_crtc->ddp_comp),
						GFP_KERNEL);
	if (!mtk_crtc->ddp_comp)
		return -ENOMEM;

	mtk_crtc->mutex = mtk_mutex_get(priv->mutex_dev);
	if (IS_ERR(mtk_crtc->mutex)) {
		ret = PTR_ERR(mtk_crtc->mutex);
		dev_err(dev, "Failed to get mutex: %d\n", ret);
		return ret;
	}

	for (i = 0; i < mtk_crtc->ddp_comp_nr; i++) {
		enum mtk_ddp_comp_id comp_id = path[i];
		struct mtk_ddp_comp *comp;

		comp = &priv->ddp_comp[comp_id];
		mtk_crtc->ddp_comp[i] = comp;

		if (comp->funcs) {
			if (comp->funcs->gamma_set)
				gamma_lut_size = MTK_LUT_SIZE;

			if (comp->funcs->ctm_set)
				has_ctm = true;
		}
	}

	for (i = 0; i < mtk_crtc->ddp_comp_nr; i++)
		num_comp_planes += mtk_drm_crtc_num_comp_planes(mtk_crtc, i);

	mtk_crtc->planes = devm_kcalloc(dev, num_comp_planes,
					sizeof(struct drm_plane), GFP_KERNEL);
	if (!mtk_crtc->planes)
		return -ENOMEM;

	for (i = 0; i < mtk_crtc->ddp_comp_nr; i++) {
		ret = mtk_drm_crtc_init_comp_planes(drm_dev, mtk_crtc, i,
						    pipe);
		if (ret)
			return ret;
	}

	ret = mtk_drm_crtc_init(drm_dev, mtk_crtc, pipe);
	if (ret < 0)
		return ret;

	if (gamma_lut_size)
		drm_mode_crtc_set_gamma_size(&mtk_crtc->base, gamma_lut_size);
	drm_crtc_enable_color_mgmt(&mtk_crtc->base, 0, has_ctm, gamma_lut_size);
	priv->num_pipes++;
	mutex_init(&mtk_crtc->hw_lock);

#if IS_REACHABLE(CONFIG_MTK_CMDQ)
	mtk_crtc->cmdq_cl.dev = mtk_crtc->mmsys_dev;
	mtk_crtc->cmdq_cl.tx_block = false;
	mtk_crtc->cmdq_cl.knows_txdone = true;
	mtk_crtc->cmdq_cl.rx_callback = ddp_cmdq_cb;
	mtk_crtc->cmdq_chan =
			mbox_request_channel(&mtk_crtc->cmdq_cl,
					     drm_crtc_index(&mtk_crtc->base));
	if (IS_ERR(mtk_crtc->cmdq_chan)) {
		dev_dbg(dev, "mtk_crtc %d failed to create mailbox client, writing register by CPU now\n",
			drm_crtc_index(&mtk_crtc->base));
		mtk_crtc->cmdq_chan = NULL;
	}

	if (mtk_crtc->cmdq_chan) {
		ret = of_property_read_u32_index(priv->mutex_node,
						 "mediatek,gce-events",
						 drm_crtc_index(&mtk_crtc->base),
						 &mtk_crtc->cmdq_event);
		if (ret) {
			dev_dbg(dev, "mtk_crtc %d failed to get mediatek,gce-events property\n",
				drm_crtc_index(&mtk_crtc->base));
			mbox_free_channel(mtk_crtc->cmdq_chan);
			mtk_crtc->cmdq_chan = NULL;
		} else {
			ret = mtk_drm_cmdq_pkt_create(mtk_crtc->cmdq_chan,
						      &mtk_crtc->cmdq_handle,
						      PAGE_SIZE);
			if (ret) {
				dev_dbg(dev, "mtk_crtc %d failed to create cmdq packet\n",
					drm_crtc_index(&mtk_crtc->base));
				mbox_free_channel(mtk_crtc->cmdq_chan);
				mtk_crtc->cmdq_chan = NULL;
			}
		}
	}
#endif
	return 0;
}