// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) STMicroelectronics SA 2014
 * Authors: Benjamin Gaignard <benjamin.gaignard@st.com>
 *          Fabien Dessenne <fabien.dessenne@st.com>
 *          for STMicroelectronics.
 */
#include <linux/dma-mapping.h>
#include <linux/of.h>
#include <linux/seq_file.h>

#include <drm/drm_atomic.h>
#include <drm/drm_device.h>
#include <drm/drm_fb_dma_helper.h>
#include <drm/drm_fourcc.h>
#include <drm/drm_framebuffer.h>
#include <drm/drm_gem_dma_helper.h>

#include "sti_compositor.h"
#include "sti_gdp.h"
#include "sti_plane.h"
#include "sti_vtg.h"

#define ALPHASWITCH     BIT(6)
#define ENA_COLOR_FILL  BIT(8)
#define BIGNOTLITTLE    BIT(23)
#define WAIT_NEXT_VSYNC BIT(31)

/* GDP color formats */
#define GDP_RGB565      0x00
#define GDP_RGB888      0x01
#define GDP_RGB888_32   0x02
#define GDP_XBGR8888    (GDP_RGB888_32 | BIGNOTLITTLE | ALPHASWITCH)
#define GDP_ARGB8565    0x04
#define GDP_ARGB8888    0x05
#define GDP_ABGR8888    (GDP_ARGB8888 | BIGNOTLITTLE | ALPHASWITCH)
#define GDP_ARGB1555    0x06
#define GDP_ARGB4444    0x07

#define GDP2STR(fmt) { GDP_ ## fmt, #fmt }

static struct gdp_format_to_str {
	int format;
	char name[20];
} gdp_format_to_str[] = {
	/* one entry per GDP_* color format defined above */
	GDP2STR(RGB565),   GDP2STR(RGB888),   GDP2STR(RGB888_32),
	GDP2STR(XBGR8888), GDP2STR(ARGB8565), GDP2STR(ARGB8888),
	GDP2STR(ABGR8888), GDP2STR(ARGB1555), GDP2STR(ARGB4444),
};

#define GAM_GDP_CTL_OFFSET      0x00
#define GAM_GDP_AGC_OFFSET      0x04
#define GAM_GDP_VPO_OFFSET      0x0C
#define GAM_GDP_VPS_OFFSET      0x10
#define GAM_GDP_PML_OFFSET      0x14
#define GAM_GDP_PMP_OFFSET      0x18
#define GAM_GDP_SIZE_OFFSET     0x1C
#define GAM_GDP_NVN_OFFSET      0x24
#define GAM_GDP_KEY1_OFFSET     0x28
#define GAM_GDP_KEY2_OFFSET     0x2C
#define GAM_GDP_PPT_OFFSET      0x34
#define GAM_GDP_CML_OFFSET      0x3C
#define GAM_GDP_MST_OFFSET      0x68

#define GAM_GDP_ALPHARANGE_255  BIT(5)
#define GAM_GDP_AGC_FULL_RANGE  0x00808080
#define GAM_GDP_PPT_IGNORE      (BIT(1) | BIT(0))

#define GAM_GDP_SIZE_MAX_WIDTH  3840
#define GAM_GDP_SIZE_MAX_HEIGHT 2160

#define GDP_NODE_NB_BANK        2
#define GDP_NODE_PER_FIELD      2
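
/*
 * GDP hardware node fetched by the GDP DMA engine. The definition below is a
 * reconstruction: the field names follow the gam_gdp_* accesses made in this
 * file and mirror the register offsets above; the reserved padding words are
 * an assumption used only to keep those offsets aligned.
 */
struct sti_gdp_node {
	u32 gam_gdp_ctl;
	u32 gam_gdp_agc;
	u32 reserved1;
	u32 gam_gdp_vpo;
	u32 gam_gdp_vps;
	u32 gam_gdp_pml;
	u32 gam_gdp_pmp;
	u32 gam_gdp_size;
	u32 reserved2;
	u32 gam_gdp_nvn;
	u32 gam_gdp_key1;
	u32 gam_gdp_key2;
	u32 reserved3;
	u32 gam_gdp_ppt;
	u32 reserved4;
	u32 gam_gdp_cml;
};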

struct sti_gdp_node_list {
	struct sti_gdp_node *top_field;
	dma_addr_t top_field_paddr;
	struct sti_gdp_node *btm_field;
	dma_addr_t btm_field_paddr;
};

/*
 * STI GDP structure
 *
 * @sti_plane:          sti_plane structure
 * @dev:                driver device
 * @regs:               gdp registers
 * @clk_pix:            pixel clock for the current gdp
 * @clk_main_parent:    gdp parent clock if main path used
 * @clk_aux_parent:     gdp parent clock if aux path used
 * @vtg_field_nb:       callback for VTG FIELD (top or bottom) notification
 * @is_curr_top:        true if the current node processed is the top field
 * @node_list:          array of node list
 * @vtg:                registered vtg
 */
struct sti_gdp {
	struct sti_plane plane;
	struct device *dev;
	void __iomem *regs;
	struct clk *clk_pix;
	struct clk *clk_main_parent;
	struct clk *clk_aux_parent;
	struct notifier_block vtg_field_nb;
	bool is_curr_top;
	struct sti_gdp_node_list node_list[GDP_NODE_NB_BANK];
	struct sti_vtg *vtg;
};

#define to_sti_gdp(x) container_of(x, struct sti_gdp, plane)

/* DRM formats accepted by the GDP; kept in sync with sti_gdp_fourcc2format() */
static const uint32_t gdp_supported_formats[] = {
	DRM_FORMAT_XRGB8888,
	DRM_FORMAT_XBGR8888,
	DRM_FORMAT_ARGB8888,
	DRM_FORMAT_ABGR8888,
	DRM_FORMAT_ARGB4444,
	DRM_FORMAT_ARGB1555,
	DRM_FORMAT_RGB565,
	DRM_FORMAT_RGB888,
};

#define DBGFS_DUMP(reg) seq_printf(s, "\n %-25s 0x%08X", #reg, \
				   readl(gdp->regs + reg ## _OFFSET))
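
/*
 * debugfs decode helpers: pretty-print selected bit-fields of the raw
 * register values dumped by DBGFS_DUMP() above.
 */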
static void gdp_dbg_ctl(struct seq_file *s, int val)
{
	int i;

	seq_puts(s, "\tColor:");
	for (i = 0; i < ARRAY_SIZE(gdp_format_to_str); i++) {
		if (gdp_format_to_str[i].format == (val & 0x1F)) {
			seq_puts(s, gdp_format_to_str[i].name);
			break;
		}
	}
	if (i == ARRAY_SIZE(gdp_format_to_str))
		seq_puts(s, "<UNKNOWN>");

	seq_printf(s, "\tWaitNextVsync:%d", val & WAIT_NEXT_VSYNC ? 1 : 0);
}

static void gdp_dbg_vpo(struct seq_file *s, int val)
{
	seq_printf(s, "\txdo:%4d\tydo:%4d", val & 0xFFFF, (val >> 16) & 0xFFFF);
}

static void gdp_dbg_vps(struct seq_file *s, int val)
{
	seq_printf(s, "\txds:%4d\tyds:%4d", val & 0xFFFF, (val >> 16) & 0xFFFF);
}

static void gdp_dbg_size(struct seq_file *s, int val)
{
	seq_printf(s, "\t%d x %d", val & 0xFFFF, (val >> 16) & 0xFFFF);
}

static void gdp_dbg_nvn(struct seq_file *s, struct sti_gdp *gdp, int val)
{
	void *base = NULL;
	unsigned int i;

	for (i = 0; i < GDP_NODE_NB_BANK; i++) {
		if (gdp->node_list[i].top_field_paddr == val) {
			base = gdp->node_list[i].top_field;
			break;
		}
		if (gdp->node_list[i].btm_field_paddr == val) {
			base = gdp->node_list[i].btm_field;
			break;
		}
	}

	if (base)
		seq_printf(s, "\tVirt @: %p", base);
}

static void gdp_dbg_ppt(struct seq_file *s, int val)
{
	if (val & GAM_GDP_PPT_IGNORE)
		seq_puts(s, "\tNot displayed on mixer!");
}

static void gdp_dbg_mst(struct seq_file *s, int val)
{
	if (val & 1) /* bit 0 is assumed to flag a buffer underflow */
		seq_puts(s, "\tBUFFER UNDERFLOW!");
}
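
/* Dump all GDP registers, their decoded fields and the bound DRM CRTC. */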
static int gdp_dbg_show(struct seq_file *s, void *data)
{
	struct drm_info_node *node = s->private;
	struct sti_gdp *gdp = (struct sti_gdp *)node->info_ent->data;
	struct drm_plane *drm_plane = &gdp->plane.drm_plane;
	struct drm_crtc *crtc;

	drm_modeset_lock(&drm_plane->mutex, NULL);
	crtc = drm_plane->state->crtc;
	drm_modeset_unlock(&drm_plane->mutex);

	seq_printf(s, "%s: (vaddr = 0x%p)",
		   sti_plane_to_str(&gdp->plane), gdp->regs);

	DBGFS_DUMP(GAM_GDP_CTL);
	gdp_dbg_ctl(s, readl(gdp->regs + GAM_GDP_CTL_OFFSET));
	DBGFS_DUMP(GAM_GDP_AGC);
	DBGFS_DUMP(GAM_GDP_VPO);
	gdp_dbg_vpo(s, readl(gdp->regs + GAM_GDP_VPO_OFFSET));
	DBGFS_DUMP(GAM_GDP_VPS);
	gdp_dbg_vps(s, readl(gdp->regs + GAM_GDP_VPS_OFFSET));
	DBGFS_DUMP(GAM_GDP_PML);
	DBGFS_DUMP(GAM_GDP_PMP);
	DBGFS_DUMP(GAM_GDP_SIZE);
	gdp_dbg_size(s, readl(gdp->regs + GAM_GDP_SIZE_OFFSET));
	DBGFS_DUMP(GAM_GDP_NVN);
	gdp_dbg_nvn(s, gdp, readl(gdp->regs + GAM_GDP_NVN_OFFSET));
	DBGFS_DUMP(GAM_GDP_KEY1);
	DBGFS_DUMP(GAM_GDP_KEY2);
	DBGFS_DUMP(GAM_GDP_PPT);
	gdp_dbg_ppt(s, readl(gdp->regs + GAM_GDP_PPT_OFFSET));
	DBGFS_DUMP(GAM_GDP_CML);
	DBGFS_DUMP(GAM_GDP_MST);
	gdp_dbg_mst(s, readl(gdp->regs + GAM_GDP_MST_OFFSET));

	seq_puts(s, "\n\n");

	if (!crtc)
		seq_puts(s, " Not connected to any DRM CRTC\n");
	else
		seq_printf(s, " Connected to DRM CRTC #%d (%s)\n",
			   crtc->base.id, sti_mixer_to_str(to_sti_mixer(crtc)));
	return 0;
}

static void gdp_node_dump_node(struct seq_file *s, struct sti_gdp_node *node)
{
	seq_printf(s, "\t@:0x%p", node);
	seq_printf(s, "\n\tCTL 0x%08X", node->gam_gdp_ctl);
	gdp_dbg_ctl(s, node->gam_gdp_ctl);
	seq_printf(s, "\n\tAGC 0x%08X", node->gam_gdp_agc);
	seq_printf(s, "\n\tVPO 0x%08X", node->gam_gdp_vpo);
	gdp_dbg_vpo(s, node->gam_gdp_vpo);
	seq_printf(s, "\n\tVPS 0x%08X", node->gam_gdp_vps);
	gdp_dbg_vps(s, node->gam_gdp_vps);
	seq_printf(s, "\n\tPML 0x%08X", node->gam_gdp_pml);
	seq_printf(s, "\n\tPMP 0x%08X", node->gam_gdp_pmp);
	seq_printf(s, "\n\tSIZE 0x%08X", node->gam_gdp_size);
	gdp_dbg_size(s, node->gam_gdp_size);
	seq_printf(s, "\n\tNVN 0x%08X", node->gam_gdp_nvn);
	seq_printf(s, "\n\tKEY1 0x%08X", node->gam_gdp_key1);
	seq_printf(s, "\n\tKEY2 0x%08X", node->gam_gdp_key2);
	seq_printf(s, "\n\tPPT 0x%08X", node->gam_gdp_ppt);
	gdp_dbg_ppt(s, node->gam_gdp_ppt);
	seq_printf(s, "\n\tCML 0x%08X\n", node->gam_gdp_cml);
}

static int gdp_node_dbg_show(struct seq_file *s, void *arg)
{
	struct drm_info_node *node = s->private;
	struct sti_gdp *gdp = (struct sti_gdp *)node->info_ent->data;
	unsigned int b;

	for (b = 0; b < GDP_NODE_NB_BANK; b++) {
		seq_printf(s, "\n%s[%d].top", sti_plane_to_str(&gdp->plane), b);
		gdp_node_dump_node(s, gdp->node_list[b].top_field);
		seq_printf(s, "\n%s[%d].btm", sti_plane_to_str(&gdp->plane), b);
		gdp_node_dump_node(s, gdp->node_list[b].btm_field);
	}

	return 0;
}

static struct drm_info_list gdp0_debugfs_files[] = {
	{ "gdp0", gdp_dbg_show, 0, NULL },
	{ "gdp0_node", gdp_node_dbg_show, 0, NULL },
};

static struct drm_info_list gdp1_debugfs_files[] = {
	{ "gdp1", gdp_dbg_show, 0, NULL },
	{ "gdp1_node", gdp_node_dbg_show, 0, NULL },
};

static struct drm_info_list gdp2_debugfs_files[] = {
	{ "gdp2", gdp_dbg_show, 0, NULL },
	{ "gdp2_node", gdp_node_dbg_show, 0, NULL },
};

static struct drm_info_list gdp3_debugfs_files[] = {
	{ "gdp3", gdp_dbg_show, 0, NULL },
	{ "gdp3_node", gdp_node_dbg_show, 0, NULL },
};
static int gdp_debugfs_init(struct sti_gdp *gdp, struct drm_minor *minor)
{
	struct drm_info_list *gdp_debugfs_files;
	unsigned int i;
	int nb_files;

	switch (gdp->plane.desc) {
	case STI_GDP_0:
		gdp_debugfs_files = gdp0_debugfs_files;
		nb_files = ARRAY_SIZE(gdp0_debugfs_files);
		break;
	case STI_GDP_1:
		gdp_debugfs_files = gdp1_debugfs_files;
		nb_files = ARRAY_SIZE(gdp1_debugfs_files);
		break;
	case STI_GDP_2:
		gdp_debugfs_files = gdp2_debugfs_files;
		nb_files = ARRAY_SIZE(gdp2_debugfs_files);
		break;
	case STI_GDP_3:
		gdp_debugfs_files = gdp3_debugfs_files;
		nb_files = ARRAY_SIZE(gdp3_debugfs_files);
		break;
	default:
		return -EINVAL;
	}

	for (i = 0; i < nb_files; i++)
		gdp_debugfs_files[i].data = gdp;

	drm_debugfs_create_files(gdp_debugfs_files,
				 nb_files,
				 minor->debugfs_root, minor);
	return 0;
}
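
/* Translate a DRM fourcc into the GDP CTL color format field, or -1 if the
 * format is not handled by the GDP. */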
static int sti_gdp_fourcc2format(int fourcc)
{
	switch (fourcc) {
	case DRM_FORMAT_XRGB8888:
		return GDP_RGB888_32;
	case DRM_FORMAT_XBGR8888:
		return GDP_XBGR8888;
	case DRM_FORMAT_ARGB8888:
		return GDP_ARGB8888;
	case DRM_FORMAT_ABGR8888:
		return GDP_ABGR8888;
	case DRM_FORMAT_ARGB4444:
		return GDP_ARGB4444;
	case DRM_FORMAT_ARGB1555:
		return GDP_ARGB1555;
	case DRM_FORMAT_RGB565:
		return GDP_RGB565;
	case DRM_FORMAT_RGB888:
		return GDP_RGB888;
	}
	return -1;
}

static int sti_gdp_get_alpharange(int format)
{
	switch (format) {
	/* formats carrying an 8-bit alpha channel */
	case GDP_ARGB8565:
	case GDP_ARGB8888:
	case GDP_ABGR8888:
		return GAM_GDP_ALPHARANGE_255;
	}
	return 0;
}

/**
 * sti_gdp_get_free_nodes
 * @gdp: gdp pointer
 *
 * Look for a GDP node list that is not currently read by the HW.
 *
 * RETURNS:
 * Pointer to the free GDP node list
 */
static struct sti_gdp_node_list *sti_gdp_get_free_nodes(struct sti_gdp *gdp)
{
	int hw_nvn;
	unsigned int i;

	hw_nvn = readl(gdp->regs + GAM_GDP_NVN_OFFSET);
	if (!hw_nvn)
		goto end;

	for (i = 0; i < GDP_NODE_NB_BANK; i++)
		if ((hw_nvn != gdp->node_list[i].btm_field_paddr) &&
		    (hw_nvn != gdp->node_list[i].top_field_paddr))
			return &gdp->node_list[i];

	/* in hazardous cases restart with the first node */
	DRM_ERROR("inconsistent NVN for %s: 0x%08X\n",
		  sti_plane_to_str(&gdp->plane), hw_nvn);

end:
	return &gdp->node_list[0];
}

/**
 * sti_gdp_get_current_nodes
 * @gdp: gdp pointer
 *
 * Look for GDP nodes that are currently read by the HW.
 *
 * RETURNS:
 * Pointer to the current GDP node list
 */
static struct sti_gdp_node_list *sti_gdp_get_current_nodes(struct sti_gdp *gdp)
{
	int hw_nvn;
	unsigned int i;

	hw_nvn = readl(gdp->regs + GAM_GDP_NVN_OFFSET);
	if (!hw_nvn)
		goto end;

	for (i = 0; i < GDP_NODE_NB_BANK; i++)
		if ((hw_nvn == gdp->node_list[i].btm_field_paddr) ||
		    (hw_nvn == gdp->node_list[i].top_field_paddr))
			return &gdp->node_list[i];

end:
	DRM_DEBUG_DRIVER("Warning, NVN 0x%08X for %s does not match any node\n",
			 hw_nvn, sti_plane_to_str(&gdp->plane));

	return NULL;
}
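
/*
 * Stop the GDP: mark every node as 'to be ignored' by the mixer, unregister
 * the VTG field notifier and gate the GDP pixel clock.
 */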
static void sti_gdp_disable(struct sti_gdp *gdp)
{
	unsigned int i;

	DRM_DEBUG_DRIVER("%s\n", sti_plane_to_str(&gdp->plane));

	/* Set the nodes as 'to be ignored on mixer' */
	for (i = 0; i < GDP_NODE_NB_BANK; i++) {
		gdp->node_list[i].top_field->gam_gdp_ppt |= GAM_GDP_PPT_IGNORE;
		gdp->node_list[i].btm_field->gam_gdp_ppt |= GAM_GDP_PPT_IGNORE;
	}

	if (sti_vtg_unregister_client(gdp->vtg, &gdp->vtg_field_nb))
		DRM_DEBUG_DRIVER("Warning: cannot unregister VTG notifier\n");

	if (gdp->clk_pix)
		clk_disable_unprepare(gdp->clk_pix);

	gdp->plane.status = STI_PLANE_DISABLED;
}

/**
 * sti_gdp_field_cb
 * @nb: notifier block
 * @event: event message
 * @data: private data
 *
 * Handle VTG top field and bottom field event.
 *
 * RETURNS:
 * 0 on success.
 */
static int sti_gdp_field_cb(struct notifier_block *nb,
			    unsigned long event, void *data)
{
	struct sti_gdp *gdp = container_of(nb, struct sti_gdp, vtg_field_nb);

	if (gdp->plane.status == STI_PLANE_FLUSHING) {
		/* the disable must be synchronized with a vsync event */
		DRM_DEBUG_DRIVER("Vsync event received => disable %s\n",
				 sti_plane_to_str(&gdp->plane));

		sti_gdp_disable(gdp);
	}

	switch (event) {
	case VTG_TOP_FIELD_EVENT:
		gdp->is_curr_top = true;
		break;
	case VTG_BOTTOM_FIELD_EVENT:
		gdp->is_curr_top = false;
		break;
	default:
		DRM_ERROR("unsupported event: %lu\n", event);
		break;
	}

	return 0;
}
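
/*
 * One-time GDP setup: carve the top/bottom field nodes of each bank out of a
 * single DMA-coherent allocation and, on STiH407, get the per-GDP pixel
 * clock together with its possible main/aux parent clocks.
 */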
static void sti_gdp_init(struct sti_gdp *gdp)
{
	struct device_node *np = gdp->dev->of_node;
	dma_addr_t dma_addr;
	void *base;
	unsigned int i, size;

	/* Allocate all the nodes within a single memory page */
	size = sizeof(struct sti_gdp_node) *
	       GDP_NODE_PER_FIELD * GDP_NODE_NB_BANK;
	base = dma_alloc_wc(gdp->dev, size, &dma_addr, GFP_KERNEL);
	if (!base) {
		DRM_ERROR("Failed to allocate memory for GDP node\n");
		return;
	}

	memset(base, 0, size);

	for (i = 0; i < GDP_NODE_NB_BANK; i++) {
		if (dma_addr & 0xF) {
			DRM_ERROR("Mem alignment failed\n");
			return;
		}
		gdp->node_list[i].top_field = base;
		gdp->node_list[i].top_field_paddr = dma_addr;

		DRM_DEBUG_DRIVER("node[%d].top_field=%p\n", i, base);
		base += sizeof(struct sti_gdp_node);
		dma_addr += sizeof(struct sti_gdp_node);

		if (dma_addr & 0xF) {
			DRM_ERROR("Mem alignment failed\n");
			return;
		}
		gdp->node_list[i].btm_field = base;
		gdp->node_list[i].btm_field_paddr = dma_addr;
		DRM_DEBUG_DRIVER("node[%d].btm_field=%p\n", i, base);
		base += sizeof(struct sti_gdp_node);
		dma_addr += sizeof(struct sti_gdp_node);
	}

	if (of_device_is_compatible(np, "st,stih407-compositor")) {
		/* GDPs of the STiH407 chip have their own pixel clock */
		char *clk_name;

		switch (gdp->plane.desc) {
		case STI_GDP_0:
			clk_name = "pix_gdp1";
			break;
		case STI_GDP_1:
			clk_name = "pix_gdp2";
			break;
		case STI_GDP_2:
			clk_name = "pix_gdp3";
			break;
		case STI_GDP_3:
			clk_name = "pix_gdp4";
			break;
		default:
			DRM_ERROR("GDP id not recognized\n");
			return;
		}

		gdp->clk_pix = devm_clk_get(gdp->dev, clk_name);
		if (IS_ERR(gdp->clk_pix))
			DRM_ERROR("Cannot get %s clock\n", clk_name);

		gdp->clk_main_parent = devm_clk_get(gdp->dev, "main_parent");
		if (IS_ERR(gdp->clk_main_parent))
			DRM_ERROR("Cannot get main_parent clock\n");

		gdp->clk_aux_parent = devm_clk_get(gdp->dev, "aux_parent");
		if (IS_ERR(gdp->clk_aux_parent))
			DRM_ERROR("Cannot get aux_parent clock\n");
	}
}

/**
 * sti_gdp_get_dst
 * @dev: device
 * @dst: requested destination size
 * @src: source size
 *
 * Return the cropped / clamped destination size
 *
 * RETURNS:
 * cropped / clamped destination size
 */
static int sti_gdp_get_dst(struct device *dev, int dst, int src)
{
	if (dst == src)
		return dst;

	if (dst < src) {
		dev_dbg(dev, "WARNING: GDP scale not supported, will crop\n");
		return dst;
	}

	dev_dbg(dev, "WARNING: GDP scale not supported, will clamp\n");
	return src;
}
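
/*
 * atomic_check: validate the requested geometry against the CRTC mode and
 * the GDP limits, check the pixel format and the backing DMA GEM object,
 * and pre-configure the GDP pixel clock for the target mixer.
 */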
static int sti_gdp_atomic_check(struct drm_plane *drm_plane,
				struct drm_atomic_state *state)
{
	struct drm_plane_state *new_plane_state = drm_atomic_get_new_plane_state(state,
										  drm_plane);
	struct sti_plane *plane = to_sti_plane(drm_plane);
	struct sti_gdp *gdp = to_sti_gdp(plane);
	struct drm_crtc *crtc = new_plane_state->crtc;
	struct drm_framebuffer *fb = new_plane_state->fb;
	struct drm_crtc_state *crtc_state;
	struct sti_mixer *mixer;
	struct drm_display_mode *mode;
	int dst_x, dst_y, dst_w, dst_h;
	int src_x, src_y, src_w, src_h;
	int format;

	/* no need for further checks if the plane is being disabled */
	if (!crtc || !fb)
		return 0;

	mixer = to_sti_mixer(crtc);
	crtc_state = drm_atomic_get_crtc_state(state, crtc);
	mode = &crtc_state->mode;
	dst_x = new_plane_state->crtc_x;
	dst_y = new_plane_state->crtc_y;
	dst_w = clamp_val(new_plane_state->crtc_w, 0, mode->hdisplay - dst_x);
	dst_h = clamp_val(new_plane_state->crtc_h, 0, mode->vdisplay - dst_y);
	/* src_x are in 16.16 format */
	src_x = new_plane_state->src_x >> 16;
	src_y = new_plane_state->src_y >> 16;
	src_w = clamp_val(new_plane_state->src_w >> 16, 0,
			  GAM_GDP_SIZE_MAX_WIDTH);
	src_h = clamp_val(new_plane_state->src_h >> 16, 0,
			  GAM_GDP_SIZE_MAX_HEIGHT);

	format = sti_gdp_fourcc2format(fb->format->format);
	if (format == -1) {
		DRM_ERROR("Format not supported by GDP %.4s\n",
			  (char *)&fb->format->format);
		return -EINVAL;
	}

	if (!drm_fb_dma_get_gem_obj(fb, 0)) {
		DRM_ERROR("Can't get DMA GEM object for fb\n");
		return -EINVAL;
	}

	/* Set gdp clock */
	if (mode->clock && gdp->clk_pix) {
		struct clk *clkp;
		int rate = mode->clock * 1000;
		int res;

		/*
		 * According to the mixer used, the gdp pixel clock
		 * should have a different parent clock.
		 */
		if (mixer->id == STI_MIXER_MAIN)
			clkp = gdp->clk_main_parent;
		else
			clkp = gdp->clk_aux_parent;

		if (clkp)
			clk_set_parent(gdp->clk_pix, clkp);

		res = clk_set_rate(gdp->clk_pix, rate);
		if (res < 0) {
			DRM_ERROR("Cannot set rate (%dHz) for gdp\n",
				  rate);
			return -EINVAL;
		}
	}

	DRM_DEBUG_KMS("CRTC:%d (%s) drm plane:%d (%s)\n",
		      crtc->base.id, sti_mixer_to_str(mixer),
		      drm_plane->base.id, sti_plane_to_str(plane));
	DRM_DEBUG_KMS("%s dst=(%dx%d)@(%d,%d) - src=(%dx%d)@(%d,%d)\n",
		      sti_plane_to_str(plane),
		      dst_w, dst_h, dst_x, dst_y,
		      src_w, src_h, src_x, src_y);

	return 0;
}
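
/*
 * atomic_update: fill the top and bottom field nodes of the free bank with
 * the new framebuffer and geometry, then chain them so the hardware picks
 * the updated list up at the next VSYNC.
 */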
static void sti_gdp_atomic_update(struct drm_plane *drm_plane,
				  struct drm_atomic_state *state)
{
	struct drm_plane_state *oldstate = drm_atomic_get_old_plane_state(state,
									   drm_plane);
	struct drm_plane_state *newstate = drm_atomic_get_new_plane_state(state,
									   drm_plane);
	struct sti_plane *plane = to_sti_plane(drm_plane);
	struct sti_gdp *gdp = to_sti_gdp(plane);
	struct drm_crtc *crtc = newstate->crtc;
	struct drm_framebuffer *fb = newstate->fb;
	struct drm_display_mode *mode;
	int dst_x, dst_y, dst_w, dst_h;
	int src_x, src_y, src_w, src_h;
	struct drm_gem_dma_object *dma_obj;
	struct sti_gdp_node_list *list;
	struct sti_gdp_node_list *curr_list;
	struct sti_gdp_node *top_field, *btm_field;
	u32 dma_updated_top;
	u32 dma_updated_btm;
	int format;
	unsigned int bpp;
	u32 ydo, xdo, yds, xds;

	if (!crtc || !fb)
		return;

	if ((oldstate->fb == newstate->fb) &&
	    (oldstate->crtc_x == newstate->crtc_x) &&
	    (oldstate->crtc_y == newstate->crtc_y) &&
	    (oldstate->crtc_w == newstate->crtc_w) &&
	    (oldstate->crtc_h == newstate->crtc_h) &&
	    (oldstate->src_x == newstate->src_x) &&
	    (oldstate->src_y == newstate->src_y) &&
	    (oldstate->src_w == newstate->src_w) &&
	    (oldstate->src_h == newstate->src_h)) {
		/* No change since last update, do not post cmd */
		DRM_DEBUG_DRIVER("No change, not posting cmd\n");
		plane->status = STI_PLANE_UPDATED;
		return;
	}

	if (!gdp->vtg) {
		struct sti_compositor *compo = dev_get_drvdata(gdp->dev);
		struct sti_mixer *mixer = to_sti_mixer(crtc);

		/* Register gdp callback */
		gdp->vtg = compo->vtg[mixer->id];
		sti_vtg_register_client(gdp->vtg, &gdp->vtg_field_nb, crtc);
		clk_prepare_enable(gdp->clk_pix);
	}

	mode = &crtc->mode;
	dst_x = newstate->crtc_x;
	dst_y = newstate->crtc_y;
	dst_w = clamp_val(newstate->crtc_w, 0, mode->hdisplay - dst_x);
	dst_h = clamp_val(newstate->crtc_h, 0, mode->vdisplay - dst_y);
	/* src_x are in 16.16 format */
	src_x = newstate->src_x >> 16;
	src_y = newstate->src_y >> 16;
	src_w = clamp_val(newstate->src_w >> 16, 0, GAM_GDP_SIZE_MAX_WIDTH);
	src_h = clamp_val(newstate->src_h >> 16, 0, GAM_GDP_SIZE_MAX_HEIGHT);

	list = sti_gdp_get_free_nodes(gdp);
	top_field = list->top_field;
	btm_field = list->btm_field;

	dev_dbg(gdp->dev, "%s %s top_node:0x%p btm_node:0x%p\n", __func__,
		sti_plane_to_str(plane), top_field, btm_field);

	/* build the top field */
	top_field->gam_gdp_agc = GAM_GDP_AGC_FULL_RANGE;
	top_field->gam_gdp_ctl = WAIT_NEXT_VSYNC;
	format = sti_gdp_fourcc2format(fb->format->format);
	top_field->gam_gdp_ctl |= format;
	top_field->gam_gdp_ctl |= sti_gdp_get_alpharange(format);
	top_field->gam_gdp_ppt &= ~GAM_GDP_PPT_IGNORE;

	dma_obj = drm_fb_dma_get_gem_obj(fb, 0);

	DRM_DEBUG_DRIVER("drm FB:%d format:%.4s phys@:0x%lx\n", fb->base.id,
			 (char *)&fb->format->format,
			 (unsigned long) dma_obj->dma_addr);

	/* pixel memory location */
	bpp = fb->format->cpp[0];
	top_field->gam_gdp_pml = (u32) dma_obj->dma_addr + fb->offsets[0];
	top_field->gam_gdp_pml += src_x * bpp;
	top_field->gam_gdp_pml += src_y * fb->pitches[0];

	/* output parameters (clamped / cropped) */
	dst_w = sti_gdp_get_dst(gdp->dev, dst_w, src_w);
	dst_h = sti_gdp_get_dst(gdp->dev, dst_h, src_h);
	ydo = sti_vtg_get_line_number(*mode, dst_y);
	yds = sti_vtg_get_line_number(*mode, dst_y + dst_h - 1);
	xdo = sti_vtg_get_pixel_number(*mode, dst_x);
	xds = sti_vtg_get_pixel_number(*mode, dst_x + dst_w - 1);
	top_field->gam_gdp_vpo = (ydo << 16) | xdo;
	top_field->gam_gdp_vps = (yds << 16) | xds;

	/* input parameters */
	top_field->gam_gdp_pmp = fb->pitches[0];
	top_field->gam_gdp_size = src_h << 16 | src_w;

	/* Same content and chained together */
	memcpy(btm_field, top_field, sizeof(*btm_field));
	top_field->gam_gdp_nvn = list->btm_field_paddr;
	btm_field->gam_gdp_nvn = list->top_field_paddr;

	/* Interlaced mode */
	if (mode->flags & DRM_MODE_FLAG_INTERLACE)
		btm_field->gam_gdp_pml = top_field->gam_gdp_pml +
					 fb->pitches[0];

	/* Update the NVN field of the 'right' field of the current GDP node
	 * (being used by the HW) with the address of the updated ('free') top
	 * field GDP node.
	 * - In interlaced mode the 'right' field is the bottom field as we
	 *   update frames starting from their top field
	 * - In progressive mode, we update both bottom and top fields which
	 *   are equal nodes.
	 * At the next VSYNC, the updated node list will be used by the HW.
	 */
	curr_list = sti_gdp_get_current_nodes(gdp);
	dma_updated_top = list->top_field_paddr;
	dma_updated_btm = list->btm_field_paddr;

	dev_dbg(gdp->dev, "Current NVN:0x%X\n",
		readl(gdp->regs + GAM_GDP_NVN_OFFSET));
	dev_dbg(gdp->dev, "Posted buff: %lx current buff: %x\n",
		(unsigned long) dma_obj->dma_addr,
		readl(gdp->regs + GAM_GDP_PML_OFFSET));

	if (!curr_list) {
		/* First update or invalid node should directly write in the
		 * hw register */
		DRM_DEBUG_DRIVER("%s first update (or invalid node)\n",
				 sti_plane_to_str(plane));

		writel(gdp->is_curr_top ?
		       dma_updated_btm : dma_updated_top,
		       gdp->regs + GAM_GDP_NVN_OFFSET);
		goto end;
	}

	if (mode->flags & DRM_MODE_FLAG_INTERLACE) {
		if (gdp->is_curr_top) {
			/* Do not update in the middle of the frame, but
			 * postpone the update after the bottom field has
			 * been displayed */
			curr_list->btm_field->gam_gdp_nvn = dma_updated_top;
		} else {
			/* Direct update to avoid one frame delay */
			writel(dma_updated_top,
			       gdp->regs + GAM_GDP_NVN_OFFSET);
		}
	} else {
		/* Direct update for progressive to avoid one frame delay */
		writel(dma_updated_top, gdp->regs + GAM_GDP_NVN_OFFSET);
	}

end:
	sti_plane_update_fps(plane, true, false);

	plane->status = STI_PLANE_UPDATED;
}
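
/*
 * atomic_disable: only flag the plane as DISABLING here; the hardware is
 * actually stopped later, synchronized with VSYNC, through the VTG notifier
 * path (sti_gdp_field_cb() -> sti_gdp_disable()).
 */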
static void sti_gdp_atomic_disable(struct drm_plane *drm_plane,
				   struct drm_atomic_state *state)
{
	struct drm_plane_state *oldstate = drm_atomic_get_old_plane_state(state,
									   drm_plane);
	struct sti_plane *plane = to_sti_plane(drm_plane);

	if (!oldstate->crtc) {
		DRM_DEBUG_DRIVER("drm plane:%d not enabled\n",
				 drm_plane->base.id);
		return;
	}

	DRM_DEBUG_DRIVER("CRTC:%d (%s) drm plane:%d (%s)\n",
			 oldstate->crtc->base.id,
			 sti_mixer_to_str(to_sti_mixer(oldstate->crtc)),
			 drm_plane->base.id, sti_plane_to_str(plane));

	plane->status = STI_PLANE_DISABLING;
}

static const struct drm_plane_helper_funcs sti_gdp_helpers_funcs = {
	.atomic_check = sti_gdp_atomic_check,
	.atomic_update = sti_gdp_atomic_update,
	.atomic_disable = sti_gdp_atomic_disable,
};

static int sti_gdp_late_register(struct drm_plane *drm_plane)
{
	struct sti_plane *plane = to_sti_plane(drm_plane);
	struct sti_gdp *gdp = to_sti_gdp(plane);

	return gdp_debugfs_init(gdp, drm_plane->dev->primary);
}

static const struct drm_plane_funcs sti_gdp_plane_helpers_funcs = {
	.update_plane = drm_atomic_helper_update_plane,
	.disable_plane = drm_atomic_helper_disable_plane,
	.destroy = drm_plane_cleanup,
	.reset = drm_atomic_helper_plane_reset,
	.atomic_duplicate_state = drm_atomic_helper_plane_duplicate_state,
	.atomic_destroy_state = drm_atomic_helper_plane_destroy_state,
	.late_register = sti_gdp_late_register,
};
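
/*
 * Constructor called by the compositor: allocate the sti_gdp, build its node
 * lists and clocks through sti_gdp_init(), then register the DRM universal
 * plane with its supported formats.
 */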
struct drm_plane *sti_gdp_create(struct drm_device *drm_dev,
				 struct device *dev, int desc,
				 void __iomem *baseaddr,
				 unsigned int possible_crtcs,
				 enum drm_plane_type type)
{
	struct sti_gdp *gdp;
	int res;

	gdp = devm_kzalloc(dev, sizeof(*gdp), GFP_KERNEL);
	if (!gdp) {
		DRM_ERROR("Failed to allocate memory for GDP\n");
		return NULL;
	}

	gdp->dev = dev;
	gdp->regs = baseaddr;
	gdp->plane.desc = desc;
	gdp->plane.status = STI_PLANE_DISABLED;

	gdp->vtg_field_nb.notifier_call = sti_gdp_field_cb;

	sti_gdp_init(gdp);

	res = drm_universal_plane_init(drm_dev, &gdp->plane.drm_plane,
				       possible_crtcs,
				       &sti_gdp_plane_helpers_funcs,
				       gdp_supported_formats,
				       ARRAY_SIZE(gdp_supported_formats),
				       NULL, type, NULL);
	if (res) {
		DRM_ERROR("Failed to initialize universal plane\n");
		goto err;
	}

	drm_plane_helper_add(&gdp->plane.drm_plane, &sti_gdp_helpers_funcs);

	sti_plane_init_property(&gdp->plane, type);

	return &gdp->plane.drm_plane;

err:
	devm_kfree(dev, gdp);
	return NULL;
}